Compare commits

...

1091 Commits

Author SHA1 Message Date
Ruud
796aff4514 Remove login_opener 2014-01-11 13:58:15 +01:00
Ruud
2a2fe448e7 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/providers/torrent/bitsoup/main.py
	couchpotato/core/providers/torrent/iptorrents/main.py
	couchpotato/core/providers/torrent/sceneaccess/main.py
	couchpotato/core/providers/torrent/torrentleech/main.py
	couchpotato/core/providers/torrent/torrentshack/main.py
2014-01-11 13:55:12 +01:00
Ruud
516cbd73bd Catch timeout errors when xbmc isn't available 2014-01-11 13:41:41 +01:00
Ruud Burger
680ae53cf4 Merge pull request #2682 from techmunk/deluge_improvements
Only request needed torrent ids from deluge.
2014-01-11 04:06:32 -08:00
Ruud Burger
23967d11dd Merge pull request #2681 from fuzeman/tv_searcher
[TV] Cleanup retrieval of media query and title
2014-01-11 04:02:04 -08:00
Techmunk
99b99a992d Only request needed torrent ids from deluge. 2014-01-11 15:46:24 +10:00
Dean Gardiner
7a3251f649 Added new version of 'library.title' to return the title of the media excluding year and identifiers. 2014-01-11 16:36:29 +13:00
Dean Gardiner
9ba8910281 Renamed 'library.title' to 'library.query' 2014-01-11 16:23:33 +13:00
Dean Gardiner
e83a3cf263 Renamed movie library.title 'include_identifier' to 'include_year', show library.title defaults to 'condense' enabled now. 2014-01-11 15:55:20 +13:00
Dean Gardiner
b3c2945d9b 'related_libraries' and 'root_library' references are now added to child libraries. 2014-01-11 15:11:32 +13:00
Dean Gardiner
fc3cf08675 Moved 'searcher.get_search_title' to 'library.title', include_identifier is enabled by default now and title condensing can be enabled by the 'condense' parameter now. 2014-01-11 15:11:31 +13:00
Ruud
fb9d52c2b9 Don't search for movies with year too far in the future 2014-01-11 00:26:59 +01:00
Ruud
5cc471cc87 Remove path on fail 2014-01-11 00:05:24 +01:00
Ruud
07c7171fbb Image download wasn't working anymore 2014-01-11 00:05:02 +01:00
Ruud
c15dd2dec9 Disable verify for now 2014-01-10 23:17:04 +01:00
Ruud
a408cc0246 Update renamer to not trigger twice
Keep track of status support on releases
2014-01-10 22:54:23 +01:00
Ruud
c2568432e7 Use requests lib for openurl 2014-01-10 14:04:16 +01:00
Ruud
91f3cda995 Update requests lib 2014-01-10 13:16:12 +01:00
Ruud
28aa908513 Add category_id to api docs 2014-01-08 00:08:23 +01:00
Ruud
5e24b11c21 Don't continue with bitsoup if table isn't found. fix #2633 2014-01-06 22:36:51 +01:00
Ruud Burger
8162cd31b7 Merge pull request #2652 from nikagl/patch-1
Library object has media instead of movies
2014-01-06 13:27:12 -08:00
Ruud
4cdf71513f Clean tags from beginning of string. fix #2654 2014-01-06 22:24:34 +01:00
Ruud
7e6d9c02f6 Add quality test name. closes #2664 2014-01-06 21:53:29 +01:00
Ruud
afc4f73e36 Don't try wait when not between time is given 2014-01-05 23:46:42 +01:00
Ruud
5ef0c52277 Create reusable url opener 2014-01-05 22:17:16 +01:00
Ruud
c23b014cff Set default timeout 2014-01-05 22:02:39 +01:00
Ruud
f13cddfb26 Don't return empty actor roles 2014-01-05 18:55:51 +01:00
Ruud
623f6f3ed0 Limit title and actor search for tmdb 2014-01-05 18:07:06 +01:00
Ruud
a158716c8b Move actor images to dict 2014-01-05 17:57:15 +01:00
Ruud
9df7f7b22c Speed up userscript info getter by removing actor info 2014-01-05 13:10:27 +01:00
nikagl
1ea6fdc9a7 Library object has media instead of movies
Make the renamer work again by scanning the media instead of non-existent movies attribute in the library object (fixing error: AttributeError: 'Library' object has no attribute 'movies')
2014-01-01 20:54:34 +01:00
Ruud
8e5c24282e Disable themoviedb in search 2013-12-31 13:12:34 +01:00
Ruud Burger
1b0c9f40cc Merge pull request #2647 from nikagl/patch-1
Update main.py
2013-12-31 02:21:41 -08:00
nikagl
c0111a467b Update main.py
Release table has media_id column, not movie_id
2013-12-31 11:02:32 +01:00
Ruud
266429311b Update Tornado 2013-12-30 23:27:40 +01:00
Ruud Burger
64175151f8 Merge pull request #2634 from dkboy/tv_bitsoup
Updated Bitsoup provider to include TV support
2013-12-29 15:35:50 -08:00
Ruud
d74342adee Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-12-30 00:34:33 +01:00
Ruud
4408d99524 Typo 2013-12-30 00:33:41 +01:00
Ruud Burger
0168b9cbea Merge pull request #2642 from mano3m/develop_renamer
Fix 100% CPU bugs
2013-12-29 15:06:05 -08:00
mano3m
e69421226b Remove leading '//' from *NIX paths
Fixes #2506,  #2021
2013-12-29 23:42:55 +01:00
mano3m
f08d34b816 Add a trailing separator for windows drive path
Fixes  #2581, #2526
2013-12-29 23:25:53 +01:00
dkboy
586957e840 Updated Bitsoup provider to include TV support
Updated Bitsoup Provider to include TV support as well as Movies.
2013-12-28 21:30:49 +13:00
Ruud Burger
4a36c3b6a8 Merge pull request #2631 from mano3m/develop_try_next
Download fixed
2013-12-27 11:18:07 -08:00
mano3m
be0b708d32 Add user-agent to newznab request
Fixes #2611

Note that urllib2.urlopen should just follow redirects so I don't
understand why we need 3b519aeac9
2013-12-27 20:11:27 +01:00
mano3m
1cea50bcfb Added logging 2013-12-27 19:34:53 +01:00
mano3m
55483cf736 Consider try_next as failed 2013-12-27 19:09:39 +01:00
Joel Kåberg
16f8a1159f Merge pull request #2624 from mano3m/develop_fix
Complete nzbget https
2013-12-23 14:22:50 -08:00
mano3m
d4d03a846e Complete nzbget https
Fixes what went broken :(
2013-12-23 23:08:26 +01:00
Ruud Burger
7bccc46583 Merge pull request #2623 from mano3m/develop_https
Add https functionality for nzbget
2013-12-23 12:35:21 -08:00
mano3m
dc61e9916f Add https functionality for nzbget
Fixes #2622
2013-12-23 15:39:45 +01:00
Joel Kåberg
cf2b5f72ae Revert "Added delete files button, #2596 (manual merge)"
This reverts commit 0b01bbc52e.
2013-12-21 13:29:02 +01:00
Joel Kåberg
f2fc775963 Revert "Merge branches 'develop' and 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv"
This reverts commit b8bce948c8, reversing
changes made to 0a996857dd.
2013-12-20 02:29:15 +01:00
Joel Kåberg
b8bce948c8 Merge branches 'develop' and 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv
Conflicts:
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/providers/torrent/yify/main.py
2013-12-20 02:15:08 +01:00
Joel Kåberg
fe397caafc better score formula for seeding/leeching 2013-12-20 02:08:20 +01:00
Joel Kåberg
787405ae62 Updated YIFY provider to use proxies and magnet links, #2560 (manual merge) 2013-12-19 22:14:29 +01:00
Joel Kåberg
0b01bbc52e Added delete files button, #2596 (manual merge) 2013-12-19 22:12:22 +01:00
Joel Kåberg
dafa70b7e3 fix seed/lech score formula, fix #2605 2013-12-19 21:41:17 +01:00
Joel Kåberg
0a996857dd Merge branches 'develop' and 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv
Conflicts:
	couchpotato/core/plugins/release/main.py
	couchpotato/core/plugins/renamer/main.py
2013-12-19 20:57:12 +01:00
Joel Kåberg
32b9bc3345 Merge pull request #2612 from RuudBurger/manual_scan
Manual scan
2013-12-19 10:30:41 -08:00
Joel Kåberg
a7b8f992d3 Merge pull request #2614 from mano3m/develop_stalled
Don't consider stalled as failed when seeding
2013-12-19 10:30:23 -08:00
Joel Kåberg
0c66b8067e Merge pull request #2607 from mano3m/develop_no_ren
Mark release as downloaded if renamer is disabled.
2013-12-19 10:30:12 -08:00
mano3m
7b3645ea7c Don't consider stalled as failed when seeding
Fixes the issue where Transmission is seeding but still considering the
torrent stalled (new functionality of Transmission). CPS marks it as
failed and a perfectly good torrent gets deleted. Several people on the
forum have this issue,
2013-12-17 21:41:26 +01:00
mano3m
69569758d9 Make sure we return true on success 2013-12-16 22:51:04 +01:00
mano3m
55777531d5 Clean-up and dont mark status twice 2013-12-16 22:43:05 +01:00
Joel Kåberg
99ce8dacbf added api calls for manual scan (kudos to @mano3m) 2013-12-16 17:07:34 +01:00
Joel Kåberg
d49c663c64 Merge branches 'develop' and 'manual_scan' of https://github.com/RuudBurger/CouchPotatoServer into manual_scan
Conflicts:
	couchpotato/core/plugins/renamer/main.py
2013-12-16 07:29:31 +01:00
mano3m
e9a457e263 mark release as downloaded if renamer is disabled.
if the renamer is not enabled and the quality of the downloaded release
is not the finish quality, the release did not get a status update.
2013-12-15 21:03:40 +01:00
Joel Kåberg
26509f614c use identifier instead 2013-12-15 11:12:47 +01:00
Joel Kåberg
3e28d5a936 use year as identifier for movies 2013-12-15 11:02:57 +01:00
Joel Kåberg
95ff427873 ignore series from omdbapi (for now?) 2013-12-15 10:05:51 +01:00
Joel Kåberg
8ed10037df Merge pull request #2602 from saxicek/tv_tsh
update TorrentShack for tv branch
2013-12-15 00:37:59 -08:00
sax
7a090dd4a2 update TorrentShack for tv branch 2013-12-15 00:08:18 +01:00
Joel Kåberg
49f34cb48d movie > media 2013-12-14 23:57:20 +01:00
Joel Kåberg
2a76de50dd Merge branches 'develop' and 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv
Conflicts:
	couchpotato/core/plugins/renamer/main.py
2013-12-14 23:43:29 +01:00
Joel Kåberg
3b0e07100f Merge pull request #2545 from mano3m/develop_downloaders
Downloader and renamer improvements
2013-12-14 14:20:33 -08:00
Joel Kåberg
8adf7fc600 Merge remote-tracking branch 'remotes/origin/develop' into tv 2013-12-14 21:24:07 +01:00
Joel Kåberg
f4c053f56f fix season search for SCC provider 2013-12-14 21:22:46 +01:00
mano3m
74561500b5 Convert windows path to *nix path in sp
Fixes #2594

Note that os.path.normpath converts '/' to '\\' on windows machines, but
unfortunately not the other way around...
2013-12-14 21:12:10 +01:00
Joel Kåberg
5cb5a1677d Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv 2013-12-14 20:35:03 +01:00
Joel Kåberg
9fb9f0ef5b Merge pull request #2599 from fuzeman/tv_searcher
[TV] Moved matcher to core/media, updated NZBIndex provider
2013-12-13 08:11:10 -08:00
Dean Gardiner
242d69a981 The nzbindex provider now uses the caper usenet parser to get release names from usenet subjects. 2013-12-13 16:31:26 +13:00
Dean Gardiner
eb151a4c5d Updated Caper to v0.3.1 2013-12-13 15:24:40 +13:00
Ruud
3b519aeac9 nzbmegasearch returns redirected url. fix #2597 2013-12-12 19:58:55 +01:00
mano3m
ea5d274f4d Add another check 2013-12-11 22:40:01 +01:00
mano3m
f57f2444fe Improved checking
Fixes #2539 ?
2013-12-11 22:11:33 +01:00
Dean Gardiner
2520b19798 Fixed bug in searcher where episode searches would be triggered if a season release has already been snatched at a better quality 2013-12-09 13:33:25 +13:00
Dean Gardiner
319c9e979a Split ShowMatcher into Episode and Season matchers, updated correctIdentifier method so there should be less false matches now. 2013-12-09 13:32:43 +13:00
Dean Gardiner
93aa5b1920 Updated Caper to v0.2.9 2013-12-09 11:23:57 +13:00
mano3m
fd768df9e5 Tabs to spaces 2013-12-08 17:35:05 +01:00
Joel Kåberg
4db68e4887 update contributing 2013-12-08 13:07:17 +01:00
Dean Gardiner
f648af66a6 Moved matcher plugin to core/media, moved some matcher related functions from ShowSearcher to ShowMatcher 2013-12-08 22:40:39 +13:00
Joel Kåberg
7c4185e1fa Merge branch 'develop' into tv 2013-12-07 23:40:42 +01:00
mano3m
6d4297a5fb Extend os.path.sep to all folder checks
Expands 50c5044fe8
2013-12-07 22:39:47 +01:00
mano3m
ab413f2f3e Dont remove historic data when doing a full scan.
Fixes #2572

Note that the dashboard already takes care of this and does it the right
way (keeping seeding and ignored releases).
2013-12-07 22:33:34 +01:00
mano3m
574255c4b6 Don't tag .ignore files 2013-12-07 22:33:34 +01:00
mano3m
008ba39856 Add backwards compatibility for the renamer API 2013-12-07 22:33:33 +01:00
mano3m
cff1b3abdb Provide IDs to check to all downloaders 2013-12-07 22:33:32 +01:00
mano3m
231c5b8ca1 Renamer rename to media 2013-12-07 22:33:31 +01:00
mano3m
640664494e Increase check_snatched readability
- Reduce nested if statements
- Add more comments
2013-12-07 22:31:16 +01:00
mano3m
951b7b8425 Update Synology and Pneumatic
As per black hole improvement
2013-12-07 22:31:16 +01:00
mano3m
c9980539f0 Improve black hole support
Also scan the 'from' folder if Black hole is used together with another
downloader.
2013-12-07 22:31:15 +01:00
Ruud Burger
7eb802b42a Merge pull request #2501 from mano3m/develop_xbmc
XBMC metadata update
2013-12-07 13:17:03 -08:00
Ruud Burger
2f4f3ce0fe Merge pull request #2578 from mano3m/develop_fnmatch
Fix fnmatch
2013-12-07 13:14:13 -08:00
mano3m
824ac86d18 Fix fnmatch
fnmatch does not accept regular expressions as presumed in
0c4851e436 See
http://docs.python.org/2/library/fnmatch.html

This patch actually completely broke tagging. All we need to do is make
sure any [ or ] used is converted into [[] or []].

Fixes #2557 and  #2362
2013-12-07 22:11:16 +01:00
mano3m
4553726423 [Notifications][XBMC] Add always do a full scan option to XBMC
Fixes #2498 (at least partially)
2013-12-07 15:09:30 +01:00
mano3m
f0bde7316d [Metadata][XBMC] Update new actors to actor_roles 2013-12-07 15:09:23 +01:00
Joel Kåberg
0fb06a3fd3 Merge pull request #2577 from fuzeman/tv_searcher
[TV] Season pack matching, better show search triggering
2013-12-07 01:10:27 -08:00
Dean Gardiner
1e39d643a8 Searching for a show now triggers searching for all the seasons, Season searches that fail to find anything now trigger individual episode searches. 2013-12-07 21:23:10 +13:00
Dean Gardiner
69d58663ef Profile and categories on seasons and episodes are now set to the same value as the Show 2013-12-07 21:21:15 +13:00
Dean Gardiner
e59b53fab2 Searching is now deferred until the entire show has been loaded into the database 2013-12-07 18:48:26 +13:00
Dean Gardiner
a66f6f0166 Fixed reference to 'movie.restatus' (should be 'media.restatus'), minor formatting changes 2013-12-07 18:11:06 +13:00
Dean Gardiner
1344f03b16 Fixed matcher bug when matching resolution on ['480p', None] 2013-12-07 18:11:05 +13:00
Dean Gardiner
a23c409939 Updated Caper to v0.2.6 2013-12-07 18:11:05 +13:00
Dean Gardiner
a6b1cc833f Added more TV qualities for testing (arh.. bit of a mess) 2013-12-07 18:11:04 +13:00
Joel Kåberg
d2c7e3ef56 update Nzbindex for tv branch 2013-12-06 12:55:02 +01:00
Joel Kåberg
6c87008d7b update Nzbclub for tv branch 2013-12-06 12:46:36 +01:00
Joel Kåberg
6b3af21e45 update Binsearch for tv branch 2013-12-06 12:38:54 +01:00
Joel Kåberg
5a5cc0005c Merge pull request #2574 from fuzeman/tv_searcher
[TV] Matching, serialization and UI notification fixes
2013-12-06 03:18:04 -08:00
Joel Kåberg
d65117c0e3 update TorrentPotato for tv branch 2013-12-06 12:16:58 +01:00
Dean Gardiner
d8884bb655 Changed '.searcher.single' call to use search_dict for media serialization 2013-12-06 23:58:47 +13:00
Dean Gardiner
afe9aed2eb Fixed bug where media default_dict contained related and root library attributes. 2013-12-06 23:58:45 +13:00
Dean Gardiner
01e64e989e Updated Caper to v0.2.5 - fixes 'H 264' tag bug 2013-12-06 23:58:44 +13:00
Dean Gardiner
9496df9e9d Fixed a bug where matching show names with a year would fail 2013-12-06 23:58:43 +13:00
Joel Kåberg
8b4c67b977 update Yify for tv branch 2013-12-06 11:44:59 +01:00
Joel Kåberg
f77a8f5573 update PassThePopcorn for tv branch 2013-12-06 11:40:58 +01:00
Joel Kåberg
de8aefebb7 update Bit-HDTV for tv branch 2013-12-06 11:03:24 +01:00
Joel Kåberg
8f0d22a6f2 update TPB for tv branch 2013-12-06 10:46:08 +01:00
Joel Kåberg
721190028b not needed 2013-12-06 09:34:02 +01:00
Joel Kåberg
50e565142e typo 2013-12-06 09:27:02 +01:00
Joel Kåberg
bead3e2b07 update PublicHD for tv branch 2013-12-06 09:26:20 +01:00
Joel Kåberg
71aa0cbb9a use buildUrl 2013-12-06 09:09:53 +01:00
Joel Kåberg
8de19cbd52 fixes 2013-12-06 08:19:13 +01:00
Joel Kåberg
8573832ff7 fixes 2013-12-06 08:05:34 +01:00
Joel Kåberg
7c1d3f8762 fixes 2013-12-06 08:05:15 +01:00
Joel Kåberg
9cd1adcdee fixes 2013-12-06 08:04:58 +01:00
Joel Kåberg
f017ac9dca use searcher.get_search_title 2013-12-06 07:46:50 +01:00
Joel Kåberg
907704e45f fix self.getSearchTitle() 2013-12-06 07:44:16 +01:00
Joel Kåberg
b17f937389 use include_identifier 2013-12-06 07:40:15 +01:00
Joel Kåberg
f591c56dd4 Merge pull request #2570 from fuzeman/tv_searcher
[TV] WEB-DL matcher fix, updated 'searcher.get_search_title'
2013-12-05 22:14:24 -08:00
Dean Gardiner
2fd54901e7 Added optional parameter 'include_identifier' to the 'searcher.get_search_title' event handler. 2013-12-06 14:04:00 +13:00
Dean Gardiner
1bf6c5a82e Changed 'searcher.get_search_title' to accept a 'library' instead of the 'media' as a parameter. 2013-12-06 13:53:56 +13:00
Dean Gardiner
45484461b5 Adjusted Matcher.chainMatch to support 'WEB DL' tags 2013-12-06 13:50:01 +13:00
Dean Gardiner
aa394f59ae Updated Caper to v0.2.4 2013-12-06 13:50:00 +13:00
Joel Kåberg
717111f5d2 cleanup Newznab provider 2013-12-05 21:11:45 +01:00
Joel Kåberg
e3461dc35f updated TorrentDay for tv branch 2013-12-05 17:13:34 +01:00
Joel Kåberg
9b834f62a9 updated Torrentleech for tv branch 2013-12-05 16:55:34 +01:00
Joel Kåberg
935938474c SCC Provider: remove debug info 2013-12-05 16:23:07 +01:00
Joel Kåberg
6573196186 update SCC for tv branch 2013-12-05 16:21:58 +01:00
Joel Kåberg
9a07f2ed65 use searcher.get_search_title and library.identifier (not present in movie library module?) 2013-12-05 15:40:21 +01:00
Joel Kåberg
613ff3b729 updated newznab provider for tv branch. see inline comments 2013-12-05 14:09:35 +01:00
Ruud Burger
def62fc865 Merge pull request #2568 from fuzeman/tv_searcher
[TV] Fixed bug with Library serialization when adding shows
2013-12-05 00:42:13 -08:00
Dean Gardiner
037c355836 Fixed bug with Library serialization when adding shows 2013-12-05 16:23:21 +13:00
Joel Kåberg
180b2bbffe Merge pull request #2549 from fuzeman/tv_searcher
[TV] Searcher cleanup and matcher updates
2013-12-03 23:16:11 -08:00
Dean Gardiner
143dcad4f3 Fixed incorrect reference to library 'season' and 'episode' attributes. 2013-12-04 19:50:48 +13:00
Dean Gardiner
b0e352ab6d Updated Caper to v0.2.3 and Logr to v0.2.2 to greatly improve matching performance 2013-12-04 19:29:02 +13:00
Dean Gardiner
5ea7dc5920 Moved 'searcher.get_media_identifier' into season and episode libraries as 'library.identifier' 2013-12-04 17:15:08 +13:00
Ruud
966f8c36b1 Make sure to use a valid cookie_secret. fix #2553 2013-12-02 12:09:14 +01:00
Dean Gardiner
3c675b5b8a searcher and matcher now uses the new related_libraries and root_library from media instead of using extra db queries 2013-12-02 23:27:26 +13:00
Dean Gardiner
11ea9b4e91 related_libraries are now only included on searches and added the root_library attribute 2013-12-02 23:26:31 +13:00
Dean Gardiner
e8a2139ecf Related libraries are now merged into {<type>: [<library>,...]} type 2013-12-02 21:18:48 +13:00
Ruud
50c5044fe8 Add path separator for check 2013-12-01 19:23:53 +01:00
Dean Gardiner
dc57d7b6d1 Added related_libraries to Library model. 2013-12-01 20:25:33 +13:00
Dean Gardiner
0925f1312d Fixed refresh action - changed show searcher to bind to 'season' and 'episode' media types for '.searcher.single' as well. 2013-12-01 20:22:07 +13:00
Dean Gardiner
efc02f66f5 Changed the IPTorrents show provider into a new season and episode provider, removed grouped cat_ids 2013-12-01 20:20:19 +13:00
Ruud
9ce8ffc14b movie_id > media_id 2013-11-30 16:52:08 +01:00
Ruud
bab07a05e7 Merge branch 'refs/heads/develop' into tv 2013-11-30 16:48:52 +01:00
Ruud
46b2d6ba6e movie_id > media_id 2013-11-30 16:48:46 +01:00
Ruud
1df9f7c83f Merge branch 'refs/heads/develop' into tv 2013-11-30 16:14:19 +01:00
Ruud
8aec5cf605 Better (custom) formhints 2013-11-30 14:59:52 +01:00
Ruud
54af80d5ad Don't wait for shutdown of scheduler 2013-11-30 12:51:35 +01:00
Ruud
8b2cd62211 Don't save stash on pull 2013-11-30 12:49:28 +01:00
Ruud
efdf77ef6c Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/plugins/release/main.py
	couchpotato/core/plugins/renamer/main.py
2013-11-30 12:44:13 +01:00
Ruud
2fc4809821 Variable renaming movie to media 2013-11-30 12:41:06 +01:00
Ruud
bde6de1789 Move movie listing to media 2013-11-30 12:23:53 +01:00
Ruud
c72cca4ea2 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-11-30 11:52:56 +01:00
Ruud
0f071be762 Use Object.each for object looping 2013-11-30 11:52:41 +01:00
Joel Kåberg
cddf47f113 move long subtitle text into formhint 2013-11-29 22:28:51 +01:00
Joel Kåberg
76f3f5253a move long automation text into formhint 2013-11-29 22:14:28 +01:00
Joel Kåberg
d833a04293 move long texts into formhint 2013-11-29 22:10:02 +01:00
Joel Kåberg
2e96860380 directory properly removed 2013-11-27 07:58:23 +01:00
Ruud Burger
3e2e6385cf Properly split seed ratios and seed times 2013-11-26 17:10:56 +01:00
Joel Kåberg
ccc2028690 remove directory option in utorrent
doesn't behave as expected on windows
2013-11-26 15:51:12 +01:00
Joel Kåberg
81dbc1ca79 Merge pull request #2527 from RuudBurger/couchtart
TorrentPotato ready for prime time
2013-11-25 23:54:45 -08:00
Ruud
e9a3059be2 Allow longer description in formhint 2013-11-25 22:16:02 +01:00
Ruud Burger
a989c93505 Merge pull request #2523 from fuzeman/tv_searcher
[TV] Merge fixes, removed get_media_searcher_id event
2013-11-25 07:38:08 -08:00
Dean Gardiner
d122bd1b43 Removed 'searcher.download' (method was moved to the release plugin) 2013-11-25 19:47:05 +13:00
Dean Gardiner
ab81824f4c Minor changes to matcher and added extra show searcher logging 2013-11-25 19:29:29 +13:00
Dean Gardiner
4eb73e3609 Renamed Release.movie references to Release.media 2013-11-25 19:28:43 +13:00
Dean Gardiner
6bcb279f0e Updated Caper library 2013-11-25 17:37:08 +13:00
Dean Gardiner
f446c8ed33 Updated QueryCondenser library 2013-11-25 17:07:54 +13:00
Dean Gardiner
10a34f2b69 Removed the use of the 'searcher.get_media_searcher_id' event 2013-11-25 16:20:03 +13:00
Ruud
cc3ebd79e8 Remove extensions from qualities 2013-11-24 23:17:18 +01:00
Ruud
3e035f84b1 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/helpers/variable.py
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/media/movie/searcher/main.py
	couchpotato/core/plugins/quality/main.py
	couchpotato/core/plugins/release/main.py
	couchpotato/core/plugins/renamer/main.py
2013-11-24 23:13:25 +01:00
Ruud
3d5b33856f Add some quality tests 2013-11-24 22:45:17 +01:00
Ruud
8d2e3a1919 Add ratio and seed time styling 2013-11-24 21:43:54 +01:00
Joel Kåberg
f3380c4fed seed_time and seed_ratio 2013-11-24 19:33:29 +01:00
Joel Kåberg
8a58d7f973 use hostname instead of TorrentPotato (dashboard) 2013-11-24 14:51:03 +01:00
Ruud
37b98cb835 TorrentPotato styling of inputs 2013-11-24 00:52:51 +01:00
Ruud
50262112b8 Use release_name 2013-11-24 00:27:47 +01:00
Ruud
4b9f9862fc Change name and response 2013-11-23 12:07:00 +01:00
Ruud
df60d70592 Move it 2013-11-23 12:06:46 +01:00
mano3m
1b5bc1fa05 [Metadata][XBMC] Add fileinfo to nfo
Also fixed a int / int = int divide bug
2013-11-23 01:04:41 +01:00
mano3m
e4993eac24 [Metadata][XBMC] Add actors to CPS info and nfo 2013-11-23 01:04:40 +01:00
mano3m
bd1bb1ee91 [Metadata][XBMC] Add images to nfo 2013-11-23 01:04:40 +01:00
mano3m
2c1c57333c [Metadata][XBMC] Add trailer to nfo 2013-11-23 01:04:39 +01:00
mano3m
a466cbcf16 [Metadata][XBMC] Fix nfo data
Fixes #1412 and @Lennong MPAA section
2013-11-23 01:04:38 +01:00
Ruud
379f62a339 CouchTater fixes 2013-11-23 00:31:26 +01:00
Ruud
eaf2974f8d Better frontend notification and GUI updating 2013-11-22 23:00:33 +01:00
Ruud
99e641a30d Update dashboard when the search ends of added new movie 2013-11-22 16:47:55 +01:00
Ruud
88d6148500 Update libs 2013-11-22 16:09:15 +01:00
Ruud
f53364eb6c Update Tornado 2013-11-22 16:08:54 +01:00
Ruud
b8f78e311d Update scheduler module 2013-11-22 15:38:33 +01:00
Ruud
bb6e1e2909 Don't propagate core messages to other notification providers. 2013-11-22 15:17:35 +01:00
Ruud
c62c6664ce Merge branch 'refs/heads/fuzeman-feature/notifications/pushbullet' into develop 2013-11-22 01:44:41 +01:00
Ruud
8ae4e3be18 Merge branch 'feature/notifications/pushbullet' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-feature/notifications/pushbullet 2013-11-22 01:44:16 +01:00
Ruud
0065ff5086 Indentation cleanup 2013-11-22 01:34:50 +01:00
Ruud
28d073f934 Merge branch 'refs/heads/Damiya-fix2474' into develop 2013-11-22 01:30:10 +01:00
Ruud
df1cb0ae08 Merge branch 'fix2474' of git://github.com/Damiya/CouchPotatoServer into Damiya-fix2474 2013-11-22 01:29:57 +01:00
jchristi
31a1af43d5 Update fedora init file
This took me awhile to figure out when trying to install for the first time. Luckily, I had the sickbeard init file to reference.
2013-11-22 01:28:14 +01:00
Joel Kåberg
8951e9fc90 typo 2013-11-21 22:22:19 +01:00
Joel Kåberg
357166414c use .get() and added more options 2013-11-21 22:20:45 +01:00
Joel Kåberg
e1a311de40 initial couchtarter provider (torrent newznab)
initial ground work based on newznab provider
needs UI changes: http://i.imgur.com/4MiJUH5.png (need to add ratio and
seed hours also)

untested code
2013-11-21 19:55:36 +01:00
Kate von Roeder
ab923cc592 Sort directories so that we scan them in alphabetical order as well (keeps things nice and well ordered!) 2013-11-20 18:47:09 -08:00
Kate von Roeder
99947fb135 CSS fix for #1578 part 2 - Change text direction from RTL to LTR, fixing issue where root drives would show up as '\C:'. Weird! 2013-11-20 13:47:40 -08:00
Kate von Roeder
185cb0196a Fix for #1578 - Depends on stableSort, so added to PR#2500.
Object.each is not necessarily alphabetic when iterating an object's properties, so we pull the folders out of the object, add them to an array, and sort that.
2013-11-20 13:36:08 -08:00
Kate von Roeder
309ec50691 Array.sortBy should also use the new stablesort. 2013-11-20 09:15:25 -08:00
Kate von Roeder
f865484182 Add Array.stableSort from mootools forge.
Change calls to Array.sort to use new Array.stableSort. Fixes sorting problems on Chrome
2013-11-20 05:47:36 -08:00
Dean Gardiner
ed19fd0254 Added Pushbullet notifications 2013-11-20 22:04:11 +13:00
Ruud
cec88319fe Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-11-19 23:45:28 +01:00
Ruud
d31b7eb72d Add date and message-id to email notification 2013-11-19 23:45:12 +01:00
Joel Kåberg
b7d93b84dd option to set download directory in utorrent 2013-11-19 18:59:54 +01:00
Joel Kåberg
4008774908 append label unnecessary
just set the full path to the dir
2013-11-19 18:51:28 +01:00
Ruud
accce789ba Normalize path sp function 2013-11-19 09:16:47 +01:00
Ruud
091b1fefd2 Add category_id to movie add docs 2013-11-19 09:09:29 +01:00
Ruud
899b1f9b96 Add mobile web capable for Android
Thanks @Elziah
2013-11-18 23:03:40 +01:00
Ruud
0ce5c51c67 renamer.scan needs some files. fix #2481 2013-11-18 22:56:03 +01:00
Ruud
da760db340 Merge branch 'refs/heads/mano3m-develop_fixes' into develop 2013-11-17 21:18:37 +01:00
Ruud
4242a5cedb Merge branch 'develop_fixes' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_fixes 2013-11-17 21:16:35 +01:00
mano3m
8c41046836 more fixes 2013-11-17 21:10:07 +01:00
mano3m
c5e6ce0e48 Several sp fixes 2013-11-17 18:26:01 +01:00
Ruud
3ad527eb62 Allow 1080p in webrip quality 2013-11-17 00:24:09 +01:00
Ruud
af2a6bf031 Force ETA data not to be too far in the future 2013-11-16 23:58:58 +01:00
Ruud
731419b61f Better error logging for syno downloader. close #2464 2013-11-16 23:33:23 +01:00
Ruud
0fafd83d76 Do some scoring with scene / nuked. fix #2009 2013-11-16 23:26:46 +01:00
Ruud
003b78a66e Scene validation 2013-11-16 23:24:05 +01:00
Ruud
4ade857f01 Better string regex between brackets 2013-11-16 23:23:10 +01:00
Ruud
658596659f Deluge wrong sp wrap. fix #2463 2013-11-16 17:23:51 +01:00
Ruud
59e6d68416 Use correct config name for bithdtv 2013-11-16 14:39:51 +01:00
Ruud
e6d76db250 Merge branch 'refs/heads/mano3m-develop_scan_basefolder' into manual_scan 2013-11-16 14:29:54 +01:00
Ruud
3b3288c53d Manual scan folder cleanup 2013-11-16 14:29:34 +01:00
Ruud
16cf220741 Merge branch 'develop_scan_basefolder' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_scan_basefolder
Conflicts:
	couchpotato/core/plugins/renamer/main.py
2013-11-16 13:45:06 +01:00
Ruud
db4f7a216a SP function wrapping whole variables 2013-11-16 13:32:00 +01:00
Ruud
3f8b97feb9 Merge branch 'refs/heads/mano3m-develop_clean_path' into develop 2013-11-16 12:57:35 +01:00
Ruud
a27673eaa4 Merge branch 'develop_clean_path' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_clean_path 2013-11-16 12:57:21 +01:00
Ruud
8e3291a1b0 bithdtv, Import correct functions 2013-11-16 12:49:39 +01:00
Ruud
89c04902e8 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-11-16 12:47:57 +01:00
Ruud
e29b100374 Don't try to unicode None object 2013-11-16 12:47:18 +01:00
Shatil Rafiullah
941d4414ce Changed to using getattr() so films lacking sets/collections are also handled. 2013-11-16 12:43:07 +01:00
Shatil Rafiullah
dc830324ae Added XBMC collection (set) categorization capability. 2013-11-16 12:43:02 +01:00
Ruud
3f37fc1e11 Move proxy getter to global torrent provider 2013-11-16 12:39:43 +01:00
Ruud
3442129610 Merge branch 'refs/heads/cptjhmiller-develop' into develop 2013-11-16 12:16:42 +01:00
Ruud
e9d29f10c1 Cleanup KAT import 2013-11-16 12:16:37 +01:00
Joel Kåberg
8996dd34c2 fix ident in bithdtv 2013-11-16 12:01:29 +01:00
Ruud
e2c5be0fcd Merge branch 'develop' of git://github.com/cptjhmiller/CouchPotatoServer into cptjhmiller-develop 2013-11-16 11:57:27 +01:00
Ruud
3d42c55560 Merge branch 'refs/heads/techmunk-develop' into develop 2013-11-16 11:56:35 +01:00
Ruud
9d287f140b Reorder deluge import 2013-11-16 11:56:29 +01:00
Jamie
5a8f28764d Fix to help find working proxy 2013-11-16 02:30:31 +00:00
Joel Kåberg
a2c5074d66 fixed bithdtv provider 2013-11-16 02:58:29 +01:00
Joel Kåberg
6acc125d4f bithdtv provider
thanks to @lansinghd ,
https://github.com/RuudBurger/CouchPotatoServer/pull/2460
2013-11-16 02:57:06 +01:00
Techmunk
7b9ebc2f34 Fixed issue https://github.com/RuudBurger/CouchPotatoServer/issues/2440, by returning a 'True' status when an existing torrent in deluge is added from CP. 2013-11-15 21:25:19 +10:00
Ruud
4e0d6ec980 Merge branch 'refs/heads/clinton-hall-develop' into develop 2013-11-14 22:36:17 +01:00
Ruud
c1944c987d Add some more double char replacements 2013-11-14 22:35:13 +01:00
Ruud
cdb889a985 Merge branch 'develop' of git://github.com/clinton-hall/CouchPotatoServer into clinton-hall-develop 2013-11-14 21:56:31 +01:00
Jamie
f6281c6dcc Update __init__.py 2013-11-14 14:51:27 +00:00
Jamie
c832a9e2b2 Added proxy support 2013-11-14 14:50:50 +00:00
Ruud
0c4851e436 Escape filename before using it in a regex. fixes #2430 2013-11-13 19:32:59 +01:00
Ruud
ce1b205993 Allow 720p tag for screener 2013-11-13 19:22:19 +01:00
Clinton Hall
b771aa303f replace multiple separators. fixes #2448 2013-11-13 21:41:29 +10:30
Ruud Burger
81178b4c8b Merge pull request #2438 from fuzeman/feature/dev_rtorrent
rTorrent: Delete Torrent Directories
2013-11-10 08:28:29 -08:00
Dean Gardiner
0317681597 Added directory removal to the rtorrent downloader 2013-11-11 03:21:00 +13:00
Ruud
ddba0e318f Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-11-09 23:42:15 +01:00
Ruud
3ef9591abd Cleanup import 2013-11-09 23:42:08 +01:00
Joel Kåberg
0d3c0c4077 use delete_files 2013-11-09 22:42:09 +01:00
Ruud
22b32364b6 Missing ) 2013-11-09 21:41:03 +01:00
Ruud Burger
db8fd20d67 Merge pull request #2436 from jkaberg/develop
rTorrent: remove only files, not folder
2013-11-09 09:46:15 -08:00
Joel Kåberg
3c061095e9 remove only files, not folder
(or in worst case the download clients root folder and anything in it)
2013-11-09 18:33:21 +01:00
Ruud
05853bca89 Don't put plot over trailer z-index 2013-11-06 22:16:02 +01:00
Ruud
aa489bb709 Force name as string 2013-11-05 23:42:44 +01:00
Ruud
0b70465578 Add Flickcharts to userscript 2013-11-05 23:35:47 +01:00
Ruud
5c64ba3c9e Add box office top10 to IMDB automation. closes #2427 2013-11-05 22:45:25 +01:00
Ruud
e119020016 Ignore releases without any info. 2013-11-05 22:16:26 +01:00
Ruud
9b92a3d396 Make sure the ignored files get used. fix #2425 2013-11-05 21:24:47 +01:00
Ruud
c73dc10aeb Add a bit of padding to plot 2013-11-04 22:47:20 +01:00
Ruud
c5ee0a576e Merge branch 'refs/heads/jerbob92-suggestdescription' into develop 2013-11-04 22:26:14 +01:00
Ruud
3e2ede585a Animate plot to show more text 2013-11-04 22:26:08 +01:00
Ruud
ba3dd263ac Merge branch 'suggestdescription' of git://github.com/jerbob92/CouchPotatoServer into jerbob92-suggestdescription
# Please enter a commit message to explain why this merge is necessary,
# especially if it merges an updated upstream into a topic branch.
#
# Lines starting with '#' will be ignored, and an empty message aborts
# the commit.
2013-11-03 21:59:39 +01:00
Ruud
7c955ecc80 XMPP notification support
thanks @wernight
2013-11-03 17:17:59 +01:00
Ruud Burger
48193b38c5 Merge pull request #2415 from mano3m/develop_fix_scanner
Cleanup file size code in scanner
2013-11-03 07:44:30 -08:00
Ruud Burger
2f5a233e63 Merge pull request #2416 from mano3m/develop_remote
Default movie_folder to from folder
2013-11-02 11:56:55 -07:00
mano3m
7b86fe5587 Default movie_folder to from folder
In case remote downloaders return a path that does not exist locally,
the movie_folder and files are updated to the from folder. Fixes #2412,
#1762, #1667, #1047
2013-11-02 11:20:34 +01:00
mano3m
5396343940 Cleanup file size code in scanner 2013-11-02 10:43:22 +01:00
mano3m
fa1baa73e8 Introduce path cleaning
A new function sp is introduced. It does the same as ss but also cleans
the path.
2013-11-02 10:15:50 +01:00
mano3m
d984f11cbf First attempt at creating a working directory selector 2013-11-02 08:48:52 +01:00
mano3m
ae666bd9b6 Add API call to scan a folder for multiple movies 2013-11-02 08:48:52 +01:00
Ruud
9fa62de6dd Wrong variable logged in email notification 2013-10-30 23:09:45 +01:00
Adrien RAFFIN
7c5748ac87 Add support for starttls and allow modification of SMTP server port 2013-10-30 23:06:49 +01:00
Ruud
47de84259d Cleanup searcher PR 2013-10-30 22:51:26 +01:00
Ruud
f2b483b16e Merge branch 'refs/heads/fuzeman-dev_searcher' into develop 2013-10-30 22:09:10 +01:00
Ruud
98efe89833 Merge branch 'dev_searcher' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-dev_searcher 2013-10-30 22:08:59 +01:00
Ruud
f8872e2803 Use getter to prevent keyerror. fix #2410 2013-10-30 22:04:51 +01:00
Ruud
a1fd581bca Add HD quality tags 2013-10-29 21:31:02 +01:00
Ruud
6a4bc1eb08 Don't add tags twice for dvd-r quality 2013-10-29 21:16:32 +01:00
Ruud
94d1f99315 Add ignored group 2013-10-29 21:14:53 +01:00
Ruud
7c51bdbdaf Allow par3 files in binsearch validation 2013-10-27 20:21:02 +01:00
Ruud
d275dfd8cc Add br2dvd as DVD alternative. fix #1604 2013-10-27 20:16:03 +01:00
Ruud
82b879fbb4 Add proper detail url for OMGWTF 2013-10-27 19:50:26 +01:00
Ruud
cc32bd7050 OMGWTF https url 2013-10-27 19:22:58 +01:00
Ruud
4f4ba470e0 Prevent files keyerror for release_download files. fix #2392 2013-10-26 15:26:19 +02:00
Ruud
ce47429701 Only show n/a if undefined 2013-10-26 15:12:54 +02:00
Ruud
550051b3f6 Use order for quality allow calculation. fix #2396 2013-10-26 15:09:30 +02:00
Ruud
b149528406 Cleanup older releases calling the wrong function 2013-10-22 14:11:13 +02:00
Ruud
22c257618d Remove unused movie.search function 2013-10-21 00:00:13 +02:00
Ruud
e1c3c334d9 Use new provider named events for search. fix #2379 2013-10-20 23:56:31 +02:00
Ruud
c5e7159952 Don't add identifier score double when scoring 2013-10-20 23:40:16 +02:00
Ruud
fe8946e3b5 Cache qualities.all 2013-10-20 23:29:36 +02:00
Ruud
c354d3c6d5 Guess qualities based on score. fix #2373 2013-10-20 22:47:18 +02:00
Ruud
53cd907db1 Code cleanup 2013-10-20 17:43:30 +02:00
Ruud
605f340be5 Merge branch 'refs/heads/mano3m-develop_torrent_files' into develop 2013-10-20 17:39:36 +02:00
Ruud
e014ce7a47 Merge branch 'develop_torrent_files' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_torrent_files 2013-10-20 17:39:13 +02:00
mano3m
579c1fa53c Fix categories error 2013-10-20 13:24:01 +02:00
mano3m
4bfb5c6397 Make sure Transmission folders are 'normpath'-ed 2013-10-20 02:41:48 +02:00
mano3m
639d635913 Implement better folder checking
Fixes #2360, thanks @clinton_hall
2013-10-20 02:41:48 +02:00
mano3m
37e5f2c48b Fix SabNZBd folder bug
If only one file is extracted the storage key contains the extracted
file instead of the folder. This leads to CPS skipping the renamer. This
check fixes that.
2013-10-20 01:50:06 +02:00
mano3m
583bb1d0d9 Fix debug message 2013-10-19 00:14:04 +02:00
Jeroen Bobbeldijk
d0cffb5863 Fix up tabs 2013-10-18 23:35:20 +02:00
Jeroen Bobbeldijk
548686ebfe Added pilot to suggestion 2013-10-18 23:32:59 +02:00
Ruud
0635c571e4 Remove Notifo 2013-10-18 17:57:44 +02:00
Ruud
4764925ae6 Only skip data dir paths when updating source 2013-10-18 17:13:06 +02:00
mano3m
80e9831c03 Make uTorrent language independent
Fixes #2341
2013-10-18 00:48:42 +02:00
Dean Gardiner
f7e1fa1406 'release.download' renamed to 'release.manual_download', Moved 'searcher.download' and 'searcher.try_download_result' to 'release.*'. 2013-10-17 23:27:24 +13:00
Dean Gardiner
dc73e5c58f Added back migration code in 'searcher.download' 2013-10-17 22:53:44 +13:00
mano3m
526d383929 Fix for release.update
The done release has no release info. This is fixed by doing it in the
same way as the interface.
2013-10-16 22:17:22 +02:00
mano3m
89f7cfb896 tagging fixes 2013-10-16 22:17:21 +02:00
mano3m
6abc4cc549 Upgrade tagging
Havent tested this yet, but it should work with both one filed torrents
and folders. Everything mixed, let's go crazy!!
2013-10-16 22:17:21 +02:00
mano3m
6aa7cfc0fe Wrong use of "is" 2013-10-16 22:17:20 +02:00
mano3m
345d0b8211 Add status to renamer.scan api call
This allows for scripts to send the seeding status with the scan
2013-10-16 22:17:20 +02:00
mano3m
eb17afc368 Fixed bug where it didnt do anything... 2013-10-16 22:17:19 +02:00
mano3m
c12b189f5f Fixed variables in scanner 2013-10-16 22:17:19 +02:00
mano3m
5edc745727 Typo 2013-10-16 22:17:18 +02:00
mano3m
bc877df513 Cleanup variable naming
Use release_download variable for all item/status/download_info
variables (which are by now all the same thing)
2013-10-16 22:17:18 +02:00
mano3m
57cb22c9aa Fix type of torrent_files 2013-10-16 22:17:18 +02:00
mano3m
719aca88b7 Clean-up read only files uTorrent 2013-10-16 22:17:17 +02:00
rbfblk
b1e66478f0 Fixing an issue which strips all read bits from utorrent downloaded files on Linux 2013-10-16 22:17:17 +02:00
Dean Gardiner
25f0462c15 Added files for rTorrent 2013-10-16 22:17:16 +02:00
mano3m
caded0694c include files for Transmission 2013-10-16 22:17:16 +02:00
mano3m
39190495be Correct path for one file torrent 2013-10-16 22:17:15 +02:00
Techmunk
1cc998bc95 Include files for renamer in Deluge downloader. 2013-10-16 22:17:15 +02:00
mano3m
54c7aad57a Include files from downloader in renamer 2013-10-16 22:17:14 +02:00
Joel Kåberg
611c159373 Merge pull request #2356 from fuzeman/tv_searcher
[TV][Searcher] Release Matching and Snatching
2013-10-15 22:16:35 -07:00
Joel Kåberg
db65980ba4 Merge pull request #2354 from nrgaway/tv_xem
Tv xem
2013-10-15 21:51:41 -07:00
Dean Gardiner
1c8fed5457 Minor cleanup to Searcher and Matcher
Conflicts:

	couchpotato/core/plugins/matcher/main.py
2013-10-16 15:46:46 +13:00
Dean Gardiner
8e51513ee0 Moved 'searcher.create_releases' from Searcher to Release.
Conflicts:

	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/media/show/searcher/main.py
2013-10-16 15:46:24 +13:00
Dean Gardiner
1788440a5c Cleaned up usage of helper functions
Conflicts:

	couchpotato/core/media/show/searcher/main.py
	couchpotato/core/plugins/matcher/main.py
2013-10-16 15:40:25 +13:00
Dean Gardiner
f467e4d75a Fix to Provider getCatId when returning the cet_backup_id 2013-10-16 15:38:41 +13:00
Dean Gardiner
1e3f8410c0 Added 'searcher.get_media_searcher_id' event, Cleaned up some 'status.get' calls, Renamed some references of 'nzb' to 'rel'.
Conflicts:

	couchpotato/core/media/_base/searcher/main.py
2013-10-16 15:37:52 +13:00
Dean Gardiner
cbb7b96391 'searcher.correct_release' can now return a float indicating the weight/accuracy which is used to scale the score. Fix to IPT _buildUrl method.
Conflicts:

	couchpotato/core/providers/torrent/iptorrents/main.py
2013-10-16 15:34:08 +13:00
Dean Gardiner
5f24338bd2 Renamed 'movie' -> 'media' in 'searcher.download'
Conflicts:

	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/plugins/release/main.py
2013-10-16 15:32:25 +13:00
Dean Gardiner
56f049cd7d Created 'searcher.try_download_result' event from section in MovieSearcher.single 2013-10-16 15:04:10 +13:00
Dean Gardiner
180576f2b7 Minor change to ShowSearcher.correctMatch logging 2013-10-16 14:58:53 +13:00
Dean Gardiner
46d4d34da7 Minor cleanup to Searcher and Matcher 2013-10-16 14:58:52 +13:00
Dean Gardiner
3fa21560be Moved 'searcher.create_releases' from Searcher to Release. 2013-10-16 14:58:51 +13:00
Dean Gardiner
b902186389 Cleaned up usage of helper functions 2013-10-16 14:58:50 +13:00
Dean Gardiner
da87e68fad Implemented basic usage of QueryCondenser 2013-10-16 14:58:49 +13:00
Dean Gardiner
f23412ea7e Added qcond (Query Condenser) v0.1.0 library - https://github.com/fuzeman/QueryCondenser 2013-10-16 14:58:48 +13:00
Dean Gardiner
07abf7c83d Updated Caper to version 0.2.2 2013-10-16 14:58:47 +13:00
Dean Gardiner
6259684487 Moved caper matching into a new 'matcher' plugin. 2013-10-16 14:58:47 +13:00
Dean Gardiner
0a0935d635 Fix to Provider getCatId when returning the cet_backup_id 2013-10-16 14:58:46 +13:00
Dean Gardiner
fb5b17005f Cleaned up status.get calls in TV searcher 2013-10-16 14:58:45 +13:00
Dean Gardiner
e3745b5d74 Updated Caper library 2013-10-16 14:58:44 +13:00
Dean Gardiner
8d24d96804 Implemented 'searcher.get_media_searcher_id' in the TV searcher. 2013-10-16 14:58:43 +13:00
Dean Gardiner
529b535d9f Added 'searcher.get_media_searcher_id' event, Cleaned up some 'status.get' calls, Renamed some references of 'nzb' to 'rel'. 2013-10-16 14:58:42 +13:00
Dean Gardiner
0793668e5c Chain result weight now returned from TV searcher correctRelease function. 2013-10-16 14:58:41 +13:00
Dean Gardiner
8d368ecf29 'searcher.correct_release' can now return a float indicating the weight/accuracy which is used to scale the score. Fix to IPT _buildUrl method. 2013-10-16 14:58:40 +13:00
Dean Gardiner
2d2b0c9048 IPT provider now searches in multiple categories. 2013-10-16 14:58:40 +13:00
Dean Gardiner
fb0719d677 TV Searcher now supports xem scene mappings 2013-10-16 14:58:39 +13:00
Dean Gardiner
7ffa5dc7b6 Fixed IPT Show SD cat_ids 2013-10-16 14:58:38 +13:00
Dean Gardiner
32c289fd3d Renamed 'movie' -> 'media' in 'searcher.download' 2013-10-16 14:58:37 +13:00
Dean Gardiner
ff63b8a1c5 Added TV release snatching/downloading 2013-10-16 14:58:36 +13:00
Dean Gardiner
60d8934444 Created 'searcher.try_download_result' event from section in MovieSearcher.single 2013-10-16 14:58:35 +13:00
Jason Mehring
e0aba01866 more tvdb info provider guards 2013-10-15 17:54:06 -04:00
Jason Mehring
1ae498e3c8 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_xem 2013-10-15 16:25:05 -04:00
Jason Mehring
d1db099f71 grab tvdb fields more defensively 2013-10-15 16:24:49 -04:00
Ruud Burger
f4ef64290d Merge pull request #2352 from fuzeman/tv
[TV] Fixed show searching (broken in search merge)
2013-10-15 12:20:29 -07:00
Dean Gardiner
026151d1a1 Fixed show searching (broken in search merge) 2013-10-15 15:28:35 +13:00
Ruud Burger
a09e8b63ae Merge pull request #2350 from einartryggvi/develop
Make ubuntu init script executable so it can be symlinked to /etc/init.d
2013-10-14 13:29:53 -07:00
Einar Tryggvi Leifsson
400643cbcd Make ubuntu init script executable so it can be symlinked to /etc/init.d 2013-10-14 20:27:21 +00:00
Ruud
ce68a37441 Zero fill imdb ids found 2013-10-14 22:24:23 +02:00
Ruud
1377b6315c Allow imdb id with int of 4-7 2013-10-14 22:05:32 +02:00
Ruud
0e18dcb8a1 Use success when adding movies 2013-10-14 21:13:31 +02:00
Ruud
7277ef3bd8 Remove SceneHD as we can't login with captcha. fix #2146 2013-10-14 21:07:37 +02:00
Ruud
5bf3b929a2 Detect Windows 8 tablets as touchdevice also. 2013-10-14 00:01:38 +02:00
Ruud
66967f8326 Whatever! #2283
@clinton ;)
2013-10-13 22:37:15 +02:00
Ruud
e9abf982fe Flixter decode json before parsing. closes #2305 2013-10-13 22:21:32 +02:00
Ruud
3535f44db9 No need to use disable check in automation 2013-10-13 22:12:27 +02:00
Ruud
c772758683 Add category to renamer replacements. fix #2283 2013-10-13 22:12:15 +02:00
Ruud
2fc097c0e8 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-10-13 21:47:21 +02:00
Ruud
c9d7418899 Force unicode name for newznab. fix #2347 2013-10-13 21:46:16 +02:00
Ruud Burger
1317a4c6b7 Merge pull request #2346 from mano3m/develop_fix_dashboard
Move and fix cleanreleases
2013-10-13 12:17:49 -07:00
mano3m
4b0a5bdd9b Move and fix cleanreleases 2013-10-13 16:53:45 +02:00
Ruud
2b57bdcd03 Revert "Make sure to untag downloading dir if it's completed. fix #2341"
This reverts commit 65f039e9ed.
2013-10-13 15:17:39 +02:00
Ruud
65f039e9ed Make sure to untag downloading dir if it's completed. fix #2341 2013-10-13 14:25:50 +02:00
Ruud
3be6389fbf Use json in flixter 2013-10-13 14:16:59 +02:00
Ruud
9bf01e3a0b Plex endless loop when no clients connected 2013-10-13 14:01:18 +02:00
Ruud
1305327564 Merge branch 'refs/heads/fuzeman-feature/dev_plex' into develop 2013-10-13 13:56:38 +02:00
Ruud
97b6cf013f Merge branch 'feature/dev_plex' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-feature/dev_plex 2013-10-13 13:56:26 +02:00
Ruud
e1a6b813a5 Merge branch 'refs/heads/mano3m-develop_fix_dashboard' into develop 2013-10-13 13:45:45 +02:00
Ruud
b0e30921ae Merge branch 'develop_fix_dashboard' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_fix_dashboard 2013-10-13 13:45:27 +02:00
Ruud
f4c4f013da Cleanup searcher and release checking 2013-10-13 13:44:26 +02:00
Ruud
43ef982d95 Merge branch 'refs/heads/fuzeman-dev_searcher' into develop 2013-10-13 12:57:43 +02:00
Ruud
d930bc4afd Merge branch 'dev_searcher' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-dev_searcher 2013-10-13 12:57:22 +02:00
Kevin Carter
6dbdd4c0be Load lsb init-functions so that status_of_proc is available 2013-10-13 12:50:28 +02:00
Ruud
93bd75acc8 Make iframe https 2013-10-12 23:12:45 +02:00
Dean Gardiner
bdeace8a68 New clients added that aren't in the current client cache now trigger a reload if the list isn't "stale" yet. 2013-10-13 03:00:52 +13:00
Dean Gardiner
efdf70acb2 When notifications fail to send the client list is automatically reloaded in case the client address has changed. 2013-10-13 02:52:55 +13:00
Dean Gardiner
d31ca2677e Cleaned up Plex notifications plugin. 2013-10-13 02:26:35 +13:00
mano3m
3a117b6077 Make sure movies are removed from dashboard 2013-10-12 13:42:48 +02:00
mano3m
6d2889f88d Fix releases missing from Snatched&Available
Fixes #1958
2013-10-12 13:42:30 +02:00
Ruud Burger
213b03589a Merge pull request #2339 from cicavey/develop
Changed MIME type of JSONP requests to text/javascript
2013-10-12 04:26:08 -07:00
cicavey
79fd5fe332 Changed MIME type of JSONP requests to text/javascript 2013-10-12 07:11:37 -04:00
Ruud Burger
25a5b72d26 Merge pull request #2331 from fuzeman/feature/dev_rtorrent
rTorrent Downloader - fixes to scgi on Python 2.6
2013-10-12 03:07:31 -07:00
Dean Gardiner
8970e7fbba Fix to Searcher.createReleases (media_id doesn't exist yet) 2013-10-12 15:24:06 +13:00
Dean Gardiner
e96724beaf Fix to MovieSearcher.single to set default media type as types aren't in develop yet. 2013-10-12 15:11:46 +13:00
Dean Gardiner
73d7d01ae4 Fixed ResultList.append call to 'movie.searcher.correct_movie' instead of 'searcher.correct_release' 2013-10-12 15:10:26 +13:00
Dean Gardiner
34c69786de Merge base/movie searcher changes from branch 'tv' into develop 2013-10-12 14:25:00 +13:00
Dean Gardiner
8587b9b780 Updated rTorrent library - MethodError exceptions when calling group methods should be fixed. 2013-10-11 13:33:20 +13:00
Dean Gardiner
b9f88f431b Updated rTorrent library and fixed call to MethodError.message (should be MethodError.msg) in _update_provider_group 2013-10-11 04:12:36 +13:00
Dean Gardiner
df90ee0a55 Updated rtorrent library - scgi fix for Python 2.6 2013-10-10 15:58:35 +13:00
Ruud Burger
32a4075979 Merge pull request #2326 from fuzeman/feature/dev_rtorrent
rTorrent Downloader - scgi support
2013-10-09 08:08:04 -07:00
Ruud
99606e22d6 Make YIFY a imdbid search. fix #2323 2013-10-09 16:45:45 +02:00
Ruud
5fd0253089 Import Media, not Movie. fix #2320 2013-10-09 16:37:16 +02:00
Ruud
a46241bb9f Better year name guessing. #2323 2013-10-09 16:36:13 +02:00
Dean Gardiner
a8087c8ce9 Updated rTorrent downloader options 2013-10-09 23:07:14 +13:00
Dean Gardiner
0a90ad5db7 Updated rtorrent library to current master - scgi:// support 2013-10-09 22:24:22 +13:00
Ruud
75bda46f64 Userscript styling fixes 2013-10-08 21:53:03 +02:00
Ruud
a0d2a64e57 Userscript didn't load properly 2013-10-08 21:51:34 +02:00
Ruud
70dada8ef6 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/_base/media/main.py
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/media/movie/searcher/main.py
2013-10-08 10:02:40 +02:00
Ruud
d1c3f0c241 Use Media for all Movie db actions 2013-10-08 09:57:36 +02:00
Ruud
107606ce65 Add tv branch column aliases 2013-10-08 09:57:17 +02:00
Ruud
9ef752f8a3 Rename mediaplugin 2013-10-08 09:22:20 +02:00
Ruud
32646d0608 Use movie instaid of media model 2013-10-08 09:22:05 +02:00
Ruud
eabd2b6c41 Rename mediaplugin 2013-10-08 09:21:53 +02:00
Ruud
b8ac093182 Remove refresh from movie media
Conflicts:
	couchpotato/core/media/movie/_base/main.py
2013-10-08 09:15:41 +02:00
Ruud
d265a5bddd Remove refresh from movie media 2013-10-08 08:48:38 +02:00
Ruud
bac3055726 Move media refresh to media plugin 2013-10-08 08:46:32 +02:00
Ruud
b2b6e3eb33 Cleanup show media 2013-10-08 08:46:04 +02:00
Ruud
2b6c7a8f94 Move media refresh to media plugin 2013-10-08 08:45:45 +02:00
Ruud
955814397a Revert "TorrentBytes login url change. fix #2317"
This reverts commit 95d0dacd28.
2013-10-07 23:38:53 +02:00
Ruud
6070209d33 Attach shows to searcher 2013-10-07 23:37:17 +02:00
Ruud
fa78d18890 Merge searches 2013-10-07 23:23:46 +02:00
Ruud
40eaf2a96b Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/movie/_base/static/search.js
2013-10-07 23:12:59 +02:00
Ruud
10fe175ff5 Move suggestions to movie folder 2013-10-07 22:52:05 +02:00
Ruud
bca4a2e241 Move search item to movie folder 2013-10-07 22:51:23 +02:00
Ruud
3925d4c215 Make search work for multiple media types 2013-10-07 21:23:09 +02:00
Ruud
8ca5c62575 YIFY use IMDB id for search. fix #2313 2013-10-07 15:52:25 +02:00
Ruud
95d0dacd28 TorrentBytes login url change. fix #2317 2013-10-07 09:20:01 +02:00
Ruud
73dd0916c0 Merge branch 'tv' of github.com:RuudBurger/CouchPotatoServer into tv 2013-10-03 21:20:24 +02:00
Joel Kåberg
77d32fe16b Merge pull request #2292 from nrgaway/tv_xem
Tv xem
2013-10-03 08:17:26 -07:00
Jason Mehring
7def0944a6 Implemented map_absolute. model was changed to implement. map_names now stores in EpisodeLibrary 2013-10-03 04:30:05 -04:00
Ruud
b6f850dc27 in_ needs list.. 2013-10-03 08:30:13 +02:00
Ruud
38ce63795c Check snatched with single query 2013-10-03 08:26:02 +02:00
Ruud
8782cd77d5 Import cleanup 2013-10-03 08:21:37 +02:00
Ruud
1b59fd9af0 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/plugins/renamer/main.py
2013-10-03 08:17:20 +02:00
Joel Kåberg
9dca8a03be Merge pull request #2290 from nrgaway/tv_xem
Tv xem
2013-10-02 05:11:10 -07:00
Jason Mehring
132f4882e5 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_xem 2013-10-02 01:29:55 -04:00
Jason Mehring
9e32a38288 fixed bug that was not storing xem maps in EpisodeLibrary.info 2013-10-02 01:29:21 -04:00
Joel Kåberg
c1b13cd076 Merge pull request #2284 from fuzeman/tv_searcher
[TV][Searcher] TV searching, parsing and Release creation
2013-10-01 03:29:51 -07:00
Ruud
bbf42da875 ILoveTorrents cleanup 2013-09-30 22:18:36 +02:00
salfab
8df0ecc223 disabled by default 2013-09-30 21:55:33 +02:00
salfab
c37bf12c8a improve resilience to retrieve description in get_more_info 2013-09-30 21:55:29 +02:00
salfab
83051b2576 support getting more info. 2013-09-30 21:55:24 +02:00
salfab
75360f734c use a proper name, instead of the link 2013-09-30 21:55:20 +02:00
salfab
87754047fa torrents are found and appended to the results argument 2013-09-30 21:55:16 +02:00
salfab
f121db059e add new provider for ILT. 2013-09-30 21:55:08 +02:00
Ruud
c9e693287c Merge branch 'refs/heads/mano3m-develop_release' into develop 2013-09-30 20:52:51 +02:00
Ruud
0876d1ff8e Rename release.update to update_status 2013-09-30 20:52:04 +02:00
Ruud
6bbcc5af77 Merge branch 'develop_release' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_release 2013-09-30 20:31:50 +02:00
Ruud Burger
6a9f6a6fc8 Merge pull request #2099 from mano3m/develop_folder
Remove all empty folders after rename
2013-09-30 11:24:21 -07:00
Ruud Burger
1da3546f2d Merge pull request #2270 from fuzeman/feature/dev_rtorrent
rTorrent Downloader fixes
2013-09-30 11:23:00 -07:00
Ruud Burger
d233425a77 Merge pull request #2272 from fuzeman/feature/dev_plex
Fixed Plex notifications on latest PHT
2013-09-30 11:22:26 -07:00
Ruud
8883d505ba Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-09-30 20:12:31 +02:00
Ruud Burger
c51d806840 Merge pull request #2282 from mano3m/develop_encryptedasfailed
Consider encrypted as failed fix #2260
2013-09-30 11:12:08 -07:00
Ruud
13a0c4607d Merge branch 'refs/heads/jkaberg-develop' into develop 2013-09-30 20:11:51 +02:00
mano3m
fd8e50b533 [SabNZBd] Consider encrypted as failed 2013-09-30 20:05:34 +02:00
Ruud Burger
682216dcf4 Merge pull request #2281 from mano3m/develop_seedfix
Fix seeding status check #2278
2013-09-30 10:44:43 -07:00
mano3m
6bda5f5b03 Don't use movie done status to check seeding
Fixes #2278
2013-09-30 19:34:12 +02:00
mano3m
6174f121c8 fix log message 2013-09-30 19:27:11 +02:00
mano3m
89daa836e7 Remove all empty folders
Quite often there is a subfolder in the movie folder after extraction.
This folder is deleted but the actual movie folder remains behind. This
update fixes that in both cases: move_folder is known, or we work in the
'from' folder.
2013-09-30 19:24:46 +02:00
mano3m
7c5616cc79 fix colour order 2013-09-30 19:16:19 +02:00
mano3m
27fdbff619 Set missing to ignored after 1 week 2013-09-30 19:16:13 +02:00
mano3m
516447a104 Remove movie_dict 2013-09-30 19:16:08 +02:00
mano3m
0c6c172d6a Update movie quality status colour and text
It isnt perfect this way. I think we need to add a sepperate function to
do this and call that from both when CPS is loading the page and when it
updates a release (e.g. just rebuild the icons)
2013-09-30 19:16:01 +02:00
mano3m
d11f9d26c0 Add missing status 2013-09-30 19:15:51 +02:00
mano3m
a2cb0ec8ad frontend release.update 2013-09-30 19:15:44 +02:00
mano3m
1bddadf3a4 clean-up searcher 2013-09-30 19:15:30 +02:00
mano3m
f0f843f746 Add release.update event
Proof of concept commit.

It updates the database and calls movie.update.id to refresh the entire movie in the frontend. It would be better to crease a static js file in the release folder and add release functionality there including updating one release only.
2013-09-30 19:15:10 +02:00
Dean Gardiner
820588aa5f Created 'searcher.create_releases' event to replace some shared functionality, releases are now created for TV search results. 2013-09-30 23:08:32 +13:00
Dean Gardiner
8fbf050510 Created 'searcher.search' event to replace some shared functionality, Fixed an issue in Release.download when snatching movies. 2013-09-30 22:49:54 +13:00
Dean Gardiner
dd5ae3c4ee Working TV correctRelease function with quality, identifier and show title checking. 2013-09-30 22:03:21 +13:00
Dean Gardiner
ab51707607 Added Caper (0.2.0-master) and Logr (0.2.1) libraries 2013-09-30 18:40:27 +13:00
Dean Gardiner
8acdc56df1 Added the start of the ShowSearcher correctRelease function 2013-09-30 14:53:33 +13:00
Dean Gardiner
d345a05b3c Switched IPTorrents provider to the MultiProvider layout, few fixes to provider base for MultiProvider 2013-09-30 14:53:32 +13:00
Dean Gardiner
5f427ec6ea Moved required/ignored word checking from 'correctMovie' into 'searcher.correct_words' event, Renamed 'movie.searcher.correct_movie' to 'searcher.correct_release' 2013-09-30 14:53:32 +13:00
Dean Gardiner
a95c030885 Fix for discovering the cat_ids structure when the 'ids' are of str type. 2013-09-30 14:49:42 +13:00
Dean Gardiner
bef6a74dfe Minor cleanup to getSearchTitle 2013-09-30 14:49:41 +13:00
Dean Gardiner
01da470c21 Few changes to getSearchTitle in case a title isn't found, Added check to ensure enough media was returned from _lookupMedia 2013-09-30 14:49:40 +13:00
Dean Gardiner
5fdf4d9085 Extended providers to support multiple media types
- 'cat_ids' now support media type groups
  - 'type' extended to allow a list of support media types
  - Added 'searcher.get_search_title' to return a title for media to be used in searches.
2013-09-30 14:49:40 +13:00
Dean Gardiner
bc51e263e1 Switched back to a single search method 'show.searcher.single' 2013-09-30 14:47:51 +13:00
Dean Gardiner
4c527f0931 Added 'show.refresh' API method and the base for season and episode searching. 2013-09-30 14:47:50 +13:00
Joel Kåberg
317a1f119b not needed 2013-09-29 18:03:52 +02:00
Joel Kåberg
b128ef17c9 Added directory option
and an option to append label to directory path
2013-09-29 15:32:23 +02:00
Ruud
cc4350b0f9 NZBGet missing in wizard. fix #2262 2013-09-29 14:05:28 +02:00
Dean Gardiner
0b00f2d9e6 Fixed Plex notifications on latest PHT (protocol renamed to 'plex') 2013-09-30 00:49:00 +13:00
Ruud
e7aa91b3e1 Don't try to use custom_plugins when folder doesn't exist 2013-09-29 13:44:52 +02:00
Ruud
333abd2486 Custom plugin folder outside source. fix #2076 2013-09-29 13:25:10 +02:00
Dean Gardiner
226835e3d0 Added a check to ensure a torrent has been loaded (and found). 2013-09-29 23:32:03 +13:00
Dean Gardiner
48db4c8b8e Updated rtorrent-python library 2013-09-29 23:21:53 +13:00
Ruud
ae4e15286a Don't try to loop over None. fix #2268 2013-09-29 12:17:09 +02:00
Ruud
1b96489656 Merge branch 'refs/heads/jkaberg-develop' into develop 2013-09-29 10:06:22 +02:00
Ruud
99c899ea3a Proper variable naming 2013-09-29 10:06:12 +02:00
Ruud
8f76dd7a2e Merge branch 'develop' of git://github.com/jkaberg/CouchPotatoServer into jkaberg-develop 2013-09-29 09:57:17 +02:00
Ruud
1f2c2269e6 Ignore thumbs.db files and don't fail on single path split. fix #2265 2013-09-29 09:54:37 +02:00
Joel Kåberg
201185f7e7 better english damnit! 2013-09-29 01:49:51 +02:00
Joel Kåberg
e38d68c019 actual code 2013-09-29 01:45:50 +02:00
Joel Kåberg
91332e06e5 add option to create sub directory 2013-09-29 01:45:24 +02:00
Ruud
96b4af1fea Hide first item in combined table 2013-09-29 00:08:26 +02:00
Ruud
b4bccc9be2 Flixter automation support
Thanks @mikedm139
2013-09-28 23:41:15 +02:00
Ruud
d6ddee236a Merge branch 'refs/heads/mano3m-develop_bluray' into develop 2013-09-28 23:17:42 +02:00
Ruud
364e355114 Also try to load the root module for each path 2013-09-28 21:25:25 +02:00
Ruud
7d4f9d60b1 Code formating 2013-09-28 19:17:41 +02:00
Ruud
116bc839fc Make description more clear 2013-09-28 19:12:05 +02:00
Ruud
153d4b2b1d Merge branch 'develop_bluray' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_bluray 2013-09-28 18:20:47 +02:00
Ruud
2f4f140662 Don't overwrite data variable in utorrent download. fix #2222 2013-09-28 18:19:17 +02:00
Ruud
475ac1bb9c Only use filename for identification when possible. fix #2233 & #954 2013-09-28 18:06:45 +02:00
Ruud
49015b7d64 Be sure to ss quality alt in guess 2013-09-28 17:45:32 +02:00
Ruud
99efcce4d0 Merge branch 'refs/heads/techmunk-2235' into develop 2013-09-28 17:04:00 +02:00
Ruud
c3c971db23 Merge branch '2235' of git://github.com/techmunk/CouchPotatoServer into techmunk-2235 2013-09-28 17:03:27 +02:00
Ruud
8011634b7a Use correct encoding for emails. fix #2254 2013-09-28 16:39:31 +02:00
Ruud
ededfcb822 Escape spaces for each request. fix #2256 2013-09-28 16:28:46 +02:00
Ruud
92a0af5ce3 Use label for quality guess also. closes #2237 2013-09-28 15:23:45 +02:00
Ruud
ffaffbc66f Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-09-28 14:31:47 +02:00
Ruud
2596bbe2bc Merge branch 'refs/heads/saxicek-tsh_scene_only' into develop 2013-09-28 14:30:27 +02:00
Ruud
3310bdf551 Don't use quotes for torrentshack 2013-09-28 14:30:20 +02:00
Ruud Burger
19d357b866 Merge pull request #2261 from mano3m/develop_transmission
[Transmission] Fix  #2168
2013-09-28 04:49:31 -07:00
mano3m
871aecb689 Fix transmission #2168 2013-09-28 13:35:26 +02:00
mano3m
00bb055474 set backlog to False after backlog search 2013-09-28 12:36:43 +02:00
mano3m
f10d182468 Added Blu-ray.com backlog automation
I missed a few movies, so I added backlog functionality to Blu-ray.com

If you want to add all Blu-rays that ever came out to the wanted list,
you can use this. Be careful with what you wish for :D
2013-09-28 12:36:43 +02:00
Techmunk
74a4e7d19d Indenting on deluge auth fix was incorrect. 2013-09-27 14:59:03 +10:00
sax
c7c64c6002 Changed implementation of "scene_only" parameter to use filter criteria instead of parsing the information from query result. 2013-09-25 14:05:16 +02:00
Techmunk
8474d0d95d Fix the way the client auth file is found and processed to match the defaults in the deluge clients. 2013-09-25 21:44:05 +10:00
Ruud
4a5c878c36 Wrong config name for plex host 2013-09-24 22:44:14 +02:00
Ruud
2b0a70355a Merge branch 'refs/heads/fuzeman-feature/dev_plex' into develop 2013-09-24 22:37:46 +02:00
Ruud
9b5166826f Cleanup Plex notification 2013-09-24 22:37:40 +02:00
Ruud
3b1efb2c30 Merge branch 'feature/dev_plex' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-feature/dev_plex
Conflicts:
	couchpotato/core/notifications/plex/main.py
2013-09-24 21:35:36 +02:00
Ruud
e9fc528a0f movie_id > media_id 2013-09-23 22:23:45 +02:00
Ruud
c9ba3c804e Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/plugins/dashboard/main.py
	couchpotato/core/plugins/renamer/main.py
	couchpotato/core/providers/torrent/sceneaccess/main.py
2013-09-23 22:14:06 +02:00
Ruud
b5d2a41d60 Enable NewzNab bij default 2013-09-23 21:35:40 +02:00
Ruud
cc3aad49ed Remove FTDWorld 2013-09-23 21:35:29 +02:00
Ruud
2365e1859f Don't show suggestions if there aren't any. fix #2153 2013-09-22 10:47:13 +02:00
Ruud
03700e0a04 Userscript image didn't show 2013-09-22 00:43:50 +02:00
Ruud
1ff4901846 Make sure to remove listener, even after fail 2013-09-21 22:29:15 +02:00
Ruud
d70a71a12e Make nonblock debug message 2013-09-21 22:17:01 +02:00
Ruud
866d9621cb Create new listener list 2013-09-21 22:16:44 +02:00
Ruud
2d3fc03a00 Revert back to UTF8 when ss encoding fails. fix #2220 2013-09-21 13:56:17 +02:00
Ruud
19f782e4a5 Don't try to change elements that don't exist. fix #2219 2013-09-21 12:41:06 +02:00
Ruud
fdd851d29a Binsearch age parse failed for release new than 1 day. fix #2217 2013-09-21 12:14:40 +02:00
Ruud
6cd38a3469 Providers missing in wizard 2013-09-21 11:20:53 +02:00
Ruud
bfa3b87188 Only show soon and late with no releases 2013-09-21 11:07:16 +02:00
Ruud
69a9fa1193 Simplify string before checking on imdb 2013-09-20 18:08:27 +02:00
Ruud
9e0805ec89 Hide IE clear button on search 2013-09-20 18:08:12 +02:00
Ruud
f67c6fe8be Only remove images from cache folder on cleanup 2013-09-20 16:07:18 +02:00
Ruud
8d38fa87a4 Copy unrar dll to cache folder. fix #2205 2013-09-20 16:06:23 +02:00
Ruud
7c79c6d1f3 Update TorrentShack url. fix #2209 2013-09-20 12:51:58 +02:00
Ruud
b0781b45f8 Different seperator for folder and filename 2013-09-19 23:49:23 +02:00
Ruud Burger
ee53539906 Merge pull request #2163 from mano3m/develop_utorrent
Fix folder issue uTorrent
2013-09-19 14:40:16 -07:00
Ruud
c8ab6a06fb ASCII encode md5 string. closes #2167 2013-09-19 23:39:15 +02:00
Ruud
c75ac51eb7 Try the info dict to get title. fix #2206 2013-09-19 23:29:21 +02:00
Ruud
33d7d994d4 Don't try to finish an already closed connection 2013-09-19 23:16:49 +02:00
Ruud
96291f63da Create db backup dir before trying to use it. fix #2207 2013-09-19 22:11:10 +02:00
Ruud
6464bb065d Better year guessing. fix #609 2013-09-18 23:04:54 +02:00
Ruud
8b45b6f1a0 Only backup database max once an hour. fix #1218 2013-09-18 22:07:07 +02:00
Ruud
70ba5d80cd Trailers not downloading. fix #1563 2013-09-18 21:42:25 +02:00
Ruud
ac30152930 Don't start new long-poll right away. 2013-09-17 21:45:43 +02:00
Ruud
ad01a3da4d Update GuessIt 2013-09-17 21:04:15 +02:00
Ruud
5f5f17112a Don't try to search SceneAccess for BR-Disk. fix #2188 2013-09-17 20:48:01 +02:00
Ruud
156da670e8 Encode before checking imdb content. fix #2186 2013-09-17 20:43:41 +02:00
Ruud
821c26f35b Return default cached suggestion list. fix #2191 2013-09-17 20:39:20 +02:00
Ruud
a092f394fa Snatch next didn't pick correct element 2013-09-17 20:18:41 +02:00
Ruud
18e3194e27 Better category defaults 2013-09-16 22:37:10 +02:00
Ruud
08a1e1e582 Don't use faulty None value for category 2013-09-16 22:33:45 +02:00
Ruud
074005ed02 Use existing category on re-add. fix #2182 2013-09-16 22:33:26 +02:00
Ruud Burger
7660a3d78f Merge pull request #2180 from techmunk/2107
Deluge SSL negotiation errors on Windows machines.
2013-09-16 13:09:49 -07:00
Joel Kåberg
ee9fe347c7 Merge pull request #2155 from nrgaway/tv_xem
Tv xem
2013-09-16 12:39:15 -07:00
Techmunk
9211e60804 Use the actual SSLv3 constant in deluge transfer.py. 2013-09-17 00:06:35 +10:00
Techmunk
87f295be28 Fix Deluge SSL negotiation errors on Windows machines. 2013-09-16 23:12:46 +10:00
mano3m
cfa89c8921 [uTorrent] Guarantee a folder
uTorrent does not create a folder in case only one file is present in
the torrent. This is a workaround that detects torrents with one file.
It then removes the torrent and readds it with a specified subfolder.
2013-09-15 10:01:59 +02:00
Ruud
70f834d925 Gilles de la Tourette 2013-09-15 00:46:39 +02:00
Ruud
6b4e4fd440 Only show login when both username and password are filled in. fix #2157 2013-09-14 11:41:16 +02:00
Jason Mehring
515aafe112 bug fixes for add show 2013-09-13 22:02:22 -04:00
Jason Mehring
314016e1fa (WIP) Started integrating xem 2013-09-13 03:28:03 -04:00
Jason Mehring
906a54ef09 Finished creating xem info provider 2013-09-13 03:11:51 -04:00
Jason Mehring
ec2facd056 Fix reference to Movie, its now Media 2013-09-13 03:10:22 -04:00
Ruud
b83b2453a0 not in 2013-09-12 22:50:08 +02:00
Ruud
82d31d996d Set order changes on each run. fix #2148 2013-09-12 22:29:59 +02:00
Ruud
4faa617039 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-09-12 11:08:14 +02:00
Ruud
a1d2276668 Match variable name in ubuntu init. fix #2149 2013-09-12 11:07:49 +02:00
Jason Mehring
ddbfef575f Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_tvdb 2013-09-12 01:30:57 -04:00
Jason Mehring
8dd7a4771c partially implemented xem info provider (wip) 2013-09-12 01:30:38 -04:00
Jason Mehring
49ba1f1acd Reworked code to allow better integration of other info providers. Initial prep for xem mapping 2013-09-12 01:00:14 -04:00
Ruud
c4d661535c Movie > Media 2013-09-11 23:25:51 +02:00
Ruud
bd52ab7ab1 Movie > Media 2013-09-11 23:18:31 +02:00
Ruud
cce0a8ec62 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/media/movie/library/movie/main.py
	couchpotato/core/plugins/dashboard/main.py
	couchpotato/core/plugins/profile/main.py
	couchpotato/core/plugins/release/main.py
	couchpotato/core/plugins/suggestion/main.py
2013-09-11 23:13:28 +02:00
Ruud
19c50f728e Suggestions, mark as seen. 2013-09-11 22:41:38 +02:00
Ruud
a94307c59f rTorrent import cleanup 2013-09-11 21:33:11 +02:00
Joel Kåberg
d02e62f89f Merge pull request #2139 from nrgaway/tv_tvdb
Tv tvdb
2013-09-11 07:52:42 -07:00
Ruud
c6403e87f1 Get releases when cleaning up managed movies 2013-09-11 12:24:50 +02:00
Ruud
b56cd3439e added_identifiers needs to be mutable. fix #2140 #2141 2013-09-11 09:28:30 +02:00
Ruud
25693d44eb Count NONE as success for NZBGet. fix #2135 2013-09-11 09:07:32 +02:00
Jason Mehring
e180addc3c added posters for seasons 2013-09-11 00:36:12 -04:00
Jason Mehring
a37a4a8cd4 thetvdb, add alternate titles if they exist 2013-09-10 23:45:15 -04:00
Jason Mehring
8328c18728 set cache directory for thetvdb_api 2013-09-10 22:08:17 -04:00
Jason Mehring
7ae07d6c15 Oops, remove debug code for language 2013-09-10 21:58:47 -04:00
Jason Mehring
770bcf5bc6 Added ability to search thetvdb by language 2013-09-10 21:51:38 -04:00
Jason Mehring
7bd6a295d8 return False on fail. Everything caches now 2013-09-10 21:06:44 -04:00
Jason Mehring
4063761313 Changed model to accept Unicode value for airs_time and add last_updated field for episode. Now stores both as well as airs_daysofweek 2013-09-10 21:04:44 -04:00
Ruud
43af25a30e Fix menu phone styling 2013-09-10 23:50:17 +02:00
Ruud
023278e0c0 Remove webkit button styling 2013-09-10 23:32:51 +02:00
Ruud
0634c79f74 Give minified own FileHandler 2013-09-10 23:21:31 +02:00
Ruud
31b3c2ef64 Change static path 2013-09-10 22:59:31 +02:00
Ruud
4a71f2c556 Login styling 2013-09-10 22:58:41 +02:00
Ruud
9783409756 Login base 2013-09-10 18:02:04 +02:00
Ruud Burger
c7e85c00ca Merge pull request #2133 from mythin/fix-variable-change
Fix the variable passed to the getImdb method
2013-09-09 23:32:14 -07:00
Mythin
94647bbb57 Fix the variable passed to the getImdb method 2013-09-09 23:08:49 -07:00
Ruud
1aa26a5a6c Replace protocol if it doesn't exist 2013-09-09 22:28:21 +02:00
Ruud
df13a0edc2 Ignore modules with only .pyc files in them. 2013-09-08 22:12:08 +02:00
Ruud
52a0de3b59 Deleting from late block didn't work 2013-09-06 23:12:22 +02:00
Ruud
38886b28f7 Hide soon and late blocks on dashboard if they're empty. fix #1778 2013-09-06 23:05:41 +02:00
Ruud
226cf6fc38 Make sure to not query db when there aren't any ids 2013-09-06 22:45:37 +02:00
Ruud
203a52bfd1 Don't load updater.js twice 2013-09-06 20:17:21 +02:00
Ruud
1b6bf13619 Optimize and order dashboard list 2013-09-06 20:03:34 +02:00
Ruud
bc94e90994 Optimize available char listing 2013-09-06 19:37:39 +02:00
Ruud
347125365f movie.list didn't keep order 2013-09-06 19:19:20 +02:00
Jason Mehring
d62b346a74 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_tvdb 2013-09-06 13:16:38 -04:00
Ruud
59a718be20 Optimize events with single handler 2013-09-06 00:41:15 +02:00
Ruud
c41b3a612a Optimize dashboard soon listing 2013-09-06 00:24:17 +02:00
Ruud
23f77df911 Optimize profile queries 2013-09-06 00:23:52 +02:00
Ruud
117b952455 Default back to type on protocol. fix #2120 2013-09-05 21:46:00 +02:00
Ruud
7714504831 Run dashboard calls serial 2013-09-04 23:20:03 +02:00
Ruud
5c61c24c04 Lazyload file list in manage tab 2013-09-04 22:39:42 +02:00
Ruud
b11e1d48e0 Suggestion listing: load library in single query 2013-09-04 22:30:32 +02:00
Ruud
a6ce114284 Optimize suggestion listing 2013-09-04 22:30:32 +02:00
Ruud
88d512eacc Don't try to use releases when there aren't any 2013-09-04 22:30:32 +02:00
Ruud
f4d5366c93 Remove profile from dashboard list 2013-09-04 22:30:32 +02:00
Ruud
ac9aaec7b8 Optimize movie.list 2013-09-04 22:30:32 +02:00
Ruud
0c5b950c87 Add manual to tryNextRelease 2013-09-04 22:30:32 +02:00
Ruud
47141f8e4f Api: added release.for_movie
Get all releases for a single movie
2013-09-04 22:30:32 +02:00
Ruud
ec302fe665 Make sure that a faulty api call end after error 2013-09-04 13:46:51 +02:00
Ruud
7f304b0c28 Don't load profile on movie list 2013-09-03 22:50:27 +02:00
Ruud
8f88f7d89b Javascript and css cleanup 2013-09-03 22:13:42 +02:00
Ruud
400fd461ab Always add timestamp to registered statics 2013-09-03 21:12:22 +02:00
Ruud
cd8d2d4808 PublicHD description cache timeout 2013-09-03 20:23:40 +02:00
Ruud
4cfa79488f PublicHD cache description call 2013-09-03 20:21:49 +02:00
Ruud
b5993bcc21 NonBlock calls need to finish 2013-09-03 19:14:59 +02:00
Ruud
6af00bf026 Standardize cache_key generation 2013-09-03 12:48:24 +02:00
Ruud
97c456c9e1 Optimize quality caching 2013-09-03 12:47:44 +02:00
Ruud
08f44197f3 Use own cache 2013-09-03 12:14:02 +02:00
Ruud
779c7d2942 Remove mutable objects from function args 2013-09-02 22:44:44 +02:00
Ruud
7fd14e0283 Code cleanup 2013-09-02 21:59:06 +02:00
Ruud
7d32a8750d type > protocol 2013-09-02 16:53:39 +02:00
Ruud
110e0b78fc Merge branch 'file_extension' of git://github.com/DarthNerdus/CouchPotatoServer into DarthNerdus-file_extension 2013-09-02 16:51:17 +02:00
Ruud
bc77812488 Copy file and maybe copy stats. fix #349 2013-09-02 16:49:57 +02:00
Ruud
3e28cd5c95 local ip checking helper 2013-09-02 15:27:18 +02:00
Ruud
2715dbaaa5 Don't do failed checking on local requests 2013-09-02 15:27:06 +02:00
Ruud
3baf12d3e4 Make sure cleanhost only has one trailing slash 2013-09-02 14:54:54 +02:00
Ruud
a428d36604 Wrap requests in try for better failing
Or would it be worse failing?
2013-09-02 14:35:05 +02:00
Ruud
b5207bc88c Return releasedate as string 2013-09-02 14:27:16 +02:00
Ruud
910578a2ac Use TheMovieDB v3 api 2013-09-02 14:10:31 +02:00
Ruud
155732ab1a Rollback type remove 2013-09-02 00:13:05 +02:00
Ruud
b3713b7ae5 Merge branch 'refs/heads/develop' into tv 2013-09-02 00:09:16 +02:00
Ruud
88176997e7 Don't use year if it's the first in the identified string. fix #1815 2013-09-02 00:00:27 +02:00
Ruud
233e6f9be0 Movie class wasn't remove on delete cancel. fix #1962 2013-09-01 23:33:24 +02:00
Ruud
1fd11fb547 Don't show delete dialog for category if it doesn't exist yet. fix #1961 2013-09-01 23:28:55 +02:00
Ruud
8bfd206578 Option to disable direct searching on adding. closes #2054 2013-09-01 23:18:12 +02:00
Ruud
62c6fd2e40 Don't error out on faulty PublicHD page. fix #2014 2013-09-01 23:05:28 +02:00
Ruud
ac2d2a0463 Always search on empty release dates. fix #2035 2013-09-01 22:51:59 +02:00
Ruud
c1e4b47b99 Return category by default. fix #2073 2013-09-01 18:21:53 +02:00
Jesse Read
32b479467a Fix missed type/protocol change. Fixes torrents being created as .movie files. 2013-08-31 20:45:37 -04:00
Ruud
6cab2b34d6 Continue after empty folder while loading plugins 2013-09-01 02:10:31 +02:00
Ruud
9e744199fe Make sure messages isn't empty 2013-09-01 01:44:47 +02:00
Ruud
b22021e7f0 Try next log remove, don't stop 2013-09-01 00:43:53 +02:00
Ruud
68bdf47ea4 Use protocol, not type for sorting 2013-09-01 00:31:47 +02:00
Ruud
af2876bd71 Lock same api routes 2013-09-01 00:24:47 +02:00
Ruud
1e5d6bad2a Lock while editing listeners 2013-09-01 00:24:18 +02:00
Ruud
f6c836157d Movie db to bottom in scanner 2013-09-01 00:22:22 +02:00
Ruud
d10874f216 Video object on iPad doesn't listen to z-index. fix #2093 2013-08-31 19:22:32 +02:00
Ruud
700713abcf Don't try to use undefined response 2013-08-31 17:48:19 +02:00
Ruud
5180426fc1 Remove debug print 2013-08-31 17:09:23 +02:00
Ruud
e1c8a08f2f Run api requests in own thread 2013-08-31 17:07:46 +02:00
Ruud
16f0bcc3ac Don't run handler if it doesn't exist.. 2013-08-31 17:04:53 +02:00
Ruud
9c98a38604 Tornado update 2013-08-31 15:59:47 +02:00
Ruud
1b03c7e474 Use finish instead of write 2013-08-31 15:32:45 +02:00
Ruud
689feb78d0 Torrentshack missing category for pre-dvd releases. fix #2083 2013-08-31 14:33:30 +02:00
Ruud
336b15b199 Deluge import cleanup 2013-08-30 19:21:31 +02:00
Ruud
4a4bb819ec Merge branch 'deluge' of git://github.com/techmunk/CouchPotatoServer into techmunk-deluge 2013-08-30 18:40:35 +02:00
Techmunk
48be010f33 Fix up some debug messages, and the torrent completed status. 2013-08-30 10:25:58 +10:00
Techmunk
104e21b314 Fix for deluge downloading torrent files. 2013-08-28 20:41:02 +10:00
Ruud
aaf5cab138 Encode folder returned from downloader. fix #2071 2013-08-27 23:38:51 +02:00
Ruud
22b744340a Properly remove backup folder 2013-08-27 22:25:56 +02:00
Techmunk
2954558004 Fix up deluge is Finished status matching. 2013-08-27 20:13:29 +10:00
Ruud
b797590a4e Make sure extr_files exists 2013-08-25 20:16:08 +02:00
Ruud
9d71fe1724 Deluge proper error logging. fix #2069 2013-08-25 12:24:15 +02:00
Ruud
9ad0ed642d Don't use type yet. fix #2068 2013-08-25 12:07:13 +02:00
Jason Mehring
19d026756c Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_tvdb 2013-08-24 21:11:53 -04:00
Ruud
3cddd29425 Merge branch 'refs/heads/develop' into tv 2013-08-25 01:15:40 +02:00
Ruud
cbd217271d Don't load options twice 2013-08-25 00:59:37 +02:00
Jason Mehring
23bde0b866 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_tvdb 2013-08-24 15:53:31 -04:00
Jason Mehring
6f895c1805 get apikey from config 2013-08-24 15:37:11 -04:00
Ruud
96089074ce Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/loader.py
2013-08-24 20:22:55 +02:00
Ruud
65896497fb Return true for loader 2013-08-24 20:22:31 +02:00
Ruud
54a37b577d Import cleanup
Conflicts:
	couchpotato/core/providers/torrent/sceneaccess/main.py
2013-08-24 20:15:54 +02:00
Ruud
f1948ffb6a Just load media recursively 2013-08-24 20:12:59 +02:00
Jason Mehring
7dd3b0ed15 fix loader error messages for modules that are selected recursively but are not really modules 2013-08-24 20:07:32 +02:00
Jason Mehring
11fcfa8202 Moved library and refactored to its new location. Modified anything firing library.add/update/_release date to now fire library.add.movie...
Conflicts:
	couchpotato/core/loader.py
	couchpotato/core/media/show/_base/main.py
	couchpotato/core/media/show/library/season/main.py
2013-08-24 20:04:27 +02:00
Ruud
2ed53df008 Import cleanup 2013-08-24 19:14:09 +02:00
Ruud
060859483a Delete show provider 2013-08-24 18:36:44 +02:00
Ruud
eced476eaf Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/loader.py
	couchpotato/core/providers/info/_modifier/__init__.py
	couchpotato/core/providers/info/_modifier/main.py
	couchpotato/core/providers/movie/_modifier/main.py
	couchpotato/core/providers/show/_modifier/main.py
2013-08-24 18:30:00 +02:00
Ruud
199e61ea14 Fallback on type for current downloads 2013-08-24 16:37:16 +02:00
Ruud
0daa6c8eff Merge branch 'develop_unrar_fixes' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_unrar_fixes 2013-08-24 16:16:48 +02:00
Ruud
b1b5f97f03 Deluge fixes 2013-08-24 16:14:18 +02:00
Ruud
32d5587669 Don't load modules without __init__.py 2013-08-24 16:06:17 +02:00
mano3m
c13c0f24e5 Change type to protocol in release and renamer 2013-08-24 15:50:19 +02:00
mano3m
7eb1d72333 remove move exception from unrar PR 2013-08-24 15:50:19 +02:00
Ruud
3d6ec1feba Move info providers to proper folder 2013-08-24 15:31:30 +02:00
Ruud
8d5b55a753 Make info modifier multiprovider 2013-08-24 15:30:17 +02:00
Ruud
7296dc54d0 Move thetvdb to info providers 2013-08-24 15:29:57 +02:00
Ruud
e5e9cf7d5f Move info providers to proper folder 2013-08-24 15:20:00 +02:00
Ruud
b106229a78 Merge branch 'refs/heads/develop' into tv 2013-08-24 15:07:21 +02:00
Ruud
c267232160 Add unrar support
Thanks @mano3m
2013-08-24 15:04:56 +02:00
Ruud
48f4b008df Move deluge lib to libs folder 2013-08-24 14:46:46 +02:00
Ruud
ae1f181fbf Merge branch 'deluge' of git://github.com/techmunk/CouchPotatoServer into techmunk-deluge
# Please enter a commit message to explain why this merge is necessary,
# especially if it merges an updated upstream into a topic branch.
#
# Lines starting with '#' will be ignored, and an empty message aborts
# the commit.
2013-08-24 14:42:17 +02:00
Ruud
cbfee72d51 rTorrent make pause advanced setting 2013-08-24 14:38:57 +02:00
Ruud
ee709054f2 rTorrent rename type to protocol
code styling
2013-08-24 14:35:57 +02:00
Ruud
ee60ec962b Merge branch 'feature/dev_rtorrent' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-feature/dev_rtorrent 2013-08-24 14:33:17 +02:00
Ruud
73efd5549f Merge branch 'refs/heads/develop' into tv 2013-08-24 14:30:09 +02:00
Ruud
e013e38c5e Update ubuntu.init
Thanks @moriame
2013-08-24 14:26:16 +02:00
Ruud
20aa78105f Do window size check inside load event 2013-08-24 14:22:15 +02:00
Ruud
770590e4f2 Match default ports
Thanks @cpg
2013-08-24 14:08:05 +02:00
Ruud
8e9e7b49ea Simplify linking
Thanks @mano3m
2013-08-24 14:03:17 +02:00
Ruud
08554889fd Add the old rottentomatoes to default enabled list 2013-08-24 13:34:45 +02:00
Ruud
8ac2869de3 Merge branch 'rotten_tomatoes_custom_urls' of git://github.com/Lordcrash/CouchPotatoServer into Lordcrash-rotten_tomatoes_custom_urls 2013-08-24 13:28:10 +02:00
Ruud
bb8e8a0df5 Merge branch 'develop_seed_fixes' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_seed_fixes 2013-08-24 13:22:29 +02:00
Ruud
e2bd6a91cd MPAA rating for renamer 2013-08-24 13:21:39 +02:00
Ruud
ed0e5ef497 XBMC notification, better remote folder description 2013-08-24 12:24:15 +02:00
Ruud
e1e475e605 Merge branch 'develop_XBMC' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_XBMC 2013-08-24 12:19:32 +02:00
Ruud
cef5b04eb1 Return unique imdb list 2013-08-24 12:14:15 +02:00
Ruud
7e44af936d Watch shutdown when adding automation movies 2013-08-24 12:14:02 +02:00
Ruud
6aec5a9a60 Cleanup IMDB provider 2013-08-24 12:13:45 +02:00
Ruud
79c75c886b Merge branch 'develop_automationIMDB' of git://github.com/dkboy/CouchPotatoServer into dkboy-develop_automationIMDB 2013-08-24 10:59:32 +02:00
Joel Kåberg
8139016636 Merge pull request #2061 from nrgaway/tv_loader
fix loader error messages for modules that are selected recursively but ...
2013-08-23 12:51:34 -07:00
Jason Mehring
59c0d0416e fix loader error messages for modules that are selected recursively but are not really modules 2013-08-23 15:32:47 -04:00
Joel Kåberg
cd559ece04 Merge pull request #2058 from nrgaway/tv_refactored
Tv refactored
2013-08-23 07:47:29 -07:00
Joel Kåberg
120a4ad1ed Merge pull request #2057 from nrgaway/tv_development
Completed tvshow model
2013-08-23 07:47:14 -07:00
Jason Mehring
3363e164fd refactored Movie model to Media 2013-08-23 01:37:00 -04:00
Jason Mehring
6d6d5caeb6 Completed tvshow model 2013-08-23 00:53:34 -04:00
Joel Kåberg
21030e7cb4 Merge pull request #2052 from nrgaway/tv_database_2
Added Seasons
2013-08-22 12:40:20 -07:00
mano3m
bf6bcaed72 provide more info in case no movie is found
Several users reported an issue with "more than one group found (0)",
and it was unclear to them what it meant. This might help.
2013-08-22 21:20:02 +02:00
Jason Mehring
9b238ba712 Added Seasons. Show is the parent to Seasons and Episodes are the children of Season 2013-08-22 02:47:41 -04:00
mano3m
70bc2a6656 use right variable for pause
fixes #2049
2013-08-21 20:59:39 +02:00
mano3m
695cdea447 Remove 'move' exception
No need to remove files when 'move' is selected as the downloaders do
this themselves now when cleaning up
2013-08-21 20:59:38 +02:00
mano3m
d0735a6d58 Add failsafe for symlink errors
E.g. on Windows you need Admin rights to symlink...
2013-08-21 20:59:38 +02:00
mano3m
175c26bea9 Fix untagDir and hastagDir
Changes in commit 8a252bff64 broke the
tagging functionality
2013-08-21 20:59:23 +02:00
Techmunk
8a298edd4e Implementation of Deluge downloader. 2013-08-21 23:52:54 +10:00
Ruud
b3d2d5349b Rename database for TV branch 2013-08-20 23:02:43 +02:00
Ruud Burger
f9bad281de Merge pull request #2038 from nrgaway/tv_database
Tv database
2013-08-20 01:00:29 -07:00
Jason Mehring
72ce919989 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_database 2013-08-20 02:18:03 -04:00
Jason Mehring
ff782669f6 readded tvdb_api 2013-08-20 02:11:54 -04:00
Jason Mehring
36950993f1 removed tvdb_api since it was missing all files 2013-08-20 02:11:01 -04:00
Jason Mehring
7df93dc1b4 Moved library and refactored to its new location. Modified anything firing library.add/update/_release date to now fire library.add.movie... 2013-08-20 01:54:47 -04:00
Ruud
a45913eee7 Default to movie type 2013-08-18 13:20:53 +02:00
Ruud
9860a1c138 Default to movie type 2013-08-18 13:17:40 +02:00
Ruud
3dff598d03 Add multiprovider for provider grouping 2013-08-18 11:48:00 +02:00
Ruud
62b571d5f1 Rename type to protocol 2013-08-18 11:47:54 +02:00
Ruud
3af6623a91 Move registerPlugin to __new__ magic 2013-08-18 11:47:49 +02:00
Ruud
a25eac6c4e Make SceneAccess multiprovider 2013-08-18 11:47:07 +02:00
Ruud
dd0fcf0bc1 Add multiprovider for provider grouping 2013-08-18 11:45:45 +02:00
Ruud
2267235eca Rename type to protocol 2013-08-18 11:44:00 +02:00
Jason Mehring
029cf9ecac New model implemented to work with both Movies and TV Shows as well any future types. Currently episodes are mapped directly to shows; no seasons yet. Will get around to that soon. This version allows you to add any tv show and it will appear in wanted list, but no searches are written yet :) 2013-08-18 03:28:41 -04:00
Ruud
f4217ecd3d Move registerPlugin to __new__ magic 2013-08-18 00:22:36 +02:00
Jason Mehring
31cd993506 EOD commit (WIP). So close to writing tv objects to database but too tired to finish. Currently storing a show as a movie using imdb metadata. Added another search button beside movie button 2013-08-17 04:15:51 -04:00
Jason Mehring
fb579561de added a --noreloader option flag on startup to prevent CP from auto reloading when in development and debugging mode 2013-08-16 17:10:52 -04:00
Jason Mehring
37eb424827 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_database 2013-08-16 15:41:37 -04:00
Ruud
4348451692 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/__init__.py
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/media/movie/searcher/__init__.py
	couchpotato/core/media/movie/searcher/main.py
2013-08-16 21:07:59 +02:00
Ruud
c73ed8a4c5 Add multiple categories for BRRIP on TPB. fix #2025 2013-08-16 20:05:30 +02:00
Ruud
4d5ba65254 Migrate options 2013-08-16 17:23:40 +02:00
Ruud
91856f1159 Searcher base
Re-usable cronjob code
2013-08-16 16:52:12 +02:00
Ruud
e93e55a0f7 Searcher conf section 2013-08-16 10:22:43 +02:00
Ruud
f7da408f83 Searcher conf section 2013-08-16 10:21:44 +02:00
Jason Mehring
bc11f90529 EOD commit (WIP). Commented out schema added yesterday in favour of a more global scheme. Added menu option in GUI to search for tv shows (placed on top of movie one for now). Partially implemented thetvdb provider. Search is working and returns a list of shows for GUI search along with posters. posters still need work. 2013-08-16 02:44:41 -04:00
Jason Mehring
8fcc246f25 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_database 2013-08-15 20:48:57 -04:00
Ruud
2824c55231 Give moviesearcher a unique name 2013-08-15 23:52:48 +02:00
Ruud
874655846c Move movie plugin to media folder 2013-08-15 23:52:43 +02:00
Ruud
1620acedb1 Move movie to new media type folder 2013-08-15 23:52:37 +02:00
Ruud
6395e5dbbb Cleanup console log 2013-08-15 23:52:16 +02:00
Ruud
4d7c38d6db Cleanup console log 2013-08-15 23:51:42 +02:00
Ruud
c8d79cde21 Add base for TV media type 2013-08-15 23:47:07 +02:00
Ruud
f4d792079b Give moviesearcher a unique name 2013-08-15 23:46:55 +02:00
Ruud
78ab419cd8 Move movie plugin to media folder 2013-08-15 23:38:14 +02:00
Ruud
3e93983f6e Move movie to new media type folder 2013-08-15 22:22:45 +02:00
Jason Mehring
6a4822cc26 merged upstream changes 2013-08-15 15:08:44 -04:00
Ruud
92b08bb5d5 Merge branch 'refs/heads/develop' into tv 2013-08-15 20:30:48 +02:00
Ruud
251d9cdb8a Placeholder for preferred words 2013-08-15 18:47:57 +02:00
Ruud
623571acbb Make category destination editable 2013-08-15 18:31:06 +02:00
Jason Mehring
e270e09969 EOD commit (WIP). Added partial Show, Episode schema. 2013-08-15 01:15:48 -04:00
Ruud
40cd5218db Change branch to "tv" 2013-08-14 23:24:21 +02:00
Ruud
250f07ffa7 Optimize dashboard query 2013-08-14 16:55:57 +02:00
Ruud
8917d7c16c Optimize movie.list query 2013-08-14 16:47:59 +02:00
Ruud
d759280c18 Don't update library items on shutdown 2013-08-14 12:31:41 +02:00
Ruud
67bc3903d4 Don't show loader for scanner if page isn't loaded yet 2013-08-14 12:20:38 +02:00
Ruud
cf6f83a44b Option to disable manage scan at startup. fix #1951 2013-08-14 12:14:52 +02:00
Ruud
4b15563ba3 Don't use in_progress when it isn't set 2013-08-14 12:13:52 +02:00
Ruud
dc36e15448 Don't run multiple manage.progress requests 2013-08-14 11:56:08 +02:00
Ruud
0b6330e98b Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-08-13 20:56:46 +02:00
Ruud
2e93687bb4 Don't try to loop over empty enablers 2013-08-13 17:46:41 +02:00
Ruud
0f925a466a Also ignore __ when importing folders 2013-08-13 17:31:12 +02:00
Ruud
16eeeda787 Ignore folder include with __ at beginning 2013-08-13 17:25:24 +02:00
Ruud
52f1df98bb Don't try to split on empty string 2013-08-13 16:51:46 +02:00
Ruud
a0ccff23a3 Remove duplicate spaces 2013-08-13 16:08:34 +02:00
Ruud
b8bed627a8 Add possible title with some char replacements 2013-08-13 16:08:21 +02:00
Ruud
8d058d9dc8 Add hdscr to screener quality 2013-08-13 15:45:05 +02:00
Ruud
57e92ff8d3 Optimized frontend notifications 2013-08-13 15:40:56 +02:00
Ruud
6eff724f97 Clean nonblocking requestshandler 2013-08-13 15:36:11 +02:00
Ruud Burger
55c3fe503b Merge pull request #1985 from mano3m/develop_nzbget
Fix NZBGet url issue
2013-08-12 01:21:41 -07:00
Ruud Burger
7f1ac63c58 Merge pull request #2005 from mano3m/develop_sorting
Regard torrents and torrent_magnet the same
2013-08-12 01:08:05 -07:00
Dean Gardiner
2bb2e28f91 Updated rTorrent library and fixed some issues with ratio setup. 2013-08-12 15:32:15 +12:00
Dean Gardiner
0bdffc5036 Change to ratio group setup to ensure everything is set correctly. 2013-08-12 15:32:14 +12:00
Dean Gardiner
7202fbf084 Removed stop_complete option, Can instead be disabled by setting seed_ratio to zero on the provider. 2013-08-12 15:32:13 +12:00
Dean Gardiner
317c3afb7a Few minor fixes and implemented delete_files option via shutil.rmtree 2013-08-12 15:32:13 +12:00
Dean Gardiner
577baeca59 Hiding remove files in the rTorrent downloader until it's implemented. 2013-08-12 15:32:12 +12:00
Dean Gardiner
7c680cac10 Updated rTorrent downloader to set ratio stop action, added new seeding methods and updated the rTorrent library 2013-08-12 15:32:11 +12:00
Dean Gardiner
0fadbd52a3 Cleaned up imports and added support for downloading magnet torrents via sources. 2013-08-12 15:32:10 +12:00
Dean Gardiner
38e204dfe8 Added support for labels on the rtorrent downloader. 2013-08-12 15:32:10 +12:00
Dean Gardiner
bf62653531 Added missing 'folder' parameter on the rtorrent downloader to fix moving/linking issues. 2013-08-12 15:32:09 +12:00
Dean Gardiner
d851be41d3 Updated rtorrent-python library. 2013-08-12 15:32:08 +12:00
Dean Gardiner
3bd1875321 Added initial rtorrent downloader, currently testing, possibly has some bugs. 2013-08-12 15:32:00 +12:00
mano3m
448c1d69a7 Regard torrents and torrent_magnet the same
When sorting the torrents and torrent_magnets were sorted, by taking
only the first three characters (as 'nzb' is three chars), the score
prevails. Fixes #2004
2013-08-11 00:06:07 +02:00
Ruud
c99a5cb535 Don't autoadd when already in wanted 2013-08-07 20:06:30 +02:00
Dean Gardiner
b824ef93bd Fix plex notifications test method. 2013-08-04 15:39:02 +12:00
mano3m
0492e90d6f XBMC: properly check if host is local
And added option to scan if remote
2013-08-03 01:52:20 +02:00
Micah James
4ffda9f705 Made code more python-y per mano3m's recommendation. 2013-08-01 23:15:36 -04:00
mano3m
b32d4fc42d Fix NZBGet url issue 2013-08-01 23:24:25 +02:00
Dean Gardiner
c92aa91aa7 Corrected notify() force parameter default. 2013-08-02 02:43:55 +12:00
Dean Gardiner
a6c32a7e30 Fixed Plex notifications
Conflicts:

	couchpotato/core/notifications/plex/main.py
2013-08-02 02:43:37 +12:00
Micah James
4330dc39bf Changed description to be better suited for this. 2013-07-31 23:14:58 -04:00
Micah James
da50b19b6b Added custom url code handling 2013-07-31 23:06:12 -04:00
Micah James
797018fb8a Revert "Adding more code."
This reverts commit 3a8f891c7d.
2013-07-31 22:47:52 -04:00
Micah James
3a8f891c7d Adding more code. 2013-07-31 22:45:48 -04:00
Micah James
56a788286c Adding code for custom urls UI 2013-07-31 22:41:49 -04:00
mano3m
fd95364d5f uTorrent ratio issue fixed
The tryFloat function returns 0 if it is fed with a float(!). This resulted in the seed_ratio being set to 0 on first/automatic download. When manually downloading, it did work as the ratio is stored as a string.
2013-07-31 15:04:48 +02:00
mano3m
470fde0890 Unset the uTorrent read only flags
Fix for #1871

Note that this is a fix for Windows only. I am unaware if this issue
arises on Linux/Mac and what happens with this fix on those systems.
2013-07-23 19:07:36 +02:00
Ruud
f12d878c0b Select category for search, suggest & edit 2013-07-22 21:57:13 +02:00
Ruud
e8993932c1 Check isMac function 2013-07-22 21:56:33 +02:00
Ruud
e3933e4ddc Proper meta tag 2013-07-22 21:56:22 +02:00
Ruud
dd67239b6e Add categories to settings 2013-07-21 19:12:53 +02:00
Ruud
1ea0d3bd8b Move providers to main searcher tab in settings 2013-07-21 19:12:32 +02:00
Ruud
8b952d4be6 Combine global and category words 2013-07-19 16:58:49 +02:00
Ruud
9e8a3bc701 Movie category migrate 2013-07-15 22:51:53 +02:00
Ruud
76807176fb Merge branch 'develop-categories' of git://github.com/clinton-hall/CouchPotatoServer into clinton-hall-develop-categories
Conflicts:
	couchpotato/core/plugins/score/main.py
2013-07-15 20:47:29 +02:00
iguyking
3650624e4b Update contributing.md
Fixed to say what was intended
2013-07-15 20:44:42 +02:00
Ruud Burger
585c509aba Merge pull request #1950 from mano3m/develop_rpc-url
Add rpc_url to Transmission options
2013-07-15 04:20:25 -07:00
mano3m
046c7e732f Add rpc_url to Transmission options
Fixes  #1832
2013-07-14 23:43:07 +02:00
mano3m
564a27461d XBMC: Only add directory if XBMC is on localhost 2013-07-14 23:30:37 +02:00
mano3m
4ebbc1a01d XBMC: Only scan the new movie folder 2013-07-14 02:19:35 +02:00
Ruud
4ec32a6403 Merge branch 'develop_seed_fixes' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_seed_fixes 2013-07-13 17:56:07 +02:00
Ruud
412627aab0 Move rating and genres to suggestions only 2013-07-13 17:52:40 +02:00
mano3m
2584abda0e Several fixes and increased readability 2013-07-13 17:06:59 +02:00
dkboy
7692322fba Expand IMDB automation provider to include charts
Expand IMDB automation provider to include certain top charts, this
includes the 'in theaters' list, as well as the top 250 list. They both
respect the minimum requirement settings.
2013-07-13 16:45:39 +12:00
Ruud
954018fea2 Youtube trailer search in https 2013-07-12 21:03:03 +02:00
Ruud
ebf37f7310 Cleanup plex urls 2013-07-12 20:52:41 +02:00
Ruud
f22b836ede Combine adopt 2013-07-12 14:42:59 +02:00
Ruud
1cea786d66 Style rating and genres 2013-07-12 14:36:04 +02:00
dkboy
9be10f7b79 Add Rating / Genre to Dashboard Suggestions
Add Rating and up to 3 Genres to movie suggestions, to avoid constantly
jumping through to IMDB site.
2013-07-12 21:49:24 +12:00
Ruud
1f35d0ec2f Remove debug print 2013-07-11 17:36:27 +02:00
Ruud
9fcf36a2ff Add WEB-DL and WEB-Rip. fix #1913 2013-07-11 17:34:55 +02:00
Ruud
30f5a66487 AwesomeHD: Log wrong passkey. fix #1912 2013-07-11 15:24:20 +02:00
Ruud
60e0ad1f5d Add Windows Media Center / Explorer folder.jpg creation. closes #1932 2013-07-11 15:05:08 +02:00
Ruud
ed60b4670e Move root creation to metadata base 2013-07-11 15:04:39 +02:00
Ruud
318daaf083 Cleanup BitSoup 2013-07-09 23:31:43 +02:00
Ruud
182987218b Merge branch 'develop' of git://github.com/dkboy/CouchPotatoServer into dkboy-develop 2013-07-09 23:13:15 +02:00
Ruud
5ff8c7302f Sabnzbd prio description 2013-07-09 23:08:33 +02:00
Ruud
398712403b Merge branch 'develop' of git://github.com/gthicks/CouchPotatoServer into gthicks-develop 2013-07-09 23:04:28 +02:00
Ruud
63f72eb23b Merge branch 'refs/heads/seeding' into develop 2013-07-09 22:53:14 +02:00
Ruud
9dea6d7200 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-07-09 22:52:53 +02:00
Ruud
36f63bdf99 Seeding cleanup and better defaults 2013-07-09 22:52:32 +02:00
Ruud
a09fc14625 Twitter DM didn't work 2013-07-09 20:32:29 +02:00
dkboy
71e280238d Fixed missing detail_url 2013-07-10 01:48:11 +12:00
Ruud
e20bb13649 Delete NZBx 2013-07-08 11:31:13 +02:00
Ruud
ed8108a9d8 Remove NZBsRus 2013-07-08 11:30:55 +02:00
Ruud
c0b3c9a330 Make description a bit shorter 2013-07-07 13:44:49 +02:00
Ruud
8a252bff64 Don't use parentdir for tagging 2013-07-07 13:00:38 +02:00
Ruud
d3d3106fc9 Merge branch 'develop_seed' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_seed 2013-07-07 11:37:53 +02:00
dkboy
1ebb09226d Add Bitsoup provider 2013-07-07 14:23:15 +12:00
Ruud
52163428e9 Break if media headers are corrupt. fix #1828 2013-07-07 00:09:22 +02:00
Ruud
da9dda2c2b Make minimal movie automation clearer. fix #1923 2013-07-06 23:39:34 +02:00
Ruud
a4a14cae96 Use forwarded host when provided. fix #1922 2013-07-06 23:26:46 +02:00
Garret
989d6c55c4 Added priority setting for SABnzbd
Includes ability to add nzb to queue paused.
2013-07-06 10:28:32 -07:00
Ruud
1c3e6ba930 Ignore current suggested results 2013-07-06 00:24:57 +02:00
Ruud
99123ad1c3 Remove version on branch 2013-07-05 22:17:43 +02:00
Ruud
cdf9cf5cf4 Yifi: don't search empty results. fix #1900 2013-07-05 21:54:55 +02:00
Ruud
797dedfcbb Remove cdX from subname. fix #1524 2013-07-05 21:28:07 +02:00
Ruud
b61de4866c Make subliminal work with Requests 1.0+ 2013-07-05 20:40:27 +02:00
Ruud
931951ff37 Change default min size for 720p and 1080p 2013-06-30 15:57:58 +02:00
Ruud
6f42b4c316 Don't show coming soon when no dvd release is set 2013-06-30 15:21:06 +02:00
Ruud
58c446de2d Make string param boolean 2013-06-30 15:20:02 +02:00
Ruud
74bf6bc411 Always set info dict on library 2013-06-30 13:17:56 +02:00
Ruud
ad3c24f950 Improved "too early to search" calculations 2013-06-30 13:17:43 +02:00
mano3m
998e487fe8 NZBs are not torrents :) 2013-06-30 10:14:08 +02:00
Ruud
93346b0c63 Properly update release dates 2013-06-30 01:16:13 +02:00
mano3m
7d9920691f Fix uTorrent settings automatically
Note that this might not be the way we want to go?
2013-06-29 22:50:25 +02:00
Ruud
b1942678b4 Add hash and date to update available notification. fix #1883 2013-06-29 22:20:35 +02:00
Ruud
8c77d0d775 Add advanced option to search on launch. fix #1887 2013-06-29 22:03:29 +02:00
Ruud
3e667ee39a Couldn't press letter in movie filter. fix #1888 2013-06-29 21:56:24 +02:00
Ruud
52b2858ac2 Don't enable yifi by default 2013-06-29 21:39:53 +02:00
Ruud
6fcb4c2058 Change default automation interval 2013-06-29 21:07:07 +02:00
mano3m
7411670e22 Added complete download removal to SabNZBd 2013-06-29 10:36:02 +02:00
mano3m
cfd23c395a Add failed download handling to Transmission 2013-06-29 10:23:08 +02:00
Ruud
2e8f670e94 Remove import 2013-06-28 23:32:38 +02:00
mano3m
18a88eab51 Textual change 2013-06-26 20:02:25 +02:00
mano3m
84e9f9794d Add awesomehd torrent provider 2013-06-26 19:53:28 +02:00
mano3m
628c0e5dcc Add yify torrent provider 2013-06-26 19:52:39 +02:00
mano3m
cdee08bd36 Add status colours in dashboard 2013-06-26 19:49:05 +02:00
mano3m
7ed43da425 Also set seeding status in case nothing is done 2013-06-26 19:49:05 +02:00
mano3m
461a0b3645 Seeding support
Design intent:
- Option to turn seeding support on or off
- After torrent downloading is complete the seeding phase starts, seeding parameters can be set per torrent provide (0 disables them)
- When the seeding phase starts the checkSnatched function renames all files if (sym)linking/copying is used. The movie is set to done (!), the release to seeding status.
- Note that Direct symlink functionality is removed as the original file needs to end up in the movies store and not the downloader store (if the downloader cleans up his files, the original is deleted and the symlinks are useless)
- checkSnatched waits until downloader sets the download to completed (met the seeding parameters)
- When completed, checkSnatched intiates the renamer if move is used, or if linking is used asks the downloader to remove the torrent and clean-up it's files and sets the release to downloaded
- Updated some of the .ignore file behavior to allow the downloader to remove its files

Known items/issues:
- only implemented for uTorrent and Transmission
- text in downloader settings is too long and messes up the layout...

To do (after this PR):
- implement for other torrent downloaders
- complete download removal for NZBs (remove from history in sabNZBd)
- failed download management for torrents (no seeders, takes too long, etc.)
- unrar support

Updates:
- Added transmission support
- Simplified uTorrent
- Added checkSnatched to renamer to make sure the poller is always first
- Updated default values and removed advanced option tag for providers
- Updated the tagger to allow removing of ignore tags and tagging when the group is not known
- Added tagging of downloading torrents
- fixed subtitles being leftover after seeding
2013-06-26 19:49:04 +02:00
Ruud
bd56539103 Yifi cleanup 2013-06-24 22:31:50 +02:00
Ruud
9bcd3de69b Merge branch 'develop' of git://github.com/Mochaka/CouchPotatoServer into Mochaka-develop 2013-06-24 22:08:00 +02:00
Ruud
d8f57963a1 NZBIndex: Search for year inside brackets. closes #1874 2013-06-24 22:07:21 +02:00
Ruud
5328f7fe69 Allow unknown keywords for all api calls. fix #1881 2013-06-24 21:21:49 +02:00
Ruud
9eea42b121 Get array arguments as list. fix #1875 2013-06-24 00:26:00 +02:00
Ruud
374f8ba1de Allow non trailing slash API calls 2013-06-23 23:28:13 +02:00
Ruud
74c984dec3 Send CP headers to suggestion call. fix #1872 2013-06-23 20:44:11 +02:00
Ruud
52ea0215f0 Use done for suggestion also 2013-06-23 19:14:11 +02:00
Ruud
ea3d719b32 Suggest on wrong dev port 2013-06-23 19:09:07 +02:00
Ruud
fd1e655075 Initial suggestion support 2013-06-23 19:07:03 +02:00
Ruud
9f8d439780 Add limit to CP search api 2013-06-22 17:01:24 +02:00
Aaron Florey
7e1bdc99eb Add Yify Torrent Provider 2013-06-23 00:11:01 +10:00
Ruud
dac36d7f55 IPTorrents ignore empty results 2013-06-22 14:16:02 +02:00
Ruud
9d495a10ec Unicode static folder 2013-06-22 01:38:07 +02:00
Ruud
9bb99319ba SplitString don't clean 2013-06-22 01:37:27 +02:00
Ruud
bc8d8dcd04 Update guessit with unicode fix 2013-06-22 00:34:58 +02:00
Ruud
b2d9a7675d Add version to SAB description 2013-06-22 00:33:12 +02:00
Ruud
2477197656 Don't use unicode in repo 2013-06-22 00:33:00 +02:00
Ruud
171083b2f1 Remove empty values from splitString. fix #1795 2013-06-21 13:44:14 +02:00
Ruud
e592eb969f NZBget error when downloadrate is 0. fix #1849 2013-06-21 13:00:58 +02:00
Ruud
db1493f138 Update pytwitter library. fix #1847 2013-06-21 12:50:58 +02:00
Ruud
57c270f8fa Don't break while sending messages to listeners 2013-06-21 11:32:45 +02:00
Ruud
bfe8bc89c0 IMDB description csv link 2013-06-19 23:39:00 +02:00
Ruud
0a00862495 Show csv imdb export in image 2013-06-19 23:34:58 +02:00
sax
7dd53d93cd Added nzb support to Synology downloader. 2013-06-19 22:43:11 +02:00
theorem21
abe65d4064 Update README.md
added FreeBSD installation instructions.  Requires additional FreeBSD init script (pending creation)
2013-06-19 22:36:56 +02:00
Ruud
4977b31ba6 Use failed status to ignore releases too 2013-06-16 00:21:33 +02:00
Ruud
c1beb85ba5 Add spotter to name for scoring 2013-06-15 23:32:44 +02:00
Ruud
ca9a78eea4 Advanced option for XBMC to only update first in list
Thanks @cliffordwhansen
2013-06-15 22:17:23 +02:00
Ruud
9bf006f4d3 Return if api is not found 2013-06-15 21:43:14 +02:00
Ruud
3bb2a082b7 AwesomeHD provider
Thanks @jrsdead
2013-06-15 20:41:23 +02:00
Ruud
92d11522d2 Use id for HDBits torrent name 2013-06-15 00:06:14 +02:00
Ruud
44cfdc1503 Include full requests lib 2013-06-15 00:04:15 +02:00
Ruud
2fdcbedea8 Use has_key for events check 2013-06-15 00:02:37 +02:00
Ruud
787c7fd966 Codestyle cleanup 2013-06-14 23:35:28 +02:00
sax
09b4ad6937 Fixed torrent support for Synology downloader to work properly with torrent files passed directly by CouchPotato. 2013-06-14 23:31:56 +02:00
sax
580d43aeaf Updated requests library to version 1.2.3 2013-06-14 23:31:47 +02:00
sax
a1a7fec15f Added torrent support for Synology downloader. 2013-06-14 23:31:40 +02:00
Ruud
6dcd74d116 Re-use code for ignore toggle 2013-06-14 23:21:41 +02:00
Ruud
187f5a8a93 Merge branch 'develop' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop 2013-06-14 22:43:19 +02:00
Ruud
2eb938147a Move login downloads to default list item 2013-06-14 22:08:25 +02:00
Ruud
deffb75c14 TorrentByte provider
Thanks @StealthGod
2013-06-14 21:52:15 +02:00
Ruud
f91707bfbe Uncomment debug code 2013-06-14 21:51:41 +02:00
Ruud
8aba7825dc Only show to_early when it has items 2013-06-14 21:24:38 +02:00
Joel Kåberg
b8b5b2fef2 dont spam the log damnit! 2013-06-14 21:17:45 +02:00
Ruud
f4d6d69184 Check if handler has parent 2013-06-14 20:48:20 +02:00
Ben Fox-Moore
a5b1c685e1 Allow IPTorrents provider to read results across multiple pages
Conflicts:
	couchpotato/core/providers/torrent/iptorrents/main.py
2013-06-14 20:47:05 +02:00
Ruud
609805b84d Don't allow keyerror in event 2013-06-14 20:04:49 +02:00
Ruud
00d1da7c01 Bind quickscan to class 2013-06-14 19:51:31 +02:00
Ruud
7335726c7d Add handler aswell 2013-06-14 19:51:20 +02:00
Ruud
02779939f0 Catch im_self error 2013-06-14 19:51:14 +02:00
Ruud
6c6f015f40 Use str not unicode in minification 2013-06-14 19:47:54 +02:00
Ruud
f087d38b86 Cleanup 2013-06-14 17:36:26 +02:00
Ruud
c78957f55c Don't try to run event without beforeCall 2013-06-14 17:24:34 +02:00
Ruud
9ce0c47cd4 More login fixes 2013-06-14 16:03:02 +02:00
clinton-hall
60034f2c96 add category preffered words and partial ignore. 2013-06-14 21:56:26 +09:30
Ruud
c9a4af218e Send port with referer. fix #1827 2013-06-14 13:54:01 +02:00
Ruud
c5c2e61e06 Log startup errors 2013-06-14 11:22:29 +02:00
Ruud
b2930dd6a7 Encode used path on startup. fix #1797 fix #1297 2013-06-14 11:11:34 +02:00
Ruud
4aa6700ceb Update SQLAlchemy 2013-06-14 11:00:06 +02:00
Ruud
267ecfacab Status check in ubuntu init script
Thanks @LeonB
2013-06-14 08:57:37 +02:00
clinton-hall
007597239f add categories 2013-06-14 15:06:59 +09:30
Ruud
5699abf1be Use new KickAss domain 2013-06-14 00:43:03 +02:00
Ruud
a6ccd037e2 Login check for SceneHD 2013-06-14 00:37:55 +02:00
Ruud
009991ce4c Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-06-14 00:04:44 +02:00
Ruud
6ef788a8f4 Check login after 1 hour 2013-06-14 00:03:48 +02:00
Ruud
fa37f7d40a Add some logging to core messaging 2013-06-13 12:15:59 +02:00
Ruud
b195cebac7 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-06-12 23:46:35 +02:00
Ruud
8aeea60888 Update Tornado 2013-06-12 23:42:38 +02:00
Ruud
6e0857c6c1 Remove Flask dependencies 2013-06-12 23:37:08 +02:00
Ruud Burger
260fdbe3b3 Merge pull request #1836 from clinton-hall/develop-extra-logging
Add logging when no rating available
2013-06-12 02:05:21 -07:00
mano3m
2f30c6c781 fix failed issues
As reported in issue #1822 I broke try next release when failed. This
commit adds the failed status to several items.
2013-06-11 21:43:01 +02:00
Clinton Hall
d5b4da655a add logging when no rating available 2013-06-11 21:51:56 +09:30
Ruud
1694ed7758 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-06-10 21:14:02 +02:00
Ruud
ee6cc6d319 PTP torrent id in lowercases 2013-06-10 21:10:58 +02:00
Ruud Burger
7670e320ba Merge pull request #1799 from clinton-hall/develop-audiochannels
add audio channels to renamer
2013-06-10 11:39:34 -07:00
Ruud
15ab745bd0 Don't assume imdb key. fixes #1819 2013-06-08 18:04:46 +02:00
Ruud
7468b33991 Send along ignored movies 2013-06-08 17:32:45 +02:00
Ruud
750e02f38a Close zipfile. fixes #1798 2013-06-08 16:04:14 +02:00
Ruud
95d146fea2 Send referer with scheme 2013-06-02 14:22:59 +02:00
Ruud
dc20b68a37 See if need to login on "belongs_to" check. fix #1190 2013-05-31 16:32:03 +02:00
clinton-hall
563e3072a5 add audio channels to renamer 2013-05-31 13:44:40 +09:30
Ruud
b3ba4db00b Append instead of add for subtitle file list 2013-05-29 19:30:51 +02:00
Ruud
a4c1480a1a Force update check from dropdown 2013-05-29 19:03:49 +02:00
Ruud
91e0452320 Torrentshack cleanup 2013-05-29 19:03:28 +02:00
Ruud
ad80ea7885 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-05-29 18:33:36 +02:00
Ruud
1c20cda389 Set updater crons on start. 2013-05-29 14:50:22 +02:00
sax
631759d833 Added configuration option to search over scene releases only. Fixed release name issue (removed &shy; element). 2013-05-28 22:52:22 +02:00
sax
ca02c66f26 Fixed login success detection. 2013-05-28 22:52:13 +02:00
sax
3ac095d359 Added support for Torrent Shack provider. 2013-05-28 22:52:05 +02:00
Ruud
e1bc223de0 Get year with default 2013-05-28 21:13:15 +02:00
Ruud
e065ead9b3 Api on subdomain 2013-05-26 21:50:20 +02:00
Ruud
f9471f9b9b HDBits cleanup 2013-05-26 15:16:55 +02:00
Ronald Pompa
2612b50d06 created hdbits torrent provider 2013-05-26 14:30:10 +02:00
Ruud
d9ce2906a0 Fix line ending 2013-05-26 14:24:59 +02:00
Joel Kåberg
b76397f98e addApiView explenation 2013-05-26 14:22:31 +02:00
Joel Kåberg
fcad9e0be5 fireAsync made optional 2013-05-26 14:22:23 +02:00
Ruud
2934347865 Send user-agent on login 2013-05-26 14:20:05 +02:00
Ruud
315f1b0207 Add r6 to quality list 2013-05-23 07:35:11 +02:00
Ruud
965bd79a86 Cleanup import 2013-05-19 23:20:32 +02:00
Ruud
c18563e34b uTorrent cleanup 2013-05-19 23:12:22 +02:00
Ruud
161e0de8d5 Don't need makedir in Transmission 2013-05-19 22:51:32 +02:00
Ruud
40aeca0740 PTP extra scoring 2013-05-19 22:18:01 +02:00
Ruud
63dd7fa7c0 New PTP-config for more accurate hits
Conflicts:
	couchpotato/core/providers/torrent/passthepopcorn/main.py
2013-05-19 22:10:51 +02:00
Ruud
509b49caf1 Deepcopy and merge movie info results 2013-05-19 01:12:09 +02:00
Ruud
38c51cf79c Import cleanup 2013-05-19 00:57:50 +02:00
Ruud
0b693bba4e Add "on snatch" options to XBMC & Plex notifications
fix #1379
2013-05-19 00:30:56 +02:00
Ruud
1258f34c78 Update counter on movie add / delete
fix #1383
2013-05-19 00:22:44 +02:00
Ruud
510c0d5f56 Remove size_check in quality guess
fix #1393
2013-05-19 00:12:19 +02:00
Ruud
cdb630e580 More touch fixes 2013-05-18 23:59:37 +02:00
Ruud
65fbd38105 Make buttons more touch friendly
fix #1416
2013-05-18 23:37:49 +02:00
Ruud
1570132a55 Don't try to rss parse empty string
fix #1418
2013-05-18 22:52:02 +02:00
Ruud
7b5b748d23 Failed joining unicode and none unicode paths
fix #1447
2013-05-18 22:45:09 +02:00
Ruud
041601c4a5 Change TPB search string
fix #1451
2013-05-18 22:12:43 +02:00
Ruud
f692fd0202 Make sure info isn't not overwriten by none
fix #1724
2013-05-18 21:53:17 +02:00
Ruud
e7b4de56f2 Only run updater if enabled.
fix #1756
2013-05-18 20:30:48 +02:00
Ruud
4a616a0c04 Placeholder styling 2013-05-18 19:40:26 +02:00
Ruud
0814675d2a Remove prints in clientscript 2013-05-18 17:28:25 +02:00
Ruud
13df35462b Force expire database objects 2013-05-17 21:36:23 +02:00
Ruud
899868f51e Don't show empty message when search 2013-05-17 21:32:46 +02:00
Ruud
ee466aebce Easily reset search 2013-05-17 18:32:20 +02:00
Ruud
687ef2662e Switch filter and view 2013-05-17 17:52:19 +02:00
Ruud
5aa29acbd3 Logging fixes 2013-05-17 17:51:15 +02:00
Ruud
1c2b3d063b Empty wanted list background 2013-05-17 15:40:27 +02:00
Ruud
551a000893 Incorrect marking as BD-Rip
Fixes #1643
2013-05-17 15:12:13 +02:00
Ruud
0d82d425cc Show original message when log is failing
closes #1735
2013-05-17 12:41:31 +02:00
Ruud
0e1cea1034 Simplify minifier
fixes #1744
2013-05-17 12:30:48 +02:00
Ruud
2b75153148 Don't limit snatched & wanted
fixes #1747
2013-05-17 12:11:53 +02:00
Ruud
c170615fb3 Ignore temp updater files on cleanup 2013-05-15 14:49:45 +02:00
Ruud
f6e84b6a35 Remove view after update 2013-05-14 00:22:19 +02:00
Ruud
6144f09a1f Make lists of sorted movies files also 2013-05-14 00:15:28 +02:00
Ruud
de142e8050 Goodfilm fixes. closes #1723
Thanks @qooplmao
2013-05-14 00:13:42 +02:00
Ruud
d0c1a119fd Use list for leftover files 2013-05-13 23:48:02 +02:00
Ruud
8fd80d3185 Update instead of extend 2013-05-13 23:44:01 +02:00
Ruud
ae28c82858 Cleanup 2013-05-13 23:27:46 +02:00
Ruud
1766764c7d Skip available movies in "still not available" view. fix #1687 2013-05-13 23:07:57 +02:00
Ruud
129f8d72bd API movie.list didn't return proper total. fix #1727 2013-05-13 21:37:54 +02:00
Ruud
7314b5ecae Run async event in thread so the on_complete is fired properly 2013-05-11 00:04:59 +02:00
Ruud
7b0806355f Thumbnail list action position 2013-05-10 19:36:48 +02:00
Ruud
49cf72e058 Load notification after window load 2013-05-10 18:28:52 +02:00
Ruud
a11cad619d Don't unicode css 2013-05-10 18:06:10 +02:00
Ruud
c1d35e8a57 Stop blinking text when scrolling in webkit 2013-05-10 15:19:39 +02:00
Ruud
fede348fbd Icon replacements 2013-05-10 15:14:47 +02:00
Ruud
f3c60e8fa6 Added TPB proxies 2013-05-10 12:00:12 +02:00
Ruud
00e53439ed Don't wait between xbmc calls 2013-05-10 00:07:06 +02:00
Ruud
368fced0c4 Cancel autocomplete searches when starting new one 2013-05-10 00:03:42 +02:00
Ruud
666771fb0f Notification is empty styling 2013-05-10 00:02:11 +02:00
Ruud
9e3f978677 Styling fixes 2013-05-09 23:36:54 +02:00
Ruud
f467d1c4f7 Dashboard thumbnails height not set properly. fix #1698 2013-05-08 12:54:34 +02:00
Ruud
d8fc9d937e Filmweb userscript fix 2013-05-08 12:47:17 +02:00
998 changed files with 81757 additions and 100146 deletions

View File

@@ -40,3 +40,23 @@ Linux (ubuntu / debian):
* Make it executable. `sudo chmod +x /etc/init.d/couchpotato`
* Add it to defaults. `sudo update-rc.d couchpotato defaults`
* Open your browser and go to: `http://localhost:5050/`
FreeBSD :
* Update your ports tree `sudo portsnap fetch update`
* Install Python 2.6+ [lang/python](http://www.freshports.org/lang/python) with `cd /usr/ports/lang/python; sudo make install clean`
* Install port [databases/py-sqlite3](http://www.freshports.org/databases/py-sqlite3) with `cd /usr/ports/databases/py-sqlite3; sudo make install clean`
* Add a symlink to 'python2' `sudo ln -s /usr/local/bin/python /usr/local/bin/python2`
* Install port [ftp/libcurl](http://www.freshports.org/ftp/libcurl) with `cd /usr/ports/ftp/libcurl; sudo make install clean`
* Install port [ftp/curl](http://www.freshports.org/ftp/curl), deselect 'Asynchronous DNS resolution via c-ares' when prompted as part of config `cd /usr/ports/ftp/curl; sudo make install clean`
* Install port [textproc/docbook-xml-450](http://www.freshports.org/textproc/docbook-xml-450) with `cd /usr/ports/textproc/docbook-xml-450; sudo make install clean`
* Install port [GIT](http://git-scm.com/) with `cd /usr/ports/devel/git; sudo make install clean`
* 'cd' to the folder of your choosing.
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
* Then run `sudo python CouchPotatoServer/CouchPotato.py` to start for the first time
* To run on boot copy the init script. `sudo cp CouchPotatoServer/init/freebsd /etc/rc.d/couchpotato`
* Change the paths inside the init script. `sudo vim /etc/rc.d/couchpotato`
* Make init script executable. `sudo chmod +x /etc/rc.d/couchpotato`
* Add init to startup. `sudo echo 'couchpotato_enable="YES"' >> /etc/rc.conf`
* Open your browser and go to: `http://server:5050/`

View File

@@ -1,15 +1,25 @@
#So you feel like posting a bug, sending me a pull request or just telling me how awesome I am. No problem!
## Got a issue/feature request or submitting a pull request?
##Just make sure you think of the following things:
Make sure you think of the following things:
* Search through the existing (and closed) issues first. See if you can get your answer there.
## Issue
* Search through the existing (and closed) issues first, see if you can get your answer there.
* Double check the result manually, because it could be an external issue.
* Post logs! Without seeing what is going on, I can't reproduce the error.
* What is the movie + quality you are searching for.
* What are you settings for the specific problem.
* What providers are you using. (While your logs include these, scanning through hundreds of lines of log isn't my hobby).
* Give me a short step by step of how to reproduce.
* Also check the logs before submitting, obvious errors like permission or http errors are often not related to CP.
* What is the movie + quality you are searching for?
* What are your settings for the specific problem?
* What providers are you using? (While your logs include these, scanning through hundreds of lines of log isn't our hobby)
* Post the logs from config directory, please do not copy paste the UI. Use pastebin to store these logs!
* Give a short step by step of how to reproduce the error.
* What hardware / OS are you using and what are the limits? NAS can be slow and maybe have a different python installed than when you use CP on OSX or Windows for example.
* I will mark issues with the "can't reproduce" tag. Don't go asking me "why closed" if it clearly says the issue in the tag ;)
* I will mark issues with the "can't reproduce" tag. Don't go asking "why closed" if it clearly says the issue in the tag ;)
* If you're running on a NAS (QNAP, Austor etc..) with pre-made packages, make sure these are setup to use our source repo (RuudBurger/CouchPotatoServer) and nothing else!!
**If I don't get enough info, the chance of the issue getting closed is a lot bigger ;)**
## Pull Request
* Make sure your pull request is made for the develop branch (or relevant feature branch)
* Have you tested your PR? If not, why?
* Are there any limitations of your PR we should know of?
* Make sure to keep your PR up-to-date with the branch you're trying to push into.
**If we don't get enough info, the chance of the issue getting closed is a lot bigger ;)**

View File

@@ -1,84 +1,139 @@
from couchpotato.api import api_docs, api_docs_missing
from couchpotato.core.auth import requires_auth
from couchpotato.api import api_docs, api_docs_missing, api
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.request import getParams, jsonified
from couchpotato.core.helpers.variable import md5
from couchpotato.core.helpers.variable import md5, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from flask.app import Flask
from flask.blueprints import Blueprint
from flask.globals import request
from flask.helpers import url_for
from flask.templating import render_template
from sqlalchemy.engine import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm.session import sessionmaker
from werkzeug.utils import redirect
from tornado import template
from tornado.web import RequestHandler, authenticated
import os
import time
import traceback
log = CPLog(__name__)
app = Flask(__name__, static_folder = 'nope')
web = Blueprint('web', __name__)
views = {}
template_loader = template.Loader(os.path.join(os.path.dirname(__file__), 'templates'))
class BaseHandler(RequestHandler):
def get_current_user(self):
username = Env.setting('username')
password = Env.setting('password')
if username and password:
return self.get_secure_cookie('user')
else: # Login when no username or password are set
return True
# Main web handler
class WebHandler(BaseHandler):
@authenticated
def get(self, route, *args, **kwargs):
route = route.strip('/')
if not views.get(route):
page_not_found(self)
return
try:
self.write(views[route]())
except:
log.error("Failed doing web request '%s': %s", (route, traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
def addView(route, func, static = False):
views[route] = func
def get_session(engine = None):
return Env.getSession(engine)
def addView(route, func, static = False):
web.add_url_rule(route + ('' if static else '/'), endpoint = route if route else 'index', view_func = func)
""" Web view """
@web.route('/')
@requires_auth
# Web view
def index():
return render_template('index.html', sep = os.sep, fireEvent = fireEvent, env = Env)
return template_loader.load('index.html').generate(sep = os.sep, fireEvent = fireEvent, Env = Env)
addView('', index)
""" Api view """
@web.route('docs/')
@requires_auth
# API docs
def apiDocs():
from couchpotato import app
routes = []
for route, x in sorted(app.view_functions.iteritems()):
if route[0:4] == 'api.':
routes += [route[4:].replace('::', '.')]
for route in api.iterkeys():
routes.append(route)
if api_docs.get(''):
del api_docs['']
del api_docs_missing['']
return render_template('api.html', fireEvent = fireEvent, routes = sorted(routes), api_docs = api_docs, api_docs_missing = sorted(api_docs_missing))
@web.route('getkey/')
def getApiKey():
return template_loader.load('api.html').generate(fireEvent = fireEvent, routes = sorted(routes), api_docs = api_docs, api_docs_missing = sorted(api_docs_missing), Env = Env)
api = None
params = getParams()
username = Env.setting('username')
password = Env.setting('password')
addView('docs', apiDocs)
if (params.get('u') == md5(username) or not username) and (params.get('p') == password or not password):
api = Env.setting('api_key')
# Make non basic auth option to get api key
class KeyHandler(RequestHandler):
def get(self, *args, **kwargs):
api = None
return jsonified({
'success': api is not None,
'api_key': api
})
try:
username = Env.setting('username')
password = Env.setting('password')
@app.errorhandler(404)
def page_not_found(error):
index_url = url_for('web.index')
url = request.path[len(index_url):]
if (self.get_argument('u') == md5(username) or not username) and (self.get_argument('p') == password or not password):
api = Env.setting('api_key')
self.write({
'success': api is not None,
'api_key': api
})
except:
log.error('Failed doing key request: %s', (traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
class LoginHandler(BaseHandler):
def get(self, *args, **kwargs):
if self.get_current_user():
self.redirect(Env.get('web_base'))
else:
self.write(template_loader.load('login.html').generate(sep = os.sep, fireEvent = fireEvent, Env = Env))
def post(self, *args, **kwargs):
api = None
username = Env.setting('username')
password = Env.setting('password')
if (self.get_argument('username') == username or not username) and (md5(self.get_argument('password')) == password or not password):
api = Env.setting('api_key')
if api:
remember_me = tryInt(self.get_argument('remember_me', default = 0))
self.set_secure_cookie('user', api, expires_days = 30 if remember_me > 0 else None)
self.redirect(Env.get('web_base'))
class LogoutHandler(BaseHandler):
def get(self, *args, **kwargs):
self.clear_cookie('user')
self.redirect('%slogin/' % Env.get('web_base'))
def page_not_found(rh):
index_url = Env.get('web_base')
url = rh.request.uri[len(index_url):]
if url[:3] != 'api':
if request.path != '/':
r = request.url.replace(request.path, index_url + '#' + url)
else:
r = '%s%s' % (request.url.rstrip('/'), index_url + '#' + url)
return redirect(r)
r = index_url + '#' + url.lstrip('/')
rh.redirect(r)
else:
if not Env.get('dev'):
time.sleep(0.1)
return 'Wrong API key used', 404
rh.set_status(404)
rh.write('Wrong API key used')

View File

@@ -1,44 +1,65 @@
from flask.blueprints import Blueprint
from flask.helpers import url_for
from couchpotato.core.helpers.request import getParams
from couchpotato.core.logger import CPLog
from functools import wraps
from threading import Thread
from tornado.gen import coroutine
from tornado.web import RequestHandler, asynchronous
from werkzeug.utils import redirect
import json
import threading
import tornado
import traceback
import urllib
api = Blueprint('api', __name__)
api_docs = {}
api_docs_missing = []
log = CPLog(__name__)
api = {}
api_locks = {}
api_nonblock = {}
api_docs = {}
api_docs_missing = []
def run_async(func):
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
# NonBlock API handler
class NonBlockHandler(RequestHandler):
stoppers = []
stopper = None
@asynchronous
def get(self, route):
def get(self, route, *args, **kwargs):
route = route.strip('/')
start, stop = api_nonblock[route]
self.stoppers.append(stop)
self.stopper = stop
start(self.onNewMessage, last_id = self.get_argument("last_id", None))
start(self.onNewMessage, last_id = self.get_argument('last_id', None))
def onNewMessage(self, response):
if self.request.connection.stream.closed():
self.on_connection_close()
return
self.finish(response)
try:
self.finish(response)
except:
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
def on_connection_close(self):
for stop in self.stoppers:
stop(self.onNewMessage)
if self.stopper:
self.stopper(self.onNewMessage)
self.stoppers = []
def addApiView(route, func, static = False, docs = None, **kwargs):
api.add_url_rule(route + ('' if static else '/'), endpoint = route.replace('.', '::') if route else 'index', view_func = func, **kwargs)
if docs:
api_docs[route[4:] if route[0:4] == 'api.' else route] = docs
else:
api_docs_missing.append(route)
self.stopper = None
def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
api_nonblock[route] = func_tuple
@@ -48,9 +69,67 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
else:
api_docs_missing.append(route)
""" Api view """
def index():
index_url = url_for('web.index')
return redirect(index_url + 'docs/')
# Blocking API handler
class ApiHandler(RequestHandler):
addApiView('', index)
@coroutine
def get(self, route, *args, **kwargs):
route = route.strip('/')
if not api.get(route):
self.write('API call doesn\'t seem to exist')
return
api_locks[route].acquire()
try:
kwargs = {}
for x in self.request.arguments:
kwargs[x] = urllib.unquote(self.get_argument(x))
# Split array arguments
kwargs = getParams(kwargs)
# Remove t random string
try: del kwargs['t']
except: pass
# Add async callback handler
@run_async
def run_handler(callback):
try:
result = api[route](**kwargs)
callback(result)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
callback({'success': False, 'error': 'Failed returning results'})
result = yield tornado.gen.Task(run_handler)
# Check JSONP callback
jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.set_header("Content-Type", "text/javascript")
elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1])
else:
self.write(result)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
api_locks[route].release()
def addApiView(route, func, static = False, docs = None, **kwargs):
if static: func(route)
else:
api[route] = func
api_locks[route] = threading.Lock()
if docs:
api_docs[route[4:] if route[0:4] == 'api.' else route] = docs
else:
api_docs_missing.append(route)

View File

@@ -1,6 +1,5 @@
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.request import jsonified
from couchpotato.core.helpers.variable import cleanHost, md5
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@@ -56,8 +55,12 @@ class Core(Plugin):
if not Env.get('desktop'):
self.signalHandler()
# Set default urlopen timeout
import socket
socket.setdefaulttimeout(30)
def md5Password(self, value):
return md5(value.encode(Env.get('encoding'))) if value else ''
return md5(value) if value else ''
def checkApikey(self, value):
return value if value and len(value) > 3 else uuid4().hex
@@ -68,12 +71,12 @@ class Core(Plugin):
return True
def available(self):
return jsonified({
def available(self, **kwargs):
return {
'success': True
})
}
def shutdown(self):
def shutdown(self, **kwargs):
if self.shutdown_started:
return False
@@ -83,7 +86,7 @@ class Core(Plugin):
return 'shutdown'
def restart(self):
def restart(self, **kwargs):
if self.shutdown_started:
return False
@@ -125,7 +128,7 @@ class Core(Plugin):
time.sleep(1)
log.debug('Save to shutdown/restart')
log.debug('Safe to shutdown/restart')
try:
IOLoop.current().stop()
@@ -156,10 +159,10 @@ class Core(Plugin):
host = 'localhost'
port = Env.setting('port')
return '%s:%d%s' % (cleanHost(host).rstrip('/'), int(port), '/' + Env.setting('url_base').lstrip('/') if Env.setting('url_base') else '')
return '%s:%d%s' % (cleanHost(host).rstrip('/'), int(port), Env.get('web_base'))
def createApiUrl(self):
return '%s/api/%s' % (self.createBaseUrl(), Env.setting('api_key'))
return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key'))
def version(self):
ver = fireEvent('updater.info', single = True)
@@ -170,10 +173,10 @@ class Core(Plugin):
return '%s - %s-%s - v2' % (platf, ver.get('version')['type'], ver.get('version')['hash'])
def versionView(self):
return jsonified({
def versionView(self, **kwargs):
return {
'version': self.version()
})
}
def signalHandler(self):
if Env.get('daemonized'): return

View File

@@ -4,9 +4,11 @@ from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from minify.cssmin import cssmin
from minify.jsmin import jsmin
import cssprefixer
from tornado.web import StaticFileHandler
import os
import re
import traceback
log = CPLog(__name__)
@@ -32,6 +34,8 @@ class ClientScript(Plugin):
'scripts/library/question.js',
'scripts/library/scrollspy.js',
'scripts/library/spin.js',
'scripts/library/Array.stableSort.js',
'scripts/library/async.js',
'scripts/couchpotato.js',
'scripts/api.js',
'scripts/library/history.js',
@@ -79,7 +83,7 @@ class ClientScript(Plugin):
for static_type in self.core_static:
for rel_path in self.core_static.get(static_type):
file_path = os.path.join(Env.get('app_dir'), 'couchpotato', 'static', rel_path)
core_url = 'api/%s/static/%s?%s' % (Env.setting('api_key'), rel_path, tryInt(os.path.getmtime(file_path)))
core_url = 'static/%s' % rel_path
if static_type == 'script':
self.registerScript(core_url, file_path, position = 'front')
@@ -89,6 +93,13 @@ class ClientScript(Plugin):
def minify(self):
# Create cache dir
cache = Env.get('cache_dir')
parent_dir = os.path.join(cache, 'minified')
self.makeDir(parent_dir)
Env.get('app').add_handlers(".*$", [(Env.get('web_base') + 'minified/(.*)', StaticFileHandler, {'path': parent_dir})])
for file_type in ['style', 'script']:
ext = 'js' if file_type is 'script' else 'css'
positions = self.paths.get(file_type, {})
@@ -99,8 +110,8 @@ class ClientScript(Plugin):
def _minify(self, file_type, files, position, out):
cache = Env.get('cache_dir')
out_name = 'minified_' + out
out = os.path.join(cache, out_name)
out_name = out
out = os.path.join(cache, 'minified', out_name)
raw = []
for file_path in files:
@@ -109,7 +120,8 @@ class ClientScript(Plugin):
if file_type == 'script':
data = jsmin(f)
else:
data = cssprefixer.process(f, debug = False, minify = True)
data = self.prefix(f)
data = cssmin(data)
data = data.replace('../images/', '../static/images/')
data = data.replace('../fonts/', '../static/fonts/')
data = data.replace('../../static/', '../static/') # Replace inside plugins
@@ -119,17 +131,17 @@ class ClientScript(Plugin):
# Combine all files together with some comments
data = ''
for r in raw:
data += self.comment.get(file_type) % (r.get('file'), r.get('date'))
data += self.comment.get(file_type) % (ss(r.get('file')), r.get('date'))
data += r.get('data') + '\n\n'
self.createFile(out, ss(data.strip()))
self.createFile(out, data.strip())
if not self.minified.get(file_type):
self.minified[file_type] = {}
if not self.minified[file_type].get(position):
self.minified[file_type][position] = []
minified_url = 'api/%s/file.cache/%s?%s' % (Env.setting('api_key'), out_name, tryInt(os.path.getmtime(out)))
minified_url = 'minified/%s?%s' % (out_name, tryInt(os.path.getmtime(out)))
self.minified[file_type][position].append(minified_url)
def getStyles(self, *args, **kwargs):
@@ -163,6 +175,8 @@ class ClientScript(Plugin):
def register(self, api_path, file_path, type, location):
api_path = '%s?%s' % (api_path, tryInt(os.path.getmtime(file_path)))
if not self.urls[type].get(location):
self.urls[type][location] = []
self.urls[type][location].append(api_path)
@@ -170,3 +184,28 @@ class ClientScript(Plugin):
if not self.paths[type].get(location):
self.paths[type][location] = []
self.paths[type][location].append(file_path)
prefix_properties = ['border-radius', 'transform', 'transition', 'box-shadow']
prefix_tags = ['ms', 'moz', 'webkit']
def prefix(self, data):
trimmed_data = re.sub('(\t|\n|\r)+', '', data)
new_data = ''
colon_split = trimmed_data.split(';')
for splt in colon_split:
curl_split = splt.strip().split('{')
for curly in curl_split:
curly = curly.strip()
for prop in self.prefix_properties:
if curly[:len(prop) + 1] == prop + ':':
for tag in self.prefix_tags:
new_data += ' -%s-%s; ' % (tag, curly)
new_data += curly + (' { ' if len(curl_split) > 1 else ' ')
new_data += '; '
new_data = new_data.replace('{ ;', '; ').replace('} ;', '} ')
return new_data

View File

@@ -31,13 +31,13 @@ class Scheduler(Plugin):
pass
def doShutdown(self):
super(Scheduler, self).doShutdown()
self.stop()
return super(Scheduler, self).doShutdown()
def stop(self):
if self.started:
log.debug('Stopping scheduler')
self.sched.shutdown()
self.sched.shutdown(wait = False)
log.debug('Scheduler stopped')
self.started = False

View File

@@ -1,7 +1,6 @@
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.request import jsonified
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
@@ -33,10 +32,10 @@ class Updater(Plugin):
else:
self.updater = SourceUpdater()
addEvent('app.load', self.autoUpdate)
addEvent('app.load', self.setCrons)
addEvent('updater.info', self.info)
addApiView('updater.info', self.getInfo, docs = {
addApiView('updater.info', self.info, docs = {
'desc': 'Get updater information',
'return': {
'type': 'object',
@@ -62,7 +61,7 @@ class Updater(Plugin):
self.autoUpdate() # Check after enabling
def autoUpdate(self):
if self.check() and self.conf('automatic') and not self.updater.update_failed:
if self.isEnabled() and self.check() and self.conf('automatic') and not self.updater.update_failed:
if self.updater.doUpdate():
# Notify before restarting
@@ -80,31 +79,30 @@ class Updater(Plugin):
return False
def check(self):
if self.isDisabled():
def check(self, force = False):
if not force and self.isDisabled():
return
if self.updater.check():
if not self.available_notified and self.conf('notification') and not self.conf('automatic'):
fireEvent('updater.available', message = 'A new update is available', data = self.updater.info())
info = self.updater.info()
version_date = datetime.fromtimestamp(info['update_version']['date'])
fireEvent('updater.available', message = 'A new update with hash "%s" is available, this version is from %s' % (info['update_version']['hash'], version_date), data = info)
self.available_notified = True
return True
return False
def info(self):
def info(self, **kwargs):
return self.updater.info()
def getInfo(self):
return jsonified(self.updater.info())
def checkView(self):
return jsonified({
'update_available': self.check(),
def checkView(self, **kwargs):
return {
'update_available': self.check(force = True),
'info': self.updater.info()
})
}
def doUpdateView(self):
def doUpdateView(self, **kwargs):
self.check()
if not self.updater.update_version:
@@ -119,9 +117,9 @@ class Updater(Plugin):
if not success:
success = True
return jsonified({
return {
'success': success
})
}
class BaseUpdater(Plugin):
@@ -134,13 +132,11 @@ class BaseUpdater(Plugin):
update_failed = False
update_version = None
last_check = 0
auto_register_static = False
def doUpdate(self):
pass
def getInfo(self):
return jsonified(self.info())
def info(self):
return {
'last_check': self.last_check,
@@ -187,9 +183,6 @@ class GitUpdater(BaseUpdater):
def doUpdate(self):
try:
log.debug('Stashing local changes')
self.repo.saveStash()
log.info('Updating to latest version')
self.repo.pull()
@@ -279,6 +272,7 @@ class SourceUpdater(BaseUpdater):
if download_data.get('type') == 'zip':
zip = zipfile.ZipFile(destination)
zip.extractall(extracted_path)
zip.close()
else:
tar = tarfile.open(destination)
tar.extractall(path = extracted_path)
@@ -301,6 +295,7 @@ class SourceUpdater(BaseUpdater):
def replaceWith(self, path):
app_dir = ss(Env.get('app_dir'))
data_dir = ss(Env.get('data_dir'))
# Get list of files we want to overwrite
self.deletePyc()
@@ -332,12 +327,15 @@ class SourceUpdater(BaseUpdater):
log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc()))
return False
if Env.get('app_dir') not in Env.get('data_dir'):
for still_exists in existing_files:
try:
os.remove(still_exists)
except:
log.error('Failed removing non-used file: %s', traceback.format_exc())
for still_exists in existing_files:
if data_dir in still_exists:
continue
try:
os.remove(still_exists)
except:
log.error('Failed removing non-used file: %s', traceback.format_exc())
return True

View File

@@ -24,7 +24,7 @@ var UpdaterBase = new Class({
self.doUpdate();
else {
App.unBlockPage();
App.fireEvent('message', 'No updates available');
App.on('message', 'No updates available');
}
}
})

View File

@@ -1,26 +0,0 @@
from couchpotato.core.helpers.variable import md5
from couchpotato.environment import Env
from flask import request, Response
from functools import wraps
# Return True when the supplied HTTP Basic credentials match the configured
# username/password settings. NOTE(review): the caller (requires_auth) passes
# `password` already md5-hashed, so the stored setting is expected to hold the
# hash rather than the plain password — confirm before reuse.
def check_auth(username, password):
return username == Env.setting('username') and password == Env.setting('password')
# Build the 401 response that prompts the browser for HTTP Basic credentials
# (the WWW-Authenticate header triggers the login dialog).
def authenticate():
return Response(
'This is not the page you are looking for. *waves hand*', 401,
{'WWW-Authenticate': 'Basic realm="CouchPotato Login"'}
)
# Decorator: require HTTP Basic auth on the wrapped view, but only when both
# a username and a password are configured; otherwise calls pass straight through.
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
# Flask's parsed Authorization header; None when the client sent none.
auth = getattr(request, 'authorization')
if Env.setting('username') and Env.setting('password'):
# Hash the submitted password with md5 before comparing against settings.
if (not auth or not check_auth(auth.username.decode('latin1'), md5(auth.password.decode('latin1').encode(Env.get('encoding'))))):
return authenticate()
return f(*args, **kwargs)
return decorated

View File

@@ -1,4 +1,4 @@
config = {
config = [{
'name': 'download_providers',
'groups': [
{
@@ -10,4 +10,4 @@ config = {
'options': [],
},
],
}
}]

View File

@@ -11,12 +11,13 @@ log = CPLog(__name__)
class Downloader(Provider):
type = []
protocol = []
http_time_between_calls = 0
status_support = True
torrent_sources = [
'http://torrage.com/torrent/%s.torrent',
'http://torcache.net/torrent/%s.torrent',
'https://torcache.net/torrent/%s.torrent',
]
torrent_trackers = [
@@ -36,48 +37,74 @@ class Downloader(Provider):
def __init__(self):
addEvent('download', self._download)
addEvent('download.enabled', self._isEnabled)
addEvent('download.enabled_types', self.getEnabledDownloadType)
addEvent('download.enabled_protocols', self.getEnabledProtocol)
addEvent('download.status', self._getAllDownloadStatus)
addEvent('download.remove_failed', self._removeFailed)
addEvent('download.pause', self._pause)
addEvent('download.process_complete', self._processComplete)
def getEnabledDownloadType(self):
for download_type in self.type:
if self.isEnabled(manual = True, data = {'type': download_type}):
return self.type
def getEnabledProtocol(self):
for download_protocol in self.protocol:
if self.isEnabled(manual = True, data = {'protocol': download_protocol}):
return self.protocol
return []
def _download(self, data = {}, movie = {}, manual = False, filedata = None):
def _download(self, data = None, media = None, manual = False, filedata = None):
if not media: media = {}
if not data: data = {}
if self.isDisabled(manual, data):
return
return self.download(data = data, movie = movie, filedata = filedata)
return self.download(data = data, media = media, filedata = filedata)
def _getAllDownloadStatus(self):
def _getAllDownloadStatus(self, download_ids):
if self.isDisabled(manual = True, data = {}):
return
return self.getAllDownloadStatus()
ids = [download_id['id'] for download_id in download_ids if download_id['downloader'] == self.getName()]
def getAllDownloadStatus(self):
return
if ids:
return self.getAllDownloadStatus(ids)
else:
return
def _removeFailed(self, item):
def getAllDownloadStatus(self, ids):
return []
def _removeFailed(self, release_download):
if self.isDisabled(manual = True, data = {}):
return
if self.conf('delete_failed', default = True):
return self.removeFailed(item)
if release_download and release_download.get('downloader') == self.getName():
if self.conf('delete_failed'):
return self.removeFailed(release_download)
return False
def removeFailed(self, item):
return False
return
def isCorrectType(self, item_type):
is_correct = item_type in self.type
def removeFailed(self, release_download):
return
def _processComplete(self, release_download):
if self.isDisabled(manual = True, data = {}):
return
if release_download and release_download.get('downloader') == self.getName():
if self.conf('remove_complete', default = False):
return self.processComplete(release_download = release_download, delete_files = self.conf('delete_files', default = False))
return False
return
def processComplete(self, release_download, delete_files):
return
def isCorrectProtocol(self, protocol):
is_correct = protocol in self.protocol
if not is_correct:
log.debug("Downloader doesn't support this type")
log.debug("Downloader doesn't support this protocol")
return is_correct
@@ -101,31 +128,50 @@ class Downloader(Provider):
except:
log.debug('Torrent hash "%s" wasn\'t found on: %s', (torrent_hash, source))
log.error('Failed converting magnet url to torrent: %s', (torrent_hash))
log.error('Failed converting magnet url to torrent: %s', torrent_hash)
return False
def downloadReturnId(self, download_id):
return {
'downloader': self.getName(),
'status_support': self.status_support,
'id': download_id
}
def isDisabled(self, manual, data):
def isDisabled(self, manual = False, data = None):
if not data: data = {}
return not self.isEnabled(manual, data)
def _isEnabled(self, manual, data = {}):
def _isEnabled(self, manual, data = None):
if not data: data = {}
if not self.isEnabled(manual, data):
return
return True
def isEnabled(self, manual, data = {}):
def isEnabled(self, manual = False, data = None):
if not data: data = {}
d_manual = self.conf('manual', default = False)
return super(Downloader, self).isEnabled() and \
((d_manual and manual) or (d_manual is False)) and \
(not data or self.isCorrectType(data.get('type')))
(d_manual and manual or d_manual is False) and \
(not data or self.isCorrectProtocol(data.get('protocol')))
def _pause(self, release_download, pause = True):
if self.isDisabled(manual = True, data = {}):
return
class StatusList(list):
if release_download and release_download.get('downloader') == self.getName():
self.pause(release_download, pause)
return True
return False
def pause(self, release_download, pause):
return
class ReleaseDownloadList(list):
provider = None
@@ -134,7 +180,7 @@ class StatusList(list):
self.provider = provider
self.kwargs = kwargs
super(StatusList, self).__init__()
super(ReleaseDownloadList, self).__init__()
def extend(self, results):
for r in results:
@@ -142,7 +188,7 @@ class StatusList(list):
def append(self, result):
new_result = self.fillResult(result)
super(StatusList, self).append(new_result)
super(ReleaseDownloadList, self).append(new_result)
def fillResult(self, result):
@@ -151,6 +197,7 @@ class StatusList(list):
'status': 'busy',
'downloader': self.provider.getName(),
'folder': '',
'files': '',
}
return mergeDicts(defaults, result)

View File

@@ -13,7 +13,7 @@ config = [{
'list': 'download_providers',
'name': 'blackhole',
'label': 'Black hole',
'description': 'Download the NZB/Torrent to a specific folder.',
'description': 'Download the NZB/Torrent to a specific folder. <em>Note: Seeding and copying/linking features do <strong>not</strong> work with Black hole</em>.',
'wizard': True,
'options': [
{
@@ -35,6 +35,13 @@ config = [{
'type': 'dropdown',
'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
},
{
'name': 'create_subdir',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Create a sub directory when saving the .nzb (or .torrent).',
},
{
'name': 'manual',
'default': 0,

View File

@@ -7,22 +7,26 @@ import traceback
log = CPLog(__name__)
class Blackhole(Downloader):
type = ['nzb', 'torrent', 'torrent_magnet']
protocol = ['nzb', 'torrent', 'torrent_magnet']
status_support = False
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
if not directory or not os.path.isdir(directory):
log.error('No directory set for blackhole %s download.', data.get('type'))
log.error('No directory set for blackhole %s download.', data.get('protocol'))
else:
try:
if not filedata or len(filedata) < 50:
try:
if data.get('type') == 'torrent_magnet':
if data.get('protocol') == 'torrent_magnet':
filedata = self.magnetToTorrent(data.get('url'))
data['type'] = 'torrent'
data['protocol'] = 'torrent'
except:
log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
@@ -30,18 +34,28 @@ class Blackhole(Downloader):
log.error('No nzb/torrent available: %s', data.get('url'))
return False
fullPath = os.path.join(directory, self.createFileName(data, filedata, movie))
file_name = self.createFileName(data, filedata, media)
full_path = os.path.join(directory, file_name)
if self.conf('create_subdir'):
try:
new_path = os.path.splitext(full_path)[0]
if not os.path.exists(new_path):
os.makedirs(new_path)
full_path = os.path.join(new_path, file_name)
except:
log.error('Couldnt create sub dir, reverting to old one: %s', full_path)
try:
if not os.path.isfile(fullPath):
log.info('Downloading %s to %s.', (data.get('type'), fullPath))
with open(fullPath, 'wb') as f:
if not os.path.isfile(full_path):
log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
with open(full_path, 'wb') as f:
f.write(filedata)
os.chmod(fullPath, Env.getPermission('file'))
return True
os.chmod(full_path, Env.getPermission('file'))
return self.downloadReturnId('')
else:
log.info('File %s already exists.', fullPath)
return True
log.info('File %s already exists.', full_path)
return self.downloadReturnId('')
except:
log.error('Failed to download to blackhole %s', traceback.format_exc())
@@ -53,20 +67,21 @@ class Blackhole(Downloader):
return False
def getEnabledDownloadType(self):
def getEnabledProtocol(self):
if self.conf('use_for') == 'both':
return super(Blackhole, self).getEnabledDownloadType()
return super(Blackhole, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent':
return ['torrent', 'torrent_magnet']
else:
return ['nzb']
def isEnabled(self, manual, data = {}):
for_type = ['both']
if data and 'torrent' in data.get('type'):
for_type.append('torrent')
def isEnabled(self, manual = False, data = None):
if not data: data = {}
for_protocol = ['both']
if data and 'torrent' in data.get('protocol'):
for_protocol.append('torrent')
elif data:
for_type.append(data.get('type'))
for_protocol.append(data.get('protocol'))
return super(Blackhole, self).isEnabled(manual, data) and \
((self.conf('use_for') in for_type))
((self.conf('use_for') in for_protocol))

View File

@@ -0,0 +1,90 @@
from .main import Deluge
# Factory returning the Deluge downloader instance.
# NOTE(review): presumably invoked by the plugin loader on startup — confirm.
def start():
return Deluge()
# Settings definition for the Deluge downloader: a single 'deluge' group
# rendered in the 'downloaders' tab of the settings UI.
config = [{
'name': 'deluge',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'deluge',
'label': 'Deluge',
'description': 'Use <a href="http://www.deluge-torrent.org/" target="_blank">Deluge</a> to download torrents.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'torrent',
},
{
'name': 'host',
'default': 'localhost:58846',
'description': 'Hostname with port. Usually <strong>localhost:58846</strong>',
},
{
'name': 'username',
},
{
'name': 'password',
'type': 'password',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Download to this directory. Keep empty for default Deluge download directory.',
},
{
'name': 'completed_directory',
'type': 'directory',
'description': 'Move completed torrent to this directory. Keep empty for default Deluge options.',
'advanced': True,
},
{
'name': 'label',
'description': 'Label to add to torrents in the Deluge UI.',
},
{
'name': 'remove_complete',
'label': 'Remove torrent',
'type': 'bool',
'default': True,
'advanced': True,
'description': 'Remove the torrent from Deluge after it has finished seeding.',
},
{
'name': 'delete_files',
'label': 'Remove files',
'default': True,
'type': 'bool',
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'paused',
'type': 'bool',
'advanced': True,
'default': False,
'description': 'Add the torrent paused.',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],
}]

View File

@@ -0,0 +1,275 @@
from base64 import b64encode, b16encode, b32decode
from bencode import bencode as benc, bdecode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryFloat
from couchpotato.core.logger import CPLog
from datetime import timedelta
from hashlib import sha1
from synchronousdeluge import DelugeClient
import os.path
import re
import traceback
log = CPLog(__name__)
class Deluge(Downloader):
    """Downloader that hands torrents (files or magnet links) to a Deluge daemon.

    All daemon traffic goes through a lazily-created DelugeRPC helper; one
    helper instance is cached per plugin instance.
    """

    protocol = ['torrent', 'torrent_magnet']
    log = CPLog(__name__)
    drpc = None  # cached DelugeRPC helper, created on first connect()

    def connect(self):
        """Return the (cached) DelugeRPC helper, or False when host config is invalid."""
        # Load host from config and split out port.
        host = self.conf('host').split(':')
        # Guard against a missing ':port' part as well as a non-numeric port.
        # Bug fix: a bare hostname previously raised IndexError here instead
        # of logging the intended error.
        if len(host) < 2 or not isInt(host[1]):
            log.error('Config properties are not filled in correctly, port is missing.')
            return False

        if not self.drpc:
            self.drpc = DelugeRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))

        return self.drpc

    def download(self, data = None, media = None, filedata = None):
        """Send a release to Deluge.

        Returns the downloadReturnId dict on success, False on failure.
        """
        if not media: media = {}
        if not data: data = {}

        log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), data.get('protocol')))

        if not self.connect():
            return False

        if not filedata and data.get('protocol') == 'torrent':
            log.error('Failed sending torrent, no data')
            return False

        # Set parameters for Deluge
        options = {
            'add_paused': self.conf('paused', default = 0),
            'label': self.conf('label')
        }

        if self.conf('directory'):
            if os.path.isdir(self.conf('directory')):
                options['download_location'] = self.conf('directory')
            else:
                log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory'))

        if self.conf('completed_directory'):
            if os.path.isdir(self.conf('completed_directory')):
                options['move_completed'] = 1
                options['move_completed_path'] = self.conf('completed_directory')
            else:
                # Bug fix: this branch used to log self.conf('directory') and call
                # it the "Download directory", although it is the completed
                # directory that failed the isdir() check.
                log.error('Completed directory from Deluge settings: %s doesn\'t exist', self.conf('completed_directory'))

        if data.get('seed_ratio'):
            options['stop_at_ratio'] = 1
            options['stop_ratio'] = tryFloat(data.get('seed_ratio'))

        # Deluge only has seed time as a global option. Might be added in
        # in a future API release.
        # if data.get('seed_time'):

        # Send request to Deluge
        if data.get('protocol') == 'torrent_magnet':
            remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
        else:
            filename = self.createFileName(data, filedata, media)
            remote_torrent = self.drpc.add_torrent_file(filename, filedata, options)

        if not remote_torrent:
            log.error('Failed sending torrent to Deluge')
            return False

        log.info('Torrent sent to Deluge successfully.')
        return self.downloadReturnId(remote_torrent)

    def getAllDownloadStatus(self, ids):
        """Return a ReleaseDownloadList describing the given torrent ids."""
        log.debug('Checking Deluge download status.')

        if not self.connect():
            return []

        release_downloads = ReleaseDownloadList(self)

        queue = self.drpc.get_alltorrents(ids)
        if not queue:
            log.debug('Nothing in queue or error')
            return []

        for torrent_id in queue:
            torrent = queue[torrent_id]
            log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))

            # Deluge has no easy way to work out if a torrent is stalled or failing.
            #status = 'failed'
            status = 'busy'
            if torrent['is_seed'] and tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio']):
                # We have torrent['seeding_time'] to work out what the seeding time is, but we do not
                # have access to the downloader seed_time, as with deluge we have no way to pass it
                # when the torrent is added. So Deluge will only look at the ratio.
                # See above comment in download().
                status = 'seeding'
            elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused':
                status = 'completed'

            download_dir = sp(torrent['save_path'])
            if torrent['move_on_completed']:
                download_dir = torrent['move_completed_path']

            torrent_files = []
            for file_item in torrent['files']:
                torrent_files.append(sp(os.path.join(download_dir, file_item['path'])))

            release_downloads.append({
                'id': torrent['hash'],
                'name': torrent['name'],
                'status': status,
                'original_status': torrent['state'],
                'seed_ratio': torrent['ratio'],
                'timeleft': str(timedelta(seconds = torrent['eta'])),
                'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])),
                'files': '|'.join(torrent_files),
            })

        return release_downloads

    def pause(self, release_download, pause = True):
        """Pause (or resume, when pause is False) a torrent by its id."""
        if pause:
            return self.drpc.pause_torrent([release_download['id']])
        else:
            return self.drpc.resume_torrent([release_download['id']])

    def removeFailed(self, release_download):
        """Remove a failed download from Deluge, deleting its data as well."""
        log.info('%s failed downloading, deleting...', release_download['name'])
        return self.drpc.remove_torrent(release_download['id'], True)

    def processComplete(self, release_download, delete_files = False):
        """Remove a finished torrent from Deluge, optionally deleting the files."""
        log.debug('Requesting Deluge to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
        return self.drpc.remove_torrent(release_download['id'], remove_local_data = delete_files)
class DelugeRPC(object):
    """Minimal synchronous wrapper around the Deluge daemon RPC API.

    Every public method opens its own connection and disconnects in a finally
    block, so no connection state survives between calls. Methods return
    False (or nothing) on failure and log the error instead of raising.
    """

    host = 'localhost'
    port = 58846
    username = None
    password = None
    client = None

    def __init__(self, host = 'localhost', port = 58846, username = None, password = None):
        super(DelugeRPC, self).__init__()

        self.host = host
        self.port = port
        self.username = username
        self.password = password

    def connect(self):
        # Fresh client per call; the finally blocks below always disconnect it.
        self.client = DelugeClient()
        self.client.connect(self.host, int(self.port), self.username, self.password)

    def add_torrent_magnet(self, torrent, options):
        """Add a magnet link; return the torrent id, or False on failure."""
        torrent_id = False
        try:
            self.connect()
            torrent_id = self.client.core.add_torrent_magnet(torrent, options).get()
            if not torrent_id:
                # Deluge returns nothing when the torrent already exists; look it up.
                torrent_id = self._check_torrent(True, torrent)

            if torrent_id and options['label']:
                self.client.label.set_torrent(torrent_id, options['label']).get()
        # Fixed: 'except Exception, err' is Python 2-only syntax; 'as' works on 2.6+ and 3.x.
        except Exception as err:
            log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

        return torrent_id

    def add_torrent_file(self, filename, torrent, options):
        """Add raw .torrent data (base64-encoded over RPC); return id or False."""
        torrent_id = False
        try:
            self.connect()
            torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options).get()
            if not torrent_id:
                torrent_id = self._check_torrent(False, torrent)

            if torrent_id and options['label']:
                self.client.label.set_torrent(torrent_id, options['label']).get()
        except Exception as err:
            log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

        return torrent_id

    def get_alltorrents(self, ids):
        """Return the status dict for the given torrent ids, or False on error."""
        ret = False
        try:
            self.connect()
            ret = self.client.core.get_torrents_status({'id': ids}, {}).get()
        except Exception as err:
            log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

        return ret

    def pause_torrent(self, torrent_ids):
        """Pause the given torrents; errors are logged, not raised."""
        try:
            self.connect()
            self.client.core.pause_torrent(torrent_ids).get()
        except Exception as err:
            log.error('Failed to pause torrent: %s %s', (err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

    def resume_torrent(self, torrent_ids):
        """Resume the given torrents; errors are logged, not raised."""
        try:
            self.connect()
            self.client.core.resume_torrent(torrent_ids).get()
        except Exception as err:
            log.error('Failed to resume torrent: %s %s', (err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

    def remove_torrent(self, torrent_id, remove_local_data):
        """Remove a torrent (optionally with its data); return result or False."""
        ret = False
        try:
            self.connect()
            ret = self.client.core.remove_torrent(torrent_id, remove_local_data).get()
        except Exception as err:
            log.error('Failed to remove torrent: %s %s', (err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

        return ret

    def disconnect(self):
        self.client.disconnect()

    def _check_torrent(self, magnet, torrent):
        # Torrent not added, check if it already existed.
        if magnet:
            # Raw string avoids the invalid '\w' escape warning on Python 3.6+.
            torrent_hash = re.findall(r'urn:btih:([\w]{32,40})', torrent)[0]
        else:
            info = bdecode(torrent)["info"]
            torrent_hash = sha1(benc(info)).hexdigest()

        # Convert base 32 to hex
        if len(torrent_hash) == 32:
            torrent_hash = b16encode(b32decode(torrent_hash))

        torrent_hash = torrent_hash.lower()
        torrent_check = self.client.core.get_torrent_status(torrent_hash, {}).get()
        if torrent_check['hash']:
            return torrent_hash

        return False

View File

@@ -12,6 +12,7 @@ config = [{
'name': 'nzbget',
'label': 'NZBGet',
'description': 'Use <a href="http://nzbget.sourceforge.net/Main_Page" target="_blank">NZBGet</a> to download NZBs.',
'wizard': True,
'options': [
{
'name': 'enabled',
@@ -24,6 +25,13 @@ config = [{
'default': 'localhost:6789',
'description': 'Hostname with port. Usually <strong>localhost:6789</strong>',
},
{
'name': 'ssl',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
},
{
'name': 'username',
'default': 'nzbget',
@@ -42,6 +50,7 @@ config = [{
},
{
'name': 'priority',
'advanced': True,
'default': '0',
'type': 'dropdown',
'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100)],
@@ -57,6 +66,7 @@ config = [{
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},

View File

@@ -1,6 +1,6 @@
from base64 import standard_b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import ss, sp
from couchpotato.core.helpers.variable import tryInt, md5
from couchpotato.core.logger import CPLog
from datetime import timedelta
@@ -12,13 +12,16 @@ import xmlrpclib
log = CPLog(__name__)
class NZBGet(Downloader):
type = ['nzb']
protocol = ['nzb']
url = 'http://%(username)s:%(password)s@%(host)s/xmlrpc'
url = '%(protocol)s://%(username)s:%(password)s@%(host)s/xmlrpc'
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
if not filedata:
log.error('Unable to get NZB file: %s', traceback.format_exc())
@@ -26,13 +29,13 @@ class NZBGet(Downloader):
log.info('Sending "%s" to NZBGet.', data.get('name'))
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
nzb_name = ss('%s.nzb' % self.createNzbName(data, movie))
url = self.url % {'protocol': 'https' if self.conf('ssl') else 'http', 'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
nzb_name = ss('%s.nzb' % self.createNzbName(data, media))
rpc = xmlrpclib.ServerProxy(url)
try:
if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
log.info('Successfully connected to NZBGet')
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
@@ -64,27 +67,27 @@ class NZBGet(Downloader):
log.error('NZBGet could not add %s to the queue.', nzb_name)
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking NZBGet download status.')
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
url = self.url % {'protocol': 'https' if self.conf('ssl') else 'http', 'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
rpc = xmlrpclib.ServerProxy(url)
try:
if rpc.writelog('INFO', 'CouchPotato connected to check status'):
log.info('Successfully connected to NZBGet')
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
return False
return []
except xmlrpclib.ProtocolError, e:
if e.errcode == 401:
log.error('Password is incorrect.')
else:
log.error('Protocol Error: %s', e)
return False
return []
# Get NZBGet data
try:
@@ -94,60 +97,72 @@ class NZBGet(Downloader):
history = rpc.history()
except:
log.error('Failed getting data: %s', traceback.format_exc(1))
return False
return []
statuses = StatusList(self)
release_downloads = ReleaseDownloadList(self)
for item in groups:
log.debug('Found %s in NZBGet download queue', item['NZBFilename'])
for nzb in groups:
try:
nzb_id = [param['Value'] for param in item['Parameters'] if param['Name'] == 'couchpotato'][0]
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = item['NZBID']
statuses.append({
'id': nzb_id,
'name': item['NZBFilename'],
'original_status': 'DOWNLOADING' if item['ActiveDownloads'] > 0 else 'QUEUED',
# Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
'timeleft': str(timedelta(seconds = item['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20)) if item['ActiveDownloads'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']) else -1,
})
nzb_id = nzb['NZBID']
for item in queue: # 'Parameters' is not passed in rpc.postqueue
log.debug('Found %s in NZBGet postprocessing queue', item['NZBFilename'])
statuses.append({
'id': item['NZBID'],
'name': item['NZBFilename'],
'original_status': item['Stage'],
'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
})
if nzb_id in ids:
log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
timeleft = -1
try:
if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20))
except:
pass
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
# Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
'timeleft': timeleft,
})
for item in history:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (item['NZBFilename'] , item['ParStatus'], item['ScriptStatus'] , item['Log']))
for nzb in queue: # 'Parameters' is not passed in rpc.postqueue
if nzb['NZBID'] in ids:
log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
release_downloads.append({
'id': nzb['NZBID'],
'name': nzb['NZBFilename'],
'original_status': nzb['Stage'],
'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
})
for nzb in history:
try:
nzb_id = [param['Value'] for param in item['Parameters'] if param['Name'] == 'couchpotato'][0]
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = item['NZBID']
statuses.append({
'id': nzb_id,
'name': item['NZBFilename'],
'status': 'completed' if item['ParStatus'] == 'SUCCESS' and item['ScriptStatus'] == 'SUCCESS' else 'failed',
'original_status': item['ParStatus'] + ', ' + item['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)),
'folder': item['DestDir']
})
nzb_id = nzb['NZBID']
return statuses
if nzb_id in ids:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir'])
})
def removeFailed(self, item):
return release_downloads
log.info('%s failed downloading, deleting...', item['name'])
def removeFailed(self, release_download):
url = self.url % {'host': self.conf('host'), 'password': self.conf('password')}
log.info('%s failed downloading, deleting...', release_download['name'])
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
rpc = xmlrpclib.ServerProxy(url)
try:
if rpc.writelog('INFO', 'CouchPotato connected to delete some history'):
log.info('Successfully connected to NZBGet')
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
@@ -162,11 +177,16 @@ class NZBGet(Downloader):
try:
history = rpc.history()
nzb_id = None
path = None
for hist in history:
if hist['Parameters'] and hist['Parameters']['couchpotato'] and hist['Parameters']['couchpotato'] == item['id']:
nzb_id = hist['ID']
path = hist['DestDir']
if rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]):
for param in hist['Parameters']:
if param['Name'] == 'couchpotato' and param['Value'] == release_download['id']:
nzb_id = hist['ID']
path = hist['DestDir']
if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]):
shutil.rmtree(path, True)
except:
log.error('Failed deleting: %s', traceback.format_exc(0))

View File

@@ -38,6 +38,7 @@ config = [{
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},

View File

@@ -1,6 +1,6 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, sp
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from urllib2 import URLError
@@ -8,65 +8,72 @@ from uuid import uuid4
import hashlib
import httplib
import json
import os
import socket
import ssl
import sys
import time
import traceback
import urllib2
log = CPLog(__name__)
class NZBVortex(Downloader):
type = ['nzb']
protocol = ['nzb']
api_level = None
session_id = None
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
# Send the nzb
try:
nzb_filename = self.createFileName(data, filedata, movie)
self.call('nzb/add', params = {'file': (ss(nzb_filename), filedata)}, multipart = True)
nzb_filename = self.createFileName(data, filedata, media)
self.call('nzb/add', files = {'file': (nzb_filename, filedata)})
time.sleep(10)
raw_statuses = self.call('nzb')
nzb_id = [item['id'] for item in raw_statuses.get('nzbs', []) if item['name'] == nzb_filename][0]
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if os.path.basename(item['nzbFileName']) == nzb_filename][0]
return self.downloadReturnId(nzb_id)
except:
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
raw_statuses = self.call('nzb')
statuses = StatusList(self)
for item in raw_statuses.get('nzbs', []):
release_downloads = ReleaseDownloadList(self)
for nzb in raw_statuses.get('nzbs', []):
if nzb['id'] in ids:
# Check status
status = 'busy'
if item['state'] == 20:
status = 'completed'
elif item['state'] in [21, 22, 24]:
status = 'failed'
# Check status
status = 'busy'
if nzb['state'] == 20:
status = 'completed'
elif nzb['state'] in [21, 22, 24]:
status = 'failed'
release_downloads.append({
'id': nzb['id'],
'name': nzb['uiTitle'],
'status': status,
'original_status': nzb['state'],
'timeleft':-1,
'folder': sp(nzb['destinationPath']),
})
statuses.append({
'id': item['id'],
'name': item['uiTitle'],
'status': status,
'original_status': item['state'],
'timeleft': -1,
'folder': item['destinationPath'],
})
return release_downloads
return statuses
def removeFailed(self, release_download):
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
log.info('%s failed downloading, deleting...', release_download['name'])
try:
self.call('nzb/%s/cancel' % item['id'])
self.call('nzb/%s/cancel' % release_download['id'])
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False
@@ -96,9 +103,10 @@ class NZBVortex(Downloader):
return False
def call(self, call, parameters = {}, repeat = False, auth = True, *args, **kwargs):
def call(self, call, parameters = None, repeat = False, auth = True, *args, **kwargs):
# Login first
if not parameters: parameters = {}
if not self.session_id and auth:
self.login()
@@ -109,10 +117,9 @@ class NZBVortex(Downloader):
params = tryUrlencode(parameters)
url = cleanHost(self.conf('host')) + 'api/' + call
url_opener = urllib2.build_opener(HTTPSHandler())
try:
data = self.urlopen('%s?%s' % (url, params), opener = url_opener, *args, **kwargs)
data = self.urlopen('%s?%s' % (url, params), *args, **kwargs)
if data:
return json.loads(data)
@@ -121,7 +128,7 @@ class NZBVortex(Downloader):
# Try login and do again
if not repeat:
self.login()
return self.call(call, parameters = parameters, repeat = True, *args, **kwargs)
return self.call(call, parameters = parameters, repeat = True, **kwargs)
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
except:
@@ -134,10 +141,9 @@ class NZBVortex(Downloader):
if not self.api_level:
url = cleanHost(self.conf('host')) + 'api/app/apilevel'
url_opener = urllib2.build_opener(HTTPSHandler())
try:
data = self.urlopen(url, opener = url_opener, show_error = False)
data = self.urlopen(url, show_error = False)
self.api_level = float(json.loads(data).get('apilevel'))
except URLError, e:
if hasattr(e, 'code') and e.code == 403:
@@ -147,7 +153,8 @@ class NZBVortex(Downloader):
return self.api_level
def isEnabled(self, manual, data):
def isEnabled(self, manual = False, data = None):
if not data: data = {}
return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()

View File

@@ -6,12 +6,16 @@ import traceback
log = CPLog(__name__)
class Pneumatic(Downloader):
type = ['nzb']
protocol = ['nzb']
strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s'
status_support = False
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
if not directory or not os.path.isdir(directory):
@@ -22,15 +26,15 @@ class Pneumatic(Downloader):
log.error('No nzb available!')
return False
fullPath = os.path.join(directory, self.createFileName(data, filedata, movie))
fullPath = os.path.join(directory, self.createFileName(data, filedata, media))
try:
if not os.path.isfile(fullPath):
log.info('Downloading %s to %s.', (data.get('type'), fullPath))
log.info('Downloading %s to %s.', (data.get('protocol'), fullPath))
with open(fullPath, 'wb') as f:
f.write(filedata)
nzb_name = self.createNzbName(data, movie)
nzb_name = self.createNzbName(data, media)
strm_path = os.path.join(directory, nzb_name)
strm_file = open(strm_path + '.strm', 'wb')
@@ -38,11 +42,11 @@ class Pneumatic(Downloader):
strm_file.write(strmContent)
strm_file.close()
return True
return self.downloadReturnId('')
else:
log.info('File %s already exists.', fullPath)
return True
return self.downloadReturnId('')
except:
log.error('Failed to download .strm: %s', traceback.format_exc())

View File

@@ -0,0 +1,78 @@
from .main import rTorrent
def start():
return rTorrent()
config = [{
'name': 'rtorrent',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'rtorrent',
'label': 'rTorrent',
'description': '',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'torrent',
},
{
'name': 'url',
'default': 'http://localhost:80/RPC2',
'description': 'XML-RPC Endpoint URI. Usually <strong>scgi://localhost:5000</strong> '
'or <strong>http://localhost:80/RPC2</strong>'
},
{
'name': 'username',
},
{
'name': 'password',
'type': 'password',
},
{
'name': 'label',
'description': 'Label to apply on added torrents.',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Download to this directory. Keep empty for default rTorrent download directory.',
},
{
'name': 'remove_complete',
'label': 'Remove torrent',
'default': False,
'advanced': True,
'type': 'bool',
'description': 'Remove the torrent after it finishes seeding.',
},
{
'name': 'delete_files',
'label': 'Remove files',
'default': True,
'type': 'bool',
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'paused',
'type': 'bool',
'advanced': True,
'default': False,
'description': 'Add the torrent paused.',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
],
}
],
}]

View File

@@ -0,0 +1,229 @@
from base64 import b16encode, b32decode
from bencode import bencode, bdecode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.logger import CPLog
from datetime import timedelta
from hashlib import sha1
from rtorrent import RTorrent
from rtorrent.err import MethodError
import os
log = CPLog(__name__)
class rTorrent(Downloader):
protocol = ['torrent', 'torrent_magnet']
rt = None
def connect(self):
# Already connected?
if self.rt is not None:
return self.rt
# Ensure url is set
if not self.conf('url'):
log.error('Config properties are not filled in correctly, url is missing.')
return False
if self.conf('username') and self.conf('password'):
self.rt = RTorrent(
self.conf('url'),
self.conf('username'),
self.conf('password')
)
else:
self.rt = RTorrent(self.conf('url'))
return self.rt
def _update_provider_group(self, name, data):
if data.get('seed_time'):
log.info('seeding time ignored, not supported')
if not name:
return False
if not self.connect():
return False
views = self.rt.get_views()
if name not in views:
self.rt.create_group(name)
group = self.rt.get_group(name)
try:
if data.get('seed_ratio'):
ratio = int(float(data.get('seed_ratio')) * 100)
log.debug('Updating provider ratio to %s, group name: %s', (ratio, name))
# Explicitly set all group options to ensure it is setup correctly
group.set_upload('1M')
group.set_min(ratio)
group.set_max(ratio)
group.set_command('d.stop')
group.enable()
else:
# Reset group action and disable it
group.set_command()
group.disable()
except MethodError, err:
log.error('Unable to set group options: %s', err.msg)
return False
return True
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.debug('Sending "%s" to rTorrent.', (data.get('name')))
if not self.connect():
return False
group_name = 'cp_' + data.get('provider').lower()
if not self._update_provider_group(group_name, data):
return False
torrent_params = {}
if self.conf('label'):
torrent_params['label'] = self.conf('label')
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Try download magnet torrents
if data.get('protocol') == 'torrent_magnet':
filedata = self.magnetToTorrent(data.get('url'))
if filedata is False:
return False
data['protocol'] = 'torrent'
info = bdecode(filedata)["info"]
torrent_hash = sha1(bencode(info)).hexdigest().upper()
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
# Send request to rTorrent
try:
# Send torrent to rTorrent
torrent = self.rt.load_torrent(filedata)
if not torrent:
log.error('Unable to find the torrent, did it fail to load?')
return False
# Set label
if self.conf('label'):
torrent.set_custom(1, self.conf('label'))
if self.conf('directory'):
torrent.set_directory(self.conf('directory'))
# Set Ratio Group
torrent.set_visible(group_name)
# Start torrent
if not self.conf('paused', default = 0):
torrent.start()
return self.downloadReturnId(torrent_hash)
except Exception, err:
log.error('Failed to send torrent to rTorrent: %s', err)
return False
def getAllDownloadStatus(self, ids):
log.debug('Checking rTorrent download status.')
if not self.connect():
return []
try:
torrents = self.rt.get_torrents()
release_downloads = ReleaseDownloadList(self)
for torrent in torrents:
if torrent.info_hash in ids:
torrent_files = []
for file_item in torrent.get_files():
torrent_files.append(sp(os.path.join(torrent.directory, file_item.path)))
status = 'busy'
if torrent.complete:
if torrent.active:
status = 'seeding'
else:
status = 'completed'
release_downloads.append({
'id': torrent.info_hash,
'name': torrent.name,
'status': status,
'seed_ratio': torrent.ratio,
'original_status': torrent.state,
'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1,
'folder': sp(torrent.directory),
'files': '|'.join(torrent_files)
})
return release_downloads
except Exception, err:
log.error('Failed to get status from rTorrent: %s', err)
return []
def pause(self, release_download, pause = True):
if not self.connect():
return False
torrent = self.rt.find_torrent(release_download['id'])
if torrent is None:
return False
if pause:
return torrent.pause()
return torrent.resume()
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
return self.processComplete(release_download, delete_files = True)
def processComplete(self, release_download, delete_files):
log.debug('Requesting rTorrent to remove the torrent %s%s.',
(release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
if not self.connect():
return False
torrent = self.rt.find_torrent(release_download['id'])
if torrent is None:
return False
if delete_files:
for file_item in torrent.get_files(): # will only delete files, not dir/sub-dir
os.unlink(os.path.join(torrent.directory, file_item.path))
if torrent.is_multi_file() and torrent.directory.endswith(torrent.name):
# Remove empty directories bottom up
try:
for path, _, _ in os.walk(torrent.directory, topdown = False):
os.rmdir(path)
except OSError:
log.info('Directory "%s" contains extra files, unable to remove', torrent.directory)
torrent.erase() # just removes the torrent, doesn't delete data
return True

View File

@@ -11,7 +11,7 @@ config = [{
'list': 'download_providers',
'name': 'sabnzbd',
'label': 'Sabnzbd',
'description': 'Use <a href="http://sabnzbd.org/" target="_blank">SABnzbd</a> to download NZBs.',
'description': 'Use <a href="http://sabnzbd.org/" target="_blank">SABnzbd</a> (0.7+) to download NZBs.',
'wizard': True,
'options': [
{
@@ -34,6 +34,15 @@ config = [{
'label': 'Category',
'description': 'The category CP places the nzb in. Like <strong>movies</strong> or <strong>couchpotato</strong>',
},
{
'name': 'priority',
'label': 'Priority',
'type': 'dropdown',
'default': '0',
'advanced': True,
'values': [('Paused', -2), ('Low', -1), ('Normal', 0), ('High', 1), ('Forced', 2)],
'description': 'Add to the queue with this priority.',
},
{
'name': 'manual',
'default': False,
@@ -41,9 +50,18 @@ config = [{
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'remove_complete',
'advanced': True,
'label': 'Remove NZB',
'default': False,
'type': 'bool',
'description': 'Remove the NZB from history after it completed.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},

View File

@@ -1,43 +1,49 @@
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, ss, sp
from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
from urllib2 import URLError
import json
import os
import traceback
log = CPLog(__name__)
class Sabnzbd(Downloader):
type = ['nzb']
protocol = ['nzb']
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" to SABnzbd.', data.get('name'))
req_params = {
'cat': self.conf('category'),
'mode': 'addurl',
'nzbname': self.createNzbName(data, movie),
'nzbname': self.createNzbName(data, media),
'priority': self.conf('priority'),
}
nzb_filename = None
if filedata:
if len(filedata) < 50:
log.error('No proper nzb available: %s', (filedata))
log.error('No proper nzb available: %s', filedata)
return False
# If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
nzb_filename = self.createFileName(data, filedata, movie)
nzb_filename = self.createFileName(data, filedata, media)
req_params['mode'] = 'addfile'
else:
req_params['name'] = data.get('url')
try:
if req_params.get('mode') is 'addfile':
sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True)
if nzb_filename and req_params.get('mode') is 'addfile':
sab_data = self.call(req_params, files = {'nzbfile': (ss(nzb_filename), filedata)})
else:
sab_data = self.call(req_params)
except URLError:
@@ -58,7 +64,7 @@ class Sabnzbd(Downloader):
log.error('Error getting data from SABNZBd: %s', sab_data)
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking SABnzbd download status.')
@@ -69,7 +75,7 @@ class Sabnzbd(Downloader):
})
except:
log.error('Failed getting queue: %s', traceback.format_exc(1))
return False
return []
# Go through history items
try:
@@ -79,52 +85,80 @@ class Sabnzbd(Downloader):
})
except:
log.error('Failed getting history json: %s', traceback.format_exc(1))
return False
return []
statuses = StatusList(self)
release_downloads = ReleaseDownloadList(self)
# Get busy releases
for item in queue.get('slots', []):
statuses.append({
'id': item['nzo_id'],
'name': item['filename'],
'original_status': item['status'],
'timeleft': item['timeleft'] if not queue['paused'] else -1,
})
for nzb in queue.get('slots', []):
if nzb['nzo_id'] in ids:
status = 'busy'
if 'ENCRYPTED / ' in nzb['filename']:
status = 'failed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['filename'],
'status': status,
'original_status': nzb['status'],
'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
})
# Get old releases
for item in history.get('slots', []):
for nzb in history.get('slots', []):
if nzb['nzo_id'] in ids:
status = 'busy'
if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
status = 'failed'
elif nzb['status'] == 'Completed':
status = 'completed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['name'],
'status': status,
'original_status': nzb['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
})
status = 'busy'
if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()):
status = 'failed'
elif item['status'] == 'Completed':
status = 'completed'
return release_downloads
statuses.append({
'id': item['nzo_id'],
'name': item['name'],
'status': status,
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': item['storage'],
})
def removeFailed(self, release_download):
return statuses
log.info('%s failed downloading, deleting...', release_download['name'])
def removeFailed(self, item):
try:
self.call({
'mode': 'queue',
'name': 'delete',
'del_files': '1',
'value': release_download['id']
}, use_json = False)
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '1',
'value': release_download['id']
}, use_json = False)
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False
log.info('%s failed downloading, deleting...', item['name'])
return True
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting SabNZBd to remove the NZB %s.', release_download['name'])
try:
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '1',
'value': item['id']
'del_files': '0',
'value': release_download['id']
}, use_json = False)
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
log.error('Failed removing: %s', traceback.format_exc(0))
return False
return True

View File

@@ -18,7 +18,7 @@ config = [{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'torrent',
'radio_group': 'nzb,torrent',
},
{
'name': 'host',
@@ -32,6 +32,13 @@ config = [{
'name': 'password',
'type': 'password',
},
{
'name': 'use_for',
'label': 'Use for',
'default': 'both',
'type': 'dropdown',
'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
},
{
'name': 'manual',
'default': 0,

View File

@@ -1,22 +1,24 @@
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.logger import CPLog
import httplib
import json
import urllib
import urllib2
import requests
import traceback
log = CPLog(__name__)
class Synology(Downloader):
type = ['torrent_magnet']
log = CPLog(__name__)
protocol = ['nzb', 'torrent', 'torrent_magnet']
status_support = False
def download(self, data, movie, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.error('Sending "%s" (%s) to Synology.', (data.get('name'), data.get('type')))
response = False
log.error('Sending "%s" (%s) to Synology.', (data['name'], data['protocol']))
# Load host from config and split out port.
host = self.conf('host').split(':')
@@ -24,24 +26,47 @@ class Synology(Downloader):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if data.get('type') == 'torrent':
log.error('Can\'t add binary torrent file')
return False
try:
# Send request to Transmission
# Send request to Synology
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
remote_torrent = srpc.add_torrent_uri(data.get('url'))
log.info('Response: %s', remote_torrent)
return remote_torrent['success']
except Exception, err:
log.error('Exception while adding torrent: %s', err)
return False
if data['protocol'] == 'torrent_magnet':
log.info('Adding torrent URL %s', data['url'])
response = srpc.create_task(url = data['url'])
elif data['protocol'] in ['nzb', 'torrent']:
log.info('Adding %s' % data['protocol'])
if not filedata:
log.error('No %s data found', data['protocol'])
else:
filename = data['name'] + '.' + data['protocol']
response = srpc.create_task(filename = filename, filedata = filedata)
except:
log.error('Exception while adding torrent: %s', traceback.format_exc())
finally:
return self.downloadReturnId('') if response else False
def getEnabledProtocol(self):
if self.conf('use_for') == 'both':
return super(Synology, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent':
return ['torrent', 'torrent_magnet']
else:
return ['nzb']
def isEnabled(self, manual = False, data = None):
if not data: data = {}
for_protocol = ['both']
if data and 'torrent' in data.get('protocol'):
for_protocol.append('torrent')
elif data:
for_protocol.append(data.get('protocol'))
return super(Synology, self).isEnabled(manual, data) and\
((self.conf('use_for') in for_protocol))
class SynologyRPC(object):
'''SynologyRPC lite library'''
"""SynologyRPC lite library"""
def __init__(self, host = 'localhost', port = 5000, username = None, password = None):
@@ -58,11 +83,13 @@ class SynologyRPC(object):
args = {'api': 'SYNO.API.Auth', 'account': self.username, 'passwd': self.password, 'version': 2,
'method': 'login', 'session': self.session_name, 'format': 'sid'}
response = self._req(self.auth_url, args)
if response['success'] == True:
if response['success']:
self.sid = response['data']['sid']
log.debug('Sid=%s', self.sid)
return response
elif self.username or self.password:
log.debug('sid=%s', self.sid)
else:
log.error('Couldn\'t login to Synology, %s', response)
return response['success']
else:
log.error('User or password missing, not using authentication.')
return False
@@ -70,36 +97,51 @@ class SynologyRPC(object):
args = {'api':'SYNO.API.Auth', 'version':1, 'method':'logout', 'session':self.session_name, '_sid':self.sid}
return self._req(self.auth_url, args)
def _req(self, url, args):
req_url = url + '?' + urllib.urlencode(args)
def _req(self, url, args, files = None):
response = {'success': False}
try:
req_open = urllib2.urlopen(req_url)
response = json.loads(req_open.read())
if response['success'] == True:
req = requests.post(url, data = args, files = files)
req.raise_for_status()
response = json.loads(req.text)
if response['success']:
log.info('Synology action successfull')
return response
except httplib.InvalidURL, err:
log.error('Invalid Transmission host, check your config %s', err)
return False
except urllib2.HTTPError, err:
except requests.ConnectionError, err:
log.error('Synology connection error, check your config %s', err)
except requests.HTTPError, err:
log.error('SynologyRPC HTTPError: %s', err)
return False
except urllib2.URLError, err:
log.error('Unable to connect to Synology %s', err)
return False
except Exception, err:
log.error('Exception: %s', err)
finally:
return response
def add_torrent_uri(self, torrent):
log.info('Adding torrent URL %s', torrent)
response = {}
def create_task(self, url = None, filename = None, filedata = None):
""" Creates new download task in Synology DownloadStation. Either specify
url or pair (filename, filedata).
Returns True if task was created, False otherwise
"""
result = False
# login
login = self._login()
if len(login) > 0 and login['success'] == True:
log.info('Login success, adding torrent')
args = {'api':'SYNO.DownloadStation.Task', 'version':1, 'method':'create', 'uri':torrent, '_sid':self.sid}
response = self._req(self.download_url, args)
if self._login():
args = {'api': 'SYNO.DownloadStation.Task',
'version': '1',
'method': 'create',
'_sid': self.sid}
if url:
log.info('Login success, adding torrent URI')
args['uri'] = url
response = self._req(self.download_url, args = args)
log.info('Response: %s', response)
result = response['success']
elif filename and filedata:
log.info('Login success, adding torrent')
files = {'file': (filename, filedata)}
response = self._req(self.download_url, args = args, files = files)
log.info('Response: %s', response)
result = response['success']
else:
log.error('Invalid use of SynologyRPC.create_task: either url or filename+filedata must be specified')
self._logout()
else:
log.error('Couldn\'t login to Synology, %s', login)
return response
return result

View File

@@ -25,6 +25,13 @@ config = [{
'default': 'localhost:9091',
'description': 'Hostname with port. Usually <strong>localhost:9091</strong>',
},
{
'name': 'rpc_url',
'type': 'string',
'default': 'transmission',
'advanced': True,
'description': 'Change if you don\'t run Transmission RPC at the default url.',
},
{
'name': 'username',
},
@@ -32,30 +39,33 @@ config = [{
'name': 'password',
'type': 'password',
},
{
'name': 'paused',
'type': 'bool',
'default': False,
'description': 'Add the torrent paused.',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Download to this directory. Keep empty for default Transmission download directory.',
},
{
'name': 'ratio',
'default': 10,
'type': 'float',
'name': 'remove_complete',
'label': 'Remove torrent',
'default': True,
'advanced': True,
'description': 'Stop transfer when reaching ratio',
'type': 'bool',
'description': 'Remove the torrent from Transmission after it finished seeding.',
},
{
'name': 'ratiomode',
'default': 0,
'type': 'int',
'name': 'delete_files',
'label': 'Remove files',
'default': True,
'type': 'bool',
'advanced': True,
'description': '0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.',
'description': 'Also remove the leftover files.',
},
{
'name': 'paused',
'type': 'bool',
'advanced': True,
'default': False,
'description': 'Add the torrent paused.',
},
{
'name': 'manual',
@@ -64,6 +74,20 @@ config = [{
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'stalled_as_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Consider a stalled torrent as failed',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],

View File

@@ -1,15 +1,13 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
import httplib
import json
import os.path
import re
import shutil
import traceback
import urllib2
log = CPLog(__name__)
@@ -17,154 +15,144 @@ log = CPLog(__name__)
class Transmission(Downloader):
type = ['torrent', 'torrent_magnet']
protocol = ['torrent', 'torrent_magnet']
log = CPLog(__name__)
trpc = None
def download(self, data, movie, filedata = None):
log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('type')))
def connect(self):
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
# Set parameters for Transmission
params = {
'paused': self.conf('paused', default = 0),
}
if not self.trpc:
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url'), username = self.conf('username'), password = self.conf('password'))
if len(self.conf('directory', default = '')) > 0:
folder_name = self.createFileName(data, filedata, movie)[:-len(data.get('type')) - 1]
folder_path = os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep)
return self.trpc
# Create the empty folder to download too
self.makeDir(folder_path)
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
params['download-dir'] = folder_path
log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('protocol')))
torrent_params = {}
if self.conf('ratio'):
torrent_params = {
'seedRatioLimit': self.conf('ratio'),
'seedRatioMode': self.conf('ratiomode')
}
if not self.connect():
return False
if not filedata and data.get('type') == 'torrent':
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Send request to Transmission
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
if data.get('type') == 'torrent_magnet':
remote_torrent = trpc.add_torrent_uri(data.get('url'), arguments = params)
torrent_params['trackerAdd'] = self.torrent_trackers
# Set parameters for adding torrent
params = {
'paused': self.conf('paused', default = False)
}
if self.conf('directory'):
if os.path.isdir(self.conf('directory')):
params['download-dir'] = self.conf('directory')
else:
remote_torrent = trpc.add_torrent_file(b64encode(filedata), arguments = params)
log.error('Download directory from Transmission settings: %s doesn\'t exist', self.conf('directory'))
if not remote_torrent:
return False
# Change parameters of torrent
torrent_params = {}
if data.get('seed_ratio'):
torrent_params['seedRatioLimit'] = tryFloat(data.get('seed_ratio'))
torrent_params['seedRatioMode'] = 1
# Change settings of added torrents
elif torrent_params:
trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
if data.get('seed_time'):
torrent_params['seedIdleLimit'] = tryInt(data.get('seed_time')) * 60
torrent_params['seedIdleMode'] = 1
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
except:
log.error('Failed to change settings for transfer: %s', traceback.format_exc())
# Send request to Transmission
if data.get('protocol') == 'torrent_magnet':
remote_torrent = self.trpc.add_torrent_uri(data.get('url'), arguments = params)
torrent_params['trackerAdd'] = self.torrent_trackers
else:
remote_torrent = self.trpc.add_torrent_file(b64encode(filedata), arguments = params)
if not remote_torrent:
log.error('Failed sending torrent to Transmission')
return False
def getAllDownloadStatus(self):
# Change settings of added torrents
if torrent_params:
self.trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
def getAllDownloadStatus(self, ids):
log.debug('Checking Transmission download status.')
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if not self.connect():
return []
# Go through Queue
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return_params = {
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isFinished', 'downloadDir', 'uploadRatio']
}
queue = trpc.get_alltorrents(return_params)
release_downloads = ReleaseDownloadList(self)
except Exception, err:
log.error('Failed getting queue: %s', err)
return False
return_params = {
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files']
}
statuses = StatusList(self)
queue = self.trpc.get_alltorrents(return_params)
if not (queue and queue.get('torrents')):
log.debug('Nothing in queue or error')
return []
# Get torrents status
# CouchPotato Status
#status = 'busy'
#status = 'failed'
#status = 'completed'
# Transmission Status
#status = 0 => "Torrent is stopped"
#status = 1 => "Queued to check files"
#status = 2 => "Checking files"
#status = 3 => "Queued to download"
#status = 4 => "Downloading"
#status = 4 => "Queued to seed"
#status = 6 => "Seeding"
#To do :
# add checking file
# manage no peer in a range time => fail
for item in queue['torrents']:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / confRatio=%s / isFinished=%s', (item['name'], item['id'], item['downloadDir'], item['hashString'], item['percentDone'], item['status'], item['eta'], item['uploadRatio'], self.conf('ratio'), item['isFinished']))
if not os.path.isdir(Env.setting('from', 'renamer')):
log.error('Renamer "from" folder doesn\'t to exist.')
return
if (item['percentDone'] * 100) >= 100 and (item['status'] == 6 or item['status'] == 0) and item['uploadRatio'] > self.conf('ratio'):
try:
trpc.stop_torrent(item['hashString'], {})
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'completed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': os.path.join(item['downloadDir'], item['name']),
})
except Exception, err:
log.error('Failed to stop and remove torrent "%s" with error: %s', (item['name'], err))
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'failed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
})
else:
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'busy',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = item['eta'])), # Is ETA in seconds??
for torrent in queue['torrents']:
if torrent['hashString'] in ids:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / isStalled=%s / eta=%s / uploadRatio=%s / isFinished=%s',
(torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent.get('isStalled', 'N/A'), torrent['eta'], torrent['uploadRatio'], torrent['isFinished']))
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(torrent['downloadDir'], file_item['name'])))
status = 'busy'
if torrent.get('isStalled') and not torrent['percentDone'] == 1 and self.conf('stalled_as_failed'):
status = 'failed'
elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] in [5, 6]:
status = 'seeding'
release_downloads.append({
'id': torrent['hashString'],
'name': torrent['name'],
'status': status,
'original_status': torrent['status'],
'seed_ratio': torrent['uploadRatio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(torrent['downloadDir'] if len(torrent_files) == 1 else os.path.join(torrent['downloadDir'], torrent['name'])),
'files': '|'.join(torrent_files)
})
return statuses
return release_downloads
def pause(self, release_download, pause = True):
if pause:
return self.trpc.stop_torrent(release_download['id'])
else:
return self.trpc.start_torrent(release_download['id'])
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
return self.trpc.remove_torrent(release_download['id'], True)
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting Transmission to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.trpc.remove_torrent(release_download['id'], delete_files)
class TransmissionRPC(object):
"""TransmissionRPC lite library"""
def __init__(self, host = 'localhost', port = 9091, username = None, password = None):
def __init__(self, host = 'localhost', port = 9091, rpc_url = 'transmission', username = None, password = None):
super(TransmissionRPC, self).__init__()
self.url = 'http://' + host + ':' + str(port) + '/transmission/rpc'
self.url = 'http://' + host + ':' + str(port) + '/' + rpc_url + '/rpc'
self.tag = 0
self.session_id = 0
self.session = {}
@@ -188,7 +176,7 @@ class TransmissionRPC(object):
log.debug('request: %s', json.dumps(ojson))
log.debug('response: %s', json.dumps(response))
if response['result'] == 'success':
log.debug('Transmission action successfull')
log.debug('Transmission action successful')
return response['arguments']
else:
log.debug('Unknown failure sending command to Transmission. Return text is: %s', response['result'])
@@ -240,13 +228,15 @@ class TransmissionRPC(object):
post_data = {'arguments': arguments, 'method': 'torrent-get', 'tag': self.tag}
return self._request(post_data)
def stop_torrent(self, torrent_id, arguments):
arguments['ids'] = torrent_id
post_data = {'arguments': arguments, 'method': 'torrent-stop', 'tag': self.tag}
def stop_torrent(self, torrent_id):
post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-stop', 'tag': self.tag}
return self._request(post_data)
def remove_torrent(self, torrent_id, remove_local_data, arguments):
arguments['ids'] = torrent_id
arguments['delete-local-data'] = remove_local_data
post_data = {'arguments': arguments, 'method': 'torrent-remove', 'tag': self.tag}
def start_torrent(self, torrent_id):
post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-start', 'tag': self.tag}
return self._request(post_data)
def remove_torrent(self, torrent_id, delete_local_data):
post_data = {'arguments': {'ids': torrent_id, 'delete-local-data': delete_local_data}, 'method': 'torrent-remove', 'tag': self.tag}
return self._request(post_data)

View File

@@ -11,7 +11,7 @@ config = [{
'list': 'download_providers',
'name': 'utorrent',
'label': 'uTorrent',
'description': 'Use <a href="http://www.utorrent.com/" target="_blank">uTorrent</a> to download torrents.',
'description': 'Use <a href="http://www.utorrent.com/" target="_blank">uTorrent</a> (3.0+) to download torrents.',
'wizard': True,
'options': [
{
@@ -36,9 +36,26 @@ config = [{
'name': 'label',
'description': 'Label to add torrent as.',
},
{
'name': 'remove_complete',
'label': 'Remove torrent',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Remove the torrent from uTorrent after it finished seeding.',
},
{
'name': 'delete_files',
'label': 'Remove files',
'default': True,
'type': 'bool',
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'paused',
'type': 'bool',
'advanced': True,
'default': False,
'description': 'Add the torrent paused.',
},
@@ -49,6 +66,13 @@ config = [{
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],

View File

@@ -1,166 +1,202 @@
from base64 import b16encode, b32decode
from bencode import bencode, bdecode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt, ss
from bencode import bencode as benc, bdecode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, ss, sp
from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.logger import CPLog
from datetime import timedelta
from hashlib import sha1
from multipartpost import MultipartPostHandler
from datetime import timedelta
import os
import cookielib
import httplib
import json
import os
import re
import stat
import time
import urllib
import urllib2
log = CPLog(__name__)
class uTorrent(Downloader):
type = ['torrent', 'torrent_magnet']
protocol = ['torrent', 'torrent_magnet']
utorrent_api = None
status_flags = {
'STARTED' : 1,
'CHECKING' : 2,
'CHECK-START' : 4,
'CHECKED' : 8,
'ERROR' : 16,
'PAUSED' : 32,
'QUEUED' : 64,
'LOADED' : 128
}
def download(self, data, movie, filedata = None):
log.debug('Sending "%s" (%s) to uTorrent.', (data.get('name'), data.get('type')))
def connect(self):
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return self.utorrent_api
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.debug("Sending '%s' (%s) to uTorrent.", (data.get('name'), data.get('protocol')))
if not self.connect():
return False
settings = self.utorrent_api.get_settings()
if not settings:
return False
#Fix settings in case they are not set for CPS compatibility
new_settings = {}
if not (settings.get('seed_prio_limitul') == 0 and settings['seed_prio_limitul_flag']):
new_settings['seed_prio_limitul'] = 0
new_settings['seed_prio_limitul_flag'] = True
log.info('Updated uTorrent settings to set a torrent to complete after it the seeding requirements are met.')
if settings.get('bt.read_only_on_complete'): #This doesn't work as this option seems to be not available through the api. Mitigated with removeReadOnly function
new_settings['bt.read_only_on_complete'] = False
log.info('Updated uTorrent settings to not set the files to read only after completing.')
if new_settings:
self.utorrent_api.set_settings(new_settings)
torrent_params = {}
if self.conf('label'):
torrent_params['label'] = self.conf('label')
if not filedata and data.get('type') == 'torrent':
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
if data.get('type') == 'torrent_magnet':
if data.get('protocol') == 'torrent_magnet':
torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers)
else:
info = bdecode(filedata)["info"]
torrent_hash = sha1(bencode(info)).hexdigest().upper()
torrent_filename = self.createFileName(data, filedata, movie)
info = bdecode(filedata)['info']
torrent_hash = sha1(benc(info)).hexdigest().upper()
torrent_filename = self.createFileName(data, filedata, media)
if data.get('seed_ratio'):
torrent_params['seed_override'] = 1
torrent_params['seed_ratio'] = tryInt(tryFloat(data['seed_ratio']) * 1000)
if data.get('seed_time'):
torrent_params['seed_override'] = 1
torrent_params['seed_time'] = tryInt(data['seed_time']) * 3600
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
# Send request to uTorrent
try:
if not self.utorrent_api:
self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
if data.get('protocol') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url'))
else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata)
if data.get('type') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(data.get('url'))
else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata)
# Change settings of added torrent
self.utorrent_api.set_torrent(torrent_hash, torrent_params)
if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash)
# Change settings of added torrents
self.utorrent_api.set_torrent(torrent_hash, torrent_params)
if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash)
return self.downloadReturnId(torrent_hash)
except Exception, err:
log.error('Failed to send torrent to uTorrent: %s', err)
return False
return self.downloadReturnId(torrent_hash)
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking uTorrent download status.')
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if not self.connect():
return []
try:
self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
except Exception, err:
log.error('Failed to get uTorrent object: %s', err)
return False
release_downloads = ReleaseDownloadList(self)
data = ''
try:
data = self.utorrent_api.get_status()
queue = json.loads(data)
if queue.get('error'):
log.error('Error getting data from uTorrent: %s', queue.get('error'))
return False
data = self.utorrent_api.get_status()
if not data:
log.error('Error getting data from uTorrent')
return []
except Exception, err:
log.error('Failed to get status from uTorrent: %s', err)
return False
queue = json.loads(data)
if queue.get('error'):
log.error('Error getting data from uTorrent: %s', queue.get('error'))
return []
if queue.get('torrents', []) == []:
if not queue.get('torrents'):
log.debug('Nothing in queue')
return False
statuses = StatusList(self)
download_folder = ''
settings_dict = {}
try:
data = self.utorrent_api.get_settings()
utorrent_settings = json.loads(data)
# Create settings dict
for item in utorrent_settings['settings']:
if item[1] == 0: # int
settings_dict[item[0]] = int(item[2] if not item[2].strip() == '' else '0')
elif item[1] == 1: # bool
settings_dict[item[0]] = True if item[2] == 'true' else False
elif item[1] == 2: # string
settings_dict[item[0]] = item[2]
log.debug('uTorrent settings: %s', settings_dict)
# Get the download path from the uTorrent settings
if settings_dict['dir_completed_download_flag']:
download_folder = settings_dict['dir_completed_download']
elif settings_dict['dir_active_download_flag']:
download_folder = settings_dict['dir_active_download']
else:
log.info('No download folder set in uTorrent. Please set a download folder')
return False
except Exception, err:
log.error('Failed to get settings from uTorrent: %s', err)
return False
return []
# Get torrents
for item in queue.get('torrents', []):
for torrent in queue['torrents']:
if torrent[0] in ids:
# item[21] = Paused | Downloading | Seeding | Finished
status = 'busy'
if item[21] == 'Finished' or item[21] == 'Seeding':
status = 'completed'
#Get files of the torrent
torrent_files = []
try:
torrent_files = json.loads(self.utorrent_api.get_files(torrent[0]))
torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]]
except:
log.debug('Failed getting files from torrent: %s', torrent[2])
status = 'busy'
if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
status = 'seeding'
elif (torrent[1] & self.status_flags['ERROR']):
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'
if not status == 'busy':
self.removeReadOnly(torrent_files)
release_downloads.append({
'id': torrent[0],
'name': torrent[2],
'status': status,
'seed_ratio': float(torrent[7]) / 1000,
'original_status': torrent[1],
'timeleft': str(timedelta(seconds = torrent[10])),
'folder': sp(torrent[26]),
'files': '|'.join(torrent_files)
})
if settings_dict['dir_add_label']:
release_folder = os.path.join(download_folder, item[11], item[2])
else:
release_folder = os.path.join(download_folder, item[2])
return release_downloads
statuses.append({
'id': item[0],
'name': item[2],
'status': status,
'original_status': item[1],
'timeleft': str(timedelta(seconds = item[10])),
'folder': release_folder,
})
def pause(self, release_download, pause = True):
if not self.connect():
return False
return self.utorrent_api.pause_torrent(release_download['id'], pause)
return statuses
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
if not self.connect():
return False
return self.utorrent_api.remove_torrent(release_download['id'], remove_data = True)
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting uTorrent to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
if not self.connect():
return False
return self.utorrent_api.remove_torrent(release_download['id'], remove_data = delete_files)
def removeReadOnly(self, files):
#Removes all read-on ly flags in a for all files
for filepath in files:
if os.path.isfile(filepath):
#Windows only needs S_IWRITE, but we bitwise-or with current perms to preserve other permission bits on Linux
os.chmod(filepath, stat.S_IWRITE | os.stat(filepath).st_mode)
class uTorrentAPI(object):
@@ -187,7 +223,7 @@ class uTorrentAPI(object):
if time.time() > self.last_time + 1800:
self.last_time = time.time()
self.token = self.get_token()
request = urllib2.Request(self.url + "?token=" + self.token + "&" + action, data)
request = urllib2.Request(self.url + '?token=' + self.token + '&' + action, data)
try:
open_request = self.opener.open(request)
response = open_request.read()
@@ -207,32 +243,82 @@ class uTorrentAPI(object):
return False
def get_token(self):
request = self.opener.open(self.url + "token.html")
token = re.findall("<div.*?>(.*?)</", request.read())[0]
request = self.opener.open(self.url + 'token.html')
token = re.findall('<div.*?>(.*?)</', request.read())[0]
return token
def add_torrent_uri(self, torrent):
action = "action=add-url&s=%s" % urllib.quote(torrent)
def add_torrent_uri(self, filename, torrent, add_folder = False):
action = 'action=add-url&s=%s' % urllib.quote(torrent)
if add_folder:
action += '&path=%s' % urllib.quote(filename)
return self._request(action)
def add_torrent_file(self, filename, filedata):
action = "action=add-file"
return self._request(action, {"torrent_file": (ss(filename), filedata)})
def add_torrent_file(self, filename, filedata, add_folder = False):
action = 'action=add-file'
if add_folder:
action += '&path=%s' % urllib.quote(filename)
return self._request(action, {'torrent_file': (ss(filename), filedata)})
def set_torrent(self, hash, params):
action = "action=setprops&hash=%s" % hash
action = 'action=setprops&hash=%s' % hash
for k, v in params.iteritems():
action += "&s=%s&v=%s" % (k, v)
action += '&s=%s&v=%s' % (k, v)
return self._request(action)
def pause_torrent(self, hash):
action = "action=pause&hash=%s" % hash
def pause_torrent(self, hash, pause = True):
if pause:
action = 'action=pause&hash=%s' % hash
else:
action = 'action=unpause&hash=%s' % hash
return self._request(action)
def stop_torrent(self, hash):
action = 'action=stop&hash=%s' % hash
return self._request(action)
def remove_torrent(self, hash, remove_data = False):
if remove_data:
action = 'action=removedata&hash=%s' % hash
else:
action = 'action=remove&hash=%s' % hash
return self._request(action)
def get_status(self):
action = "list=1"
action = 'list=1'
return self._request(action)
def get_settings(self):
action = "action=getsettings"
action = 'action=getsettings'
settings_dict = {}
try:
utorrent_settings = json.loads(self._request(action))
# Create settings dict
for setting in utorrent_settings['settings']:
if setting[1] == 0: # int
settings_dict[setting[0]] = int(setting[2] if not setting[2].strip() == '' else '0')
elif setting[1] == 1: # bool
settings_dict[setting[0]] = True if setting[2] == 'true' else False
elif setting[1] == 2: # string
settings_dict[setting[0]] = setting[2]
#log.debug('uTorrent settings: %s', settings_dict)
except Exception, err:
log.error('Failed to get settings from uTorrent: %s', err)
return settings_dict
def set_settings(self, settings_dict = None):
if not settings_dict: settings_dict = {}
for key in settings_dict:
if isinstance(settings_dict[key], bool):
settings_dict[key] = 1 if settings_dict[key] else 0
action = 'action=setsetting' + ''.join(['&s=%s&v=%s' % (key, value) for (key, value) in settings_dict.items()])
return self._request(action)
def get_files(self, hash):
action = 'action=getfiles&hash=%s' % hash
return self._request(action)

View File

@@ -21,15 +21,25 @@ def addEvent(name, handler, priority = 100):
def createHandle(*args, **kwargs):
h = None
try:
parent = handler.im_self
bc = hasattr(parent, 'beforeCall')
if bc: parent.beforeCall(handler)
# Open handler
has_parent = hasattr(handler, 'im_self')
parent = None
if has_parent:
parent = handler.im_self
bc = hasattr(parent, 'beforeCall')
if bc: parent.beforeCall(handler)
# Main event
h = runHandler(name, handler, *args, **kwargs)
ac = hasattr(parent, 'afterCall')
if ac: parent.afterCall(handler)
# Close handler
if parent and has_parent:
ac = hasattr(parent, 'afterCall')
if ac: parent.afterCall(handler)
except:
h = runHandler(name, handler, *args, **kwargs)
log.error('Failed creating handler %s %s: %s', (name, handler, traceback.format_exc()))
return h
@@ -43,12 +53,7 @@ def removeEvent(name, handler):
e -= handler
def fireEvent(name, *args, **kwargs):
if not events.get(name): return
e = Event(name = name, threads = 10, asynch = kwargs.get('async', False), exc_info = True, traceback = True, lock = threading.RLock())
for event in events[name]:
e.handle(event['handler'], priority = event['priority'])
if not events.has_key(name): return
#log.debug('Firing event %s', name)
try:
@@ -59,7 +64,6 @@ def fireEvent(name, *args, **kwargs):
'single': False, # Return single handler
'merge': False, # Merge items
'in_order': False, # Fire them in specific order, waits for the other to finish
'async': False
}
# Do options
@@ -70,12 +74,32 @@ def fireEvent(name, *args, **kwargs):
options[x] = val
except: pass
# Make sure only 1 event is fired at a time when order is wanted
kwargs['event_order_lock'] = threading.RLock() if options['in_order'] or options['single'] else None
kwargs['event_return_on_result'] = options['single']
if len(events[name]) == 1:
# Fire
result = e(*args, **kwargs)
single = None
try:
single = events[name][0]['handler'](*args, **kwargs)
except:
log.error('Failed running single event: %s', traceback.format_exc())
# Don't load thread for single event
result = {
'single': (single is not None, single),
}
else:
e = Event(name = name, threads = 10, exc_info = True, traceback = True, lock = threading.RLock())
for event in events[name]:
e.handle(event['handler'], priority = event['priority'])
# Make sure only 1 event is fired at a time when order is wanted
kwargs['event_order_lock'] = threading.RLock() if options['in_order'] or options['single'] else None
kwargs['event_return_on_result'] = options['single']
# Fire
result = e(*args, **kwargs)
if options['single'] and not options['merge']:
results = None
@@ -133,14 +157,17 @@ def fireEvent(name, *args, **kwargs):
options['on_complete']()
return results
except KeyError, e:
pass
except Exception:
log.error('%s: %s', (name, traceback.format_exc()))
def fireEventAsync(*args, **kwargs):
kwargs['async'] = True
fireEvent(*args, **kwargs)
try:
t = threading.Thread(target = fireEvent, args = args, kwargs = kwargs)
t.setDaemon(True)
t.start()
return True
except Exception, e:
log.error('%s: %s', (args[0], e))
def errorHandler(error):
etype, value, tb = error

View File

@@ -1,6 +1,7 @@
from couchpotato.core.logger import CPLog
from string import ascii_letters, digits
from urllib import quote_plus
import os
import re
import traceback
import unicodedata
@@ -11,7 +12,8 @@ log = CPLog(__name__)
def toSafeString(original):
valid_chars = "-_.() %s%s" % (ascii_letters, digits)
cleanedFilename = unicodedata.normalize('NFKD', toUnicode(original)).encode('ASCII', 'ignore')
return ''.join(c for c in cleanedFilename if c in valid_chars)
valid_string = ''.join(c for c in cleanedFilename if c in valid_chars)
return ' '.join(valid_string.split())
def simplifyString(original):
string = stripAccents(original.lower())
@@ -37,8 +39,39 @@ def toUnicode(original, *args):
return toUnicode(ascii_text)
def ss(original, *args):
from couchpotato.environment import Env
return toUnicode(original, *args).encode(Env.get('encoding'))
u_original = toUnicode(original, *args)
try:
from couchpotato.environment import Env
return u_original.encode(Env.get('encoding'))
except Exception, e:
log.debug('Failed ss encoding char, force UTF8: %s', e)
return u_original.encode('UTF-8')
def sp(path, *args):
# Standardise encoding, normalise case, path and strip trailing '/' or '\'
if not path or len(path) == 0:
return path
# convert windows path (from remote box) to *nix path
if os.path.sep == '/' and '\\' in path:
path = '/' + path.replace(':', '').replace('\\', '/')
path = os.path.normcase(os.path.normpath(ss(path, *args)))
# Remove any trailing path separators
if path != os.path.sep:
path = path.rstrip(os.path.sep)
# Add a trailing separator in case it is a root folder on windows (crashes guessit)
if len(path) == 2 and path[1] == ':':
path = path + os.path.sep
# Replace *NIX ambiguous '//' at the beginning of a path with '/' (crashes guessit)
path = re.sub('^//', '/', path)
return path
def ek(original, *args):
if isinstance(original, (str, unicode)):
@@ -62,7 +95,7 @@ def stripAccents(s):
def tryUrlencode(s):
new = u''
if isinstance(s, (dict)):
if isinstance(s, dict):
for key, value in s.iteritems():
new += u'&%s=%s' % (key, tryUrlencode(value))

View File

@@ -1,18 +1,14 @@
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import natcmp
from flask.globals import current_app
from flask.helpers import json, make_response
from urllib import unquote
from werkzeug.urls import url_decode
import flask
import re
def getParams():
params = url_decode(getattr(flask.request, 'environ').get('QUERY_STRING', ''))
def getParams(params):
reg = re.compile('^[a-z0-9_\.]+$')
current = temp = {}
temp = {}
for param, value in sorted(params.iteritems()):
nest = re.split("([\[\]]+)", param)
@@ -36,6 +32,8 @@ def getParams():
current = current[item]
else:
temp[param] = toUnicode(unquote(value))
if temp[param].lower() in ['true', 'false']:
temp[param] = temp[param].lower() != 'false'
return dictToList(temp)
@@ -54,29 +52,3 @@ def dictToList(params):
new = params
return new
def getParam(attr, default = None):
    """Fetch one request parameter by name, returning `default` on any failure."""
    try:
        params = getParams()
    except:
        return default
    return params.get(attr, default)
def padded_jsonify(callback, *args, **kwargs):
    """Wrap the JSON payload in a JSONP callback and return a JS response."""
    body = '%s(%s)' % (callback, json.dumps(dict(*args, **kwargs)))
    response_class = getattr(current_app, 'response_class')
    return response_class(body, mimetype = 'text/javascript')
def jsonify(mimetype, *args, **kwargs):
    """Serialise args/kwargs to JSON and return a response of `mimetype`."""
    payload = json.dumps(dict(*args, **kwargs))
    response_class = getattr(current_app, 'response_class')
    return response_class(payload, mimetype = mimetype)
def jsonified(*args, **kwargs):
    """Build a non-cacheable JSON response (JSONP when 'callback_func' is set)."""
    callback = getParam('callback_func', None)

    content = padded_jsonify(callback, *args, **kwargs) if callback \
        else jsonify('application/json', *args, **kwargs)

    response = make_response(content)
    response.cache_control.no_cache = True
    return response

View File

@@ -6,7 +6,7 @@ log = CPLog(__name__)
class RSS(object):
def getTextElements(self, xml, path):
''' Find elements and return tree'''
""" Find elements and return tree"""
textelements = []
try:
@@ -28,7 +28,7 @@ class RSS(object):
return elements
def getElement(self, xml, path):
''' Find element and return text'''
""" Find element and return text"""
try:
return xml.find(path)
@@ -36,7 +36,7 @@ class RSS(object):
return
def getTextElement(self, xml, path):
''' Find element and return text'''
""" Find element and return text"""
try:
return xml.find(path).text

View File

@@ -1,7 +1,8 @@
from couchpotato.core.helpers.encoding import simplifyString, toSafeString
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
from couchpotato.core.logger import CPLog
import collections
import hashlib
import os.path
import os
import platform
import random
import re
@@ -10,6 +11,9 @@ import sys
log = CPLog(__name__)
def fnEscape(pattern):
    """Escape '[' and ']' so fnmatch/glob treat them literally."""
    escaped = pattern.replace('[', '[[')
    escaped = escaped.replace(']', '[]]')
    # Second pass turns every doubled opener into the '[[]' character class
    return escaped.replace('[[', '[[]')
def link(src, dst):
if os.name == 'nt':
import ctypes
@@ -101,11 +105,16 @@ def flattenList(l):
return l
def md5(text):
    """Return the hex MD5 of `text`, byte-encoding unicode via ss() first.

    (The stale pre-encode `return` left over from the diff merge is removed —
    hashing raw unicode raised on py2.)
    """
    return hashlib.md5(ss(text)).hexdigest()
def sha1(text):
    """Return the hexadecimal SHA-1 digest of `text`."""
    digest = hashlib.sha1(text)
    return digest.hexdigest()
def isLocalIP(ip):
    """Return True when `ip` (optionally scheme-prefixed) is a LAN/loopback address.

    Fixes two defects: the old `lstrip('htps:/')` strips *characters* (mangling
    '::1' into '1' and hosts starting with h/t/p/s), and the regex kept PHP-style
    '/.../' delimiters which made the '127.' and '::1' alternatives unmatchable.
    """
    # Strip an explicit scheme prefix only (not arbitrary leading chars)
    for prefix in ('https://', 'http://'):
        if ip.startswith(prefix):
            ip = ip[len(prefix):]
            break

    # Private IPv4 ranges (192.168/16, 10/8, 172.16/12), loopback, IPv6 ::1
    regex = r'(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1$)'

    return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.'
def getExt(filename):
    """Return `filename`'s extension without the leading dot ('' when none)."""
    _, ext = os.path.splitext(filename)
    return ext[1:]
@@ -113,12 +122,17 @@ def cleanHost(host):
if not host.startswith(('http://', 'https://')):
host = 'http://' + host
if not host.endswith('/'):
host += '/'
host = host.rstrip('/')
host += '/'
return host
def getImdb(txt, check_inside = True, multiple = False):
def getImdb(txt, check_inside = False, multiple = False):
if not check_inside:
txt = simplifyString(txt)
else:
txt = ss(txt)
if check_inside and os.path.isfile(txt):
output = open(txt, 'r')
@@ -126,21 +140,27 @@ def getImdb(txt, check_inside = True, multiple = False):
output.close()
try:
ids = re.findall('(tt\d{7})', txt)
ids = re.findall('(tt\d{4,7})', txt)
if multiple:
return ids if len(ids) > 0 else []
return ids[0]
return list(set(['tt%07d' % tryInt(x[2:]) for x in ids])) if len(ids) > 0 else []
return 'tt%07d' % tryInt(ids[0][2:])
except IndexError:
pass
return False
def tryInt(s, default = 0):
    """int(s), or `default` when conversion fails.

    Merges the two interleaved diff versions into the newer signature; the
    old default-less def and its `return 0` except-clause are removed.
    """
    try: return int(s)
    except: return default
def tryFloat(s):
    """float(s), falling back to tryInt for dot-less strings, 0 on failure.

    Merges the interleaved diff versions into the newer body (the old
    one-liner is removed). NOTE(review): `isinstance(s, str)` misses py2
    unicode strings — those take the plain float() branch; confirm intended.
    """
    try:
        if isinstance(s, str):
            return float(s) if '.' in s else tryInt(s)
        else:
            return float(s)
    except: return 0
def natsortKey(s):
@@ -149,6 +169,11 @@ def natsortKey(s):
def natcmp(a, b):
    # Natural-order comparison (py2 cmp protocol: -1/0/1) via natsortKey.
    return cmp(natsortKey(a), natsortKey(b))
def toIterable(value):
    """Pass lists/tuples through unchanged; wrap anything else in a list.

    Uses isinstance instead of `type(...) in [...]` so list/tuple
    subclasses are also passed through.
    """
    if isinstance(value, (list, tuple)):
        return value
    return [value]
def getTitle(library_dict):
try:
try:
@@ -159,8 +184,11 @@ def getTitle(library_dict):
if title.default:
return title.title
except:
log.error('Could not get title for %s', library_dict.identifier)
return None
try:
return library_dict['info']['titles'][0]
except:
log.error('Could not get title for %s', library_dict.identifier)
return None
log.error('Could not get title for %s', library_dict['identifier'])
return None
@@ -170,16 +198,28 @@ def getTitle(library_dict):
def possibleTitles(raw_title):
    """Return the distinct normalised variants of a title used for matching.

    The diff merge had duplicated the three base variants (once as a list
    literal, once as appends); the stale append lines are removed.
    """
    titles = [
        toSafeString(raw_title).lower(),
        raw_title.lower(),
        simplifyString(raw_title),
    ]

    # replace some chars
    new_title = raw_title.replace('&', 'and')
    titles.append(simplifyString(new_title))

    # De-duplicate; note: set() makes the result order unspecified
    return list(set(titles))
def randomString(size = 8, chars = string.ascii_uppercase + string.digits):
    """Return a random string of `size` characters drawn from `chars`."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def splitString(str, split_on = ','):
return [x.strip() for x in str.split(split_on)] if str else []
def splitString(str, split_on = ',', clean = True):
list = [x.strip() for x in str.split(split_on)] if str else []
return filter(None, list) if clean else list
def dictIsSubset(a, b):
    """Return True when every key/value pair of `a` also appears in `b`."""
    for key, value in a.items():
        if key not in b or b[key] != value:
            return False
    return True
def isSubFolder(sub_folder, base_folder):
    """Return True if sub_folder is the same as, or inside, base_folder.

    The old substring test (`base in sub`) wrongly matched '/x/a/b' as being
    inside '/a'; a prefix test anchored at the start fixes that. Trailing
    separators are normalised so '/a' and '/a/' compare equal.
    """
    base = base_folder.rstrip(os.path.sep) + os.path.sep
    sub = sub_folder.rstrip(os.path.sep) + os.path.sep
    return sub.startswith(base)

View File

@@ -1,20 +1,19 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.logger import CPLog
import glob
from importlib import import_module
import os
import sys
import traceback
log = CPLog(__name__)
class Loader(object):
class Loader(object):
plugins = {}
providers = {}
modules = {}
def preload(self, root = ''):
core = os.path.join(root, 'couchpotato', 'core')
self.paths = {
@@ -25,13 +24,19 @@ class Loader(object):
}
# Add providers to loader
provider_dir = os.path.join(root, 'couchpotato', 'core', 'providers')
for provider in os.listdir(provider_dir):
path = os.path.join(provider_dir, provider)
if os.path.isdir(path):
self.paths[provider + '_provider'] = (25, 'couchpotato.core.providers.' + provider, path)
self.addPath(root, ['couchpotato', 'core', 'providers'], 25, recursive = False)
# Add media to loader
self.addPath(root, ['couchpotato', 'core', 'media'], 25, recursive = True)
# Add custom plugin folder
from couchpotato.environment import Env
custom_plugin_dir = os.path.join(Env.get('data_dir'), 'custom_plugins')
if os.path.isdir(custom_plugin_dir):
sys.path.insert(0, custom_plugin_dir)
self.paths['custom_plugins'] = (30, '', custom_plugin_dir)
# Loop over all paths and add to module list
for plugin_type, plugin_tuple in self.paths.iteritems():
priority, module, dir_name = plugin_tuple
self.addFromDir(plugin_type, priority, module, dir_name)
@@ -39,11 +44,17 @@ class Loader(object):
def run(self):
did_save = 0
for priority in self.modules:
for priority in sorted(self.modules):
for module_name, plugin in sorted(self.modules[priority].iteritems()):
# Load module
try:
m = getattr(self.loadModule(module_name), plugin.get('name'))
if plugin.get('name')[:2] == '__':
continue
m = self.loadModule(module_name)
if m is None:
continue
log.info('Loading %s: %s', (plugin['type'], plugin['name']))
@@ -53,7 +64,7 @@ class Loader(object):
self.loadPlugins(m, plugin.get('name'))
except ImportError as e:
# todo:: subclass ImportError for missing requirements.
if (e.message.lower().startswith("missing")):
if e.message.lower().startswith("missing"):
log.error(e.message)
pass
# todo:: this needs to be more descriptive.
@@ -65,27 +76,35 @@ class Loader(object):
if did_save:
fireEvent('settings.save')
def addPath(self, root, base_path, priority, recursive = False):
    """Register every package directory under root/base_path in self.paths.

    A directory counts as a plugin package when it contains __init__.py and
    doesn't start with '__'. Keys are the dotted module path with '.' replaced
    by '_'; values are (priority, dotted_module_path, fs_path) tuples.
    """
    root_path = os.path.join(root, *base_path)
    for filename in os.listdir(root_path):
        path = os.path.join(root_path, filename)
        if os.path.isdir(path) and filename[:2] != '__':
            if u'__init__.py' in os.listdir(path):
                new_base_path = ''.join(s + '.' for s in base_path) + filename
                self.paths[new_base_path.replace('.', '_')] = (priority, new_base_path, path)
            # Descend regardless of __init__.py so nested packages are found
            if recursive:
                self.addPath(root, base_path + [filename], priority, recursive = True)
def addFromDir(self, plugin_type, priority, module, dir_name):
# Load dir module
try:
m = __import__(module)
splitted = module.split('.')
for sub in splitted[1:]:
m = getattr(m, sub)
if module and len(module) > 0:
self.addModule(priority, plugin_type, module, os.path.basename(dir_name))
if hasattr(m, 'config'):
fireEvent('settings.options', splitted[-1] + '_config', getattr(m, 'config'))
except:
raise
for cur_file in glob.glob(os.path.join(dir_name, '*')):
name = os.path.basename(cur_file)
if os.path.isdir(os.path.join(dir_name, name)):
for name in os.listdir(dir_name):
if os.path.isdir(os.path.join(dir_name, name)) and name != 'static' and os.path.isfile(os.path.join(dir_name, name, '__init__.py')):
module_name = '%s.%s' % (module, name)
self.addModule(priority, plugin_type, module_name, name)
def loadSettings(self, module, name, save = True):
if not hasattr(module, 'config'):
log.debug('Skip loading settings for plugin %s as it has no config section' % module.__file__)
return False
try:
for section in module.config:
fireEvent('settings.options', section['name'], section)
@@ -100,15 +119,14 @@ class Loader(object):
return False
def loadPlugins(self, module, name):
if not hasattr(module, 'start'):
log.debug('Skip startup for plugin %s as it has no start section' % module.__file__)
return False
try:
klass = module.start()
klass.registerPlugin()
if klass and getattr(klass, 'auto_register_static'):
klass.registerStatic(module.__file__)
module.start()
return True
except Exception, e:
except:
log.error('Failed loading plugin "%s": %s', (module.__file__, traceback.format_exc()))
return False
@@ -117,6 +135,7 @@ class Loader(object):
if not self.modules.get(priority):
self.modules[priority] = {}
module = module.lstrip('.')
self.modules[priority][module] = {
'priority': priority,
'module': module,
@@ -126,10 +145,9 @@ class Loader(object):
def loadModule(self, name):
try:
m = __import__(name)
splitted = name.split('.')
for sub in splitted[1:-1]:
m = getattr(m, sub)
return m
return import_module(name)
except ImportError:
log.debug('Skip loading module plugin %s: %s', (name, traceback.format_exc()))
return None
except:
raise

View File

@@ -1,11 +1,10 @@
import logging
import re
import traceback
class CPLog(object):
context = ''
replace_private = ['api', 'apikey', 'api_key', 'password', 'username', 'h', 'uid', 'key']
replace_private = ['api', 'apikey', 'api_key', 'password', 'username', 'h', 'uid', 'key', 'passkey']
def __init__(self, context = ''):
if context.endswith('.main'):
@@ -50,8 +49,8 @@ class CPLog(object):
msg = msg % tuple([ss(x) for x in list(replace_tuple)])
else:
msg = msg % ss(replace_tuple)
except:
self.logger.error(u'Failed encoding stuff to log: %s' % traceback.format_exc())
except Exception, e:
self.logger.error(u'Failed encoding stuff to log "%s": %s' % (msg, e))
if not Env.get('dev'):

View File

@@ -0,0 +1,52 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.variable import mergeDicts
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Media
class MediaBase(Plugin):
    """Base plugin for a concrete media type (movie, show, ...).

    Holds the relation maps used when serialising Media rows and provides
    callback factories used after async searches complete.
    """

    # Media type identifier; set by subclasses.
    _type = None

    # Relation map handed to Media.to_dict() for plain serialisation.
    default_dict = {
        'profile': {'types': {'quality': {}}},
        'releases': {'status': {}, 'quality': {}, 'files':{}, 'info': {}},
        'library': {'titles': {}, 'files':{}},
        'files': {},
        'status': {},
        'category': {},
    }

    # Same map extended with the related/root library info searchers need.
    search_dict = mergeDicts({
        'library': {
            'related_libraries': {},
            'root_library': {}
        },
    }, default_dict)

    def initType(self):
        # Announce this plugin's media type on the shared event.
        addEvent('media.types', self.getType)

    def getType(self):
        return self._type

    def createOnComplete(self, id):
        """Return a callback that kicks off a single search for media `id`."""

        def onComplete():
            database = get_session()
            item = database.query(Media).filter_by(id = id).first()
            fireEventAsync('%s.searcher.single' % item.type, item.to_dict(self.search_dict), on_complete = self.createNotifyFront(id))
            database.expire_all()

        return onComplete

    def createNotifyFront(self, media_id):
        """Return a callback that pushes the fresh media dict to the frontend."""

        def notifyFront():
            database = get_session()
            item = database.query(Media).filter_by(id = media_id).first()
            fireEvent('notify.frontend', type = '%s.update' % item.type, data = item.to_dict(self.default_dict))
            database.expire_all()

        return notifyFront

View File

@@ -0,0 +1,6 @@
from .main import Library
def start():
    """Plugin-loader entry point: instantiate and return the Library plugin."""
    return Library()
config = []

View File

@@ -0,0 +1,13 @@
from couchpotato.core.event import addEvent
from couchpotato.core.plugins.base import Plugin
class LibraryBase(Plugin):
    """Common base for library plugins of a specific media type."""

    # Media type identifier; set by subclasses.
    _type = None

    def initType(self):
        # Announce this library's type on the shared event.
        addEvent('library.types', self.getType)

    def getType(self):
        return self._type

View File

@@ -0,0 +1,18 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.media._base.library.base import LibraryBase
class Library(LibraryBase):
    """Maps the generic 'library.title' event onto 'library.query'."""

    def __init__(self):
        addEvent('library.title', self.title)

    def title(self, library):
        # Plain title: no condensing, year or identifier included.
        return fireEvent(
            'library.query',
            library,
            condense = False,
            include_year = False,
            include_identifier = False,
            single = True
        )

View File

@@ -0,0 +1,6 @@
from .main import Matcher
def start():
    """Plugin-loader entry point: instantiate and return the Matcher plugin."""
    return Matcher()
config = []

View File

@@ -0,0 +1,84 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class MatcherBase(Plugin):
    """Base for release-name matchers.

    When `type` is set by a subclass, registers its correct() method on
    the '<type>.matcher.correct' event.
    """

    # Media type handled by the concrete matcher; None registers nothing.
    type = None

    def __init__(self):
        if self.type:
            addEvent('%s.matcher.correct' % self.type, self.correct)

    def correct(self, chain, release, media, quality):
        """Subclass hook: decide whether a parsed chain matches media/quality."""
        raise NotImplementedError()

    def flattenInfo(self, info):
        """Recursively flatten chain info into {key: [values]} / [values]."""
        # Flatten dictionary of matches (chain info)
        if isinstance(info, dict):
            return dict([(key, self.flattenInfo(value)) for key, value in info.items()])

        # Flatten matches: dict entries accumulate per-key value lists,
        # plain entries accumulate into a single list.
        result = None

        for match in info:
            if isinstance(match, dict):
                if result is None:
                    result = {}
                for key, value in match.items():
                    if key not in result:
                        result[key] = []
                    result[key].append(value)
            else:
                if result is None:
                    result = []
                result.append(match)

        return result

    def constructFromRaw(self, match):
        """Rebuild a display string from raw fragment tuples; None when empty."""
        if not match:
            return None

        parts = [
            ''.join([
                y for y in x[1:] if y
            ]) for x in match
        ]

        # [:-1] drops the trailing separator character — TODO confirm format
        return ''.join(parts)[:-1].strip()

    def simplifyValue(self, value):
        """Normalise a string (or list of strings) via simplifyString."""
        if not value:
            return value

        if isinstance(value, basestring):
            return simplifyString(value)

        if isinstance(value, list):
            return [self.simplifyValue(x) for x in value]

        raise ValueError("Unsupported value type")

    def chainMatch(self, chain, group, tags):
        """True when every required tag has an accepted value in the chain group."""
        info = self.flattenInfo(chain.info[group])

        found_tags = []
        for tag, accepted in tags.items():
            values = [self.simplifyValue(x) for x in info.get(tag, [None])]

            if any([val in accepted for val in values]):
                found_tags.append(tag)

        log.debug('tags found: %s, required: %s' % (found_tags, tags.keys()))

        if set(tags.keys()) == set(found_tags):
            return True

        # NOTE(review): unreachable when the equality above is False only if
        # found_tags ⊆ tags.keys() — kept as in the original.
        return all([key in found_tags for key, value in tags.items()])

View File

@@ -0,0 +1,88 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import possibleTitles
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.matcher.base import MatcherBase
from caper import Caper
log = CPLog(__name__)
class Matcher(MatcherBase):
    """Generic release-name matcher built on the Caper parser.

    Registers the shared 'matcher.*' events used by type-specific matchers.
    """

    def __init__(self):
        super(Matcher, self).__init__()

        self.caper = Caper()

        addEvent('matcher.parse', self.parse)
        addEvent('matcher.match', self.match)

        addEvent('matcher.flatten_info', self.flattenInfo)
        addEvent('matcher.construct_from_raw', self.constructFromRaw)

        addEvent('matcher.correct_title', self.correctTitle)
        addEvent('matcher.correct_quality', self.correctQuality)

    def parse(self, name, parser='scene'):
        # Parse a release name into Caper match chains.
        return self.caper.parse(name, parser)

    def match(self, release, media, quality):
        """Return the first parse chain the type matcher accepts, else False."""
        match = fireEvent('matcher.parse', release['name'], single = True)

        if len(match.chains) < 1:
            log.info2('Wrong: %s, unable to parse release name (no chains)', release['name'])
            return False

        for chain in match.chains:
            if fireEvent('%s.matcher.correct' % media['type'], chain, release, media, quality, single = True):
                return chain

        return False

    def correctTitle(self, chain, media):
        """True when the chain's parsed show name matches a known title variant."""
        root_library = media['library']['root_library']

        if 'show_name' not in chain.info or not len(chain.info['show_name']):
            log.info('Wrong: missing show name in parsed result')
            return False

        # Get the lower-case parsed show name from the chain
        chain_words = [x.lower() for x in chain.info['show_name']]

        # Build a list of possible titles of the media we are searching for
        titles = root_library['info']['titles']

        # Add year suffix titles (will result in ['<name_one>', '<name_one> <suffix_one>', '<name_two>', ...])
        suffixes = [None, root_library['info']['year']]

        titles = [
            title + ((' %s' % suffix) if suffix else '')
            for title in titles
            for suffix in suffixes
        ]

        # Check show titles match
        # TODO check xem names
        for title in titles:
            for valid_words in [x.split(' ') for x in possibleTitles(title)]:

                if valid_words == chain_words:
                    return True

        return False

    def correctQuality(self, chain, quality, quality_map):
        """True when the chain's video tags satisfy the wanted quality."""
        if quality['identifier'] not in quality_map:
            log.info2('Wrong: unknown preferred quality %s', quality['identifier'])
            return False

        if 'video' not in chain.info:
            log.info2('Wrong: no video tags found')
            return False

        video_tags = quality_map[quality['identifier']]

        if not self.chainMatch(chain, 'video', video_tags):
            log.info2('Wrong: %s tags not in chain', video_tags)
            return False

        return True

View File

@@ -0,0 +1,6 @@
from .main import MediaPlugin
def start():
    """Plugin-loader entry point: instantiate and return the MediaPlugin."""
    return MediaPlugin()
config = []

View File

@@ -0,0 +1,458 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import mergeDicts, splitString, getImdb
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from couchpotato.core.settings.model import Library, LibraryTitle, Release, \
Media
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_, asc, not_, desc
from string import ascii_lowercase
log = CPLog(__name__)
class MediaPlugin(MediaBase):
    """Media-type-agnostic API: refresh, list, get, delete, available_chars.

    Registers the generic 'media.*' endpoints and, on app.load, one alias
    per registered media type ('movie.list', 'show.refresh', ...).

    Fixes applied: per-type alias closures now bind media_type per iteration
    (late-binding closure bug made every alias use the last type); the status
    filter in availableChars used len(release_status) for its 'single' flag
    (copy-paste); identity 'is' comparisons on ints/strings replaced by '=='.
    """

    def __init__(self):
        addApiView('media.refresh', self.refresh, docs = {
            'desc': 'Refresh a any media type by ID',
            'params': {
                'id': {'desc': 'Movie, Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'},
            }
        })

        addApiView('media.list', self.listView, docs = {
            'desc': 'List media',
            'params': {
                'type': {'type': 'string', 'desc': 'Media type to filter on.'},
                'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
                'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
                'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
                'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
                'search': {'desc': 'Search movie title'},
            },
            'return': {'type': 'object', 'example': """{
    'success': True,
    'empty': bool, any movies returned or not,
    'media': array, media found,
}"""}
        })

        addApiView('media.get', self.getView, docs = {
            'desc': 'Get media by id',
            'params': {
                'id': {'desc': 'The id of the media'},
            }
        })

        addApiView('media.delete', self.deleteView, docs = {
            'desc': 'Delete a media from the wanted list',
            'params': {
                'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
                'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'},
            }
        })

        addApiView('media.available_chars', self.charView)

        addEvent('app.load', self.addSingleRefreshView)
        addEvent('app.load', self.addSingleListView)
        addEvent('app.load', self.addSingleCharView)
        addEvent('app.load', self.addSingleDeleteView)

        addEvent('media.get', self.get)
        addEvent('media.list', self.list)
        addEvent('media.delete', self.delete)
        addEvent('media.restatus', self.restatus)

    def refresh(self, id = '', **kwargs):
        """Force a library refresh for every media id in the csv string `id`."""
        db = get_session()

        for x in splitString(id):
            media = db.query(Media).filter_by(id = x).first()

            if media:
                # Get current selected title
                default_title = ''
                for title in media.library.titles:
                    if title.default: default_title = title.title

                fireEvent('notify.frontend', type = '%s.busy' % media.type, data = {'id': x})
                fireEventAsync('library.update.%s' % media.type, identifier = media.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))

        db.expire_all()
        return {
            'success': True,
        }

    def addSingleRefreshView(self):
        # refresh() is type-agnostic, so the same bound method works per type.
        for media_type in fireEvent('media.types', merge = True):
            addApiView('%s.refresh' % media_type, self.refresh)

    def get(self, media_id):
        """Return one media row as a dict; `media_id` may be a DB id or IMDB id."""
        db = get_session()

        imdb_id = getImdb(str(media_id))

        if imdb_id:
            m = db.query(Media).filter(Media.library.has(identifier = imdb_id)).first()
        else:
            m = db.query(Media).filter_by(id = media_id).first()

        results = None
        if m:
            results = m.to_dict(self.default_dict)

        db.expire_all()
        return results

    def getView(self, id = None, **kwargs):
        media = self.get(id) if id else None

        return {
            'success': media is not None,
            'media': media,
        }

    def list(self, types = None, status = None, release_status = None, limit_offset = None, starts_with = None, search = None, order = None):
        """Return (total_count, [media dicts]) matching the given filters."""
        db = get_session()

        # Make a list from string
        if status and not isinstance(status, (list, tuple)):
            status = [status]
        if release_status and not isinstance(release_status, (list, tuple)):
            release_status = [release_status]
        if types and not isinstance(types, (list, tuple)):
            types = [types]

        # query movie ids
        q = db.query(Media) \
            .with_entities(Media.id) \
            .group_by(Media.id)

        # Filter on movie status
        if status and len(status) > 0:
            statuses = fireEvent('status.get', status, single = len(status) > 1)
            statuses = [s.get('id') for s in statuses]
            q = q.filter(Media.status_id.in_(statuses))

        # Filter on release status
        if release_status and len(release_status) > 0:
            q = q.join(Media.releases)

            statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
            statuses = [s.get('id') for s in statuses]
            q = q.filter(Release.status_id.in_(statuses))

        # Filter on type
        if types and len(types) > 0:
            try: q = q.filter(Media.type.in_(types))
            except: pass

        # Only join when searching / ordering
        if starts_with or search or order != 'release_order':
            q = q.join(Media.library, Library.titles) \
                .filter(LibraryTitle.default == True)

        # Add search filters
        filter_or = []

        if starts_with:
            starts_with = toUnicode(starts_with.lower())
            if starts_with in ascii_lowercase:
                filter_or.append(LibraryTitle.simple_title.startswith(starts_with))
            else:
                # '#' bucket: titles that start with anything but a-z
                ignore = []
                for letter in ascii_lowercase:
                    ignore.append(LibraryTitle.simple_title.startswith(toUnicode(letter)))
                filter_or.append(not_(or_(*ignore)))

        if search:
            filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%'))

        if len(filter_or) > 0:
            q = q.filter(or_(*filter_or))

        total_count = q.count()
        if total_count == 0:
            return 0, []

        if order == 'release_order':
            q = q.order_by(desc(Release.last_edit))
        else:
            q = q.order_by(asc(LibraryTitle.simple_title))

        if limit_offset:
            splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
            limit = splt[0]
            # BUG FIX: value comparison with '==', not identity 'is'
            offset = 0 if len(splt) == 1 else splt[1]
            q = q.limit(limit).offset(offset)

        # Get all media_ids in sorted order
        media_ids = [m.id for m in q.all()]

        # List release statuses
        releases = db.query(Release) \
            .filter(Release.media_id.in_(media_ids)) \
            .all()

        release_statuses = dict((m, set()) for m in media_ids)
        releases_count = dict((m, 0) for m in media_ids)
        for release in releases:
            release_statuses[release.media_id].add('%d,%d' % (release.status_id, release.quality_id))
            releases_count[release.media_id] += 1

        # Get main movie data
        q2 = db.query(Media) \
            .options(joinedload_all('library.titles')) \
            .options(joinedload_all('library.files')) \
            .options(joinedload_all('status')) \
            .options(joinedload_all('files'))
        q2 = q2.filter(Media.id.in_(media_ids))

        results = q2.all()

        # Create dict by movie id
        movie_dict = {}
        for movie in results:
            movie_dict[movie.id] = movie

        # List movies based on media_ids order
        movies = []
        for media_id in media_ids:

            releases = []
            for r in release_statuses.get(media_id):
                x = splitString(r)
                releases.append({'status_id': x[0], 'quality_id': x[1]})

            # Merge releases with movie dict
            movies.append(mergeDicts(movie_dict[media_id].to_dict({
                'library': {'titles': {}, 'files':{}},
                'files': {},
            }), {
                'releases': releases,
                'releases_count': releases_count.get(media_id),
            }))

        db.expire_all()
        return total_count, movies

    def listView(self, **kwargs):
        types = splitString(kwargs.get('types'))
        status = splitString(kwargs.get('status'))
        release_status = splitString(kwargs.get('release_status'))
        limit_offset = kwargs.get('limit_offset')
        starts_with = kwargs.get('starts_with')
        search = kwargs.get('search')
        order = kwargs.get('order')

        total_movies, movies = self.list(
            types = types,
            status = status,
            release_status = release_status,
            limit_offset = limit_offset,
            starts_with = starts_with,
            search = search,
            order = order
        )

        return {
            'success': True,
            'empty': len(movies) == 0,
            'total': total_movies,
            'movies': movies,
        }

    def addSingleListView(self):
        # BUG FIX: bind media_type per iteration via a factory; the old inline
        # closure captured the loop variable, so every alias listed the last type.
        def makeList(media_type):
            def tempList(*args, **kwargs):
                return self.listView(types = media_type, *args, **kwargs)
            return tempList

        for media_type in fireEvent('media.types', merge = True):
            addApiView('%s.list' % media_type, makeList(media_type))

    def availableChars(self, types = None, status = None, release_status = None):
        """Return sorted first letters of matching titles ('#' for non a-z)."""
        types = types or []
        status = status or []
        release_status = release_status or []

        db = get_session()

        # Make a list from string
        if not isinstance(status, (list, tuple)):
            status = [status]
        if release_status and not isinstance(release_status, (list, tuple)):
            release_status = [release_status]
        if types and not isinstance(types, (list, tuple)):
            types = [types]

        q = db.query(Media)

        # Filter on movie status
        if status and len(status) > 0:
            # BUG FIX: 'single' was computed from len(release_status) (copy-paste)
            statuses = fireEvent('status.get', status, single = len(status) > 1)
            statuses = [s.get('id') for s in statuses]
            q = q.filter(Media.status_id.in_(statuses))

        # Filter on release status
        if release_status and len(release_status) > 0:
            statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
            statuses = [s.get('id') for s in statuses]
            q = q.join(Media.releases) \
                .filter(Release.status_id.in_(statuses))

        # Filter on type
        if types and len(types) > 0:
            try: q = q.filter(Media.type.in_(types))
            except: pass

        q = q.join(Library, LibraryTitle) \
            .with_entities(LibraryTitle.simple_title) \
            .filter(LibraryTitle.default == True)

        titles = q.all()

        chars = set()
        for title in titles:
            try:
                char = title[0][0]
                char = char if char in ascii_lowercase else '#'
                chars.add(str(char))
            except:
                log.error('Failed getting title for %s', title.libraries_id)

            # cheap short-circuit once (almost) every bucket is filled
            if len(chars) == 25:
                break

        db.expire_all()
        return ''.join(sorted(chars))

    def charView(self, **kwargs):
        type = splitString(kwargs.get('type', 'movie'))
        status = splitString(kwargs.get('status', None))
        release_status = splitString(kwargs.get('release_status', None))
        chars = self.availableChars(type, status, release_status)

        return {
            'success': True,
            'empty': len(chars) == 0,
            'chars': chars,
        }

    def addSingleCharView(self):
        # BUG FIX: bind media_type per iteration (see addSingleListView)
        def makeChar(media_type):
            def tempChar(*args, **kwargs):
                return self.charView(types = media_type, *args, **kwargs)
            return tempChar

        for media_type in fireEvent('media.types', merge = True):
            addApiView('%s.available_chars' % media_type, makeChar(media_type))

    def delete(self, media_id, delete_from = None):
        """Delete a media row ('all') or prune its releases per page context."""
        db = get_session()

        media = db.query(Media).filter_by(id = media_id).first()
        if media:
            deleted = False
            if delete_from == 'all':
                db.delete(media)
                db.commit()
                deleted = True
            else:
                done_status = fireEvent('status.get', 'done', single = True)

                total_releases = len(media.releases)
                total_deleted = 0
                new_movie_status = None
                for release in media.releases:
                    if delete_from in ['wanted', 'snatched', 'late']:
                        if release.status_id != done_status.get('id'):
                            db.delete(release)
                            total_deleted += 1
                        new_movie_status = 'done'
                    elif delete_from == 'manage':
                        if release.status_id == done_status.get('id'):
                            db.delete(release)
                            total_deleted += 1
                            new_movie_status = 'active'
                db.commit()

                if total_releases == total_deleted:
                    db.delete(media)
                    db.commit()
                    deleted = True
                elif new_movie_status:
                    new_status = fireEvent('status.get', new_movie_status, single = True)
                    media.profile_id = None
                    media.status_id = new_status.get('id')
                    db.commit()
                else:
                    fireEvent('media.restatus', media.id, single = True)

            if deleted:
                fireEvent('notify.frontend', type = 'movie.deleted', data = media.to_dict())

        db.expire_all()
        return True

    def deleteView(self, id = '', **kwargs):
        ids = splitString(id)
        for media_id in ids:
            self.delete(media_id, delete_from = kwargs.get('delete_from', 'all'))

        return {
            'success': True,
        }

    def addSingleDeleteView(self):
        # BUG FIX: bind media_type per iteration (see addSingleListView)
        def makeDelete(media_type):
            def tempDelete(*args, **kwargs):
                return self.deleteView(types = media_type, *args, **kwargs)
            return tempDelete

        for media_type in fireEvent('media.types', merge = True):
            addApiView('%s.delete' % media_type, makeDelete(media_type))

    def restatus(self, media_id):
        """Recompute a media row's active/done status from profile + releases."""
        active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)

        db = get_session()

        m = db.query(Media).filter_by(id = media_id).first()
        if not m or len(m.library.titles) == 0:
            log.debug('Can\'t restatus movie, doesn\'t seem to exist.')
            return False

        log.debug('Changing status for %s', m.library.titles[0].title)
        if not m.profile:
            m.status_id = done_status.get('id')
        else:
            move_to_wanted = True

            for t in m.profile.types:
                for release in m.releases:
                    # BUG FIX: value equality, not identity ('is'), for
                    # quality identifiers and status ids
                    if t.quality.identifier == release.quality.identifier and (release.status_id == done_status.get('id') and t.finish):
                        move_to_wanted = False

            m.status_id = active_status.get('id') if move_to_wanted else done_status.get('id')

        db.commit()
        return True

View File

@@ -0,0 +1,6 @@
from .main import Search
def start():
    """Plugin-loader entry point: instantiate and return the Search plugin."""
    return Search()
config = []

View File

@@ -0,0 +1,59 @@
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class Search(Plugin):
    """Global 'search' API endpoint plus one '<type>.search' view per media type.

    Fixes applied: per-type searches now forward the query string (the old
    code fired '<type>.search' without `q`), and createSingleSearch passes
    `types=` — the old `type=` keyword fell into **kwargs and was ignored,
    so single-type endpoints searched everything.
    """

    def __init__(self):
        addApiView('search', self.search, docs = {
            'desc': 'Search the info in providers for a movie',
            'params': {
                'q': {'desc': 'The (partial) movie name you want to search for'},
                'type': {'desc': 'Search for a specific media type. Leave empty to search all.'},
            },
            'return': {'type': 'object', 'example': """{
    'success': True,
    'movies': array,
    'show': array,
    etc
}"""}
        })

        addEvent('app.load', self.addSingleSearches)

    def search(self, q = '', types = None, **kwargs):
        """Search all providers, or only the given media types, for `q`."""
        # Make sure types is the correct instance
        if isinstance(types, (str, unicode)):
            types = [types]
        elif isinstance(types, (list, tuple, set)):
            types = list(types)

        if not types:
            result = fireEvent('info.search', q = q, merge = True)
        else:
            result = {}
            for media_type in types:
                # BUG FIX: forward the query to the per-type search event
                result[media_type] = fireEvent('%s.search' % media_type, q = q)

        return mergeDicts({
            'success': True,
        }, result)

    def createSingleSearch(self, media_type):

        def singleSearch(q, **kwargs):
            # BUG FIX: use the real parameter name 'types'
            return self.search(q, types = media_type, **kwargs)

        return singleSearch

    def addSingleSearches(self):
        for media_type in fireEvent('media.types', merge = True):
            addApiView('%s.search' % media_type, self.createSingleSearch(media_type))

View File

@@ -44,9 +44,8 @@
.search_form .input input {
border-radius: 0;
display: block;
width: 100%;
border: 0;
background: rgba(255,255,255,.08);
background: none;
color: #FFF;
font-size: 25px;
height: 100%;
@@ -60,6 +59,11 @@
.search_form.shown .input input {
opacity: 1;
}
.search_form input::-ms-clear {
width : 0;
height: 0;
}
@media all and (max-width: 480px) {
.search_form .input input {
@@ -106,7 +110,7 @@
background: #5c697b;
margin: 4px 0 0;
width: 470px;
min-height: 140px;
min-height: 50px;
box-shadow: 0 20px 20px -10px rgba(0,0,0,0.55);
display: none;
}
@@ -125,13 +129,13 @@
overflow-x: hidden;
}
.movie_result {
.media_result {
overflow: hidden;
height: 50px;
position: relative;
}
.movie_result .options {
.media_result .options {
position: absolute;
height: 100%;
top: 0;
@@ -143,46 +147,48 @@
border-radius: 0;
box-shadow: inset 0 1px 8px rgba(0,0,0,0.25);
}
.movie_result .options > .in_library_wanted {
.media_result .options > .in_library_wanted {
margin-top: -7px;
}
.movie_result .options > div {
.media_result .options > div {
border: 0;
}
.movie_result .options .thumbnail {
.media_result .options .thumbnail {
vertical-align: middle;
}
.movie_result .options select {
.media_result .options select {
vertical-align: middle;
display: inline-block;
margin-right: 10px;
}
.movie_result .options select[name=title] { width: 180px; }
.movie_result .options select[name=profile] { width: 90px; }
.media_result .options select[name=title] { width: 170px; }
.media_result .options select[name=profile] { width: 90px; }
.media_result .options select[name=category] { width: 80px; }
@media all and (max-width: 480px) {
.movie_result .options select[name=title] { width: 90px; }
.movie_result .options select[name=profile] { width: 60px; }
.media_result .options select[name=title] { width: 90px; }
.media_result .options select[name=profile] { width: 50px; }
.media_result .options select[name=category] { width: 50px; }
}
.movie_result .options .button {
.media_result .options .button {
vertical-align: middle;
display: inline-block;
}
.movie_result .options .message {
.media_result .options .message {
height: 100%;
font-size: 20px;
color: #fff;
line-height: 20px;
}
.movie_result .data {
.media_result .data {
position: absolute;
height: 100%;
top: 0;
@@ -193,20 +199,20 @@
border-top: 1px solid rgba(255,255,255, 0.08);
transition: all .4s cubic-bezier(0.9,0,0.1,1);
}
.movie_result .data.open {
left: 100%;
.media_result .data.open {
left: 100% !important;
}
.movie_result:last-child .data { border-bottom: 0; }
.media_result:last-child .data { border-bottom: 0; }
.movie_result .in_wanted, .movie_result .in_library {
.media_result .in_wanted, .media_result .in_library {
position: absolute;
bottom: 2px;
left: 14px;
font-size: 11px;
}
.movie_result .thumbnail {
.media_result .thumbnail {
width: 34px;
min-height: 100%;
display: block;
@@ -214,33 +220,58 @@
vertical-align: top;
}
.movie_result .info {
.media_result .info {
position: absolute;
top: 20%;
left: 15px;
right: 60px;
right: 7px;
vertical-align: middle;
}
.movie_result .info h2 {
.media_result .info h2 {
margin: 0;
font-weight: normal;
font-size: 20px;
padding: 0;
}
.search_form .info h2 {
position: absolute;
width: 100%;
}
.media_result .info h2 .title {
display: block;
margin: 0;
text-overflow: ellipsis;
overflow: hidden;
white-space: nowrap;
width: 100%;
}
.movie_result .info h2 span {
padding: 0 5px;
position: absolute;
right: -60px;
}
.search_form .info h2 .title {
position: absolute;
width: 88%;
}
.media_result .info h2 .year {
padding: 0 5px;
text-align: center;
position: absolute;
width: 12%;
right: 0;
}
@media all and (max-width: 480px) {
.search_form .info h2 .year {
font-size: 12px;
margin-top: 7px;
}
}
.search_form .mask,
.movie_result .mask {
.media_result .mask {
position: absolute;
height: 100%;
width: 100%;

View File

@@ -0,0 +1,188 @@
// Header search block: renders the search input plus a results dropdown and
// talks to the `search` API. Responses are cached per query string in `cache`.
Block.Search = new Class({
Extends: BlockBase,
cache: {},
// Build the input, the clear button and the (initially hidden) results container.
create: function(){
var self = this;
var focus_timer = 0;
self.el = new Element('div.search_form').adopt(
new Element('div.input').adopt(
self.input = new Element('input', {
'placeholder': 'Search & add a new media',
'events': {
'keyup': self.keyup.bind(self),
'focus': function(){
if(focus_timer) clearTimeout(focus_timer);
self.el.addClass('focused')
if(this.get('value'))
self.hideResults(false)
},
'blur': function(){
// Delay the unfocus so a click on a result isn't cancelled by the blur.
focus_timer = (function(){
self.el.removeClass('focused')
}).delay(100);
}
}
}),
new Element('a.icon2', {
'events': {
'click': self.clear.bind(self),
'touchend': self.clear.bind(self)
}
})
),
self.result_container = new Element('div.results_container', {
'tween': {
'duration': 200
},
'events': {
'mousewheel': function(e){
// Keep the page from scrolling while scrolling inside the results.
(e).stopPropagation();
}
}
}).adopt(
self.results = new Element('div.results')
)
);
self.mask = new Element('div.mask').inject(self.result_container).fade('hide');
},
// Clear button: first activation empties the field; a second one (when the
// field is already empty) blurs the input.
clear: function(e){
var self = this;
(e).preventDefault();
if(self.last_q === ''){
self.input.blur()
self.last_q = null;
}
else {
self.last_q = '';
self.input.set('value', '');
self.input.focus()
self.media = {}
self.results.empty()
self.el.removeClass('filled')
}
},
// Show (bool == false) or hide (bool == true) the results dropdown, and
// (de)register the outer-click / history handlers that auto-close it.
hideResults: function(bool){
var self = this;
if(self.hidden == bool) return;
self.el[bool ? 'removeClass' : 'addClass']('shown');
if(bool){
// NOTE(review): .bind() returns a new function each call, so these
// removeEvent calls likely never detach the originally-added handlers —
// verify whether handlers accumulate.
History.removeEvent('change', self.hideResults.bind(self, !bool));
self.el.removeEvent('outerClick', self.hideResults.bind(self, !bool));
}
else {
History.addEvent('change', self.hideResults.bind(self, !bool));
self.el.addEvent('outerClick', self.hideResults.bind(self, !bool));
}
self.hidden = bool;
},
// Debounce typing: cancel any in-flight request and only fire autocomplete
// 300ms after the last keystroke.
keyup: function(e){
var self = this;
self.el[self.q() ? 'addClass' : 'removeClass']('filled')
if(self.q() != self.last_q){
if(self.api_request && self.api_request.isRunning())
self.api_request.cancel();
if(self.autocomplete_timer) clearTimeout(self.autocomplete_timer)
self.autocomplete_timer = self.autocomplete.delay(300, self)
}
},
autocomplete: function(){
var self = this;
// Empty query: just close the dropdown.
if(!self.q()){
self.hideResults(true)
return
}
self.list()
},
// Query the `search` API for the current input value, or serve from cache.
list: function(){
var self = this,
q = self.q(),
cache = self.cache[q];
self.hideResults(false);
if(!cache){
self.mask.fade('in');
if(!self.spinner)
self.spinner = createSpinner(self.mask);
self.api_request = Api.request('search', {
'data': {
'q': q
},
'onComplete': self.fill.bind(self, q)
})
}
else
self.fill(q, cache)
self.last_q = q;
},
// Render the API response: one Block.Search.<Type>Item per result, keyed by
// imdb id (or a random key when the result has none).
fill: function(q, json){
var self = this;
self.cache[q] = json
self.media = {}
self.results.empty()
Object.each(json, function(media, type){
if(typeOf(media) == 'array'){
Object.each(media, function(m){
var m = new Block.Search[m.type.capitalize() + 'Item'](m);
$(m).inject(self.results)
self.media[m.imdb || 'r-'+Math.floor(Math.random()*10000)] = m
// Searching by exact imdb id opens that result's options directly.
if(q == m.imdb)
m.showOptions()
});
}
})
// Calculate result heights
var w = window.getSize(),
rc = self.result_container.getCoordinates();
self.results.setStyle('max-height', (w.y - rc.top - 50) + 'px')
self.mask.fade('out')
},
loading: function(bool){
this.el[bool ? 'addClass' : 'removeClass']('loading')
},
// Trimmed value of the search input.
q: function(){
return this.input.get('value').trim();
}
});

View File

@@ -0,0 +1,75 @@
from .main import Searcher


def start():
    """Plugin entry point: build the generic (media-type agnostic) searcher."""
    return Searcher()


# Settings shown on the 'searcher' tab: general search behaviour, global
# word filters, and usenet retention.
config = [{
    'name': 'searcher',
    'order': 20,
    'groups': [
        {
            'tab': 'searcher',
            'name': 'searcher',
            'label': 'Basics',
            'description': 'General search options',
            'options': [
                {
                    'name': 'preferred_method',
                    'label': 'First search',
                    'description': 'Which of the methods do you prefer',
                    'default': 'both',
                    'type': 'dropdown',
                    'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrents', 'torrent')],
                },
            ],
        }, {
            'tab': 'searcher',
            'subtab': 'category',
            'subtab_label': 'Categories',
            'name': 'filter',
            'label': 'Global filters',
            'description': 'Prefer, ignore & required words in release names',
            'options': [
                {
                    'name': 'preferred_words',
                    'label': 'Preferred',
                    'default': '',
                    'placeholder': 'Example: CtrlHD, Amiable, Wiki',
                    'description': 'Words that give the releases a higher score.'
                },
                {
                    'name': 'required_words',
                    'label': 'Required',
                    'default': '',
                    'placeholder': 'Example: DTS, AC3 & English',
                    'description': 'Release should contain at least one set of words. Sets are separated by "," and each word within a set must be separated with "&"'
                },
                {
                    'name': 'ignored_words',
                    'label': 'Ignored',
                    'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs, vain',
                    'description': 'Ignores releases that match any of these sets. (Works like explained above)'
                },
            ],
        },
    ],
}, {
    'name': 'nzb',
    'groups': [
        {
            'tab': 'searcher',
            'name': 'searcher',
            'label': 'NZB',
            'wizard': True,
            'options': [
                {
                    'name': 'retention',
                    'label': 'Usenet Retention',
                    'default': 1500,
                    'type': 'int',
                    'unit': 'days'
                },
            ],
        },
    ],
}]

View File

@@ -0,0 +1,45 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class SearcherBase(Plugin):
    """Shared base for per-media-type searchers.

    Wires up the progress events and the recurring search cronjob for the
    concrete searcher type (self.getType()).
    """

    # Progress flag reported through the 'searcher.progress' events.
    in_progress = False

    def __init__(self):
        super(SearcherBase, self).__init__()

        addEvent('searcher.progress', self.getProgress)
        addEvent('%s.searcher.progress' % self.getType(), self.getProgress)

        self.initCron()

    def initCron(self):
        """Set the searcher cronjob.

        The cron is (re)registered on app.load and again whenever one of the
        cron_* settings of this searcher type is saved.
        """
        _type = self.getType()

        def setCrons():
            fireEvent('schedule.cron', '%s.searcher.all' % _type, self.searchAll,
                      day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute'))

        addEvent('app.load', setCrons)
        addEvent('setting.save.%s_searcher.cron_day.after' % _type, setCrons)
        addEvent('setting.save.%s_searcher.cron_hour.after' % _type, setCrons)
        addEvent('setting.save.%s_searcher.cron_minute.after' % _type, setCrons)

    def getProgress(self, **kwargs):
        """Return progress of current searcher."""
        return {
            self.getType(): self.in_progress
        }

View File

@@ -0,0 +1,218 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.variable import md5, getTitle, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.base import SearcherBase
from couchpotato.core.settings.model import Media, Release, ReleaseInfo
from couchpotato.environment import Env
from inspect import ismethod, isfunction
import datetime
import re
import time
import traceback
log = CPLog(__name__)
class Searcher(SearcherBase):
    """Media-type agnostic searcher.

    Fires provider searches for a media item and offers the generic
    release-name sanity checks (quality, year, name, word filters) used by
    the per-type searchers.
    """

    def __init__(self):
        addEvent('searcher.protocols', self.getSearchProtocols)
        addEvent('searcher.contains_other_quality', self.containsOtherQuality)
        addEvent('searcher.correct_year', self.correctYear)
        addEvent('searcher.correct_name', self.correctName)
        addEvent('searcher.correct_words', self.correctWords)
        addEvent('searcher.search', self.search)

        addApiView('searcher.full_search', self.searchAllView, docs = {
            'desc': 'Starts a full search for all media',
        })

        addApiView('searcher.progress', self.getProgressForAll, docs = {
            'desc': 'Get the progress of all media searches',
            'return': {'type': 'object', 'example': """{
    'movie': False || object, total & to_go,
    'show': False || object, total & to_go,
}"""},
        })

    def searchAllView(self):
        """Kick off a full search for every registered media type."""
        results = {}
        for _type in fireEvent('media.types'):
            results[_type] = fireEvent('%s.searcher.all_view' % _type)

        return results

    def getProgressForAll(self):
        """Merge the progress dicts reported by all per-type searchers."""
        progress = fireEvent('searcher.progress', merge = True)
        return progress

    def search(self, protocols, media, quality):
        """Search all providers of the given protocols for `media`.

        Results are sorted by score (descending); when the user prefers one
        download method, that protocol family is sorted first.
        """
        results = []

        for search_protocol in protocols:
            protocol_results = fireEvent('provider.search.%s.%s' % (search_protocol, media['type']), media, quality, merge = True)
            if protocol_results:
                results += protocol_results

        sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)

        download_preference = self.conf('preferred_method', section = 'searcher')
        if download_preference != 'both':
            # 'nzb'[:3] < 'tor'[:3], so reversing puts torrents first.
            sorted_results = sorted(sorted_results, key = lambda k: k['protocol'][:3], reverse = (download_preference == 'torrent'))

        return sorted_results

    def getSearchProtocols(self):
        """Return the protocols that have both a downloader and at least one
        provider enabled; empty list (with an error logged) otherwise."""

        download_protocols = fireEvent('download.enabled_protocols', merge = True)
        provider_protocols = fireEvent('provider.enabled_protocols', merge = True)

        if download_protocols and len(list(set(provider_protocols) & set(download_protocols))) == 0:
            log.error('There aren\'t any providers enabled for your downloader (%s). Check your settings.', ','.join(download_protocols))
            return []

        for useless_provider in list(set(provider_protocols) - set(download_protocols)):
            log.debug('Provider for "%s" enabled, but no downloader.', useless_provider)

        search_protocols = download_protocols

        if len(search_protocols) == 0:
            log.error('There aren\'t any downloaders enabled. Please pick one in settings.')
            return []

        return search_protocols

    def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None):
        """True when the release name appears to contain a quality other than
        (or in addition to) the preferred one."""
        if not preferred_quality: preferred_quality = {}

        name = nzb['name']
        size = nzb.get('size', 0)
        nzb_words = re.split('\W+', simplifyString(name))

        qualities = fireEvent('quality.all', single = True)

        found = {}
        for quality in qualities:
            # Main in words
            if quality['identifier'] in nzb_words:
                found[quality['identifier']] = True

            # Alt in words
            if list(set(nzb_words) & set(quality['alternative'])):
                found[quality['identifier']] = True

        # Try guessing via quality tags
        guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
        if guess:
            found[guess['identifier']] = True

        # Hack for older movies that don't contain quality tag
        year_name = fireEvent('scanner.name_year', name, single = True)
        if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
            if size > 3000:  # Assume dvdr
                log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size)
                found['dvdr'] = True
            else:  # Assume dvdrip
                log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', size)
                found['dvdrip'] = True

        # Allow other qualities
        # BUGFIX: guard against a missing 'allow' key — .get('allow') returns
        # None, which is not iterable.
        for allowed in preferred_quality.get('allow') or []:
            if found.get(allowed):
                del found[allowed]

        # NOTE(review): preferred_quality['identifier'] raises KeyError when an
        # empty dict is passed — confirm callers always supply a full quality.
        return not (found.get(preferred_quality['identifier']) and len(found) == 1)

    def correctYear(self, haystack, year, year_range):
        """True when any string in `haystack` contains a year within
        [year - year_range, year + year_range]."""
        if not isinstance(haystack, (list, tuple, set)):
            haystack = [haystack]

        year_name = {}
        for string in haystack:
            year_name = fireEvent('scanner.name_year', string, single = True)
            if year_name and ((year - year_range) <= year_name.get('year') <= (year + year_range)):
                log.debug('Movie year matches range: %s looking for %s', (year_name.get('year'), year))
                return True

        # BUGFIX: typo in log message ("matche" -> "match")
        log.debug('Movie year doesn\'t match range: %s looking for %s', (year_name.get('year'), year))

        return False

    def correctName(self, check_name, movie_name):
        """True when `check_name` (or a quoted/bracketed section of it)
        contains exactly the words of `movie_name` and nothing extra."""
        check_names = [check_name]

        # Match names between "
        try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
        except: pass

        # Match longest name between []
        try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', check_name), key = len).strip())
        except: pass

        for check_name in list(set(check_names)):
            check_movie = fireEvent('scanner.name_year', check_name, single = True)

            try:
                check_words = filter(None, re.split('\W+', check_movie.get('name', '')))
                movie_words = filter(None, re.split('\W+', simplifyString(movie_name)))

                # Every word of the release's name must appear in the movie title.
                if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0:
                    return True
            except:
                pass

        return False

    def correctWords(self, rel_name, media):
        """Apply the global (and per-category) required/ignored word filters
        plus a porn-tag blacklist to a release name."""
        media_title = fireEvent('library.title', media['library'], single = True)
        media_words = re.split('\W+', simplifyString(media_title))

        rel_name = simplifyString(rel_name)
        rel_words = re.split('\W+', rel_name)

        # Make sure it has required words
        required_words = splitString(self.conf('required_words', section = 'searcher').lower())
        try: required_words = list(set(required_words + splitString(media['category']['required'].lower())))
        except: pass

        req_match = 0
        for req_set in required_words:
            req = splitString(req_set, '&')
            req_match += len(list(set(rel_words) & set(req))) == len(req)

        if len(required_words) > 0 and req_match == 0:
            log.info2('Wrong: Required word missing: %s', rel_name)
            return False

        # Ignore releases
        ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
        try: ignored_words = list(set(ignored_words + splitString(media['category']['ignored'].lower())))
        except: pass

        ignored_match = 0
        for ignored_set in ignored_words:
            ignored = splitString(ignored_set, '&')
            ignored_match += len(list(set(rel_words) & set(ignored))) == len(ignored)

        if len(ignored_words) > 0 and ignored_match:
            log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
            return False

        # Ignore porn stuff, unless the words appear in the movie title itself.
        pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic', 'cock', 'dick']
        pron_words = list(set(rel_words) & set(pron_tags) - set(media_words))
        if pron_words:
            log.info('Wrong: %s, probably pr0n', rel_name)
            return False

        return True
class SearchSetupError(Exception):
    """Raised when a searcher cannot be set up properly."""
    pass

View File

@@ -0,0 +1,6 @@
from couchpotato.core.media import MediaBase
class MovieTypeBase(MediaBase):
    """Media-type base class for movies; identifies handlers as type 'movie'."""

    _type = 'movie'

View File

@@ -0,0 +1,6 @@
from .main import MovieBase


def start():
    """Plugin entry point: build the movie base plugin."""
    return MovieBase()


# This plugin exposes no user-facing settings.
config = []

View File

@@ -0,0 +1,185 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.core.settings.model import Media
import time
log = CPLog(__name__)
class MovieBase(MovieTypeBase):
    """Movie media type.

    Exposes the movie.add / movie.edit API endpoints and the 'movie.add'
    event used to put movies on the wanted list.
    """

    _type = 'movie'

    def __init__(self):

        # Initialize this type
        super(MovieBase, self).__init__()
        self.initType()

        addApiView('movie.add', self.addView, docs = {
            'desc': 'Add new movie to the wanted list',
            'params': {
                'identifier': {'desc': 'IMDB id of the movie your want to add.'},
                'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
                'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
                'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
            }
        })
        addApiView('movie.edit', self.edit, docs = {
            # BUGFIX: description was a copy-paste of movie.add's.
            'desc': 'Edit movie on the wanted list',
            'params': {
                'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'},
                'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'},
                'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
                'default_title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
            }
        })

        addEvent('movie.add', self.add)

    def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None):
        """Add (or re-add) a movie to the wanted list.

        Returns the movie dict on success, False when the identifier is
        missing or appears to be a TV show.
        """
        if not params: params = {}

        if not params.get('identifier'):
            msg = 'Can\'t add movie without imdb identifier.'
            log.error(msg)
            fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
            return False
        else:
            try:
                is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), single = True)
                if not is_movie:
                    msg = 'Can\'t add movie, seems to be a TV show.'
                    log.error(msg)
                    fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
                    return False
            except:
                # Best-effort check only; a failing lookup shouldn't block adding.
                pass

        library = fireEvent('library.add.movie', single = True, attrs = params, update_after = update_library)

        # Status
        status_active, snatched_status, ignored_status, done_status, downloaded_status = \
            fireEvent('status.get', ['active', 'snatched', 'ignored', 'done', 'downloaded'], single = True)

        default_profile = fireEvent('profile.default', single = True)
        cat_id = params.get('category_id')

        db = get_session()
        m = db.query(Media).filter_by(library_id = library.get('id')).first()
        added = True
        do_search = False
        search_after = search_after and self.conf('search_on_add', section = 'moviesearcher')

        if not m:
            m = Media(
                library_id = library.get('id'),
                profile_id = params.get('profile_id', default_profile.get('id')),
                status_id = status_id if status_id else status_active.get('id'),
                category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None,
            )
            db.add(m)
            db.commit()

            onComplete = None
            if search_after:
                onComplete = self.createOnComplete(m.id)

            # The search (if any) starts when the async library update finishes.
            fireEventAsync('library.update.movie', params.get('identifier'), default_title = params.get('title', ''), on_complete = onComplete)
            search_after = False
        elif force_readd:

            # Clean snatched history
            for release in m.releases:
                if release.status_id in [downloaded_status.get('id'), snatched_status.get('id'), done_status.get('id')]:
                    if params.get('ignore_previous', False):
                        release.status_id = ignored_status.get('id')
                    else:
                        fireEvent('release.delete', release.id, single = True)

            m.profile_id = params.get('profile_id', default_profile.get('id'))
            m.category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else (m.category_id or None)
        else:
            log.debug('Movie already exists, not updating: %s', params)
            added = False

        if force_readd:
            m.status_id = status_id if status_id else status_active.get('id')
            m.last_edit = int(time.time())
            do_search = True

        db.commit()

        # Remove available releases so a fresh search starts clean
        available_status = fireEvent('status.get', 'available', single = True)
        for rel in m.releases:
            # BUGFIX: compare ids with ==, not `is` — identity comparison only
            # works by accident for CPython's interned small ints.
            if rel.status_id == available_status.get('id'):
                db.delete(rel)
                db.commit()

        movie_dict = m.to_dict(self.default_dict)

        if do_search and search_after:
            onComplete = self.createOnComplete(m.id)
            onComplete()

        if added:
            fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = 'Successfully added "%s" to your wanted list.' % params.get('title', ''))

        db.expire_all()
        return movie_dict

    def addView(self, **kwargs):
        """API wrapper around add(): returns a success flag plus the movie dict."""
        add_dict = self.add(params = kwargs)

        return {
            'success': True if add_dict else False,
            'movie': add_dict,
        }

    def edit(self, id = '', **kwargs):
        """Change profile, category and/or default title for one or more
        movies, then trigger a new search for each."""
        db = get_session()

        available_status = fireEvent('status.get', 'available', single = True)

        ids = splitString(id)
        for media_id in ids:

            m = db.query(Media).filter_by(id = media_id).first()
            if not m:
                continue

            m.profile_id = kwargs.get('profile_id')

            cat_id = kwargs.get('category_id')
            if cat_id is not None:
                m.category_id = tryInt(cat_id) if tryInt(cat_id) > 0 else None

            # Remove releases
            for rel in m.releases:
                # BUGFIX: use == for id comparison, not `is` (see add()).
                if rel.status_id == available_status.get('id'):
                    db.delete(rel)
                    db.commit()

            # Default title
            if kwargs.get('default_title'):
                for title in m.library.titles:
                    title.default = toUnicode(kwargs.get('default_title', '')).lower() == toUnicode(title.title).lower()

            db.commit()

            fireEvent('media.restatus', m.id)

            movie_dict = m.to_dict(self.search_dict)
            fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id))

        db.expire_all()
        return {
            'success': True,
        }

View File

@@ -14,6 +14,7 @@ var MovieList = new Class({
movies: [],
movies_added: {},
total_movies: 0,
letters: {},
filter: null,
@@ -23,7 +24,7 @@ var MovieList = new Class({
self.offset = 0;
self.filter = self.options.filter || {
'startswith': null,
'starts_with': null,
'search': null
}
@@ -48,11 +49,11 @@ var MovieList = new Class({
self.changeView('list');
else
self.changeView(self.getSavedView() || self.options.view || 'details');
self.getMovies();
App.addEvent('movie.added', self.movieAdded.bind(self))
App.addEvent('movie.deleted', self.movieDeleted.bind(self))
App.on('movie.added', self.movieAdded.bind(self))
App.on('movie.deleted', self.movieDeleted.bind(self))
},
movieDeleted: function(notification){
@@ -62,7 +63,9 @@ var MovieList = new Class({
self.movies.each(function(movie){
if(movie.get('id') == notification.data.id){
movie.destroy();
delete self.movies_added[notification.data.id]
delete self.movies_added[notification.data.id];
self.setCounter(self.counter_count-1);
self.total_movies--;
}
})
}
@@ -73,9 +76,11 @@ var MovieList = new Class({
movieAdded: function(notification){
var self = this;
self.fireEvent('movieAdded', notification);
if(self.options.add_new && !self.movies_added[notification.data.id] && notification.data.status.identifier == self.options.status){
window.scroll(0,0);
self.createMovie(notification.data, 'top');
self.setCounter(self.counter_count+1);
self.checkIfEmpty();
}
@@ -115,7 +120,7 @@ var MovieList = new Class({
self.createMovie(movie);
});
self.total_movies = total;
self.total_movies += total;
self.setCounter(total);
},
@@ -125,8 +130,41 @@ var MovieList = new Class({
if(!self.navigation_counter) return;
self.counter_count = count;
self.navigation_counter.set('text', (count || 0) + ' movies');
if (self.empty_message) {
self.empty_message.destroy();
self.empty_message = null;
}
if(self.total_movies && count == 0 && !self.empty_message){
var message = (self.filter.search ? 'for "'+self.filter.search+'"' : '') +
(self.filter.starts_with ? ' in <strong>'+self.filter.starts_with+'</strong>' : '');
self.empty_message = new Element('.message', {
'html': 'No movies found ' + message + '.<br/>'
}).grab(
new Element('a', {
'text': 'Reset filter',
'events': {
'click': function(){
self.filter = {
'starts_with': null,
'search': null
};
self.navigation_search_input.set('value', '');
self.reset();
self.activateLetter();
self.getMovies(true);
self.last_search_value = '';
}
}
})
).inject(self.movie_list);
}
},
createMovie: function(movie, inject_at){
@@ -151,66 +189,69 @@ var MovieList = new Class({
self.el.addClass('with_navigation')
self.navigation = new Element('div.alph_nav').grab(
new Element('div').adopt(
self.navigation_alpha = new Element('ul.numbers', {
self.navigation = new Element('div.alph_nav').adopt(
self.mass_edit_form = new Element('div.mass_edit_form').adopt(
new Element('span.select').adopt(
self.mass_edit_select = new Element('input[type=checkbox].inlay', {
'events': {
'change': self.massEditToggleAll.bind(self)
}
}),
self.mass_edit_selected = new Element('span.count', {'text': 0}),
self.mass_edit_selected_label = new Element('span', {'text': 'selected'})
),
new Element('div.quality').adopt(
self.mass_edit_quality = new Element('select'),
new Element('a.button.orange', {
'text': 'Change quality',
'events': {
'click': self.changeQualitySelected.bind(self)
}
})
),
new Element('div.delete').adopt(
new Element('span[text=or]'),
new Element('a.button.red', {
'text': 'Delete',
'events': {
'click': self.deleteSelected.bind(self)
}
})
),
new Element('div.refresh').adopt(
new Element('span[text=or]'),
new Element('a.button.green', {
'text': 'Refresh',
'events': {
'click': self.refreshSelected.bind(self)
}
})
)
),
new Element('div.menus').adopt(
self.navigation_counter = new Element('span.counter[title=Total]'),
self.filter_menu = new Block.Menu(self, {
'class': 'filter'
}),
self.navigation_actions = new Element('ul.actions', {
'events': {
'click:relay(li)': function(e, el){
self.movie_list.empty()
self.activateLetter(el.get('data-letter'))
self.getMovies()
var a = 'active';
self.navigation_actions.getElements('.'+a).removeClass(a);
self.changeView(el.get('data-view'));
this.addClass(a);
el.inject(el.getParent(), 'top');
el.getSiblings().hide()
setTimeout(function(){
el.getSiblings().setStyle('display', null);
}, 100)
}
}
}),
self.navigation_counter = new Element('span.counter[title=Total]'),
self.navigation_actions = new Element('ul.inlay.actions.reversed'),
self.navigation_search_input = new Element('input.search.inlay', {
'title': 'Search through ' + self.options.identifier,
'placeholder': 'Search through ' + self.options.identifier,
'events': {
'keyup': self.search.bind(self),
'change': self.search.bind(self)
}
}),
self.navigation_menu = new Block.Menu(self),
self.mass_edit_form = new Element('div.mass_edit_form').adopt(
new Element('span.select').adopt(
self.mass_edit_select = new Element('input[type=checkbox].inlay', {
'events': {
'change': self.massEditToggleAll.bind(self)
}
}),
self.mass_edit_selected = new Element('span.count', {'text': 0}),
self.mass_edit_selected_label = new Element('span', {'text': 'selected'})
),
new Element('div.quality').adopt(
self.mass_edit_quality = new Element('select'),
new Element('a.button.orange', {
'text': 'Change quality',
'events': {
'click': self.changeQualitySelected.bind(self)
}
})
),
new Element('div.delete').adopt(
new Element('span[text=or]'),
new Element('a.button.red', {
'text': 'Delete',
'events': {
'click': self.deleteSelected.bind(self)
}
})
),
new Element('div.refresh').adopt(
new Element('span[text=or]'),
new Element('a.button.green', {
'text': 'Refresh',
'events': {
'click': self.refreshSelected.bind(self)
}
})
)
)
self.navigation_menu = new Block.Menu(self, {
'class': 'extra'
})
)
).inject(self.el, 'top');
@@ -223,20 +264,56 @@ var MovieList = new Class({
}).inject(self.mass_edit_quality)
});
self.filter_menu.addLink(
self.navigation_search_input = new Element('input', {
'title': 'Search through ' + self.options.identifier,
'placeholder': 'Search through ' + self.options.identifier,
'events': {
'keyup': self.search.bind(self),
'change': self.search.bind(self)
}
})
).addClass('search');
var available_chars;
self.filter_menu.addEvent('open', function(){
self.navigation_search_input.focus();
// Get available chars and highlight
if(!available_chars && (self.navigation.isDisplayed() || self.navigation.isVisible()))
Api.request('media.available_chars', {
'data': Object.merge({
'status': self.options.status
}, self.filter),
'onSuccess': function(json){
available_chars = json.chars
json.chars.split('').each(function(c){
self.letters[c.capitalize()].addClass('available')
})
}
});
});
self.filter_menu.addLink(
self.navigation_alpha = new Element('ul.numbers', {
'events': {
'click:relay(li.available)': function(e, el){
self.activateLetter(el.get('data-letter'))
self.getMovies(true)
}
}
})
);
// Actions
['mass_edit', 'details', 'list'].each(function(view){
self.navigation_actions.adopt(
new Element('li.'+view+(self.current_view == view ? '.active' : '')+'[data-view='+view+']', {
'events': {
'click': function(e){
var a = 'active';
self.navigation_actions.getElements('.'+a).removeClass(a);
self.changeView(this.get('data-view'));
this.addClass(a);
}
}
}).adopt(new Element('span'))
)
var current = self.current_view == view;
new Element('li', {
'class': 'icon2 ' + view + (current ? ' active ' : ''),
'data-view': view
}).inject(self.navigation_actions, current ? 'top' : 'bottom');
});
// All
@@ -253,21 +330,6 @@ var MovieList = new Class({
}).inject(self.navigation_alpha);
});
// Get available chars and highlight
if(self.navigation.isDisplayed() || self.navigation.isVisible())
Api.request('movie.available_chars', {
'data': Object.merge({
'status': self.options.status
}, self.filter),
'onSuccess': function(json){
json.chars.split('').each(function(c){
self.letters[c.capitalize()].addClass('available')
})
}
});
// Add menu or hide
if (self.options.menu.length > 0)
self.options.menu.each(function(menu_item){
@@ -310,7 +372,7 @@ var MovieList = new Class({
'click': function(e){
(e).preventDefault();
this.set('text', 'Deleting..')
Api.request('movie.delete', {
Api.request('media.delete', {
'data': {
'id': ids.join(','),
'delete_from': self.options.identifier
@@ -322,14 +384,15 @@ var MovieList = new Class({
self.movies.each(function(movie){
if (movie.isSelected()){
$(movie).destroy()
erase_movies.include(movie)
erase_movies.include(movie);
}
});
erase_movies.each(function(movie){
self.movies.erase(movie);
movie.destroy()
movie.destroy();
self.setCounter(self.counter_count-1);
self.total_movies--;
});
self.calculateSelected();
@@ -362,7 +425,7 @@ var MovieList = new Class({
var self = this;
var ids = self.getSelectedMovies()
Api.request('movie.refresh', {
Api.request('media.refresh', {
'data': {
'id': ids.join(','),
}
@@ -448,8 +511,7 @@ var MovieList = new Class({
self.activateLetter();
self.filter.search = search_value;
self.movie_list.empty();
self.getMovies();
self.getMovies(true);
self.last_search_value = search_value;
@@ -461,11 +523,10 @@ var MovieList = new Class({
var self = this;
self.reset();
self.movie_list.empty();
self.getMovies();
self.getMovies(true);
},
getMovies: function(){
getMovies: function(reset){
var self = this;
if(self.scrollspy){
@@ -489,13 +550,17 @@ var MovieList = new Class({
}
Api.request(self.options.api_call || 'movie.list', {
Api.request(self.options.api_call || 'media.list', {
'data': Object.merge({
'type': 'movie',
'status': self.options.status,
'limit_offset': self.options.limit + ',' + self.offset
'limit_offset': self.options.limit ? self.options.limit + ',' + self.offset : null
}, self.filter),
'onSuccess': function(json){
if(reset)
self.movie_list.empty();
if(self.loader_first){
var lf = self.loader_first;
self.loader_first.addClass('hide')
@@ -507,7 +572,7 @@ var MovieList = new Class({
}
self.store(json.movies);
self.addMovies(json.movies, json.total);
self.addMovies(json.movies, json.total || json.movies.length);
if(self.scrollspy) {
self.load_more.set('text', 'load more movies');
self.scrollspy.start();

View File

@@ -1,9 +1,13 @@
var MovieAction = new Class({
class_name: 'action icon',
Implements: [Options],
initialize: function(movie){
class_name: 'action icon2',
initialize: function(movie, options){
var self = this;
self.setOptions(options);
self.movie = movie;
self.create();
@@ -14,11 +18,39 @@ var MovieAction = new Class({
create: function(){},
disable: function(){
this.el.addClass('disable')
if(this.el)
this.el.addClass('disable')
},
enable: function(){
this.el.removeClass('disable')
if(this.el)
this.el.removeClass('disable')
},
getTitle: function(){
var self = this;
try {
return self.movie.getTitle();
}
catch(e){
try {
return self.movie.original_title ? self.movie.original_title : self.movie.titles[0];
}
catch(e){
return 'Unknown';
}
}
},
get: function(key){
var self = this;
try {
return self.movie.get(key)
}
catch(e){
return self.movie[key]
}
},
createMask: function(){
@@ -62,10 +94,10 @@ MA.IMDB = new Class({
create: function(){
var self = this;
self.id = self.movie.get('identifier');
self.id = self.movie.get('imdb') || self.movie.get('identifier');
self.el = new Element('a.imdb', {
'title': 'Go to the IMDB page of ' + self.movie.getTitle(),
'title': 'Go to the IMDB page of ' + self.getTitle(),
'href': 'http://www.imdb.com/title/'+self.id+'/',
'target': '_blank'
});
@@ -82,8 +114,8 @@ MA.Release = new Class({
create: function(){
var self = this;
self.el = new Element('a.releases.icon.download', {
'title': 'Show the releases that are available for ' + self.movie.getTitle(),
self.el = new Element('a.releases.download', {
'title': 'Show the releases that are available for ' + self.getTitle(),
'events': {
'click': self.show.bind(self)
}
@@ -94,16 +126,56 @@ MA.Release = new Class({
else
self.showHelper();
App.on('movie.searcher.ended', function(notification){
if(self.movie.data.id != notification.data.id) return;
self.releases = null;
if(self.options_container){
self.options_container.destroy();
self.options_container = null;
}
});
},
show: function(e){
var self = this;
if(e)
(e).preventDefault();
if(self.releases)
self.createReleases();
else {
self.movie.busy(true);
Api.request('release.for_movie', {
'data': {
'id': self.movie.data.id
},
'onComplete': function(json){
self.movie.busy(false, 1);
if(json && json.releases){
self.releases = json.releases;
self.createReleases();
}
else
alert('Something went wrong, check the logs.');
}
});
}
},
createReleases: function(){
var self = this;
if(!self.options_container){
self.options_container = new Element('div.options').adopt(
self.release_container = new Element('div.releases.table').adopt(
self.trynext_container = new Element('div.buttons.try_container')
)
self.options_container = new Element('div.options').grab(
self.release_container = new Element('div.releases.table')
);
// Header
@@ -117,7 +189,7 @@ MA.Release = new Class({
new Element('span.provider', {'text': 'Provider'})
).inject(self.release_container)
self.movie.data.releases.sortBy('-info.score').each(function(release){
self.releases.each(function(release){
var status = Status.get(release.status_id),
quality = Quality.getProfile(release.quality_id) || {},
@@ -138,7 +210,7 @@ MA.Release = new Class({
}
// Create release
new Element('div', {
var item = new Element('div', {
'class': 'item '+status.identifier,
'id': 'release_'+release.id
}).adopt(
@@ -149,11 +221,11 @@ MA.Release = new Class({
new Element('span.age', {'text': self.get(release, 'age')}),
new Element('span.score', {'text': self.get(release, 'score')}),
new Element('span.provider', { 'text': provider, 'title': provider }),
release.info['detail_url'] ? new Element('a.info.icon', {
release.info['detail_url'] ? new Element('a.info.icon2', {
'href': release.info['detail_url'],
'target': '_blank'
}) : null,
new Element('a.download.icon', {
}) : new Element('a'),
new Element('a.download.icon2', {
'events': {
'click': function(e){
(e).preventDefault();
@@ -162,16 +234,16 @@ MA.Release = new Class({
}
}
}),
new Element('a.delete.icon', {
new Element('a.delete.icon2', {
'events': {
'click': function(e){
(e).preventDefault();
self.ignore(release);
this.getParent('.item').toggleClass('ignored')
}
}
})
).inject(self.release_container)
).inject(self.release_container);
release['el'] = item;
if(status.identifier == 'ignored' || status.identifier == 'failed' || status.identifier == 'snatched'){
if(!self.last_release || (self.last_release && self.last_release.status.identifier != 'snatched' && status.identifier == 'snatched'))
@@ -180,35 +252,68 @@ MA.Release = new Class({
else if(!self.next_release && status.identifier == 'available'){
self.next_release = release;
}
var update_handle = function(notification) {
if(notification.data.id != release.id) return;
var q = self.movie.quality.getElement('.q_id' + release.quality_id),
status = Status.get(release.status_id),
new_status = Status.get(notification.data.status_id);
release.status_id = new_status.id
release.el.set('class', 'item ' + new_status.identifier);
var status_el = release.el.getElement('.release_status');
status_el.set('class', 'release_status ' + new_status.identifier);
status_el.set('text', new_status.identifier);
if(!q && (new_status.identifier == 'snatched' || new_status.identifier == 'seeding' || new_status.identifier == 'done'))
var q = self.addQuality(release.quality_id);
if(new_status && q && !q.hasClass(new_status.identifier)) {
q.removeClass(status.identifier).addClass(new_status.identifier);
q.set('title', q.get('title').replace(status.label, new_status.label));
}
}
App.on('release.update_status', update_handle);
});
if(self.last_release){
self.release_container.getElement('#release_'+self.last_release.id).addClass('last_release');
}
if(self.last_release)
self.release_container.getElements('#release_'+self.last_release.id).addClass('last_release');
if(self.next_release){
self.release_container.getElement('#release_'+self.next_release.id).addClass('next_release');
}
if(self.next_release)
self.release_container.getElements('#release_'+self.next_release.id).addClass('next_release');
if(self.next_release || self.last_release){
if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){
self.trynext_container = new Element('div.buttons.try_container').inject(self.release_container, 'top');
var nr = self.next_release,
lr = self.last_release;
self.trynext_container.adopt(
new Element('span.or', {
'text': 'This movie is snatched, if anything went wrong, download'
}),
self.last_release ? new Element('a.button.orange', {
lr ? new Element('a.button.orange', {
'text': 'the same release again',
'events': {
'click': self.trySameRelease.bind(self)
'click': function(){
self.download(lr);
}
}
}) : null,
self.next_release && self.last_release ? new Element('span.or', {
nr && lr ? new Element('span.or', {
'text': ','
}) : null,
self.next_release ? [new Element('a.button.green', {
'text': self.last_release ? 'another release' : 'the best release',
nr ? [new Element('a.button.green', {
'text': lr ? 'another release' : 'the best release',
'events': {
'click': self.tryNextRelease.bind(self)
'click': function(){
self.download(nr);
}
}
}),
new Element('span.or', {
@@ -217,18 +322,15 @@ MA.Release = new Class({
)
}
self.last_release = null;
self.next_release = null;
}
},
show: function(e){
var self = this;
if(e)
(e).preventDefault();
self.createReleases();
// Show it
self.options_container.inject(self.movie, 'top');
self.movie.slide('in', self.options_container);
},
showHelper: function(e){
@@ -236,19 +338,34 @@ MA.Release = new Class({
if(e)
(e).preventDefault();
self.createReleases();
self.trynext_container = new Element('div.buttons.trynext').inject(self.movie.info_container);
var has_available = false,
has_snatched = false;
if(self.next_release || self.last_release){
self.movie.data.releases.each(function(release){
if(has_available && has_snatched) return;
var status = Status.get(release.status_id);
if(['snatched', 'downloaded', 'seeding'].contains(status.identifier))
has_snatched = true;
if(['available'].contains(status.identifier))
has_available = true;
});
if(has_available || has_snatched){
self.trynext_container = new Element('div.buttons.trynext').inject(self.movie.info_container);
self.trynext_container.adopt(
self.next_release ? [new Element('a.icon.readd', {
'text': self.last_release ? 'Download another release' : 'Download the best release',
has_available ? [new Element('a.icon2.readd', {
'text': has_snatched ? 'Download another release' : 'Download the best release',
'events': {
'click': self.tryNextRelease.bind(self)
}
}),
new Element('a.icon.download', {
new Element('a.icon2.download', {
'text': 'pick one yourself',
'events': {
'click': function(){
@@ -256,27 +373,10 @@ MA.Release = new Class({
}
}
})] : null,
new Element('a.icon.completed', {
new Element('a.icon2.completed', {
'text': 'mark this movie done',
'events': {
'click': function(){
Api.request('movie.delete', {
'data': {
'id': self.movie.get('id'),
'delete_from': 'wanted'
},
'onComplete': function(){
var movie = $(self.movie);
movie.set('tween', {
'duration': 300,
'onComplete': function(){
self.movie.destroy()
}
});
movie.tween('height', 0);
}
});
}
'click': self.markMovieDone.bind(self)
}
})
)
@@ -285,28 +385,34 @@ MA.Release = new Class({
},
get: function(release, type){
return release.info[type] || 'n/a'
return release.info[type] !== undefined ? release.info[type] : 'n/a'
},
download: function(release){
var self = this;
var release_el = self.release_container.getElement('#release_'+release.id),
icon = release_el.getElement('.download.icon');
icon = release_el.getElement('.download.icon2');
self.movie.busy(true);
if(icon)
icon.addClass('icon spinner').removeClass('download');
Api.request('release.download', {
Api.request('release.manual_download', {
'data': {
'id': release.id
},
'onComplete': function(json){
self.movie.busy(false);
if(icon)
icon.removeClass('icon spinner');
if(json.success)
icon.addClass('completed');
if(json.success){
if(icon)
icon.addClass('completed');
release_el.getElement('.release_status').set('text', 'snatched');
}
else
icon.addClass('attention').set('title', 'Something went wrong when downloading, please check logs.');
if(icon)
icon.addClass('attention').set('title', 'Something went wrong when downloading, please check logs.');
}
});
},
@@ -317,29 +423,41 @@ MA.Release = new Class({
Api.request('release.ignore', {
'data': {
'id': release.id
}
},
})
},
tryNextRelease: function(movie_id){
markMovieDone: function(){
var self = this;
self.createReleases();
if(self.last_release)
self.ignore(self.last_release);
if(self.next_release)
self.download(self.next_release);
Api.request('media.delete', {
'data': {
'id': self.movie.get('id'),
'delete_from': 'wanted'
},
'onComplete': function(){
var movie = $(self.movie);
movie.set('tween', {
'duration': 300,
'onComplete': function(){
self.movie.destroy()
}
});
movie.tween('height', 0);
}
});
},
trySameRelease: function(movie_id){
tryNextRelease: function(){
var self = this;
if(self.last_release)
self.download(self.last_release);
Api.request('movie.searcher.try_next', {
'data': {
'id': self.movie.get('id')
}
});
}
@@ -354,7 +472,7 @@ MA.Trailer = new Class({
var self = this;
self.el = new Element('a.trailer', {
'title': 'Watch the trailer of ' + self.movie.getTitle(),
'title': 'Watch the trailer of ' + self.getTitle(),
'events': {
'click': self.watch.bind(self)
}
@@ -365,14 +483,14 @@ MA.Trailer = new Class({
watch: function(offset){
var self = this;
var data_url = 'http://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18'
var data_url = 'https://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18'
var url = data_url.substitute({
'title': encodeURI(self.movie.getTitle()),
'year': self.movie.get('year'),
'title': encodeURI(self.getTitle()),
'year': self.get('year'),
'offset': offset || 1
}),
size = $(self.movie).getSize(),
height = (size.x/16)*9,
height = self.options.height || (size.x/16)*9,
id = 'trailer-'+randomString();
self.player_container = new Element('div[id='+id+']');
@@ -478,6 +596,11 @@ MA.Edit = new Class({
self.profile_select = new Element('select', {
'name': 'profile'
}),
self.category_select = new Element('select', {
'name': 'category'
}).grab(
new Element('option', {'value': -1, 'text': 'None'})
),
new Element('a.button.edit', {
'text': 'Save & Search',
'events': {
@@ -497,7 +620,34 @@ MA.Edit = new Class({
});
Quality.getActiveProfiles().each(function(profile){
// Fill categories
var categories = CategoryList.getAll();
if(categories.length == 0)
self.category_select.hide();
else {
self.category_select.show();
categories.each(function(category){
var category_id = category.data.id;
new Element('option', {
'value': category_id,
'text': category.data.label
}).inject(self.category_select);
if(self.movie.category && self.movie.category.data && self.movie.category.data.id == category_id)
self.category_select.set('value', category_id);
});
}
// Fill profiles
var profiles = Quality.getActiveProfiles();
if(profiles.length == 1)
self.profile_select.hide();
profiles.each(function(profile){
var profile_id = profile.id ? profile.id : profile.data.id;
@@ -506,8 +656,9 @@ MA.Edit = new Class({
'text': profile.label ? profile.label : profile.data.label
}).inject(self.profile_select);
if(self.movie.profile && self.movie.profile.data && self.movie.profile.data.id == profile_id)
if(self.movie.get('profile_id') == profile_id)
self.profile_select.set('value', profile_id);
});
}
@@ -523,7 +674,8 @@ MA.Edit = new Class({
'data': {
'id': self.movie.get('id'),
'default_title': self.title_select.get('value'),
'profile_id': self.profile_select.get('value')
'profile_id': self.profile_select.get('value'),
'category_id': self.category_select.get('value')
},
'useSpinner': true,
'spinnerTarget': $(self.movie),
@@ -558,7 +710,7 @@ MA.Refresh = new Class({
var self = this;
(e).preventDefault();
Api.request('movie.refresh', {
Api.request('media.refresh', {
'data': {
'id': self.movie.get('id')
}
@@ -654,6 +806,7 @@ MA.Delete = new Class({
var self = this;
(e).preventDefault();
self.movie.removeView();
self.movie.slide('out');
},
@@ -668,7 +821,7 @@ MA.Delete = new Class({
self.callChain();
},
function(){
Api.request('movie.delete', {
Api.request('media.delete', {
'data': {
'id': self.movie.get('id'),
'delete_from': self.movie.list.options.identifier
@@ -702,16 +855,45 @@ MA.Files = new Class({
self.el = new Element('a.directory', {
'title': 'Available files',
'events': {
'click': self.showFiles.bind(self)
'click': self.show.bind(self)
}
});
},
showFiles: function(e){
show: function(e){
var self = this;
(e).preventDefault();
if(self.releases)
self.showFiles();
else {
self.movie.busy(true);
Api.request('release.for_movie', {
'data': {
'id': self.movie.data.id
},
'onComplete': function(json){
self.movie.busy(false, 1);
if(json && json.releases){
self.releases = json.releases;
self.showFiles();
}
else
alert('Something went wrong, check the logs.');
}
});
}
},
showFiles: function(){
var self = this;
if(!self.options_container){
self.options_container = new Element('div.options').adopt(
self.files_container = new Element('div.files.table')
@@ -724,7 +906,7 @@ MA.Files = new Class({
new Element('span.is_available', {'text': 'Available'})
).inject(self.files_container)
Array.each(self.movie.data.releases, function(release){
Array.each(self.releases, function(release){
var rel = new Element('div.release').inject(self.files_container);

View File

@@ -11,9 +11,10 @@ var Movie = new Class({
self.view = options.view || 'details';
self.list = list;
self.el = new Element('div.movie.inlay');
self.el = new Element('div.movie');
self.profile = Quality.getProfile(data.profile_id) || {};
self.category = CategoryList.getCategory(data.category_id) || {};
self.parent(self, options);
self.addEvents();
@@ -22,22 +23,49 @@ var Movie = new Class({
addEvents: function(){
var self = this;
App.addEvent('movie.update.'+self.data.id, function(notification){
self.busy(false)
self.update.delay(2000, self, notification);
});
self.global_events = {}
['movie.busy', 'searcher.started'].each(function(listener){
App.addEvent(listener+'.'+self.data.id, function(notification){
if(notification.data)
// Do refresh with new data
self.global_events['movie.update'] = function(notification){
if(self.data.id != notification.data.id) return;
self.busy(false);
self.removeView();
self.update.delay(2000, self, notification);
}
App.on('movie.update', self.global_events['movie.update']);
// Add spinner on load / search
['movie.busy', 'movie.searcher.started'].each(function(listener){
self.global_events[listener] = function(notification){
if(notification.data && self.data.id == notification.data.id)
self.busy(true)
});
}
App.on(listener, self.global_events[listener]);
})
App.addEvent('searcher.ended.'+self.data.id, function(notification){
if(notification.data)
// Remove spinner
self.global_events['movie.searcher.ended'] = function(notification){
if(notification.data && self.data.id == notification.data.id)
self.busy(false)
});
}
App.on('movie.searcher.ended', self.global_events['movie.searcher.ended']);
// Reload when releases have updated
self.global_events['release.update_status'] = function(notification){
var data = notification.data
if(data && self.data.id == data.media_id){
if(!self.data.releases)
self.data.releases = [];
self.data.releases.push({'quality_id': data.quality_id, 'status_id': data.status_id});
self.updateReleases();
}
}
App.on('release.update_status', self.global_events['release.update_status']);
},
destroy: function(){
@@ -50,13 +78,12 @@ var Movie = new Class({
self.list.checkIfEmpty();
// Remove events
App.removeEvents('movie.update.'+self.data.id);
['movie.busy', 'searcher.started'].each(function(listener){
App.removeEvents(listener+'.'+self.data.id);
})
Object.each(self.global_events, function(handle, listener){
App.off(listener, handle);
});
},
busy: function(set_busy){
busy: function(set_busy, timeout){
var self = this;
if(!set_busy){
@@ -70,9 +97,9 @@ var Movie = new Class({
self.spinner.el.destroy();
self.spinner = null;
self.mask = null;
}, 400);
}, timeout || 400);
}
}, 1000)
}, timeout || 1000)
}
else if(!self.spinner) {
self.createMask();
@@ -107,8 +134,10 @@ var Movie = new Class({
self.data = notification.data;
self.el.empty();
self.removeView();
self.profile = Quality.getProfile(self.data.profile_id) || {};
self.category = CategoryList.getCategory(self.data.category_id) || {};
self.create();
self.busy(false);
@@ -139,9 +168,6 @@ var Movie = new Class({
'text': self.data.library.year || 'n/a'
})
),
self.rating = new Element('div.rating.icon', {
'text': self.data.library.rating
}),
self.description = new Element('div.description', {
'text': self.data.library.plot
}),
@@ -149,8 +175,8 @@ var Movie = new Class({
'events': {
'click': function(e){
var releases = self.el.getElement('.actions .releases');
if(releases)
releases.fireEvent('click', [e])
if(releases.isVisible())
releases.fireEvent('click', [e])
}
}
})
@@ -178,12 +204,26 @@ var Movie = new Class({
});
// Add releases
self.updateReleases();
Object.each(self.options.actions, function(action, key){
self.action[key.toLowerCase()] = action = new self.options.actions[key](self)
if(action.el)
self.actions.adopt(action)
});
},
updateReleases: function(){
var self = this;
if(!self.data.releases || self.data.releases.length == 0) return;
self.data.releases.each(function(release){
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'done'))
if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){
@@ -192,16 +232,6 @@ var Movie = new Class({
}
});
Object.each(self.options.actions, function(action, key){
self.action[key.toLowerCase()] = action = new self.options.actions[key](self)
if(action.el)
self.actions.adopt(action)
});
if(!self.data.library.rating)
self.rating.hide();
},
addQuality: function(quality_id){
@@ -244,10 +274,10 @@ var Movie = new Class({
if(direction == 'in'){
self.temp_view = self.view;
self.changeView('details')
self.changeView('details');
self.el.addEvent('outerClick', function(){
self.removeView()
self.removeView();
self.slide('out')
})
el.show();
@@ -257,7 +287,8 @@ var Movie = new Class({
self.el.removeEvents('outerClick')
setTimeout(function(){
self.el.getElements('> :not(.data):not(.poster):not(.movie_container)').hide();
if(self.el)
self.el.getElements('> :not(.data):not(.poster):not(.movie_container)').hide();
}, 600);
self.data_container.removeClass('hide_right');
@@ -267,9 +298,10 @@ var Movie = new Class({
changeView: function(new_view){
var self = this;
self.el
.removeClass(self.view+'_view')
.addClass(new_view+'_view')
if(self.el)
self.el
.removeClass(self.view+'_view')
.addClass(new_view+'_view')
self.view = new_view;
},

View File

@@ -0,0 +1,229 @@
Block.Search.MovieItem = new Class({
Implements: [Options, Events],
initialize: function(info, options){
var self = this;
self.setOptions(options);
self.info = info;
self.alternative_titles = [];
self.create();
},
create: function(){
var self = this,
info = self.info;
self.el = new Element('div.media_result', {
'id': info.imdb
}).adopt(
self.thumbnail = info.images && info.images.poster.length > 0 ? new Element('img.thumbnail', {
'src': info.images.poster[0],
'height': null,
'width': null
}) : null,
self.options_el = new Element('div.options.inlay'),
self.data_container = new Element('div.data', {
'events': {
'click': self.showOptions.bind(self)
}
}).adopt(
self.info_container = new Element('div.info').adopt(
new Element('h2').adopt(
self.title = new Element('span.title', {
'text': info.titles && info.titles.length > 0 ? info.titles[0] : 'Unknown'
}),
self.year = info.year ? new Element('span.year', {
'text': info.year
}) : null
)
)
)
)
if(info.titles)
info.titles.each(function(title){
self.alternativeTitle({
'title': title
});
})
},
alternativeTitle: function(alternative){
var self = this;
self.alternative_titles.include(alternative);
},
getTitle: function(){
var self = this;
try {
return self.info.original_title ? self.info.original_title : self.info.titles[0];
}
catch(e){
return 'Unknown';
}
},
get: function(key){
return this.info[key]
},
showOptions: function(){
var self = this;
self.createOptions();
self.data_container.addClass('open');
self.el.addEvent('outerClick', self.closeOptions.bind(self))
},
closeOptions: function(){
var self = this;
self.data_container.removeClass('open');
self.el.removeEvents('outerClick')
},
add: function(e){
var self = this;
if(e)
(e).preventDefault();
self.loadingMask();
Api.request('movie.add', {
'data': {
'identifier': self.info.imdb,
'title': self.title_select.get('value'),
'profile_id': self.profile_select.get('value'),
'category_id': self.category_select.get('value')
},
'onComplete': function(json){
self.options_el.empty();
self.options_el.adopt(
new Element('div.message', {
'text': json.success ? 'Movie successfully added.' : 'Movie didn\'t add properly. Check logs'
})
);
self.mask.fade('out');
self.fireEvent('added');
},
'onFailure': function(){
self.options_el.empty();
self.options_el.adopt(
new Element('div.message', {
'text': 'Something went wrong, check the logs for more info.'
})
);
self.mask.fade('out');
}
});
},
createOptions: function(){
var self = this,
info = self.info;
if(!self.options_el.hasClass('set')){
if(self.info.in_library){
var in_library = [];
self.info.in_library.releases.each(function(release){
in_library.include(release.quality.label)
});
}
self.options_el.grab(
new Element('div', {
'class': self.info.in_wanted && self.info.in_wanted.profile_id || in_library ? 'in_library_wanted' : ''
}).adopt(
self.info.in_wanted && self.info.in_wanted.profile_id ? new Element('span.in_wanted', {
'text': 'Already in wanted list: ' + Quality.getProfile(self.info.in_wanted.profile_id).get('label')
}) : (in_library ? new Element('span.in_library', {
'text': 'Already in library: ' + in_library.join(', ')
}) : null),
self.title_select = new Element('select', {
'name': 'title'
}),
self.profile_select = new Element('select', {
'name': 'profile'
}),
self.category_select = new Element('select', {
'name': 'category'
}).grab(
new Element('option', {'value': -1, 'text': 'None'})
),
self.add_button = new Element('a.button', {
'text': 'Add',
'events': {
'click': self.add.bind(self)
}
})
)
);
Array.each(self.alternative_titles, function(alt){
new Element('option', {
'text': alt.title
}).inject(self.title_select)
})
// Fill categories
var categories = CategoryList.getAll();
if(categories.length == 0)
self.category_select.hide();
else {
self.category_select.movie();
categories.each(function(category){
new Element('option', {
'value': category.data.id,
'text': category.data.label
}).inject(self.category_select);
});
}
// Fill profiles
var profiles = Quality.getActiveProfiles();
if(profiles.length == 1)
self.profile_select.hide();
profiles.each(function(profile){
new Element('option', {
'value': profile.id ? profile.id : profile.data.id,
'text': profile.label ? profile.label : profile.data.label
}).inject(self.profile_select)
});
self.options_el.addClass('set');
if(categories.length == 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
!(self.info.in_wanted && self.info.in_wanted.profile_id || in_library))
self.add();
}
},
loadingMask: function(){
var self = this;
self.mask = new Element('div.mask').inject(self.el).fade('hide')
createSpinner(self.mask)
self.mask.fade('in')
},
toElement: function(){
return this.el
}
});

View File

@@ -0,0 +1,6 @@
from .main import MovieLibraryPlugin
def start():
return MovieLibraryPlugin()
config = []

View File

@@ -1,9 +1,8 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.helpers.variable import mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.media._base.library.base import LibraryBase
from couchpotato.core.settings.model import Library, LibraryTitle, File
from string import ascii_letters
import time
@@ -11,29 +10,52 @@ import traceback
log = CPLog(__name__)
class LibraryPlugin(Plugin):
class MovieLibraryPlugin(LibraryBase):
default_dict = {'titles': {}, 'files':{}}
def __init__(self):
addEvent('library.add', self.add)
addEvent('library.update', self.update)
addEvent('library.update_release_date', self.updateReleaseDate)
addEvent('library.query', self.query)
addEvent('library.add.movie', self.add)
addEvent('library.update.movie', self.update)
addEvent('library.update.movie.release_date', self.updateReleaseDate)
def query(self, library, first = True, include_year = True, **kwargs):
if library.get('type') != 'movie':
return
titles = [title['title'] for title in library['titles']]
# Add year identifier to titles
if include_year:
titles = [title + (' %s' % str(library['year'])) for title in titles]
if first:
return titles[0] if titles else None
return titles
def add(self, attrs = {}, update_after = True):
# movies don't yet contain these, so lets make sure to set defaults
type = attrs.get('type', 'movie')
primary_provider = attrs.get('primary_provider', 'imdb')
db = get_session()
l = db.query(Library).filter_by(identifier = attrs.get('identifier')).first()
l = db.query(Library).filter_by(type = type, identifier = attrs.get('identifier')).first()
if not l:
status = fireEvent('status.get', 'needs_update', single = True)
l = Library(
type = type,
primary_provider = primary_provider,
year = attrs.get('year'),
identifier = attrs.get('identifier'),
plot = toUnicode(attrs.get('plot')),
tagline = toUnicode(attrs.get('tagline')),
status_id = status.get('id')
status_id = status.get('id'),
info = {},
parent = None
)
title = LibraryTitle(
@@ -49,37 +71,39 @@ class LibraryPlugin(Plugin):
# Update library info
if update_after is not False:
handle = fireEventAsync if update_after is 'async' else fireEvent
handle('library.update', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))
handle('library.update.movie', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))
library_dict = l.to_dict(self.default_dict)
db.expire_all()
return library_dict
def update(self, identifier, default_title = '', force = False):
if self.shuttingDown():
return
db = get_session()
library = db.query(Library).filter_by(identifier = identifier).first()
done_status = fireEvent('status.get', 'done', single = True)
library_dict = None
if library:
library_dict = library.to_dict(self.default_dict)
do_update = True
if library.status_id == done_status.get('id') and not force:
do_update = False
else:
info = fireEvent('movie.info', merge = True, identifier = identifier)
info = fireEvent('movie.info', merge = True, identifier = identifier)
# Don't need those here
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
# Don't need those here
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
if not info or len(info) == 0:
log.error('Could not update, no movie info to work with: %s', identifier)
return False
if not info or len(info) == 0:
log.error('Could not update, no movie info to work with: %s', identifier)
return False
# Main info
if do_update:
@@ -87,7 +111,7 @@ class LibraryPlugin(Plugin):
library.tagline = toUnicode(info.get('tagline', ''))
library.year = info.get('year', 0)
library.status_id = done_status.get('id')
library.info = info
library.info.update(info)
db.commit()
# Titles
@@ -132,6 +156,7 @@ class LibraryPlugin(Plugin):
library_dict = library.to_dict(self.default_dict)
db.expire_all()
return library_dict
def updateReleaseDate(self, identifier):
@@ -145,11 +170,12 @@ class LibraryPlugin(Plugin):
else:
dates = library.info.get('release_date')
if dates and dates.get('expires', 0) < time.time() or not dates:
if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates:
dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
library.info = mergeDicts(library.info, {'release_date': dates })
library.info.update({'release_date': dates })
db.commit()
db.expire_all()
return dates

View File

@@ -0,0 +1,73 @@
from .main import MovieSearcher
import random
def start():
    """Plugin entry point: build and return the movie searcher instance."""
    searcher = MovieSearcher()
    return searcher
# Settings-page definition consumed by the settings loader.
# 'migrate_from': 'searcher' pulls previously-saved values out of the old
# generic "searcher" section; the cron_* options use APScheduler cron syntax.
config = [{
    'name': 'moviesearcher',
    'order': 20,
    'groups': [
        {
            'tab': 'searcher',
            'name': 'movie_searcher',
            'label': 'Movie search',
            'description': 'Search options for movies',
            'advanced': True,
            'options': [
                {
                    'name': 'always_search',
                    'default': False,
                    'migrate_from': 'searcher',
                    'type': 'bool',
                    'label': 'Always search',
                    'description': 'Search for movies even before there is a ETA. Enabling this will probably get you a lot of fakes.',
                },
                {
                    'name': 'run_on_launch',
                    'migrate_from': 'searcher',
                    'label': 'Run on launch',
                    'advanced': True,
                    'default': 0,
                    'type': 'bool',
                    'description': 'Force run the searcher after (re)start.',
                },
                {
                    'name': 'search_on_add',
                    'label': 'Search after add',
                    'advanced': True,
                    'default': 1,
                    'type': 'bool',
                    'description': 'Disable this to only search for movies on cron.',
                },
                {
                    'name': 'cron_day',
                    'migrate_from': 'searcher',
                    'label': 'Day',
                    'advanced': True,
                    'default': '*',
                    'type': 'string',
                    'description': '<strong>*</strong>: Every day, <strong>*/2</strong>: Every 2 days, <strong>1</strong>: Every first of the month. See <a href="http://packages.python.org/APScheduler/cronschedule.html">APScheduler</a> for details.',
                },
                {
                    'name': 'cron_hour',
                    'migrate_from': 'searcher',
                    'label': 'Hour',
                    'advanced': True,
                    # Randomized per-install default so all users don't hit providers at once
                    'default': random.randint(0, 23),
                    'type': 'string',
                    'description': '<strong>*</strong>: Every hour, <strong>*/8</strong>: Every 8 hours, <strong>3</strong>: At 3, midnight.',
                },
                {
                    'name': 'cron_minute',
                    'migrate_from': 'searcher',
                    'label': 'Minute',
                    'advanced': True,
                    'default': random.randint(0, 59),
                    'type': 'string',
                    'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour."
                },
            ],
        },
    ],
}]

View File

@@ -0,0 +1,350 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import getTitle, possibleTitles, getImdb
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.base import SearcherBase
from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.core.settings.model import Media, Release
from couchpotato.environment import Env
from datetime import date
import random
import re
import time
import traceback
log = CPLog(__name__)
class MovieSearcher(SearcherBase, MovieTypeBase):
    """Search all enabled providers for wanted movies and snatch matching releases.

    Registers the 'movie.searcher.*' events and API views, and implements the
    generic 'searcher.correct_release' validation for movie media.

    Fixes over the previous revision:
    - 'type' membership test uses `in` instead of Py2-only dict.has_key().
    - `has_better_quality == 0` instead of the fragile `is 0` identity check.
    - couldBeReleased() uses dates.get(..., 0) so a missing 'theater'/'dvd'
      key no longer raises TypeError on the date arithmetic.
    - Broad handlers catch Exception instead of bare except.
    """

    # False when idle, or a dict with 'total'/'to_go' while a full search runs
    in_progress = False

    def __init__(self):
        super(MovieSearcher, self).__init__()

        addEvent('movie.searcher.all', self.searchAll)
        addEvent('movie.searcher.all_view', self.searchAllView)
        addEvent('movie.searcher.single', self.single)
        addEvent('movie.searcher.try_next_release', self.tryNextRelease)
        addEvent('movie.searcher.could_be_released', self.couldBeReleased)
        addEvent('searcher.correct_release', self.correctRelease)

        addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = {
            'desc': 'Marks the snatched results as ignored and try the next best release',
            'params': {
                'id': {'desc': 'The id of the movie'},
            },
        })

        addApiView('movie.searcher.full_search', self.searchAllView, docs = {
            'desc': 'Starts a full search for all wanted movies',
        })

        addApiView('movie.searcher.progress', self.getProgress, docs = {
            'desc': 'Get the progress of current full search',
            'return': {'type': 'object', 'example': """{
    'progress': False || object, total & to_go,
}"""},
        })

        if self.conf('run_on_launch'):
            addEvent('app.load', self.searchAll)

    def searchAllView(self, **kwargs):
        """API view: kick off a full search in the background."""
        fireEventAsync('movie.searcher.all')

        return {
            'success': not self.in_progress
        }

    def searchAll(self):
        """Search every active movie (in random order to spread provider load)."""

        if self.in_progress:
            log.info('Search already in progress')
            fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress')
            return

        self.in_progress = True
        fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started')

        db = get_session()
        movies = db.query(Media).filter(
            Media.status.has(identifier = 'active')
        ).all()
        random.shuffle(movies)

        self.in_progress = {
            'total': len(movies),
            'to_go': len(movies),
        }

        try:
            search_protocols = fireEvent('searcher.protocols', single = True)

            for movie in movies:
                movie_dict = movie.to_dict({
                    'category': {},
                    'profile': {'types': {'quality': {}}},
                    'releases': {'status': {}, 'quality': {}},
                    'library': {'titles': {}, 'files':{}},
                    'files': {},
                })

                try:
                    self.single(movie_dict, search_protocols)
                except IndexError:
                    # Broken library data; force a refresh so the next run succeeds
                    log.error('Forcing library update for %s, if you see this often, please report: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
                    fireEvent('library.update.movie', movie_dict['library']['identifier'], force = True)
                except Exception:
                    # Best-effort per movie: log and continue with the rest
                    log.error('Search failed for %s: %s', (movie_dict['library']['identifier'], traceback.format_exc()))

                self.in_progress['to_go'] -= 1

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

        except SearchSetupError:
            pass

        self.in_progress = False

    def single(self, movie, search_protocols = None, manual = False):
        """Search for one movie dict; return True when a release was snatched.

        :param movie: media dict (with profile, releases, library sub-dicts)
        :param search_protocols: cached result of 'searcher.protocols', optional
        :param manual: user-triggered search; ignores 'done' status
        """

        # movies don't contain 'type' yet, so just set to default here
        if 'type' not in movie:
            movie['type'] = 'movie'

        # Find out search type
        try:
            if not search_protocols:
                search_protocols = fireEvent('searcher.protocols', single = True)
        except SearchSetupError:
            return

        done_status = fireEvent('status.get', 'done', single = True)

        if not movie['profile'] or (movie['status_id'] == done_status.get('id') and not manual):
            log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.')
            return

        db = get_session()

        pre_releases = fireEvent('quality.pre_releases', single = True)
        release_dates = fireEvent('library.update.movie.release_date', identifier = movie['library']['identifier'], merge = True)
        available_status, ignored_status, failed_status = fireEvent('status.get', ['available', 'ignored', 'failed'], single = True)

        found_releases = []
        too_early_to_search = []

        default_title = getTitle(movie['library'])
        if not default_title:
            log.error('No proper info found for movie, removing it from library to cause it from having more issues.')
            fireEvent('media.delete', movie['id'], single = True)
            return

        fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'id': movie['id']}, message = 'Searching for "%s"' % default_title)

        ret = False
        for quality_type in movie['profile']['types']:
            if not self.conf('always_search') and not self.couldBeReleased(quality_type['quality']['identifier'] in pre_releases, release_dates, movie['library']['year']):
                too_early_to_search.append(quality_type['quality']['identifier'])
                continue

            has_better_quality = 0

            # See if better quality is available
            for release in movie['releases']:
                if release['quality']['order'] <= quality_type['quality']['order'] and release['status_id'] not in [available_status.get('id'), ignored_status.get('id'), failed_status.get('id')]:
                    has_better_quality += 1

            # Don't search for quality lower then already available.
            if has_better_quality == 0:

                log.info('Search for %s in %s', (default_title, quality_type['quality']['label']))
                quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)

                results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
                if len(results) == 0:
                    log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))

                # Check if movie isn't deleted while searching
                if not db.query(Media).filter_by(id = movie.get('id')).first():
                    break

                # Add them to this movie releases list
                found_releases += fireEvent('release.create_from_search', results, movie, quality_type, single = True)

                # Try find a valid result and download it
                if fireEvent('release.try_download_result', results, movie, quality_type, manual, single = True):
                    ret = True

                # Remove releases that aren't found anymore
                for release in movie.get('releases', []):
                    if release.get('status_id') == available_status.get('id') and release.get('identifier') not in found_releases:
                        fireEvent('release.delete', release.get('id'), single = True)

            else:
                log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title))
                fireEvent('media.restatus', movie['id'])
                break

            # Break if CP wants to shut down
            if self.shuttingDown() or ret:
                break

        if len(too_early_to_search) > 0:
            log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))

        fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'id': movie['id']})

        return ret

    def correctRelease(self, nzb = None, media = None, quality = None, **kwargs):
        """Validate a provider result against retention, required/ignored words,
        quality, size and (when there is no imdb match) title + year.

        Returns True for a usable release, False to reject, None when the
        media is not a movie.
        """

        if media.get('type') != 'movie': return

        media_title = fireEvent('library.title', media['library'], single = True)

        imdb_results = kwargs.get('imdb_results', False)

        retention = Env.setting('retention', section = 'nzb')

        # Retention only applies to usenet results (no 'seeders' field)
        if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
            log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
            return False

        # Check for required and ignored words
        if not fireEvent('searcher.correct_words', nzb['name'], media, single = True):
            return False

        preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)

        # Contains lower quality string
        if fireEvent('searcher.contains_other_quality', nzb, movie_year = media['library']['year'], preferred_quality = preferred_quality, single = True):
            log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
            return False

        # File to small
        if nzb['size'] and preferred_quality['size_min'] > nzb['size']:
            log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
            return False

        # File to large
        if nzb['size'] and preferred_quality.get('size_max') < nzb['size']:
            log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
            return False

        # Provider specific functions
        get_more = nzb.get('get_more_info')
        if get_more:
            get_more(nzb)

        extra_check = nzb.get('extra_check')
        if extra_check and not extra_check(nzb):
            return False

        if imdb_results:
            return True

        # Check if nzb contains imdb link
        if getImdb(nzb.get('description', '')) == media['library']['identifier']:
            return True

        for raw_title in media['library']['titles']:
            for movie_title in possibleTitles(raw_title['title']):
                movie_words = re.split(r'\W+', simplifyString(movie_title))

                if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True):
                    # if no IMDB link, at least check year range 1
                    if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 1, single = True):
                        return True

                    # if no IMDB link, at least check year
                    if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 0, single = True):
                        return True

        log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['library']['year']))
        return False

    def couldBeReleased(self, is_pre_release, dates, year = None):
        """Heuristic: could this movie plausibly be available for download yet?

        :param is_pre_release: quality is a pre-release (cam/ts/...) quality
        :param dates: dict with epoch 'theater'/'dvd' keys (may be missing/0)
        :param year: release year of the movie, if known
        """

        now = int(time.time())
        now_year = date.today().year

        # Old movie (or unknown year) with no known dates: assume released
        if (year is None or year < now_year - 1) and (not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0)):
            return True
        else:

            # Don't allow movies with years to far in the future
            if year is not None and year > now_year + 1:
                return False

            # For movies before 1972
            if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
                return True

            if is_pre_release:
                # Prerelease 1 week before theaters
                if dates.get('theater', 0) - 604800 < now:
                    return True
            else:
                # 12 weeks after theater release
                if dates.get('theater', 0) > 0 and dates.get('theater', 0) + 7257600 < now:
                    return True

                if dates.get('dvd', 0) > 0:

                    # 4 weeks before dvd release
                    if dates.get('dvd', 0) - 2419200 < now:
                        return True

                    # Dvd should be released
                    if dates.get('dvd', 0) < now:
                        return True

        return False

    def tryNextReleaseView(self, id = None, **kwargs):
        """API view wrapper around tryNextRelease()."""

        trynext = self.tryNextRelease(id, manual = True)

        return {
            'success': trynext
        }

    def tryNextRelease(self, media_id, manual = False):
        """Ignore all snatched/done releases of a movie and search again."""

        snatched_status, done_status, ignored_status = fireEvent('status.get', ['snatched', 'done', 'ignored'], single = True)

        try:
            db = get_session()
            rels = db.query(Release) \
                .filter_by(media_id = media_id) \
                .filter(Release.status_id.in_([snatched_status.get('id'), done_status.get('id')])) \
                .all()

            for rel in rels:
                rel.status_id = ignored_status.get('id')
            db.commit()

            movie_dict = fireEvent('media.get', media_id = media_id, single = True)
            log.info('Trying next release for: %s', getTitle(movie_dict['library']))
            fireEvent('movie.searcher.single', movie_dict, manual = manual)

            return True

        except Exception:
            log.error('Failed searching for next release: %s', traceback.format_exc())
            return False
class SearchSetupError(Exception):
    """Raised when the search protocols could not be set up (e.g. no usable
    downloaders or providers are configured)."""

View File

@@ -0,0 +1,107 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Media, Library
from couchpotato.environment import Env
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_
class Suggestion(Plugin):
    """Serve movie suggestions based on the user's wanted/done movies and keep
    the suggestion cache in sync when items are ignored, seen or added.

    Fix over the previous revision: ignoreView() now coerces `limit` to int
    before using it in slice arithmetic; API kwargs arrive as strings, which
    previously raised TypeError (suggestView already did the coercion).
    """

    def __init__(self):
        addApiView('suggestion.view', self.suggestView)
        addApiView('suggestion.ignore', self.ignoreView)

    def suggestView(self, limit = 6, **kwargs):
        """API view: return up to `limit` suggestions, using the cache when warm."""

        movies = splitString(kwargs.get('movies', ''))
        ignored = splitString(kwargs.get('ignored', ''))
        seen = splitString(kwargs.get('seen', ''))

        cached_suggestion = self.getCache('suggestion_cached')
        if cached_suggestion:
            suggestions = cached_suggestion
        else:

            # No explicit seed list: use the user's active/done movies
            if not movies or len(movies) == 0:
                db = get_session()
                active_movies = db.query(Media) \
                    .options(joinedload_all('library')) \
                    .filter(or_(*[Media.status.has(identifier = s) for s in ['active', 'done']])).all()
                movies = [x.library.identifier for x in active_movies]

            if not ignored or len(ignored) == 0:
                ignored = splitString(Env.prop('suggest_ignore', default = ''))
            if not seen or len(seen) == 0:
                movies.extend(splitString(Env.prop('suggest_seen', default = '')))

            suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
            self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

        return {
            'success': True,
            'count': len(suggestions),
            'suggestions': suggestions[:int(limit)]
        }

    def ignoreView(self, imdb = None, limit = 6, remove_only = False, mark_seen = False, **kwargs):
        """API view: ignore or mark-seen a suggestion and return a replacement.

        :param remove_only: drop the card without adding to the ignore list
        :param mark_seen: record as seen instead of ignored
        """

        # API params arrive as strings; make limit numeric before slicing below
        limit = int(limit)

        ignored = splitString(Env.prop('suggest_ignore', default = ''))
        seen = splitString(Env.prop('suggest_seen', default = ''))

        new_suggestions = []
        if imdb:
            if mark_seen:
                seen.append(imdb)
                Env.prop('suggest_seen', ','.join(set(seen)))
            elif not remove_only:
                ignored.append(imdb)
                Env.prop('suggest_ignore', ','.join(set(ignored)))

            new_suggestions = self.updateSuggestionCache(ignore_imdb = imdb, limit = limit, ignored = ignored, seen = seen)

        return {
            'result': True,
            'ignore_count': len(ignored),
            # Return only the one suggestion that replaces the removed card
            'suggestions': new_suggestions[limit - 1:limit]
        }

    def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None):
        """Drop `ignore_imdb` from the cached suggestions and top the cache up
        with fresh results when it runs low; returns the new cached list."""

        # Combine with previous suggestion_cache
        cached_suggestion = self.getCache('suggestion_cached') or []
        new_suggestions = []
        ignored = [] if not ignored else ignored
        seen = [] if not seen else seen

        if ignore_imdb:
            for cs in cached_suggestion:
                if cs.get('imdb') != ignore_imdb:
                    new_suggestions.append(cs)

        # Get new results and add them
        if len(new_suggestions) - 1 < limit:
            active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)

            db = get_session()
            active_movies = db.query(Media) \
                .join(Library) \
                .with_entities(Library.identifier) \
                .filter(Media.status_id.in_([active_status.get('id'), done_status.get('id')])).all()
            movies = [x[0] for x in active_movies]
            movies.extend(seen)

            # Never re-suggest what is already on screen
            ignored.extend([x.get('imdb') for x in cached_suggestion])
            suggestions = fireEvent('movie.suggest', movies = movies, ignore = list(set(ignored)), single = True)

            if suggestions:
                new_suggestions.extend(suggestions)

        self.setCache('suggestion_cached', new_suggestions, timeout = 3024000)

        return new_suggestions

View File

@@ -0,0 +1,160 @@
/* Suggestion panel ("You might like these") shown on the home page.
   Cards are .media_result blocks: poster thumbnail on the left (100px),
   data/info pane on the right, hover actions in the top-right corner. */
.suggestions {
}

.suggestions > h2 {
	height: 40px;
}

/* Three cards per row by default, responsive below */
.suggestions .media_result {
	display: inline-block;
	width: 33.333%;
	height: 150px;
}

@media all and (max-width: 960px) {
	.suggestions .media_result {
		width: 50%;
	}
}

@media all and (max-width: 600px) {
	.suggestions .media_result {
		width: 100%;
	}
}

/* Data pane sits to the right of the 100px-wide poster */
.suggestions .media_result .data {
	left: 100px;
	background: #4e5969;
	border: none;
}

.suggestions .media_result .data .info {
	top: 10px;
	left: 15px;
	right: 15px;
	bottom: 10px;
	overflow: hidden;
}

.suggestions .media_result .data .info h2 {
	white-space: normal;
	max-height: 120px;
	font-size: 18px;
	line-height: 18px;
}

/* Rating / genres / year flow inline under the title, slightly dimmed */
.suggestions .media_result .data .info .rating,
.suggestions .media_result .data .info .genres,
.suggestions .media_result .data .info .year {
	position: static;
	display: block;
	padding: 0;
	opacity: .6;
}

.suggestions .media_result .data .info .year {
	margin: 10px 0 0;
}

.suggestions .media_result .data .info .rating {
	font-size: 20px;
	float: right;
	margin-top: -20px;
}

/* Star glyph from the icon font before the rating number */
.suggestions .media_result .data .info .rating:before {
	content: "\e031";
	font-family: 'Elusive-Icons';
	font-size: 14px;
	margin: 0 5px 0 0;
	vertical-align: bottom;
}

.suggestions .media_result .data .info .genres {
	font-size: 11px;
	font-style: italic;
	text-align: right;
}

/* Plot starts collapsed below the title; clicking toggles .full (see JS) */
.suggestions .media_result .data .info .plot {
	display: block;
	font-size: 11px;
	overflow: hidden;
	text-align: justify;
	height: 100%;
	z-index: 2;
	top: 64px;
	position: absolute;
	background: #4e5969;
	cursor: pointer;
	transition: all .4s ease-in-out;
	padding: 0 3px 10px 0;
}

/* Fade-out gradient at the bottom of the data pane over the plot text */
.suggestions .media_result .data:before {
	bottom: 0;
	content: '';
	display: block;
	height: 10px;
	right: 0;
	left: 0;
	bottom: 10px;
	position: absolute;
	background: linear-gradient(
		0deg,
		rgba(78, 89, 105, 1) 0%,
		rgba(78, 89, 105, 0) 100%
	);
	z-index: 3;
	pointer-events: none;
}

.suggestions .media_result .data .info .plot.full {
	top: 0;
	overflow: auto;
}

.suggestions .media_result .data {
	cursor: default;
}

/* Add-movie options overlay, aligned with the data pane */
.suggestions .media_result .options {
	left: 100px;
}

.suggestions .media_result .options select[name=title] { width: 100%; }
.suggestions .media_result .options select[name=profile] { width: 100%; }
.suggestions .media_result .options select[name=category] { width: 100%; }

.suggestions .media_result .button {
	position: absolute;
	margin: 2px 0 0 0;
	right: 15px;
	bottom: 15px;
}

.suggestions .media_result .thumbnail {
	width: 100px;
}

/* Hover-only action icons (add / imdb / trailer / ignore / seen) */
.suggestions .media_result .actions {
	position: absolute;
	top: 10px;
	right: 10px;
	display: none;
	width: 140px;
}

.suggestions .media_result:hover .actions {
	display: block;
}

.suggestions .media_result:hover h2 .title {
	opacity: 0;
}

/* Hide actions while the add-options pane is open */
.suggestions .media_result .data.open .actions {
	display: none;
}

.suggestions .media_result .actions a {
	margin-left: 10px;
	vertical-align: middle;
}

View File

@@ -0,0 +1,153 @@
// Renders the "You might like these" suggestion panel and keeps it in sync
// with the 'suggestion.view' / 'suggestion.ignore' API endpoints.
var SuggestList = new Class({

	Implements: [Options, Events],

	initialize: function(options){
		var self = this;
		self.setOptions(options);

		self.create();
	},

	// Build the container, wire delegated click handlers for the per-card
	// ignore/seen icons, and request the initial suggestions.
	create: function(){
		var self = this;

		self.el = new Element('div.suggestions', {
			'events': {
				// "Don't suggest again": remove the card, tell the backend to ignore it
				'click:relay(a.delete)': function(e, el){
					(e).stop();
					$(el).getParent('.media_result').destroy();
					Api.request('suggestion.ignore', {
						'data': {
							'imdb': el.get('data-ignore')
						},
						'onComplete': self.fill.bind(self)
					});
				},
				// "Seen it": same as ignore, but records the movie as seen
				'click:relay(a.eye-open)': function(e, el){
					(e).stop();
					$(el).getParent('.media_result').destroy();
					Api.request('suggestion.ignore', {
						'data': {
							'imdb': el.get('data-seen'),
							'mark_seen': 1
						},
						'onComplete': self.fill.bind(self)
					});
				}
			}
		}).grab(
			new Element('h2', {
				'text': 'You might like these'
			})
		);

		self.api_request = Api.request('suggestion.view', {
			'onComplete': self.fill.bind(self)
		});
	},

	// Populate the panel from an API response ({count, suggestions: [...]});
	// hides the panel entirely when there is nothing to show.
	fill: function(json){

		var self = this;

		if(!json || json.count == 0){
			self.el.hide();
		}
		else {

			Object.each(json.suggestions, function(movie){

				var m = new Block.Search.MovieItem(movie, {
					'onAdded': function(){
						self.afterAdded(m, movie)
					}
				});
				// Per-card action icons: add, imdb link, trailer, ignore, seen
				m.data_container.grab(
					new Element('div.actions').adopt(
						new Element('a.add.icon2', {
							'title': 'Add movie with your default quality',
							'data-add': movie.imdb,
							'events': {
								'click': m.showOptions.bind(m)
							}
						}),
						$(new MA.IMDB(m)),
						$(new MA.Trailer(m, {
							'height': 150
						})),
						new Element('a.delete.icon2', {
							'title': 'Don\'t suggest this movie again',
							'data-ignore': movie.imdb
						}),
						new Element('a.eye-open.icon2', {
							'title': 'Seen it, like it, don\'t add',
							'data-seen': movie.imdb
						})
					)
				);
				// Options pane is opened via the add icon, not the whole card
				m.data_container.removeEvents('click');

				var plot = false;
				if(m.info.plot && m.info.plot.length > 0)
					plot = m.info.plot;

				// Add rating
				m.info_container.adopt(
					m.rating = m.info.rating && m.info.rating.imdb.length == 2 && parseFloat(m.info.rating.imdb[0]) > 0 ? new Element('span.rating', {
						'text': parseFloat(m.info.rating.imdb[0]),
						'title': parseInt(m.info.rating.imdb[1]) + ' votes'
					}) : null,
					m.genre = m.info.genres && m.info.genres.length > 0 ? new Element('span.genres', {
						'text': m.info.genres.slice(0, 3).join(', ')
					}) : null,
					m.plot = plot ? new Element('span.plot', {
						'text': plot,
						'events': {
							// Toggle between collapsed and full plot view (see CSS .plot.full)
							'click': function(){
								this.toggleClass('full')
							}
						}
					}) : null
				)

				$(m).inject(self.el);

			});

		}

		self.fireEvent('loaded');

	},

	// After a movie is added: wait a moment, remove its card and ask the
	// backend not to suggest it again (remove_only keeps it off the ignore list)
	afterAdded: function(m, movie){
		var self = this;

		setTimeout(function(){
			$(m).destroy();
			Api.request('suggestion.ignore', {
				'data': {
					'imdb': movie.imdb,
					'remove_only': true
				},
				'onComplete': self.fill.bind(self)
			});
		}, 3000);
	},

	toElement: function(){
		return this.el;
	}

})

View File

@@ -0,0 +1,6 @@
from .main import ShowBase
def start():
    """Plugin entry point: create the show media-type handler."""
    handler = ShowBase()
    return handler
config = []  # The show media type exposes no user-facing settings yet

View File

@@ -0,0 +1,239 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from couchpotato.core.settings.model import Media
import time
log = CPLog(__name__)
class ShowBase(MediaBase):
    """Base media handler for TV shows: adds a show plus its seasons and
    episodes to the database and wires up the 'show.add' API/event.

    Fixes over the previous revision:
    - No mutable default arguments (params = {} was shared between calls).
    - add() returns the parent show dict, so addView() no longer always
      reports success = False.
    - rel.status_id compared with '==' instead of 'is' (ints above 256 are
      not identity-cached, so 'is' silently fails).
    """

    _type = 'show'

    def __init__(self):
        super(ShowBase, self).__init__()
        addApiView('show.add', self.addView, docs = {
            'desc': 'Add new movie to the wanted list',
            'params': {
                'identifier': {'desc': 'IMDB id of the movie your want to add.'},
                'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
                'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
            }
        })

        addEvent('show.add', self.add)

    def addView(self, **kwargs):
        """API view wrapper around add()."""
        add_dict = self.add(params = kwargs)

        return {
            'success': True if add_dict else False,
            'show': add_dict,
        }

    def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None):
        """Add a show and all its seasons/episodes; returns the parent show dict.

        params
        {'category_id': u'-1',
         'identifier': u'tt1519931',
         'profile_id': u'12',
         'thetvdb_id': u'158661',
         'title': u'Haven'}
        """
        params = params or {}
        log.debug('show.add')

        # Add show parent to db first; need to update library so maps will be in place (if any)
        parent = self.addToDatabase(params = params, update_library = True, type = 'show')

        # TODO: add by airdate

        # Add by Season/Episode numbers; return the parent so addView can
        # report success correctly
        return self.addBySeasonEpisode(parent,
            params = params,
            force_readd = force_readd,
            search_after = search_after,
            update_library = update_library,
            status_id = status_id
        )

    def addBySeasonEpisode(self, parent, params = None, force_readd = True, search_after = True, update_library = False, status_id = None):
        """Walk the show's seasons/episodes (tvdb numbering) and add each one."""
        params = params or {}
        identifier = params.get('id')

        # 'tvdb' will always be the master for our purpose. All mapped data can be mapped
        # to another source for downloading, but it will always be remapped back to tvdb numbering
        # when renamed so media can be used in media players that use tvdb for info provider
        #
        # This currently means the episode must actually exist in tvdb in order to be found but
        # the numbering can be different

        #master = 'tvdb'
        #destination = 'scene'
        #destination = 'anidb'
        #destination = 'rage'
        #destination = 'trakt'

        # TODO: auto mode. if anime exists use it. if scene exists use it else use tvdb
        # XXX: We should abort adding show, etc if either tvdb or xem is down or we will have incorrent mappings
        # I think if tvdb gets error we wont have anydata anyway, but we must make sure XEM returns!!!!

        # Only the master should return results here; all other info providers should just return False
        # since we are just interested in the structure at this point.
        seasons = fireEvent('season.info', merge = True, identifier = identifier)
        if seasons is not None:
            for season in seasons:
                # Make sure we are only dealing with 'tvdb' responses at this point
                if season.get('primary_provider', None) != 'thetvdb':
                    continue
                season_id = season.get('id', None)
                if season_id is None: continue

                season_params = {'season_identifier': season_id}
                # Calling all info providers; merge your info now for individual season
                single_season = fireEvent('season.info', merge = True, identifier = identifier, params = season_params)
                single_season['category_id'] = params.get('category_id')
                single_season['profile_id'] = params.get('profile_id')
                single_season['title'] = single_season.get('original_title', None)
                single_season['identifier'] = season_id
                single_season['parent_identifier'] = identifier
                log.info('Adding Season %s', season_id)
                self.addToDatabase(params = single_season, type = 'season')

                episode_params = {'season_identifier': season_id}
                episodes = fireEvent('episode.info', merge = True, identifier = identifier, params = episode_params)
                if episodes is not None:
                    for episode in episodes:
                        # Make sure we are only dealing with 'tvdb' responses at this point
                        if episode.get('primary_provider', None) != 'thetvdb':
                            continue
                        episode_id = episode.get('id', None)
                        if episode_id is None: continue

                        try:
                            episode_number = int(episode.get('episodenumber', None))
                        except (ValueError, TypeError):
                            continue
                        try:
                            absolute_number = int(episode.get('absolute_number', None))
                        except (ValueError, TypeError):
                            absolute_number = None

                        episode_params = {'season_identifier': season_id,
                                          'episode_identifier': episode_id,
                                          'episode': episode_number}
                        if absolute_number:
                            episode_params['absolute'] = absolute_number

                        # Calling all info providers; merge your info now for individual episode
                        single_episode = fireEvent('episode.info', merge = True, identifier = identifier, params = episode_params)
                        single_episode['category_id'] = params.get('category_id')
                        single_episode['profile_id'] = params.get('profile_id')
                        single_episode['title'] = single_episode.get('original_title', None)
                        single_episode['identifier'] = episode_id
                        single_episode['parent_identifier'] = single_season['identifier']
                        log.info('Adding [%sx%s] %s - %s', (season_id, episode_number,
                                                           params['title'], single_episode.get('original_title', '')))
                        self.addToDatabase(params = single_episode, type = 'episode')

        # Start searching now that all the media has been added
        if search_after:
            onComplete = self.createOnComplete(parent['id'])
            onComplete()

        return parent

    def addToDatabase(self, params = None, type = 'show', force_readd = True, search_after = False, update_library = False, status_id = None):
        """Create or update one media row (show/season/episode); returns its dict
        or False when no identifier/library is available."""
        params = params or {}
        log.debug('show.addToDatabase')

        if not params.get('identifier'):
            msg = 'Can\'t add show without imdb identifier.'
            log.error(msg)
            fireEvent('notify.frontend', type = 'show.is_tvshow', message = msg)
            return False
        #else:
            #try:
                #is_show = fireEvent('movie.is_show', identifier = params.get('identifier'), single = True)
                #if not is_show:
                    #msg = 'Can\'t add show, seems to be a TV show.'
                    #log.error(msg)
                    #fireEvent('notify.frontend', type = 'show.is_tvshow', message = msg)
                    #return False
            #except:
                #pass

        library = fireEvent('library.add.%s' % type, single = True, attrs = params, update_after = update_library)
        if not library:
            return False

        # Status
        status_active, snatched_status, ignored_status, done_status, downloaded_status = \
            fireEvent('status.get', ['active', 'snatched', 'ignored', 'done', 'downloaded'], single = True)

        default_profile = fireEvent('profile.default', single = True)
        cat_id = params.get('category_id', None)

        db = get_session()
        m = db.query(Media).filter_by(library_id = library.get('id')).first()
        added = True
        do_search = False
        if not m:
            m = Media(
                type = type,
                library_id = library.get('id'),
                profile_id = params.get('profile_id', default_profile.get('id')),
                status_id = status_id if status_id else status_active.get('id'),
                category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None,
            )
            db.add(m)
            db.commit()

            onComplete = None
            if search_after:
                onComplete = self.createOnComplete(m.id)

            fireEventAsync('library.update.%s' % type, params.get('identifier'), default_title = params.get('title', ''), on_complete = onComplete)
            # The async library update triggers the search via on_complete
            search_after = False
        elif force_readd:
            # Clean snatched history
            for release in m.releases:
                if release.status_id in [downloaded_status.get('id'), snatched_status.get('id'), done_status.get('id')]:
                    if params.get('ignore_previous', False):
                        release.status_id = ignored_status.get('id')
                    else:
                        fireEvent('release.delete', release.id, single = True)

            m.profile_id = params.get('profile_id', default_profile.get('id'))
            m.category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None
        else:
            log.debug('Show already exists, not updating: %s', params)
            added = False

        if force_readd:
            m.status_id = status_id if status_id else status_active.get('id')
            m.last_edit = int(time.time())
            do_search = True

        db.commit()

        # Remove releases
        available_status = fireEvent('status.get', 'available', single = True)
        for rel in m.releases:
            # '==', not 'is': ids are plain ints and identity is unreliable > 256
            if rel.status_id == available_status.get('id'):
                db.delete(rel)
                db.commit()

        show_dict = m.to_dict(self.default_dict)

        if do_search and search_after:
            onComplete = self.createOnComplete(m.id)
            onComplete()

        if added:
            fireEvent('notify.frontend', type = 'show.added', data = show_dict, message = 'Successfully added "%s" to your wanted list.' % params.get('title', ''))

        db.expire_all()
        return show_dict

View File

@@ -0,0 +1,232 @@
Block.Search.ShowItem = new Class({

	Implements: [Options, Events],

	// One search-result row for a show; `info` is the provider metadata dict.
	initialize: function(info, options){
		var self = this;
		self.setOptions(options);

		self.info = info;
		self.alternative_titles = [];

		self.create();
	},

	// Build the DOM for this result (thumbnail, title, year).
	create: function(){
		var self = this,
			info = self.info;

		// FIX: also check `info.images.poster` itself; the original read
		// `poster.length` directly and threw when `images` existed without
		// a poster list.
		var has_poster = info.images && info.images.poster && info.images.poster.length > 0;

		self.el = new Element('div.media_result', {
			'id': info.id
		}).adopt(
			self.thumbnail = has_poster ? new Element('img.thumbnail', {
				'src': info.images.poster[0],
				'height': null,
				'width': null
			}) : null,
			self.options_el = new Element('div.options.inlay'),
			self.data_container = new Element('div.data', {
				'events': {
					'click': self.showOptions.bind(self)
				}
			}).adopt(
				self.info_container = new Element('div.info').adopt(
					new Element('h2').adopt(
						self.title = new Element('span.title', {
							'text': info.titles && info.titles.length > 0 ? info.titles[0] : 'Unknown'
						}),
						self.year = info.year ? new Element('span.year', {
							'text': info.year
						}) : null
					)
				)
			)
		);

		// Register every known title as a selectable alternative.
		if(info.titles)
			info.titles.each(function(title){
				self.alternativeTitle({
					'title': title
				});
			});
	},

	alternativeTitle: function(alternative){
		var self = this;
		self.alternative_titles.include(alternative);
	},

	// Preferred display title; falls back through titles[0] to 'Unknown'.
	getTitle: function(){
		var self = this;
		try {
			return self.info.original_title ? self.info.original_title : self.info.titles[0];
		}
		catch(e){
			return 'Unknown';
		}
	},

	get: function(key){
		return this.info[key]
	},

	showOptions: function(){
		var self = this;

		self.createOptions();

		self.data_container.addClass('open');
		self.el.addEvent('outerClick', self.closeOptions.bind(self))
	},

	closeOptions: function(){
		var self = this;

		self.data_container.removeClass('open');
		self.el.removeEvents('outerClick')
	},

	// Submit the selected title/profile/category to the show.add API.
	add: function(e){
		var self = this;

		if(e)
			(e).preventDefault();

		self.loadingMask();

		Api.request('show.add', {
			'data': {
				'identifier': self.info.id,
				'id': self.info.id,
				'type': self.info.type,
				'primary_provider': self.info.primary_provider,
				'title': self.title_select.get('value'),
				'profile_id': self.profile_select.get('value'),
				'category_id': self.category_select.get('value')
			},
			'onComplete': function(json){
				self.options_el.empty();
				self.options_el.adopt(
					new Element('div.message', {
						'text': json.added ? 'Show successfully added.' : 'Show didn\'t add properly. Check logs'
					})
				);
				self.mask.fade('out');

				self.fireEvent('added');
			},
			'onFailure': function(){
				self.options_el.empty();
				self.options_el.adopt(
					new Element('div.message', {
						'text': 'Something went wrong, check the logs for more info.'
					})
				);
				self.mask.fade('out');
			}
		});
	},

	// Lazily build the title/profile/category selectors; auto-adds when the
	// user has no real choice to make.
	createOptions: function(){
		var self = this,
			info = self.info;

		if(!self.options_el.hasClass('set')){

			// Stays undefined (falsy) when the show isn't in the library yet.
			var in_library;
			if(self.info.in_library){
				in_library = [];
				self.info.in_library.releases.each(function(release){
					in_library.include(release.quality.label)
				});
			}

			self.options_el.grab(
				new Element('div', {
					'class': self.info.in_wanted && self.info.in_wanted.profile_id || in_library ? 'in_library_wanted' : ''
				}).adopt(
					self.info.in_wanted && self.info.in_wanted.profile_id ? new Element('span.in_wanted', {
						'text': 'Already in wanted list: ' + Quality.getProfile(self.info.in_wanted.profile_id).get('label')
					}) : (in_library ? new Element('span.in_library', {
						'text': 'Already in library: ' + in_library.join(', ')
					}) : null),
					self.title_select = new Element('select', {
						'name': 'title'
					}),
					self.profile_select = new Element('select', {
						'name': 'profile'
					}),
					self.category_select = new Element('select', {
						'name': 'category'
					}).grab(
						new Element('option', {'value': -1, 'text': 'None'})
					),
					self.add_button = new Element('a.button', {
						'text': 'Add',
						'events': {
							'click': self.add.bind(self)
						}
					})
				)
			);

			Array.each(self.alternative_titles, function(alt){
				new Element('option', {
					'text': alt.title
				}).inject(self.title_select)
			});

			// Fill categories
			var categories = CategoryList.getAll();

			if(categories.length == 0)
				self.category_select.hide();
			else {
				self.category_select.show();
				categories.each(function(category){
					new Element('option', {
						'value': category.data.id,
						'text': category.data.label
					}).inject(self.category_select);
				});
			}

			// Fill profiles
			var profiles = Quality.getActiveProfiles();
			if(profiles.length == 1)
				self.profile_select.hide();

			profiles.each(function(profile){
				new Element('option', {
					'value': profile.id ? profile.id : profile.data.id,
					'text': profile.label ? profile.label : profile.data.label
				}).inject(self.profile_select)
			});

			self.options_el.addClass('set');

			// Only one possible title/profile, no categories, and not already
			// wanted or in the library: add immediately.
			if(categories.length == 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
					!(self.info.in_wanted && self.info.in_wanted.profile_id || in_library))
				self.add();

		}
	},

	loadingMask: function(){
		var self = this;

		self.mask = new Element('div.mask').inject(self.el).fade('hide');

		createSpinner(self.mask);

		self.mask.fade('in')
	},

	toElement: function(){
		return this.el
	}

});

View File

@@ -0,0 +1,6 @@
from .main import EpisodeLibraryPlugin


def start():
    # Loader entry point: instantiating the plugin registers its events.
    return EpisodeLibraryPlugin()

# No user-facing settings for this plugin.
config = []

View File

@@ -0,0 +1,266 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.settings.model import EpisodeLibrary, SeasonLibrary, LibraryTitle, File
from couchpotato.core.media._base.library.base import LibraryBase
from couchpotato.core.helpers.variable import tryInt
from string import ascii_letters
import time
import traceback
log = CPLog(__name__)
class EpisodeLibraryPlugin(LibraryBase):
    """Library plugin for episode-level records.

    Registers event handlers to build search queries/identifiers for
    episodes and to add/update EpisodeLibrary rows in the database.
    """

    # Relations eagerly included when serializing rows with to_dict()
    default_dict = {'titles': {}, 'files': {}}

    def __init__(self):
        addEvent('library.query', self.query)
        addEvent('library.identifier', self.identifier)
        addEvent('library.add.episode', self.add)
        addEvent('library.update.episode', self.update)
        addEvent('library.update.episode_release_date', self.updateReleaseDate)

    def query(self, library, first = True, condense = True, include_identifier = True, **kwargs):
        """Return search title(s) for an episode library dict.

        Titles come from the parent season; when include_identifier is set an
        'E%02d' suffix is appended. Returns one title if `first`, else a list.
        """
        # FIX: was `library is list` — an identity check against the type
        # object, which can never be True; isinstance was intended.
        if isinstance(library, list) or library.get('type') != 'episode':
            return

        # Get the titles of the season
        if not library.get('related_libraries', {}).get('season', []):
            log.warning('Invalid library, unable to determine title.')
            return

        titles = fireEvent(
            'library.query',
            library['related_libraries']['season'][0],
            first = False,
            include_identifier = include_identifier,
            condense = condense,
            single = True
        )

        # The season query can fail and return None; don't crash below.
        if not titles:
            return None if first else []

        identifier = fireEvent('library.identifier', library, single = True)

        # Add episode identifier to titles
        if include_identifier and identifier.get('episode'):
            titles = [title + ('E%02d' % identifier['episode']) for title in titles]

        if first:
            return titles[0] if titles else None

        return titles

    def identifier(self, library):
        """Return {'season': int|None, 'episode': int|None} for an episode."""
        if library.get('type') != 'episode':
            return

        identifier = {
            'season': None,
            'episode': None
        }

        scene_map = library['info'].get('map_episode', {}).get('scene')

        if scene_map:
            # Use scene mappings if they are available
            identifier['season'] = scene_map.get('season')
            identifier['episode'] = scene_map.get('episode')
        else:
            # Fallback to normal season/episode numbers
            identifier['season'] = library.get('season_number')
            identifier['episode'] = library.get('episode_number')

        # Cast identifiers to integers
        # TODO this will need changing to support identifiers with trailing 'a', 'b' characters
        identifier['season'] = tryInt(identifier['season'], None)
        identifier['episode'] = tryInt(identifier['episode'], None)

        return identifier

    def add(self, attrs = None, update_after = True):
        """Insert an EpisodeLibrary row (if missing), optionally trigger update.

        update_after may be False, True (synchronous) or 'async'.
        """
        if attrs is None:  # avoid the shared mutable default argument
            attrs = {}

        type = attrs.get('type', 'episode')
        primary_provider = attrs.get('primary_provider', 'thetvdb')

        db = get_session()
        parent_identifier = attrs.get('parent_identifier', None)

        parent = None
        if parent_identifier:
            parent = db.query(SeasonLibrary).filter_by(primary_provider = primary_provider, identifier = attrs.get('parent_identifier')).first()

        l = db.query(EpisodeLibrary).filter_by(type = type, identifier = attrs.get('identifier')).first()
        if not l:
            status = fireEvent('status.get', 'needs_update', single = True)
            l = EpisodeLibrary(
                type = type,
                primary_provider = primary_provider,
                year = attrs.get('year'),
                identifier = attrs.get('identifier'),
                plot = toUnicode(attrs.get('plot')),
                tagline = toUnicode(attrs.get('tagline')),
                status_id = status.get('id'),
                info = {},
                parent = parent,
                season_number = tryInt(attrs.get('seasonnumber', None)),
                episode_number = tryInt(attrs.get('episodenumber', None)),
                absolute_number = tryInt(attrs.get('absolute_number', None))
            )

            title = LibraryTitle(
                title = toUnicode(attrs.get('title')),
                simple_title = self.simplifyTitle(attrs.get('title')),
            )

            l.titles.append(title)

            db.add(l)
            db.commit()

        # Update library info
        if update_after is not False:
            # FIX: was `update_after is 'async'` — identity comparison with a
            # string literal; use equality.
            handle = fireEventAsync if update_after == 'async' else fireEvent
            handle('library.update.episode', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))

        library_dict = l.to_dict(self.default_dict)
        db.expire_all()
        return library_dict

    def update(self, identifier, default_title = '', force = False):
        """Refresh info, titles and poster files for an episode library row."""
        if self.shuttingDown():
            return

        db = get_session()
        library = db.query(EpisodeLibrary).filter_by(identifier = identifier).first()

        # Row can disappear between queueing and execution
        if not library:
            db.expire_all()
            return False

        done_status = fireEvent('status.get', 'done', single = True)
        library_dict = library.to_dict(self.default_dict)

        do_update = True

        parent_identifier = None
        if library.parent is not None:
            parent_identifier = library.parent.identifier

        if library.status_id == done_status.get('id') and not force:
            do_update = False

        episode_params = {
            'season_identifier': parent_identifier,
            'episode_identifier': identifier,
            'episode': library.episode_number,
            'absolute': library.absolute_number,
        }
        info = fireEvent('episode.info', merge = True, params = episode_params)

        # Don't need those here; guard since info can be None
        if info:
            info.pop('in_wanted', None)
            info.pop('in_library', None)

        if not info or len(info) == 0:
            # FIX: message said 'movie' for an episode update
            log.error('Could not update, no episode info to work with: %s', identifier)
            return False

        # Main info
        if do_update:
            library.plot = toUnicode(info.get('plot', ''))
            library.tagline = toUnicode(info.get('tagline', ''))
            library.year = info.get('year', 0)
            library.status_id = done_status.get('id')
            library.season_number = tryInt(info.get('seasonnumber', None))
            library.episode_number = tryInt(info.get('episodenumber', None))
            library.absolute_number = tryInt(info.get('absolute_number', None))
            try:
                library.last_updated = int(info.get('lastupdated'))
            except:
                # lastupdated can be missing or non-numeric
                library.last_updated = int(time.time())
            library.info.update(info)

            db.commit()

            # Titles
            [db.delete(title) for title in library.titles]
            db.commit()

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)

            counter = 0
            for title in titles:
                if not title:
                    continue
                title = toUnicode(title)
                t = LibraryTitle(
                    title = title,
                    simple_title = self.simplifyTitle(title),
                    default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
                )
                library.titles.append(t)
                counter += 1

            db.commit()

            # Files
            # FIX: default must be a dict — `.get()` below fails on a list
            images = info.get('images', {})
            for image_type in ['poster']:
                for image in images.get(image_type, []):
                    if not isinstance(image, (str, unicode)):
                        continue
                    file_path = fireEvent('file.download', url = image, single = True)
                    if file_path:
                        file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
                        try:
                            file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
                            library.files.append(file_obj)
                            db.commit()
                            break
                        except:
                            log.debug('Failed to attach to library: %s', traceback.format_exc())

        library_dict = library.to_dict(self.default_dict)
        db.expire_all()
        return library_dict

    def updateReleaseDate(self, identifier):
        """XXX: Not sure what this is for yet in relation to an episode"""
        # TODO port the movie release-date refresh once episode air dates are needed
        pass

    # TODO: Add to base class
    def simplifyTitle(self, title):
        """Return a simplified title; non-letter leading char adds a '#' prefix."""
        title = toUnicode(title)

        # Guard: empty title would raise IndexError on title[0]
        nr_prefix = '' if title and title[0] in ascii_letters else '#'
        title = simplifyString(title)

        for prefix in ['the ']:
            if prefix == title[:len(prefix)]:
                title = title[len(prefix):]
                break

        return nr_prefix + title

View File

@@ -0,0 +1,6 @@
from .main import SeasonLibraryPlugin


def start():
    # Loader entry point: instantiating the plugin registers its events.
    return SeasonLibraryPlugin()

# No user-facing settings for this plugin.
config = []

View File

@@ -0,0 +1,242 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.settings.model import SeasonLibrary, ShowLibrary, LibraryTitle, File
from couchpotato.core.media._base.library.base import LibraryBase
from couchpotato.core.helpers.variable import tryInt
from string import ascii_letters
import time
import traceback
log = CPLog(__name__)
class SeasonLibraryPlugin(LibraryBase):
    """Library plugin for season-level records.

    Registers event handlers to build search queries/identifiers for
    seasons and to add/update SeasonLibrary rows in the database.
    """

    # Relations eagerly included when serializing rows with to_dict()
    default_dict = {'titles': {}, 'files': {}}

    def __init__(self):
        addEvent('library.query', self.query)
        addEvent('library.identifier', self.identifier)
        addEvent('library.add.season', self.add)
        addEvent('library.update.season', self.update)
        addEvent('library.update.season_release_date', self.updateReleaseDate)

    def query(self, library, first = True, condense = True, include_identifier = True, **kwargs):
        """Return search title(s) for a season: show titles plus season names.

        Appends an ' S%02d' identifier when requested. Returns a single title
        if `first`, else a list.
        """
        # FIX: was `library is list` — identity check against the type object,
        # which can never be True; isinstance was intended.
        if isinstance(library, list) or library.get('type') != 'season':
            return

        # Get the titles of the show
        if not library.get('related_libraries', {}).get('show', []):
            log.warning('Invalid library, unable to determine title.')
            return

        # Show query can return None; fall back to an empty list
        titles = fireEvent(
            'library.query',
            library['related_libraries']['show'][0],
            first = False,
            condense = condense,
            single = True
        ) or []

        # Add season map_names if they exist
        if 'map_names' in library['info']:
            season_names = library['info']['map_names'].get(str(library['season_number']), {})

            # Add titles from all locations
            # TODO only add name maps from a specific location
            for location, names in season_names.items():
                titles += [name for name in names if name and name not in titles]

        identifier = fireEvent('library.identifier', library, single = True)

        # Add season identifier to titles
        if include_identifier and identifier.get('season') is not None:
            titles = [title + (' S%02d' % identifier['season']) for title in titles]

        if first:
            return titles[0] if titles else None

        return titles

    def identifier(self, library):
        """Return {'season': int|None} for a season library dict."""
        if library.get('type') != 'season':
            return

        return {
            'season': tryInt(library['season_number'], None)
        }

    def add(self, attrs = None, update_after = True):
        """Insert a SeasonLibrary row (if missing), optionally trigger update.

        update_after may be False, True (synchronous) or 'async'.
        """
        if attrs is None:  # avoid the shared mutable default argument
            attrs = {}

        type = attrs.get('type', 'season')
        primary_provider = attrs.get('primary_provider', 'thetvdb')

        db = get_session()
        parent_identifier = attrs.get('parent_identifier', None)

        parent = None
        if parent_identifier:
            parent = db.query(ShowLibrary).filter_by(primary_provider = primary_provider, identifier = attrs.get('parent_identifier')).first()

        l = db.query(SeasonLibrary).filter_by(type = type, identifier = attrs.get('identifier')).first()
        if not l:
            status = fireEvent('status.get', 'needs_update', single = True)
            l = SeasonLibrary(
                type = type,
                primary_provider = primary_provider,
                year = attrs.get('year'),
                identifier = attrs.get('identifier'),
                plot = toUnicode(attrs.get('plot')),
                tagline = toUnicode(attrs.get('tagline')),
                status_id = status.get('id'),
                info = {},
                parent = parent,
            )

            title = LibraryTitle(
                title = toUnicode(attrs.get('title')),
                simple_title = self.simplifyTitle(attrs.get('title')),
            )

            l.titles.append(title)

            db.add(l)
            db.commit()

        # Update library info
        if update_after is not False:
            # FIX: was `update_after is 'async'` — identity comparison with a
            # string literal; use equality.
            handle = fireEventAsync if update_after == 'async' else fireEvent
            handle('library.update.season', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))

        library_dict = l.to_dict(self.default_dict)
        db.expire_all()
        return library_dict

    def update(self, identifier, default_title = '', force = False):
        """Refresh info, titles and poster files for a season library row."""
        if self.shuttingDown():
            return

        db = get_session()
        library = db.query(SeasonLibrary).filter_by(identifier = identifier).first()

        # Row can disappear between queueing and execution
        if not library:
            db.expire_all()
            return False

        done_status = fireEvent('status.get', 'done', single = True)
        library_dict = library.to_dict(self.default_dict)

        do_update = True

        parent_identifier = None
        if library.parent is not None:
            parent_identifier = library.parent.identifier

        if library.status_id == done_status.get('id') and not force:
            do_update = False

        season_params = {'season_identifier': identifier}
        info = fireEvent('season.info', merge = True, identifier = parent_identifier, params = season_params)

        # Don't need those here; guard since info can be None
        if info:
            info.pop('in_wanted', None)
            info.pop('in_library', None)

        if not info or len(info) == 0:
            # FIX: message said 'movie' for a season update
            log.error('Could not update, no season info to work with: %s', identifier)
            return False

        # Main info
        if do_update:
            library.plot = toUnicode(info.get('plot', ''))
            library.tagline = toUnicode(info.get('tagline', ''))
            library.year = info.get('year', 0)
            library.status_id = done_status.get('id')
            library.season_number = tryInt(info.get('seasonnumber', None))
            library.info.update(info)

            db.commit()

            # Titles
            [db.delete(title) for title in library.titles]
            db.commit()

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)

            counter = 0
            for title in titles:
                if not title:
                    continue
                title = toUnicode(title)
                t = LibraryTitle(
                    title = title,
                    simple_title = self.simplifyTitle(title),
                    # XXX: default was None; quick hack since we don't really need titles for seasons anyway
                    default = True,
                )
                library.titles.append(t)
                counter += 1

            db.commit()

            # Files
            # FIX: default must be a dict — `.get()` below fails on a list
            images = info.get('images', {})
            for image_type in ['poster']:
                for image in images.get(image_type, []):
                    if not isinstance(image, (str, unicode)):
                        continue
                    file_path = fireEvent('file.download', url = image, single = True)
                    if file_path:
                        file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
                        try:
                            file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
                            library.files.append(file_obj)
                            db.commit()
                            break
                        except:
                            log.debug('Failed to attach to library: %s', traceback.format_exc())

        library_dict = library.to_dict(self.default_dict)
        db.expire_all()
        return library_dict

    def updateReleaseDate(self, identifier):
        """XXX: Not sure what this is for yet in relation to a tvshow"""
        # TODO port the movie release-date refresh once season air dates are needed
        pass

    # TODO: Add to base class
    def simplifyTitle(self, title):
        """Return a simplified title; non-letter leading char adds a '#' prefix."""
        title = toUnicode(title)

        # Guard: empty title would raise IndexError on title[0]
        nr_prefix = '' if title and title[0] in ascii_letters else '#'
        title = simplifyString(title)

        for prefix in ['the ']:
            if prefix == title[:len(prefix)]:
                title = title[len(prefix):]
                break

        return nr_prefix + title

View File

@@ -0,0 +1,6 @@
from .main import ShowLibraryPlugin


def start():
    # Loader entry point: instantiating the plugin registers its events.
    return ShowLibraryPlugin()

# No user-facing settings for this plugin.
config = []

View File

@@ -0,0 +1,229 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.settings.model import ShowLibrary, LibraryTitle, File
from couchpotato.core.media._base.library.base import LibraryBase
from qcond.helpers import simplify
from qcond import QueryCondenser
from string import ascii_letters
import time
import traceback
log = CPLog(__name__)
class ShowLibraryPlugin(LibraryBase):
    """Library plugin for show-level records.

    Registers event handlers to build (optionally condensed) search queries
    for shows and to add/update ShowLibrary rows in the database.
    """

    # Relations eagerly included when serializing rows with to_dict()
    default_dict = {'titles': {}, 'files': {}}

    def __init__(self):
        self.query_condenser = QueryCondenser()

        addEvent('library.query', self.query)
        addEvent('library.add.show', self.add)
        addEvent('library.update.show', self.update)
        addEvent('library.update.show_release_date', self.updateReleaseDate)

    def query(self, library, first = True, condense = True, **kwargs):
        """Return distinct search title(s) for a show.

        When `condense` is set, QueryCondenser reduces the title list to the
        minimal distinct set; falls back to per-title simplification.
        """
        # FIX: was `library is list` — identity check against the type object,
        # which can never be True; isinstance was intended.
        if isinstance(library, list) or library.get('type') != 'show':
            return

        titles = [title['title'] for title in library['titles']]

        if condense:
            # Use QueryCondenser to build a list of optimal search titles
            condensed_titles = self.query_condenser.distinct(titles)

            if condensed_titles:
                # Use condensed titles if we got a valid result
                titles = condensed_titles
            else:
                # Fallback to simplifying titles
                titles = [simplify(title) for title in titles]

        if first:
            return titles[0] if titles else None

        return titles

    def add(self, attrs = None, update_after = True):
        """Insert a ShowLibrary row (if missing), optionally trigger update.

        update_after may be False, True (synchronous) or 'async'.
        """
        if attrs is None:  # avoid the shared mutable default argument
            attrs = {}

        type = attrs.get('type', 'show')
        primary_provider = attrs.get('primary_provider', 'thetvdb')

        db = get_session()

        l = db.query(ShowLibrary).filter_by(type = type, identifier = attrs.get('identifier')).first()
        if not l:
            status = fireEvent('status.get', 'needs_update', single = True)
            l = ShowLibrary(
                type = type,
                primary_provider = primary_provider,
                year = attrs.get('year'),
                identifier = attrs.get('identifier'),
                plot = toUnicode(attrs.get('plot')),
                tagline = toUnicode(attrs.get('tagline')),
                status_id = status.get('id'),
                info = {},
                parent = None,
            )

            title = LibraryTitle(
                title = toUnicode(attrs.get('title')),
                simple_title = self.simplifyTitle(attrs.get('title')),
            )

            l.titles.append(title)

            db.add(l)
            db.commit()

        # Update library info
        if update_after is not False:
            # FIX: was `update_after is 'async'` — identity comparison with a
            # string literal; use equality.
            handle = fireEventAsync if update_after == 'async' else fireEvent
            handle('library.update.show', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))

        library_dict = l.to_dict(self.default_dict)
        db.expire_all()
        return library_dict

    def update(self, identifier, default_title = '', force = False):
        """Refresh info, titles and poster files for a show library row."""
        if self.shuttingDown():
            return

        db = get_session()
        library = db.query(ShowLibrary).filter_by(identifier = identifier).first()

        # Row can disappear between queueing and execution
        if not library:
            db.expire_all()
            return False

        done_status = fireEvent('status.get', 'done', single = True)
        library_dict = library.to_dict(self.default_dict)

        do_update = True

        if library.status_id == done_status.get('id') and not force:
            do_update = False

        info = fireEvent('show.info', merge = True, identifier = identifier)

        # Don't need those here; guard since info can be None
        if info:
            info.pop('in_wanted', None)
            info.pop('in_library', None)

        if not info or len(info) == 0:
            log.error('Could not update, no show info to work with: %s', identifier)
            return False

        # Main info
        if do_update:
            library.plot = toUnicode(info.get('plot', ''))
            library.tagline = toUnicode(info.get('tagline', ''))
            library.year = info.get('year', 0)
            library.status_id = done_status.get('id')
            library.show_status = toUnicode(info.get('status', '').lower())
            library.airs_time = info.get('airs_time', None)

            # Bits: bitmask of airing days, 'Daily' sets all seven
            days_of_week_map = {
                u'Monday': 1,
                u'Tuesday': 2,
                u'Wednesday': 4,
                u'Thursday': 8,
                u'Friday': 16,
                u'Saturday': 32,
                u'Sunday': 64,
                u'Daily': 127,
            }
            try:
                library.airs_dayofweek = days_of_week_map.get(info.get('airs_dayofweek'))
            except:
                # Unhashable / unexpected value from the provider
                library.airs_dayofweek = 0

            try:
                library.last_updated = int(info.get('lastupdated'))
            except:
                # lastupdated can be missing or non-numeric
                library.last_updated = int(time.time())

            library.info.update(info)
            db.commit()

            # Titles
            [db.delete(title) for title in library.titles]
            db.commit()

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)

            counter = 0
            for title in titles:
                if not title:
                    continue
                title = toUnicode(title)
                t = LibraryTitle(
                    title = title,
                    simple_title = self.simplifyTitle(title),
                    default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
                )
                library.titles.append(t)
                counter += 1

            db.commit()

            # Files
            # FIX: default must be a dict — `.get()` below fails on a list
            images = info.get('images', {})
            for image_type in ['poster']:
                for image in images.get(image_type, []):
                    if not isinstance(image, (str, unicode)):
                        continue
                    file_path = fireEvent('file.download', url = image, single = True)
                    if file_path:
                        file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
                        try:
                            file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
                            library.files.append(file_obj)
                            db.commit()
                            break
                        except:
                            log.debug('Failed to attach to library: %s', traceback.format_exc())

        library_dict = library.to_dict(self.default_dict)
        db.expire_all()
        return library_dict

    def updateReleaseDate(self, identifier):
        """XXX: Not sure what this is for yet in relation to a show"""
        # TODO port the movie release-date refresh once show air dates are needed
        pass

    # TODO: Add to base class
    def simplifyTitle(self, title):
        """Return a simplified title; non-letter leading char adds a '#' prefix."""
        title = toUnicode(title)

        # Guard: empty title would raise IndexError on title[0]
        nr_prefix = '' if title and title[0] in ascii_letters else '#'
        title = simplifyString(title)

        for prefix in ['the ']:
            if prefix == title[:len(prefix)]:
                title = title[len(prefix):]
                break

        return nr_prefix + title

View File

@@ -0,0 +1,6 @@
from .main import ShowMatcher


def start():
    # Loader entry point: instantiating the matcher registers its events.
    return ShowMatcher()

# No user-facing settings for this plugin.
config = []

View File

@@ -0,0 +1,127 @@
from couchpotato import CPLog
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import dictIsSubset, tryInt, toIterable
from couchpotato.core.media._base.matcher.base import MatcherBase
from couchpotato.core.providers.base import MultiProvider
log = CPLog(__name__)
class ShowMatcher(MultiProvider):
    """Multi-type matcher: dispatches to the Season and Episode variants."""

    def getTypes(self):
        # Season and Episode are defined later in this module; MultiProvider
        # instantiates one matcher per returned type.
        return [Season, Episode]
class Base(MatcherBase):
    """Shared matching logic for season and episode releases.

    Subclasses set `type` and implement correctIdentifier().
    """

    # TODO come back to this later, think this could be handled better, this is starting to get out of hand....
    quality_map = {
        'bluray_1080p': {'resolution': ['1080p'], 'source': ['bluray']},
        'bluray_720p': {'resolution': ['720p'], 'source': ['bluray']},

        'bdrip_1080p': {'resolution': ['1080p'], 'source': ['BDRip']},
        'bdrip_720p': {'resolution': ['720p'], 'source': ['BDRip']},

        'brrip_1080p': {'resolution': ['1080p'], 'source': ['BRRip']},
        'brrip_720p': {'resolution': ['720p'], 'source': ['BRRip']},

        'webdl_1080p': {'resolution': ['1080p'], 'source': ['webdl', ['web', 'dl']]},
        'webdl_720p': {'resolution': ['720p'], 'source': ['webdl', ['web', 'dl']]},
        'webdl_480p': {'resolution': ['480p'], 'source': ['webdl', ['web', 'dl']]},

        'hdtv_720p': {'resolution': ['720p'], 'source': ['hdtv']},
        'hdtv_sd': {'resolution': ['480p', None], 'source': ['hdtv']},
    }

    def __init__(self):
        super(Base, self).__init__()

        # `self.type` is provided by the subclass (Season / Episode)
        addEvent('%s.matcher.correct_identifier' % self.type, self.correctIdentifier)

    def correct(self, chain, release, media, quality):
        """Return True when the parsed release matches quality, identifier and title."""
        log.info("Checking if '%s' is valid", release['name'])
        log.info2('Release parsed as: %s', chain.info)

        if not fireEvent('matcher.correct_quality', chain, quality, self.quality_map, single = True):
            log.info('Wrong: %s, quality does not match', release['name'])
            return False

        if not fireEvent('%s.matcher.correct_identifier' % self.type, chain, media):
            log.info('Wrong: %s, identifier does not match', release['name'])
            return False

        if not fireEvent('matcher.correct_title', chain, media):
            log.info("Wrong: '%s', undetermined naming.", (' '.join(chain.info['show_name'])))
            return False

        return True

    def correctIdentifier(self, chain, media):
        raise NotImplementedError()

    def getChainIdentifier(self, chain):
        """Flatten the chain's parsed identifier into int-valued season/episode keys.

        Returns None when no identifier was parsed or multiple conflicting
        values were found.
        """
        if 'identifier' not in chain.info:
            return None

        identifier = self.flattenInfo(chain.info['identifier'])

        # Try cast values to integers
        for key, value in identifier.items():
            if isinstance(value, list):
                if len(value) > 1:
                    log.warning('Wrong: identifier contains multiple season or episode values, unsupported')
                    return None
                # FIX: was `value[0]` unconditionally when len <= 1, which
                # raised IndexError on an empty list.
                value = value[0] if value else None

            identifier[key] = tryInt(value, value)

        return identifier
class Episode(Base):
    """Matcher variant that validates episode releases."""

    type = 'episode'

    def correctIdentifier(self, chain, media):
        """Return True when the release's parsed identifier matches the wanted episode."""
        release_id = self.getChainIdentifier(chain)
        if not release_id:
            log.info2('Wrong: release identifier is not valid (unsupported or missing identifier)')
            return False

        # TODO - Parse episode ranges from identifier to determine if they are multi-part episodes
        if 'episode_from' in release_id or 'episode_to' in release_id:
            log.info2('Wrong: releases with identifier ranges are not supported yet')
            return False

        wanted_id = fireEvent('library.identifier', media['library'], single = True)

        # TODO - Support air by date episodes
        # TODO - Support episode parts

        if release_id != wanted_id:
            log.info2('Wrong: required identifier (%s) does not match release identifier (%s)', (wanted_id, release_id))
            return False

        return True
class Season(Base):
    """Matcher variant that validates season-pack releases."""

    type = 'season'

    def correctIdentifier(self, chain, media):
        """Return True when the release's parsed identifier matches the wanted season."""
        release_id = self.getChainIdentifier(chain)
        if not release_id:
            log.info2('Wrong: release identifier is not valid (unsupported or missing identifier)')
            return False

        # TODO - Parse episode ranges from identifier to determine if they are season packs
        if 'episode_from' in release_id or 'episode_to' in release_id:
            log.info2('Wrong: releases with identifier ranges are not supported yet')
            return False

        wanted_id = fireEvent('library.identifier', media['library'], single = True)

        if release_id != wanted_id:
            log.info2('Wrong: required identifier (%s) does not match release identifier (%s)', (wanted_id, release_id))
            return False

        return True

View File

@@ -0,0 +1,7 @@
from .main import ShowSearcher


def start():
    # Loader entry point: instantiating the searcher registers its events.
    # FIX: removed unused `import random` — nothing in this module used it.
    return ShowSearcher()

# No user-facing settings for this plugin.
config = []

View File

@@ -0,0 +1,189 @@
from couchpotato import Env, get_session
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import getTitle, toIterable
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.main import SearchSetupError
from couchpotato.core.media.show._base import ShowBase
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Media
from qcond import QueryCondenser
from qcond.helpers import simplify
log = CPLog(__name__)
class ShowSearcher(Plugin):
type = ['show', 'season', 'episode']
in_progress = False
def __init__(self):
super(ShowSearcher, self).__init__()
self.query_condenser = QueryCondenser()
for type in toIterable(self.type):
addEvent('%s.searcher.single' % type, self.single)
addEvent('searcher.correct_release', self.correctRelease)
def single(self, media, search_protocols = None, manual = False):
show, season, episode = self.getLibraries(media['library'])
db = get_session()
if media['type'] == 'show':
for library in season:
# TODO ideally we shouldn't need to fetch the media for each season library here
m = db.query(Media).filter_by(library_id = library['library_id']).first()
fireEvent('season.searcher.single', m.to_dict(ShowBase.search_dict))
return
# Find out search type
try:
if not search_protocols:
search_protocols = fireEvent('searcher.protocols', single = True)
except SearchSetupError:
return
done_status, available_status, ignored_status, failed_status = fireEvent('status.get', ['done', 'available', 'ignored', 'failed'], single = True)
if not media['profile'] or media['status_id'] == done_status.get('id'):
log.debug('Episode doesn\'t have a profile or already done, assuming in manage tab.')
return
#pre_releases = fireEvent('quality.pre_releases', single = True)
found_releases = []
too_early_to_search = []
default_title = fireEvent('library.query', media['library'], condense = False, single=True)
if not default_title:
log.error('No proper info found for episode, removing it from library to cause it from having more issues.')
#fireEvent('episode.delete', episode['id'], single = True)
return
if not show or not season:
log.error('Unable to find show or season library in database, missing required data for searching')
return
fireEvent('notify.frontend', type = 'show.searcher.started.%s' % media['id'], data = True, message = 'Searching for "%s"' % default_title)
ret = False
has_better_quality = None
for quality_type in media['profile']['types']:
# TODO check air date?
#if not self.conf('always_search') and not self.couldBeReleased(quality_type['quality']['identifier'] in pre_releases, release_dates, movie['library']['year']):
# too_early_to_search.append(quality_type['quality']['identifier'])
# continue
has_better_quality = 0
# See if better quality is available
for release in media['releases']:
if release['quality']['order'] <= quality_type['quality']['order'] and release['status_id'] not in [available_status.get('id'), ignored_status.get('id'), failed_status.get('id')]:
has_better_quality += 1
# Don't search for quality lower then already available.
if has_better_quality is 0:
log.info('Search for %s S%02d%s in %s', (
getTitle(show),
season['season_number'],
"E%02d" % episode['episode_number'] if episode and len(episode) == 1 else "",
quality_type['quality']['label'])
)
quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)
results = fireEvent('searcher.search', search_protocols, media, quality, single = True)
if len(results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))
# Check if movie isn't deleted while searching
if not db.query(Media).filter_by(id = media.get('id')).first():
break
# Add them to this movie releases list
found_releases += fireEvent('release.create_from_search', results, media, quality_type, single = True)
# Try find a valid result and download it
if fireEvent('release.try_download_result', results, media, quality_type, manual, single = True):
ret = True
# Remove releases that aren't found anymore
for release in media.get('releases', []):
if release.get('status_id') == available_status.get('id') and release.get('identifier') not in found_releases:
fireEvent('release.delete', release.get('id'), single = True)
else:
log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title))
fireEvent('media.restatus', media['id'])
break
# Break if CP wants to shut down
if self.shuttingDown() or ret:
break
if len(too_early_to_search) > 0:
log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
elif media['type'] == 'season' and not ret and has_better_quality is 0:
# If nothing was found, start searching for episodes individually
log.info('No season pack found, starting individual episode search')
for library in episode:
# TODO ideally we shouldn't need to fetch the media for each episode library here
m = db.query(Media).filter_by(library_id = library['library_id']).first()
fireEvent('episode.searcher.single', m.to_dict(ShowBase.search_dict))
fireEvent('notify.frontend', type = 'show.searcher.ended.%s' % media['id'], data = True)
return ret
def correctRelease(self, release = None, media = None, quality = None, **kwargs):
    """Validate a found release against retention, word filters and the matcher.

    Returns the matcher weight (a truthy score) when the release looks correct,
    False when it is rejected, or None when the media type is not handled here.
    """
    if media.get('type') not in ('season', 'episode'):
        return

    retention = Env.setting('retention', section = 'nzb')

    # Releases without a seeder count (usenet) must still be inside the retention window
    outside_retention = 0 < retention < release.get('age', 0)
    if release.get('seeders') is None and outside_retention:
        log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (release['age'], retention, release['name']))
        return False

    # Check for required and ignored words
    if not fireEvent('searcher.correct_words', release['name'], media, single = True):
        return False

    # TODO Matching is quite costly, maybe we should be caching release matches somehow? (also look at caper optimizations)
    match = fireEvent('matcher.match', release, media, quality, single = True)
    return match.weight if match else False
def getLibraries(self, library):
    """Collapse the related libraries of *library* into (show, season, episode).

    A level is collapsed to its single entry when the subject's type makes
    multiples impossible; otherwise the raw list is returned for that level.
    Returns (None, None, None) when no related libraries are attached.
    """
    if 'related_libraries' not in library:
        log.warning("'related_libraries' missing from media library, unable to continue searching")
        return None, None, None

    related = library['related_libraries']
    media_type = library['type']

    # Show always collapses as there can never be any multiples
    shows = related.get('show', [])
    show = shows[0] if shows else None

    # Season collapses if the subject is a season or episode
    season = related.get('season', [])
    if media_type in ('season', 'episode'):
        season = season[0] if season else None

    # Episode collapses if the subject is an episode
    episode = related.get('episode', [])
    if media_type == 'episode':
        episode = episode[0] if episode else None

    return show, season, episode

View File

@@ -0,0 +1,17 @@
from migrate.changeset.schema import create_column
from sqlalchemy.schema import MetaData, Column, Table, Index
from sqlalchemy.types import Integer
meta = MetaData()
def upgrade(migrate_engine):
    """Add the 'category_id' column to the 'movie' table and index it."""
    meta.bind = migrate_engine

    # Declare the new column and attach it to the existing movie table
    new_column = Column('category_id', Integer)
    movie_table = Table('movie', meta, new_column)

    create_column(new_column, movie_table)
    Index('ix_movie_category_id', movie_table.c.category_id).create()
def downgrade(migrate_engine):
    """Intentionally a no-op: the added column and index are left in place."""

View File

@@ -1,4 +1,4 @@
config = {
config = [{
'name': 'notification_providers',
'groups': [
{
@@ -10,4 +10,4 @@ config = {
'options': [],
},
],
}
}]

View File

@@ -1,6 +1,5 @@
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.request import jsonified
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import Provider
from couchpotato.environment import Env
@@ -18,7 +17,7 @@ class Notification(Provider):
listen_to = [
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message',
'core.message.important',
]
dont_listen_to = []
@@ -33,7 +32,9 @@ class Notification(Provider):
addEvent(listener, self.createNotifyHandler(listener))
def createNotifyHandler(self, listener):
def notify(message = None, group = {}, data = None):
def notify(message = None, group = None, data = None):
if not group: group = {}
if not self.conf('on_snatch', default = True) and listener == 'movie.snatched':
return
return self._notify(message = message, data = data if data else group, listener = listener)
@@ -46,11 +47,12 @@ class Notification(Provider):
def _notify(self, *args, **kwargs):
if self.isEnabled():
return self.notify(*args, **kwargs)
return False
def notify(self, message = '', data = {}, listener = None):
pass
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
def test(self):
def test(self, **kwargs):
test_type = self.testNotifyName()
@@ -62,7 +64,9 @@ class Notification(Provider):
listener = 'test'
)
return jsonified({'success': success})
return {
'success': success
}
def testNotifyName(self):
return 'notify.%s.test' % self.getName().lower()

View File

@@ -10,19 +10,20 @@ class Boxcar(Notification):
url = 'https://boxcar.io/devices/providers/7MNNXY3UIzVBwvzkKwkC/notifications'
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
try:
message = message.strip()
params = {
data = {
'email': self.conf('email'),
'notification[from_screen_name]': self.default_title,
'notification[message]': toUnicode(message),
'notification[from_remote_service_id]': int(time.time()),
}
self.urlopen(self.url, params = params)
self.urlopen(self.url, data = data)
except:
log.error('Check your email and added services on boxcar.io')
return False

View File

@@ -2,15 +2,16 @@ from couchpotato import get_session
from couchpotato.api import addApiView, addNonBlockApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.request import jsonified, getParam
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from couchpotato.core.settings.model import Notification as Notif
from couchpotato.environment import Env
from operator import itemgetter
from sqlalchemy.sql.expression import or_
import threading
import time
import traceback
import uuid
log = CPLog(__name__)
@@ -18,9 +19,13 @@ log = CPLog(__name__)
class CoreNotifier(Notification):
m_lock = threading.Lock()
messages = []
listeners = []
m_lock = None
listen_to = [
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message', 'core.message.important',
]
def __init__(self):
super(CoreNotifier, self).__init__()
@@ -51,10 +56,15 @@ class CoreNotifier(Notification):
addApiView('notification.listener', self.listener)
fireEvent('schedule.interval', 'core.check_messages', self.checkMessages, hours = 12, single = True)
fireEvent('schedule.interval', 'core.clean_messages', self.cleanMessages, seconds = 15, single = True)
addEvent('app.load', self.clean)
addEvent('app.load', self.checkMessages)
self.messages = []
self.listeners = []
self.m_lock = threading.Lock()
def clean(self):
db = get_session()
@@ -62,11 +72,9 @@ class CoreNotifier(Notification):
db.commit()
def markAsRead(self):
def markAsRead(self, ids = None, **kwargs):
ids = None
if getParam('ids'):
ids = splitString(getParam('ids'))
ids = splitString(ids) if ids else None
db = get_session()
@@ -79,14 +87,13 @@ class CoreNotifier(Notification):
db.commit()
return jsonified({
return {
'success': True
})
}
def listView(self):
def listView(self, limit_offset = None, **kwargs):
db = get_session()
limit_offset = getParam('limit_offset', None)
q = db.query(Notif)
@@ -105,29 +112,33 @@ class CoreNotifier(Notification):
ndict['type'] = 'notification'
notifications.append(ndict)
return jsonified({
return {
'success': True,
'empty': len(notifications) == 0,
'notifications': notifications
})
}
def checkMessages(self):
prop_name = 'messages.last_check'
last_check = tryInt(Env.prop(prop_name, default = 0))
messages = fireEvent('cp.messages', last_check = last_check, single = True)
messages = fireEvent('cp.messages', last_check = last_check, single = True) or []
for message in messages:
if message.get('time') > last_check:
fireEvent('core.message', message = message.get('message'), data = message)
message['sticky'] = True # Always sticky core messages
message_type = 'core.message.important' if message.get('important') else 'core.message'
fireEvent(message_type, message = message.get('message'), data = message)
if last_check < message.get('time'):
last_check = message.get('time')
Env.prop(prop_name, value = last_check)
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
db = get_session()
@@ -148,7 +159,10 @@ class CoreNotifier(Notification):
return True
def frontend(self, type = 'notification', data = {}, message = None):
def frontend(self, type = 'notification', data = None, message = None):
if not data: data = {}
log.debug('Notifying frontend')
self.m_lock.acquire()
notification = {
@@ -168,10 +182,12 @@ class CoreNotifier(Notification):
'result': [notification],
})
except:
break
log.debug('Failed sending to listener: %s', traceback.format_exc())
self.listeners = []
self.m_lock.release()
self.cleanMessages()
log.debug('Done notifying frontend')
def addListener(self, callback, last_id = None):
@@ -183,59 +199,75 @@ class CoreNotifier(Notification):
'result': messages,
})
self.m_lock.acquire()
self.listeners.append((callback, last_id))
self.m_lock.release()
def removeListener(self, callback):
self.m_lock.acquire()
new_listeners = []
for list_tuple in self.listeners:
try:
listener, last_id = list_tuple
if listener == callback:
self.listeners.remove(list_tuple)
if listener != callback:
new_listeners.append(list_tuple)
except:
pass
def cleanMessages(self):
self.m_lock.acquire()
for message in self.messages:
if message['time'] < (time.time() - 15):
self.messages.remove(message)
log.debug('Failed removing listener: %s', traceback.format_exc())
self.listeners = new_listeners
self.m_lock.release()
def cleanMessages(self):
if len(self.messages) == 0:
return
log.debug('Cleaning messages')
self.m_lock.acquire()
time_ago = (time.time() - 15)
self.messages[:] = [m for m in self.messages if (m['time'] > time_ago)]
self.m_lock.release()
log.debug('Done cleaning messages')
def getMessages(self, last_id):
log.debug('Getting messages with id: %s', last_id)
self.m_lock.acquire()
recent = []
index = 0
for i in xrange(len(self.messages)):
index = len(self.messages) - i - 1
if self.messages[index]["message_id"] == last_id: break
recent = self.messages[index:]
try:
index = map(itemgetter('message_id'), self.messages).index(last_id)
recent = self.messages[index + 1:]
except:
pass
self.m_lock.release()
log.debug('Returning for %s %s messages', (last_id, len(recent)))
return recent or []
return recent
def listener(self):
def listener(self, init = False, **kwargs):
messages = []
# Get unread
if getParam('init'):
if init:
db = get_session()
notifications = db.query(Notif) \
.filter(or_(Notif.read == False, Notif.added > (time.time() - 259200))) \
.all()
for n in notifications:
ndict = n.to_dict()
ndict['type'] = 'notification'
messages.append(ndict)
return jsonified({
return {
'success': True,
'result': messages,
})
}

View File

@@ -10,8 +10,8 @@ var NotificationBase = new Class({
// Listener
App.addEvent('unload', self.stopPoll.bind(self));
App.addEvent('reload', self.startInterval.bind(self, [true]));
App.addEvent('notification', self.notify.bind(self));
App.addEvent('message', self.showMessage.bind(self));
App.on('notification', self.notify.bind(self));
App.on('message', self.showMessage.bind(self));
// Add test buttons to settings page
App.addEvent('load', self.addTestButtons.bind(self));
@@ -31,8 +31,8 @@ var NotificationBase = new Class({
});
window.addEvent('load', function(){
self.startInterval.delay($(window).getSize().x <= 480 ? 2000 : 300, self)
});
self.startInterval.delay($(window).getSize().x <= 480 ? 2000 : 100, self);
})
},
@@ -50,9 +50,9 @@ var NotificationBase = new Class({
, 'top');
self.notifications.include(result);
if(result.data.important !== undefined && !result.read){
if((result.data.important !== undefined || result.data.sticky !== undefined) && !result.read){
var sticky = true
App.fireEvent('message', [result.message, sticky, result])
App.trigger('message', [result.message, sticky, result])
}
else if(!result.read){
self.setBadge(self.notifications.filter(function(n){ return !n.read}).length)
@@ -147,7 +147,7 @@ var NotificationBase = new Class({
// Process data
if(json){
Array.each(json.result, function(result){
App.fireEvent(result.type, result);
App.trigger(result.type, result);
if(result.message && result.read === undefined)
self.showMessage(result.message);
})
@@ -157,7 +157,7 @@ var NotificationBase = new Class({
}
// Restart poll
self.startPoll()
self.startPoll.delay(1500, self);
},
showMessage: function(message, sticky, data){

View File

@@ -28,12 +28,23 @@ config = [{
'name': 'smtp_server',
'label': 'SMTP server',
},
{ 'name': 'smtp_port',
'label': 'SMTP server port',
'default': '25',
'type': 'int',
},
{
'name': 'ssl',
'label': 'Enable SSL',
'default': 0,
'type': 'bool',
},
{
'name': 'starttls',
'label': 'Enable StartTLS',
'default': 0,
'type': 'bool',
},
{
'name': 'smtp_user',
'label': 'SMTP user',

View File

@@ -2,7 +2,9 @@ from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from couchpotato.environment import Env
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid
import smtplib
import traceback
@@ -11,7 +13,8 @@ log = CPLog(__name__)
class Email(Notification):
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
# Extract all the settings from settings
from_address = self.conf('from')
@@ -20,18 +23,30 @@ class Email(Notification):
smtp_server = self.conf('smtp_server')
smtp_user = self.conf('smtp_user')
smtp_pass = self.conf('smtp_pass')
smtp_port = self.conf('smtp_port')
starttls = self.conf('starttls')
# Make the basic message
message = MIMEText(toUnicode(message))
message = MIMEText(toUnicode(message), _charset = Env.get('encoding'))
message['Subject'] = self.default_title
message['From'] = from_address
message['To'] = to_address
message['Date'] = formatdate(localtime = 1)
message['Message-ID'] = make_msgid()
try:
# Open the SMTP connection, via SSL if requested
log.debug("Connecting to host %s on port %s" % (smtp_server, smtp_port))
log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled"))
mailserver = smtplib.SMTP_SSL(smtp_server) if ssl == 1 else smtplib.SMTP(smtp_server)
if (starttls):
log.debug("Using StartTLS to initiate the connection with the SMTP server")
mailserver.starttls()
# Say hello to the server
mailserver.ehlo()
# Check to see if a login attempt should be attempted
if len(smtp_user) > 0:
log.debug("Logging on to SMTP server using username \'%s\'%s", (smtp_user, " and a password" if len(smtp_pass) > 0 else ""))

View File

@@ -43,7 +43,8 @@ class Growl(Notification):
else:
log.error('Failed register of growl: %s', traceback.format_exc())
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
self.register()

View File

@@ -1,7 +1,6 @@
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.request import getParams, jsonified
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import re
@@ -22,21 +21,17 @@ class NMJ(Notification):
addApiView(self.testNotifyName(), self.test)
addApiView('notify.nmj.auto_config', self.autoConfig)
def autoConfig(self):
def autoConfig(self, host = 'localhost', **kwargs):
params = getParams()
host = params.get('host', 'localhost')
database = ''
mount = ''
try:
terminal = telnetlib.Telnet(host)
except Exception:
log.error('Warning: unable to get a telnet session to %s', (host))
log.error('Warning: unable to get a telnet session to %s', host)
return self.failed()
log.debug('Connected to %s via telnet', (host))
log.debug('Connected to %s via telnet', host)
terminal.read_until('sh-3.00# ')
terminal.write('cat /tmp/source\n')
terminal.write('cat /tmp/netshare\n')
@@ -50,7 +45,7 @@ class NMJ(Notification):
device = match.group(2)
log.info('Found NMJ database %s on device %s', (database, device))
else:
log.error('Could not get current NMJ database on %s, NMJ is probably not running!', (host))
log.error('Could not get current NMJ database on %s, NMJ is probably not running!', host)
return self.failed()
if device.startswith('NETWORK_SHARE/'):
@@ -58,28 +53,29 @@ class NMJ(Notification):
if match:
mount = match.group().replace('127.0.0.1', host)
log.info('Found mounting url on the Popcorn Hour in configuration: %s', (mount))
log.info('Found mounting url on the Popcorn Hour in configuration: %s', mount)
else:
log.error('Detected a network share on the Popcorn Hour, but could not get the mounting url')
return self.failed()
return jsonified({
return {
'success': True,
'database': database,
'mount': mount,
})
}
def addToLibrary(self, message = None, group = {}):
def addToLibrary(self, message = None, group = None):
if self.isDisabled(): return
if not group: group = {}
host = self.conf('host')
mount = self.conf('mount')
database = self.conf('database')
if mount:
log.debug('Try to mount network drive via url: %s', (mount))
log.debug('Try to mount network drive via url: %s', mount)
try:
data = self.urlopen(mount)
self.urlopen(mount)
except:
return False
@@ -102,20 +98,24 @@ class NMJ(Notification):
et = etree.fromstring(response)
result = et.findtext('returnValue')
except SyntaxError, e:
log.error('Unable to parse XML returned from the Popcorn Hour: %s', (e))
log.error('Unable to parse XML returned from the Popcorn Hour: %s', e)
return False
if int(result) > 0:
log.error('Popcorn Hour returned an errorcode: %s', (result))
log.error('Popcorn Hour returned an errorcode: %s', result)
return False
else:
log.info('NMJ started background scan')
return True
def failed(self):
return jsonified({'success': False})
return {
'success': False
}
def test(self):
return jsonified({'success': self.addToLibrary()})
def test(self, **kwargs):
return {
'success': self.addToLibrary()
}

View File

@@ -1,38 +0,0 @@
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from flask.helpers import json
import base64
import traceback
log = CPLog(__name__)
class Notifo(Notification):
url = 'https://api.notifo.com/v1/send_notification'
def notify(self, message = '', data = {}, listener = None):
try:
params = {
'label': self.default_title,
'msg': toUnicode(message),
}
headers = {
'Authorization': "Basic %s" % base64.encodestring('%s:%s' % (self.conf('username'), self.conf('api_key')))[:-1]
}
handle = self.urlopen(self.url, params = params, headers = headers)
result = json.loads(handle)
if result['status'] != 'success' or result['response_message'] != 'OK':
raise Exception
except:
log.error('Notification failed: %s', traceback.format_exc())
return False
log.info('Notifo notification successful.')
return True

View File

@@ -8,19 +8,17 @@ log = CPLog(__name__)
class NotifyMyAndroid(Notification):
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
nma = pynma.PyNMA()
keys = splitString(self.conf('api_key'))
nma.addkey(keys)
nma.developerkey(self.conf('dev_key'))
# hacky fix for the event type
# as it seems to be part of the message now
self.event = message.split(' ')[0]
response = nma.push(
application = self.default_title,
event = self.event,
event = message.split(' ')[0],
description = message,
priority = self.conf('priority'),
batch_mode = len(keys) > 1

16
couchpotato/core/notifications/plex/__init__.py Normal file → Executable file
View File

@@ -17,10 +17,22 @@ config = [{
'type': 'enabler',
},
{
'name': 'host',
'name': 'media_server',
'label': 'Media Server',
'default': 'localhost',
'description': 'Default should be on localhost',
'description': 'Hostname/IP, default localhost'
},
{
'name': 'clients',
'default': '',
'description': 'Comma separated list of client names\'s (computer names). Top right when you start Plex'
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}

Some files were not shown because too many files have changed in this diff Show More