Compare commits

..

372 Commits

Author SHA1 Message Date
Ruud
796aff4514 Remove login_opener 2014-01-11 13:58:15 +01:00
Ruud
2a2fe448e7 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/providers/torrent/bitsoup/main.py
	couchpotato/core/providers/torrent/iptorrents/main.py
	couchpotato/core/providers/torrent/sceneaccess/main.py
	couchpotato/core/providers/torrent/torrentleech/main.py
	couchpotato/core/providers/torrent/torrentshack/main.py
2014-01-11 13:55:12 +01:00
Ruud
516cbd73bd Catch timeout errors when xbmc isn't available 2014-01-11 13:41:41 +01:00
Ruud Burger
680ae53cf4 Merge pull request #2682 from techmunk/deluge_improvements
Only request needed torrent ids from deluge.
2014-01-11 04:06:32 -08:00
Ruud Burger
23967d11dd Merge pull request #2681 from fuzeman/tv_searcher
[TV] Cleanup retrieval of media query and title
2014-01-11 04:02:04 -08:00
Techmunk
99b99a992d Only request needed torrent ids from deluge. 2014-01-11 15:46:24 +10:00
Dean Gardiner
7a3251f649 Added new version of 'library.title' to return the title of the media excluding year and identifiers. 2014-01-11 16:36:29 +13:00
Dean Gardiner
9ba8910281 Renamed 'library.title' to 'library.query' 2014-01-11 16:23:33 +13:00
Dean Gardiner
e83a3cf263 Renamed movie library.title 'include_identifier' to 'include_year', show library.title defaults to 'condense' enabled now. 2014-01-11 15:55:20 +13:00
Dean Gardiner
b3c2945d9b 'related_libraries' and 'root_library' references are now added to child libraries. 2014-01-11 15:11:32 +13:00
Dean Gardiner
fc3cf08675 Moved 'searcher.get_search_title' to 'library.title', include_identifier is enabled by default now and title condensing can be enabled by the 'condense' parameter now. 2014-01-11 15:11:31 +13:00
Ruud
fb9d52c2b9 Don't search for movies with year too far in the future 2014-01-11 00:26:59 +01:00
Ruud
5cc471cc87 Remove path on fail 2014-01-11 00:05:24 +01:00
Ruud
07c7171fbb Image download wasn't working anymore 2014-01-11 00:05:02 +01:00
Ruud
c15dd2dec9 Disable verify for now 2014-01-10 23:17:04 +01:00
Ruud
a408cc0246 Update renamer to not trigger twice
Keep track of status support on releases
2014-01-10 22:54:23 +01:00
Ruud
c2568432e7 Use requests lib for openurl 2014-01-10 14:04:16 +01:00
Ruud
91f3cda995 Update requests lib 2014-01-10 13:16:12 +01:00
Ruud
28aa908513 Add category_id to api docs 2014-01-08 00:08:23 +01:00
Ruud
5e24b11c21 Don't continue with bitsoup if table isn't found. fix #2633 2014-01-06 22:36:51 +01:00
Ruud Burger
8162cd31b7 Merge pull request #2652 from nikagl/patch-1
Library object has media instead of movies
2014-01-06 13:27:12 -08:00
Ruud
4cdf71513f Clean tags from beginning of string. fix #2654 2014-01-06 22:24:34 +01:00
Ruud
7e6d9c02f6 Add quality test name. closes #2664 2014-01-06 21:53:29 +01:00
Ruud
afc4f73e36 Don't try wait when not between time is given 2014-01-05 23:46:42 +01:00
Ruud
5ef0c52277 Create reusable url opener 2014-01-05 22:17:16 +01:00
Ruud
c23b014cff Set default timeout 2014-01-05 22:02:39 +01:00
Ruud
f13cddfb26 Don't return empty actor roles 2014-01-05 18:55:51 +01:00
Ruud
623f6f3ed0 Limit title and actor search for tmdb 2014-01-05 18:07:06 +01:00
Ruud
a158716c8b Move actor images to dict 2014-01-05 17:57:15 +01:00
Ruud
9df7f7b22c Speed up userscript info getter by removing actor info 2014-01-05 13:10:27 +01:00
nikagl
1ea6fdc9a7 Library object has media instead of movies
Make the renamer work again by scanning the media instead of non-existent movies attribute in the library object (fixing error: AttributeError: 'Library' object has no attribute 'movies')
2014-01-01 20:54:34 +01:00
Ruud
8e5c24282e Disable themoviedb in search 2013-12-31 13:12:34 +01:00
Ruud Burger
1b0c9f40cc Merge pull request #2647 from nikagl/patch-1
Update main.py
2013-12-31 02:21:41 -08:00
nikagl
c0111a467b Update main.py
Release table has media_id column, not movie_id
2013-12-31 11:02:32 +01:00
Ruud
266429311b Update Tornado 2013-12-30 23:27:40 +01:00
Ruud Burger
64175151f8 Merge pull request #2634 from dkboy/tv_bitsoup
Updated Bitsoup provider to include TV support
2013-12-29 15:35:50 -08:00
Ruud
d74342adee Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-12-30 00:34:33 +01:00
Ruud
4408d99524 Typo 2013-12-30 00:33:41 +01:00
Ruud Burger
0168b9cbea Merge pull request #2642 from mano3m/develop_renamer
Fix 100% CPU bugs
2013-12-29 15:06:05 -08:00
mano3m
e69421226b Remove leading '//' from *NIX paths
Fixes #2506,  #2021
2013-12-29 23:42:55 +01:00
mano3m
f08d34b816 Add a trailing separator for windows drive path
Fixes  #2581, #2526
2013-12-29 23:25:53 +01:00
dkboy
586957e840 Updated Bitsoup provider to include TV support
Updated Bitsoup Provider to include TV support as well as Movies.
2013-12-28 21:30:49 +13:00
Ruud Burger
4a36c3b6a8 Merge pull request #2631 from mano3m/develop_try_next
Download fixed
2013-12-27 11:18:07 -08:00
mano3m
be0b708d32 Add user-agent to newznab request
Fixes #2611

Note that urllib2.urlopen should just follow redirects so I don't
understand why we need 3b519aeac9
2013-12-27 20:11:27 +01:00
mano3m
1cea50bcfb Added logging 2013-12-27 19:34:53 +01:00
mano3m
55483cf736 Consider try_next as failed 2013-12-27 19:09:39 +01:00
Joel Kåberg
16f8a1159f Merge pull request #2624 from mano3m/develop_fix
Complete nzbget https
2013-12-23 14:22:50 -08:00
mano3m
d4d03a846e Complete nzbget https
Fixes what went broken :(
2013-12-23 23:08:26 +01:00
Ruud Burger
7bccc46583 Merge pull request #2623 from mano3m/develop_https
Add https functionality for nzbget
2013-12-23 12:35:21 -08:00
mano3m
dc61e9916f Add https functionality for nzbget
Fixes #2622
2013-12-23 15:39:45 +01:00
Joel Kåberg
cf2b5f72ae Revert "Added delete files button, #2596 (manuall merge)"
This reverts commit 0b01bbc52e.
2013-12-21 13:29:02 +01:00
Joel Kåberg
f2fc775963 Revert "Merge branches 'develop' and 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv"
This reverts commit b8bce948c8, reversing
changes made to 0a996857dd.
2013-12-20 02:29:15 +01:00
Joel Kåberg
b8bce948c8 Merge branches 'develop' and 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv
Conflicts:
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/providers/torrent/yify/main.py
2013-12-20 02:15:08 +01:00
Joel Kåberg
fe397caafc better score formula for seeding/leeching 2013-12-20 02:08:20 +01:00
Joel Kåberg
787405ae62 Updated YIFY provider to use proxies and magnet links, #2560 (manual merge) 2013-12-19 22:14:29 +01:00
Joel Kåberg
0b01bbc52e Added delete files button, #2596 (manual merge) 2013-12-19 22:12:22 +01:00
Joel Kåberg
dafa70b7e3 fix seed/lech score formula, fix #2605 2013-12-19 21:41:17 +01:00
Joel Kåberg
0a996857dd Merge branches 'develop' and 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv
Conflicts:
	couchpotato/core/plugins/release/main.py
	couchpotato/core/plugins/renamer/main.py
2013-12-19 20:57:12 +01:00
Joel Kåberg
32b9bc3345 Merge pull request #2612 from RuudBurger/manual_scan
Manual scan
2013-12-19 10:30:41 -08:00
Joel Kåberg
a7b8f992d3 Merge pull request #2614 from mano3m/develop_stalled
Don't consider stalled as failed when seeding
2013-12-19 10:30:23 -08:00
Joel Kåberg
0c66b8067e Merge pull request #2607 from mano3m/develop_no_ren
Mark release as downloaded if renamer is disabled.
2013-12-19 10:30:12 -08:00
mano3m
7b3645ea7c Don't consider stalled as failed when seeding
Fixes the issue where Transmission is seeding but still considering the
torrent stalled (new functionality of Transmission). CPS marks it as
failed and a perfectly good torrent gets deleted. Several people on the
forum have this issue.
2013-12-17 21:41:26 +01:00
mano3m
69569758d9 Make sure we return true on success 2013-12-16 22:51:04 +01:00
mano3m
55777531d5 Clean-up and dont mark status twice 2013-12-16 22:43:05 +01:00
Joel Kåberg
99ce8dacbf added api calls for manual scan (kudos to @mano3m) 2013-12-16 17:07:34 +01:00
Joel Kåberg
d49c663c64 Merge branches 'develop' and 'manual_scan' of https://github.com/RuudBurger/CouchPotatoServer into manual_scan
Conflicts:
	couchpotato/core/plugins/renamer/main.py
2013-12-16 07:29:31 +01:00
mano3m
e9a457e263 mark release as downloaded if renamer is disabled.
if the renamer is not enabled and the quality of the downloaded release
is not the finish quality, the release did not get a status update.
2013-12-15 21:03:40 +01:00
Joel Kåberg
26509f614c use identifier instead 2013-12-15 11:12:47 +01:00
Joel Kåberg
3e28d5a936 use year as identifier for movies 2013-12-15 11:02:57 +01:00
Joel Kåberg
95ff427873 ignore series from omdbapi (for now?) 2013-12-15 10:05:51 +01:00
Joel Kåberg
8ed10037df Merge pull request #2602 from saxicek/tv_tsh
update TorrentShack for tv branch
2013-12-15 00:37:59 -08:00
sax
7a090dd4a2 update TorrentShack for tv branch 2013-12-15 00:08:18 +01:00
Joel Kåberg
49f34cb48d movie > media 2013-12-14 23:57:20 +01:00
Joel Kåberg
2a76de50dd Merge branches 'develop' and 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv
Conflicts:
	couchpotato/core/plugins/renamer/main.py
2013-12-14 23:43:29 +01:00
Joel Kåberg
3b0e07100f Merge pull request #2545 from mano3m/develop_downloaders
Downloader and renamer improvements
2013-12-14 14:20:33 -08:00
Joel Kåberg
8adf7fc600 Merge remote-tracking branch 'remotes/origin/develop' into tv 2013-12-14 21:24:07 +01:00
Joel Kåberg
f4c053f56f fix season search for SCC provider 2013-12-14 21:22:46 +01:00
mano3m
74561500b5 Convert windows path to *nix path in sp
Fixes #2594

Note that os.path.normpath converts '/' to '\\' on windows machines, but
unfortunately not the other way around...
2013-12-14 21:12:10 +01:00
Joel Kåberg
5cb5a1677d Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv 2013-12-14 20:35:03 +01:00
Joel Kåberg
9fb9f0ef5b Merge pull request #2599 from fuzeman/tv_searcher
[TV] Moved matcher to core/media, updated NZBIndex provider
2013-12-13 08:11:10 -08:00
Dean Gardiner
242d69a981 The nzbindex provider now uses the caper usenet parser to get release names from usenet subjects. 2013-12-13 16:31:26 +13:00
Dean Gardiner
eb151a4c5d Updated Caper to v0.3.1 2013-12-13 15:24:40 +13:00
Ruud
3b519aeac9 nzbmegasearch returns redirected url. fix #2597 2013-12-12 19:58:55 +01:00
mano3m
ea5d274f4d Add another check 2013-12-11 22:40:01 +01:00
mano3m
f57f2444fe Improved checking
Fixes #2539 ?
2013-12-11 22:11:33 +01:00
Dean Gardiner
2520b19798 Fixed bug in searcher where episode searches would be triggered if a season release has already been snatched at a better quality 2013-12-09 13:33:25 +13:00
Dean Gardiner
319c9e979a Split ShowMatcher into Episode and Season matchers, updated correctIdentifier method so there should be less false matches now. 2013-12-09 13:32:43 +13:00
Dean Gardiner
93aa5b1920 Updated Caper to v0.2.9 2013-12-09 11:23:57 +13:00
mano3m
fd768df9e5 Tabs to spaces 2013-12-08 17:35:05 +01:00
Joel Kåberg
4db68e4887 update contributing 2013-12-08 13:07:17 +01:00
Dean Gardiner
f648af66a6 Moved matcher plugin to core/media, moved some matcher related functions from ShowSearcher to ShowMatcher 2013-12-08 22:40:39 +13:00
Joel Kåberg
7c4185e1fa Merge branch 'develop' into tv 2013-12-07 23:40:42 +01:00
mano3m
6d4297a5fb Extend os.path.sep to all folder checks
Expands 50c5044fe8
2013-12-07 22:39:47 +01:00
mano3m
ab413f2f3e Dont remove historic data when doing a full scan.
Fixes #2572

Note that the dashboard already takes care of this and does it the right
way (keeping seeding and ignored releases).
2013-12-07 22:33:34 +01:00
mano3m
574255c4b6 Don't tag .ignore files 2013-12-07 22:33:34 +01:00
mano3m
008ba39856 Add backwards compatibility for the renamer API 2013-12-07 22:33:33 +01:00
mano3m
cff1b3abdb Provide IDs to check to all downloaders 2013-12-07 22:33:32 +01:00
mano3m
231c5b8ca1 Renamer rename to media 2013-12-07 22:33:31 +01:00
mano3m
640664494e Increase check_snatched readability
- Reduce nested if statements
- Add more comments
2013-12-07 22:31:16 +01:00
mano3m
951b7b8425 Update Synology and Pneumatic
As per black hole improvement
2013-12-07 22:31:16 +01:00
mano3m
c9980539f0 Improve black hole support
Also scan the 'from' folder if Black hole is used together with another
downloader.
2013-12-07 22:31:15 +01:00
Ruud Burger
7eb802b42a Merge pull request #2501 from mano3m/develop_xbmc
XBMC metadata update
2013-12-07 13:17:03 -08:00
Ruud Burger
2f4f3ce0fe Merge pull request #2578 from mano3m/develop_fnmatch
Fix fnmatch
2013-12-07 13:14:13 -08:00
mano3m
824ac86d18 Fix fnmatch
fnmatch does not accept regular expressions as presumed in
0c4851e436 See
http://docs.python.org/2/library/fnmatch.html

This patch actually completely broke tagging. All we need to do is make
sure any [ or ] used is converted into [[] or []].

Fixes #2557 and  #2362
2013-12-07 22:11:16 +01:00
mano3m
4553726423 [Notifications][XBMC] Add always do a full scan option to XBMC
Fixes #2498 (at least partially)
2013-12-07 15:09:30 +01:00
mano3m
f0bde7316d [Metadata][XBMC] Update new actors to actor_roles 2013-12-07 15:09:23 +01:00
Joel Kåberg
0fb06a3fd3 Merge pull request #2577 from fuzeman/tv_searcher
[TV] Season pack matching, better show search triggering
2013-12-07 01:10:27 -08:00
Dean Gardiner
1e39d643a8 Searching for a show now triggers searching for all the seasons, Season searches that fail to find anything now trigger individual episode searches. 2013-12-07 21:23:10 +13:00
Dean Gardiner
69d58663ef Profile and categories on seasons and episodes are now set to the same value as the Show 2013-12-07 21:21:15 +13:00
Dean Gardiner
e59b53fab2 Searching is now deferred until the entire show has been loaded into the database 2013-12-07 18:48:26 +13:00
Dean Gardiner
a66f6f0166 Fixed reference to 'movie.restatus' (should be 'media.restatus'), minor formatting changes 2013-12-07 18:11:06 +13:00
Dean Gardiner
1344f03b16 Fixed matcher bug when matching resolution on ['480p', None] 2013-12-07 18:11:05 +13:00
Dean Gardiner
a23c409939 Updated Caper to v0.2.6 2013-12-07 18:11:05 +13:00
Dean Gardiner
a6b1cc833f Added more TV qualities for testing (arh.. bit of a mess) 2013-12-07 18:11:04 +13:00
Joel Kåberg
d2c7e3ef56 update Nzbindex for tv branch 2013-12-06 12:55:02 +01:00
Joel Kåberg
6c87008d7b update Nzbclub for tv branch 2013-12-06 12:46:36 +01:00
Joel Kåberg
6b3af21e45 update Binsearch for tv branch 2013-12-06 12:38:54 +01:00
Joel Kåberg
5a5cc0005c Merge pull request #2574 from fuzeman/tv_searcher
[TV] Matching, serialization and UI notification fixes
2013-12-06 03:18:04 -08:00
Joel Kåberg
d65117c0e3 update TorrentPotato for tv branch 2013-12-06 12:16:58 +01:00
Dean Gardiner
d8884bb655 Changed '.searcher.single' call to use search_dict for media serialization 2013-12-06 23:58:47 +13:00
Dean Gardiner
afe9aed2eb Fixed bug where media default_dict contained related and root library attributes. 2013-12-06 23:58:45 +13:00
Dean Gardiner
01e64e989e Updated Caper to v0.2.5 - fixes 'H 264' tag bug 2013-12-06 23:58:44 +13:00
Dean Gardiner
9496df9e9d Fixed a bug where matching show names with a year would fail 2013-12-06 23:58:43 +13:00
Joel Kåberg
8b4c67b977 update Yify for tv branch 2013-12-06 11:44:59 +01:00
Joel Kåberg
f77a8f5573 update PassThePopcorn for tv branch 2013-12-06 11:40:58 +01:00
Joel Kåberg
de8aefebb7 update Bit-HDTV for tv branch 2013-12-06 11:03:24 +01:00
Joel Kåberg
8f0d22a6f2 update TPB for tv branch 2013-12-06 10:46:08 +01:00
Joel Kåberg
721190028b not needed 2013-12-06 09:34:02 +01:00
Joel Kåberg
50e565142e typo 2013-12-06 09:27:02 +01:00
Joel Kåberg
bead3e2b07 update PublicHD for tv branch 2013-12-06 09:26:20 +01:00
Joel Kåberg
71aa0cbb9a use buildUrl 2013-12-06 09:09:53 +01:00
Joel Kåberg
8de19cbd52 fixes 2013-12-06 08:19:13 +01:00
Joel Kåberg
8573832ff7 fixes 2013-12-06 08:05:34 +01:00
Joel Kåberg
7c1d3f8762 fixes 2013-12-06 08:05:15 +01:00
Joel Kåberg
9cd1adcdee fixes 2013-12-06 08:04:58 +01:00
Joel Kåberg
f017ac9dca use searcher.get_search_title 2013-12-06 07:46:50 +01:00
Joel Kåberg
907704e45f fix self.getSearchTitle() 2013-12-06 07:44:16 +01:00
Joel Kåberg
b17f937389 use include_identifier 2013-12-06 07:40:15 +01:00
Joel Kåberg
f591c56dd4 Merge pull request #2570 from fuzeman/tv_searcher
[TV] WEB-DL matcher fix, updated 'searcher.get_search_title'
2013-12-05 22:14:24 -08:00
Dean Gardiner
2fd54901e7 Added optional parameter 'include_identifier' to the 'searcher.get_search_title' event handler. 2013-12-06 14:04:00 +13:00
Dean Gardiner
1bf6c5a82e Changed 'searcher.get_search_title' to accept a 'library' instead of the 'media' as a parameter. 2013-12-06 13:53:56 +13:00
Dean Gardiner
45484461b5 Adjusted Matcher.chainMatch to support 'WEB DL' tags 2013-12-06 13:50:01 +13:00
Dean Gardiner
aa394f59ae Updated Caper to v0.2.4 2013-12-06 13:50:00 +13:00
Joel Kåberg
717111f5d2 cleanup Newznab provider 2013-12-05 21:11:45 +01:00
Joel Kåberg
e3461dc35f updated TorrentDay for tv branch 2013-12-05 17:13:34 +01:00
Joel Kåberg
9b834f62a9 updated Torrentleech for tv branch 2013-12-05 16:55:34 +01:00
Joel Kåberg
935938474c SCC Provider: remove debug info 2013-12-05 16:23:07 +01:00
Joel Kåberg
6573196186 update SCC for tv branch 2013-12-05 16:21:58 +01:00
Joel Kåberg
9a07f2ed65 use searcher.get_search_title and library.identifier (not present in movie library module?) 2013-12-05 15:40:21 +01:00
Joel Kåberg
613ff3b729 updated newznab provider for tv branch. see inline comments 2013-12-05 14:09:35 +01:00
Ruud Burger
def62fc865 Merge pull request #2568 from fuzeman/tv_searcher
[TV] Fixed bug with Library serialization when adding shows
2013-12-05 00:42:13 -08:00
Dean Gardiner
037c355836 Fixed bug with Library serialization when adding shows 2013-12-05 16:23:21 +13:00
Joel Kåberg
180b2bbffe Merge pull request #2549 from fuzeman/tv_searcher
[TV] Searcher cleanup and matcher updates
2013-12-03 23:16:11 -08:00
Dean Gardiner
143dcad4f3 Fixed incorrect reference to library 'season' and 'episode' attributes. 2013-12-04 19:50:48 +13:00
Dean Gardiner
b0e352ab6d Updated Caper to v0.2.3 and Logr to v0.2.2 to greatly improve matching performance 2013-12-04 19:29:02 +13:00
Dean Gardiner
5ea7dc5920 Moved 'searcher.get_media_identifier' into season and episode libraries as 'library.identifier' 2013-12-04 17:15:08 +13:00
Ruud
966f8c36b1 Make sure to use a valid cookie_secret. fix #2553 2013-12-02 12:09:14 +01:00
Dean Gardiner
3c675b5b8a searcher and matcher now uses the new related_libraries and root_library from media instead of using extra db queries 2013-12-02 23:27:26 +13:00
Dean Gardiner
11ea9b4e91 related_libraries are now only included on searches and added the root_library attribute 2013-12-02 23:26:31 +13:00
Dean Gardiner
e8a2139ecf Related libraries are now merged into {<type>: [<library>,...]} type 2013-12-02 21:18:48 +13:00
Ruud
50c5044fe8 Add path separator for check 2013-12-01 19:23:53 +01:00
Dean Gardiner
dc57d7b6d1 Added related_libraries to Library model. 2013-12-01 20:25:33 +13:00
Dean Gardiner
0925f1312d Fixed refresh action - changed show searcher to bind to 'season' and 'episode' media types for '.searcher.single' as well. 2013-12-01 20:22:07 +13:00
Dean Gardiner
efc02f66f5 Changed the IPTorrents show provider into a new season and episode provider, removed grouped cat_ids 2013-12-01 20:20:19 +13:00
Ruud
9ce8ffc14b movie_id > media_id 2013-11-30 16:52:08 +01:00
Ruud
bab07a05e7 Merge branch 'refs/heads/develop' into tv 2013-11-30 16:48:52 +01:00
Ruud
46b2d6ba6e movie_id > media_id 2013-11-30 16:48:46 +01:00
Ruud
1df9f7c83f Merge branch 'refs/heads/develop' into tv 2013-11-30 16:14:19 +01:00
Ruud
8aec5cf605 Better (custom) formhints 2013-11-30 14:59:52 +01:00
Ruud
54af80d5ad Don't wait for shutdown of scheduler 2013-11-30 12:51:35 +01:00
Ruud
8b2cd62211 Don't save stash on pull 2013-11-30 12:49:28 +01:00
Ruud
efdf77ef6c Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/plugins/release/main.py
	couchpotato/core/plugins/renamer/main.py
2013-11-30 12:44:13 +01:00
Ruud
2fc4809821 Variable renaming movie to media 2013-11-30 12:41:06 +01:00
Ruud
bde6de1789 Move movie listing to media 2013-11-30 12:23:53 +01:00
Ruud
c72cca4ea2 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-11-30 11:52:56 +01:00
Ruud
0f071be762 Use Object.each for object looping 2013-11-30 11:52:41 +01:00
Joel Kåberg
cddf47f113 move long subtitle text into formhint 2013-11-29 22:28:51 +01:00
Joel Kåberg
76f3f5253a move long automation text into formhint 2013-11-29 22:14:28 +01:00
Joel Kåberg
d833a04293 move long texts into formhint 2013-11-29 22:10:02 +01:00
Joel Kåberg
2e96860380 directory properly removed 2013-11-27 07:58:23 +01:00
Ruud Burger
3e2e6385cf Properly split seed ratios and seed times 2013-11-26 17:10:56 +01:00
Joel Kåberg
ccc2028690 remove directory option in utorrent
doesn't behave as expected on windows
2013-11-26 15:51:12 +01:00
Joel Kåberg
81dbc1ca79 Merge pull request #2527 from RuudBurger/couchtart
TorrentPotato ready for prime time
2013-11-25 23:54:45 -08:00
Ruud
e9a3059be2 Allow longer description in formhint 2013-11-25 22:16:02 +01:00
Ruud Burger
a989c93505 Merge pull request #2523 from fuzeman/tv_searcher
[TV] Merge fixes, removed get_media_searcher_id event
2013-11-25 07:38:08 -08:00
Dean Gardiner
d122bd1b43 Removed 'searcher.download' (method was moved to the release plugin) 2013-11-25 19:47:05 +13:00
Dean Gardiner
ab81824f4c Minor changes to matcher and added extra show searcher logging 2013-11-25 19:29:29 +13:00
Dean Gardiner
4eb73e3609 Renamed Release.movie references to Release.media 2013-11-25 19:28:43 +13:00
Dean Gardiner
6bcb279f0e Updated Caper library 2013-11-25 17:37:08 +13:00
Dean Gardiner
f446c8ed33 Updated QueryCondenser library 2013-11-25 17:07:54 +13:00
Dean Gardiner
10a34f2b69 Removed the use of the 'searcher.get_media_searcher_id' event 2013-11-25 16:20:03 +13:00
Ruud
cc3ebd79e8 Remove extensions from qualities 2013-11-24 23:17:18 +01:00
Ruud
3e035f84b1 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/helpers/variable.py
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/media/movie/searcher/main.py
	couchpotato/core/plugins/quality/main.py
	couchpotato/core/plugins/release/main.py
	couchpotato/core/plugins/renamer/main.py
2013-11-24 23:13:25 +01:00
Ruud
3d5b33856f Add some quality tests 2013-11-24 22:45:17 +01:00
Ruud
8d2e3a1919 Add ratio and seed time styling 2013-11-24 21:43:54 +01:00
Joel Kåberg
f3380c4fed seed_time and seed_ratio 2013-11-24 19:33:29 +01:00
Joel Kåberg
8a58d7f973 use hostname instead of TorrentPotato (dashboard) 2013-11-24 14:51:03 +01:00
Ruud
37b98cb835 TorrentPotato styling of inputs 2013-11-24 00:52:51 +01:00
Ruud
50262112b8 Use release_name 2013-11-24 00:27:47 +01:00
Ruud
4b9f9862fc Change name and response 2013-11-23 12:07:00 +01:00
Ruud
df60d70592 Move it 2013-11-23 12:06:46 +01:00
mano3m
1b5bc1fa05 [Metadata][XBMC] Add fileinfo to nfo
Also fixed a int / int = int divide bug
2013-11-23 01:04:41 +01:00
mano3m
e4993eac24 [Metadata][XBMC] Add actors to CPS info and nfo 2013-11-23 01:04:40 +01:00
mano3m
bd1bb1ee91 [Metadata][XBMC] Add images to nfo 2013-11-23 01:04:40 +01:00
mano3m
2c1c57333c [Metadata][XBMC] Add trailer to nfo 2013-11-23 01:04:39 +01:00
mano3m
a466cbcf16 [Metadata][XBMC] Fix nfo data
Fixes #1412 and @Lennong MPAA section
2013-11-23 01:04:38 +01:00
Ruud
379f62a339 CouchTater fixes 2013-11-23 00:31:26 +01:00
Ruud
eaf2974f8d Better frontend notification and GUI updating 2013-11-22 23:00:33 +01:00
Ruud
99e641a30d Update dashboard when the search ends of added new movie 2013-11-22 16:47:55 +01:00
Ruud
88d6148500 Update libs 2013-11-22 16:09:15 +01:00
Ruud
f53364eb6c Update Tornado 2013-11-22 16:08:54 +01:00
Ruud
b8f78e311d Update scheduler module 2013-11-22 15:38:33 +01:00
Ruud
bb6e1e2909 Don't propagate core messages to other notification providers. 2013-11-22 15:17:35 +01:00
Ruud
c62c6664ce Merge branch 'refs/heads/fuzeman-feature/notifications/pushbullet' into develop 2013-11-22 01:44:41 +01:00
Ruud
8ae4e3be18 Merge branch 'feature/notifications/pushbullet' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-feature/notifications/pushbullet 2013-11-22 01:44:16 +01:00
Ruud
0065ff5086 Indentation cleanup 2013-11-22 01:34:50 +01:00
Ruud
28d073f934 Merge branch 'refs/heads/Damiya-fix2474' into develop 2013-11-22 01:30:10 +01:00
Ruud
df1cb0ae08 Merge branch 'fix2474' of git://github.com/Damiya/CouchPotatoServer into Damiya-fix2474 2013-11-22 01:29:57 +01:00
jchristi
31a1af43d5 Update fedora init file
This took me awhile to figure out when trying to install for the first time. Luckily, I had the sickbeard init file to reference.
2013-11-22 01:28:14 +01:00
Joel Kåberg
8951e9fc90 typo 2013-11-21 22:22:19 +01:00
Joel Kåberg
357166414c use .get() and added more options 2013-11-21 22:20:45 +01:00
Joel Kåberg
e1a311de40 initial couchtarter provider (torrent newznab)
initial ground work based on newznab provider
needs UI changes: http://i.imgur.com/4MiJUH5.png (need to add ratio and
seed hours also)

untested code
2013-11-21 19:55:36 +01:00
Kate von Roeder
ab923cc592 Sort directories so that we scan them in alphabetical order as well (keeps things nice and well ordered!) 2013-11-20 18:47:09 -08:00
Kate von Roeder
99947fb135 CSS fix for #1578 part 2 - Change text direction from RTL to LTR, fixing issue where root drives would show up as '\C:'. Weird! 2013-11-20 13:47:40 -08:00
Kate von Roeder
185cb0196a Fix for #1578 - Depends on stableSort, so added to PR#2500.
Object.each is not necessarily alphabetic when iterating an object's properties, so we pull the folders out of the object, add them to an array, and sort that.
2013-11-20 13:36:08 -08:00
Kate von Roeder
309ec50691 Array.sortBy should also use the new stablesort. 2013-11-20 09:15:25 -08:00
Kate von Roeder
f865484182 Add Array.stableSort from mootools forge.
Change calls to Array.sort to use new Array.stableSort. Fixes sorting problems on Chrome
2013-11-20 05:47:36 -08:00
Dean Gardiner
ed19fd0254 Added Pushbullet notifications 2013-11-20 22:04:11 +13:00
Ruud
cec88319fe Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-11-19 23:45:28 +01:00
Ruud
d31b7eb72d Add date and message-id to email notification 2013-11-19 23:45:12 +01:00
Joel Kåberg
b7d93b84dd option to set download directory in utorrent 2013-11-19 18:59:54 +01:00
Joel Kåberg
4008774908 append label unnecessary
just set the full path to the dir
2013-11-19 18:51:28 +01:00
Ruud
accce789ba Normalize path sp function 2013-11-19 09:16:47 +01:00
Ruud
091b1fefd2 Add category_id to movie add docs 2013-11-19 09:09:29 +01:00
Ruud
899b1f9b96 Add mobile web capable for Android
Thanks @Elziah
2013-11-18 23:03:40 +01:00
Ruud
0ce5c51c67 renamer.scan needs some files. fix #2481 2013-11-18 22:56:03 +01:00
Ruud
e6d76db250 Merge branch 'refs/heads/mano3m-develop_scan_basefolder' into manual_scan 2013-11-16 14:29:54 +01:00
Ruud
3b3288c53d Manual scan folder cleanup 2013-11-16 14:29:34 +01:00
Ruud
16cf220741 Merge branch 'develop_scan_basefolder' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_scan_basefolder
Conflicts:
	couchpotato/core/plugins/renamer/main.py
2013-11-16 13:45:06 +01:00
mano3m
d984f11cbf First attempt at creating a working directory selector 2013-11-02 08:48:52 +01:00
mano3m
ae666bd9b6 Add API call to scan a folder for multiple movies 2013-11-02 08:48:52 +01:00
Joel Kåberg
611c159373 Merge pull request #2356 from fuzeman/tv_searcher
[TV][Searcher] Release Matching and Snatching
2013-10-15 22:16:35 -07:00
Joel Kåberg
db65980ba4 Merge pull request #2354 from nrgaway/tv_xem
Tv xem
2013-10-15 21:51:41 -07:00
Dean Gardiner
180576f2b7 Minor change to ShowSearcher.correctMatch logging 2013-10-16 14:58:53 +13:00
Dean Gardiner
46d4d34da7 Minor cleanup to Searcher and Matcher 2013-10-16 14:58:52 +13:00
Dean Gardiner
3fa21560be Moved 'searcher.create_releases' from Searcher to Release. 2013-10-16 14:58:51 +13:00
Dean Gardiner
b902186389 Cleaned up usage of helper functions 2013-10-16 14:58:50 +13:00
Dean Gardiner
da87e68fad Implemented basic usage of QueryCondenser 2013-10-16 14:58:49 +13:00
Dean Gardiner
f23412ea7e Added qcond (Query Condenser) v0.1.0 library - https://github.com/fuzeman/QueryCondenser 2013-10-16 14:58:48 +13:00
Dean Gardiner
07abf7c83d Updated Caper to version 0.2.2 2013-10-16 14:58:47 +13:00
Dean Gardiner
6259684487 Moved caper matching into a new 'matcher' plugin. 2013-10-16 14:58:47 +13:00
Dean Gardiner
0a0935d635 Fix to Provider getCatId when returning the cet_backup_id 2013-10-16 14:58:46 +13:00
Dean Gardiner
fb5b17005f Cleaned up status.get calls in TV searcher 2013-10-16 14:58:45 +13:00
Dean Gardiner
e3745b5d74 Updated Caper library 2013-10-16 14:58:44 +13:00
Dean Gardiner
8d24d96804 Implemented 'searcher.get_media_searcher_id' in the TV searcher. 2013-10-16 14:58:43 +13:00
Dean Gardiner
529b535d9f Added 'searcher.get_media_searcher_id' event, Cleaned up some 'status.get' calls, Renamed some references of 'nzb' to 'rel'. 2013-10-16 14:58:42 +13:00
Dean Gardiner
0793668e5c Chain result weight now returned from TV searcher correctRelease function. 2013-10-16 14:58:41 +13:00
Dean Gardiner
8d368ecf29 'searcher.correct_release' can now return a float indicating the weight/accuracy which is used to scale the score. Fix to IPT _buildUrl method. 2013-10-16 14:58:40 +13:00
Dean Gardiner
2d2b0c9048 IPT provider now searches in multiple categories. 2013-10-16 14:58:40 +13:00
Dean Gardiner
fb0719d677 TV Searcher now supports xem scene mappings 2013-10-16 14:58:39 +13:00
Dean Gardiner
7ffa5dc7b6 Fixed IPT Show SD cat_ids 2013-10-16 14:58:38 +13:00
Dean Gardiner
32c289fd3d Renamed 'movie' -> 'media' in 'searcher.download' 2013-10-16 14:58:37 +13:00
Dean Gardiner
ff63b8a1c5 Added TV release snatching/downloading 2013-10-16 14:58:36 +13:00
Dean Gardiner
60d8934444 Created 'searcher.try_download_result' event from section in MovieSearcher.single 2013-10-16 14:58:35 +13:00
Jason Mehring
e0aba01866 more tvdb info provider guards 2013-10-15 17:54:06 -04:00
Jason Mehring
1ae498e3c8 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_xem 2013-10-15 16:25:05 -04:00
Jason Mehring
d1db099f71 grab tvdb fields more defensively 2013-10-15 16:24:49 -04:00
Ruud Burger
f4ef64290d Merge pull request #2352 from fuzeman/tv
[TV] Fixed show searching (broken in search merge)
2013-10-15 12:20:29 -07:00
Dean Gardiner
026151d1a1 Fixed show searching (broken in search merge) 2013-10-15 15:28:35 +13:00
Ruud
70dada8ef6 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/_base/media/main.py
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/media/movie/searcher/main.py
2013-10-08 10:02:40 +02:00
Ruud
9ef752f8a3 Rename mediaplugin 2013-10-08 09:22:20 +02:00
Ruud
d265a5bddd Remove refresh from movie media 2013-10-08 08:48:38 +02:00
Ruud
b2b6e3eb33 Cleanup show media 2013-10-08 08:46:04 +02:00
Ruud
2b6c7a8f94 Move media refresh to media plugin 2013-10-08 08:45:45 +02:00
Ruud
6070209d33 Attach shows to searcher 2013-10-07 23:37:17 +02:00
Ruud
fa78d18890 Merge searches 2013-10-07 23:23:46 +02:00
Ruud
40eaf2a96b Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/movie/_base/static/search.js
2013-10-07 23:12:59 +02:00
Ruud
73dd0916c0 Merge branch 'tv' of github.com:RuudBurger/CouchPotatoServer into tv 2013-10-03 21:20:24 +02:00
Joel Kåberg
77d32fe16b Merge pull request #2292 from nrgaway/tv_xem
Tv xem
2013-10-03 08:17:26 -07:00
Jason Mehring
7def0944a6 Implemented map_absolute. model was changed to implement. map_names now stores in EpisodeLibrary 2013-10-03 04:30:05 -04:00
Ruud
8782cd77d5 Import cleanup 2013-10-03 08:21:37 +02:00
Ruud
1b59fd9af0 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/plugins/renamer/main.py
2013-10-03 08:17:20 +02:00
Joel Kåberg
9dca8a03be Merge pull request #2290 from nrgaway/tv_xem
Tv xem
2013-10-02 05:11:10 -07:00
Jason Mehring
132f4882e5 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_xem 2013-10-02 01:29:55 -04:00
Jason Mehring
9e32a38288 fixed bug that was not storing xem maps in EpisodeLibrary.info 2013-10-02 01:29:21 -04:00
Joel Kåberg
c1b13cd076 Merge pull request #2284 from fuzeman/tv_searcher
[TV][Searcher] TV searching, parsing and Release creation
2013-10-01 03:29:51 -07:00
Dean Gardiner
820588aa5f Created 'searcher.create_releases' event to replace some shared functionality, releases are now created for TV search results. 2013-09-30 23:08:32 +13:00
Dean Gardiner
8fbf050510 Created 'searcher.search' event to replace some shared functionality, Fixed an issue in Release.download when snatching movies. 2013-09-30 22:49:54 +13:00
Dean Gardiner
dd5ae3c4ee Working TV correctRelease function with quality, identifier and show title checking. 2013-09-30 22:03:21 +13:00
Dean Gardiner
ab51707607 Added Caper (0.2.0-master) and Logr (0.2.1) libraries 2013-09-30 18:40:27 +13:00
Dean Gardiner
8acdc56df1 Added the start of the ShowSearcher correctRelease function 2013-09-30 14:53:33 +13:00
Dean Gardiner
d345a05b3c Switched IPTorrents provider to the MultiProvider layout, few fixes to provider base for MultiProvider 2013-09-30 14:53:32 +13:00
Dean Gardiner
5f427ec6ea Moved required/ignored word checking from 'correctMovie' into 'searcher.correct_words' event, Renamed 'movie.searcher.correct_movie' to 'searcher.correct_release' 2013-09-30 14:53:32 +13:00
Dean Gardiner
a95c030885 Fix for discovering the cat_ids structure when the 'ids' are of str type. 2013-09-30 14:49:42 +13:00
Dean Gardiner
bef6a74dfe Minor cleanup to getSearchTitle 2013-09-30 14:49:41 +13:00
Dean Gardiner
01da470c21 Few changes to getSearchTitle in case a title isn't found, Added check to ensure enough media was returned from _lookupMedia 2013-09-30 14:49:40 +13:00
Dean Gardiner
5fdf4d9085 Extended providers to support multiple media types
- 'cat_ids' now support media type groups
  - 'type' extended to allow a list of support media types
  - Added 'searcher.get_search_title' to return a title for media to be used in searches.
2013-09-30 14:49:40 +13:00
Dean Gardiner
bc51e263e1 Switched back to a single search method 'show.searcher.single' 2013-09-30 14:47:51 +13:00
Dean Gardiner
4c527f0931 Added 'show.refresh' API method and the base for season and episode searching. 2013-09-30 14:47:50 +13:00
Ruud
e9fc528a0f movie_id > media_id 2013-09-23 22:23:45 +02:00
Ruud
c9ba3c804e Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/plugins/dashboard/main.py
	couchpotato/core/plugins/renamer/main.py
	couchpotato/core/providers/torrent/sceneaccess/main.py
2013-09-23 22:14:06 +02:00
Joel Kåberg
ee9fe347c7 Merge pull request #2155 from nrgaway/tv_xem
Tv xem
2013-09-16 12:39:15 -07:00
Jason Mehring
515aafe112 bug fixes for add show 2013-09-13 22:02:22 -04:00
Jason Mehring
314016e1fa (WIP) Started integrating xem 2013-09-13 03:28:03 -04:00
Jason Mehring
906a54ef09 Finished creating xem info provider 2013-09-13 03:11:51 -04:00
Jason Mehring
ec2facd056 Fix reference to Movie, its now Media 2013-09-13 03:10:22 -04:00
Jason Mehring
ddbfef575f Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_tvdb 2013-09-12 01:30:57 -04:00
Jason Mehring
8dd7a4771c partially implemented xem info provider (wip) 2013-09-12 01:30:38 -04:00
Jason Mehring
49ba1f1acd Reworked code to allow better integration of other info providers. Initial prep for xem mapping 2013-09-12 01:00:14 -04:00
Ruud
c4d661535c Movie > Media 2013-09-11 23:25:51 +02:00
Ruud
bd52ab7ab1 Movie > Media 2013-09-11 23:18:31 +02:00
Ruud
cce0a8ec62 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/media/movie/library/movie/main.py
	couchpotato/core/plugins/dashboard/main.py
	couchpotato/core/plugins/profile/main.py
	couchpotato/core/plugins/release/main.py
	couchpotato/core/plugins/suggestion/main.py
2013-09-11 23:13:28 +02:00
Joel Kåberg
d02e62f89f Merge pull request #2139 from nrgaway/tv_tvdb
Tv tvdb
2013-09-11 07:52:42 -07:00
Jason Mehring
e180addc3c added posters for seasons 2013-09-11 00:36:12 -04:00
Jason Mehring
a37a4a8cd4 thetvdb, add alternate titles if they exist 2013-09-10 23:45:15 -04:00
Jason Mehring
8328c18728 set cache directory for thetvdb_api 2013-09-10 22:08:17 -04:00
Jason Mehring
7ae07d6c15 Oops, remove debug code for language 2013-09-10 21:58:47 -04:00
Jason Mehring
770bcf5bc6 Added ability to search thetvdb by language 2013-09-10 21:51:38 -04:00
Jason Mehring
7bd6a295d8 return False on fail. Everything caches now 2013-09-10 21:06:44 -04:00
Jason Mehring
4063761313 Changed model to accept Unicode value for airs_time and add last_updated field for episode. Now stores both as well as airs_daysofweek 2013-09-10 21:04:44 -04:00
Jason Mehring
d62b346a74 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_tvdb 2013-09-06 13:16:38 -04:00
Ruud
155732ab1a Rollback type remove 2013-09-02 00:13:05 +02:00
Ruud
b3713b7ae5 Merge branch 'refs/heads/develop' into tv 2013-09-02 00:09:16 +02:00
Jason Mehring
19d026756c Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_tvdb 2013-08-24 21:11:53 -04:00
Ruud
3cddd29425 Merge branch 'refs/heads/develop' into tv 2013-08-25 01:15:40 +02:00
Jason Mehring
23bde0b866 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_tvdb 2013-08-24 15:53:31 -04:00
Jason Mehring
6f895c1805 get apikey from config 2013-08-24 15:37:11 -04:00
Ruud
96089074ce Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/loader.py
2013-08-24 20:22:55 +02:00
Ruud
2ed53df008 Import cleanup 2013-08-24 19:14:09 +02:00
Ruud
060859483a Delete show provider 2013-08-24 18:36:44 +02:00
Ruud
eced476eaf Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/loader.py
	couchpotato/core/providers/info/_modifier/__init__.py
	couchpotato/core/providers/info/_modifier/main.py
	couchpotato/core/providers/movie/_modifier/main.py
	couchpotato/core/providers/show/_modifier/main.py
2013-08-24 18:30:00 +02:00
Ruud
8d5b55a753 Make info modifier multiprovider 2013-08-24 15:30:17 +02:00
Ruud
7296dc54d0 Move thetvdb to info providers 2013-08-24 15:29:57 +02:00
Ruud
e5e9cf7d5f Move info providers to proper folder 2013-08-24 15:20:00 +02:00
Ruud
b106229a78 Merge branch 'refs/heads/develop' into tv 2013-08-24 15:07:21 +02:00
Ruud
73efd5549f Merge branch 'refs/heads/develop' into tv 2013-08-24 14:30:09 +02:00
Joel Kåberg
8139016636 Merge pull request #2061 from nrgaway/tv_loader
fix loader error messages for modules that are selected recursively but ...
2013-08-23 12:51:34 -07:00
Jason Mehring
59c0d0416e fix loader error messages for modules that are selected recursively but are not really modules 2013-08-23 15:32:47 -04:00
Joel Kåberg
cd559ece04 Merge pull request #2058 from nrgaway/tv_refactored
Tv refactored
2013-08-23 07:47:29 -07:00
Joel Kåberg
120a4ad1ed Merge pull request #2057 from nrgaway/tv_development
Completed tvshow model
2013-08-23 07:47:14 -07:00
Jason Mehring
3363e164fd refactored Movie model to Media 2013-08-23 01:37:00 -04:00
Jason Mehring
6d6d5caeb6 Completed tvshow model 2013-08-23 00:53:34 -04:00
Joel Kåberg
21030e7cb4 Merge pull request #2052 from nrgaway/tv_database_2
Added Seasons
2013-08-22 12:40:20 -07:00
Jason Mehring
9b238ba712 Added Seasons. Show is the parent to Seasons and Episodes are the children of Season 2013-08-22 02:47:41 -04:00
Ruud
b3d2d5349b Rename database for TV branch 2013-08-20 23:02:43 +02:00
Ruud Burger
f9bad281de Merge pull request #2038 from nrgaway/tv_database
Tv database
2013-08-20 01:00:29 -07:00
Jason Mehring
72ce919989 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_database 2013-08-20 02:18:03 -04:00
Jason Mehring
ff782669f6 readded tvdb_api 2013-08-20 02:11:54 -04:00
Jason Mehring
36950993f1 removed tvdb_api since it was missing all files 2013-08-20 02:11:01 -04:00
Jason Mehring
7df93dc1b4 Moved library and refactored to its new location. Modified anything firing library.add/update/_release date to now fire library.add.movie... 2013-08-20 01:54:47 -04:00
Ruud
a45913eee7 Default to movie type 2013-08-18 13:20:53 +02:00
Ruud
a25eac6c4e Make SceneAccess multiprovider 2013-08-18 11:47:07 +02:00
Ruud
dd0fcf0bc1 Add multiprovider for provider grouping 2013-08-18 11:45:45 +02:00
Ruud
2267235eca Rename type to protocol 2013-08-18 11:44:00 +02:00
Jason Mehring
029cf9ecac New model implemented to work with both Movies and TV Shows as well any future types. Currently episodes are mapped directly to shows; no seasons yet. Will get around to that soon. This version allows you to add any tv show and it will appear in wanted list, but no searches are written yet :) 2013-08-18 03:28:41 -04:00
Ruud
f4217ecd3d Move registerPlugin to __new__ magic 2013-08-18 00:22:36 +02:00
Jason Mehring
31cd993506 EOD commit (WIP). So close to writing tv objects to database but too tired to finish. Currently storing a show as a movie using imdb metadata. Added another search button beside movie button 2013-08-17 04:15:51 -04:00
Jason Mehring
fb579561de added a --noreloader option flag on startup to prevent CP from auto reloading when in development and debugging mode 2013-08-16 17:10:52 -04:00
Jason Mehring
37eb424827 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_database 2013-08-16 15:41:37 -04:00
Ruud
4348451692 Merge branch 'refs/heads/develop' into tv
Conflicts:
	couchpotato/core/media/__init__.py
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/media/movie/searcher/__init__.py
	couchpotato/core/media/movie/searcher/main.py
2013-08-16 21:07:59 +02:00
Ruud
e93e55a0f7 Searcher conf section 2013-08-16 10:22:43 +02:00
Jason Mehring
bc11f90529 EOD commit (WIP). Commented out schema added yesterday in favour of a more global scheme. Added menu option in GUI to search for tv shows (placed on top of movie one for now). Partially implemented thetvdb provider. Search is working and returns a list of shows for GUI search along with posters. posters still need work. 2013-08-16 02:44:41 -04:00
Jason Mehring
8fcc246f25 Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_database 2013-08-15 20:48:57 -04:00
Ruud
4d7c38d6db Cleanup console log 2013-08-15 23:51:42 +02:00
Ruud
c8d79cde21 Add base for TV media type 2013-08-15 23:47:07 +02:00
Ruud
f4d792079b Give moviesearcher a unique name 2013-08-15 23:46:55 +02:00
Ruud
78ab419cd8 Move movie plugin to media folder 2013-08-15 23:38:14 +02:00
Ruud
3e93983f6e Move movie to new media type folder 2013-08-15 22:22:45 +02:00
Jason Mehring
6a4822cc26 merged upstream changes 2013-08-15 15:08:44 -04:00
Ruud
92b08bb5d5 Merge branch 'refs/heads/develop' into tv 2013-08-15 20:30:48 +02:00
Jason Mehring
e270e09969 EOD commit (WIP). Added partial Show, Episode schema. 2013-08-15 01:15:48 -04:00
Ruud
40cd5218db Change branch to "tv" 2013-08-14 23:24:21 +02:00
365 changed files with 39527 additions and 14581 deletions

View File

@@ -1,231 +0,0 @@
from esky.util import appdir_from_executable #@UnresolvedImport
from threading import Thread
from version import VERSION
from wx.lib.softwareupdate import SoftwareUpdate
import os
import sys
import time
import webbrowser
import wx
# Include proper dirs
# When running a frozen (esky/py2exe style) build, the real base directory is
# two levels above the bundled `libs` package; otherwise it is this file's dir.
if hasattr(sys, 'frozen'):
    import libs
    base_path = os.path.dirname(os.path.dirname(os.path.abspath(libs.__file__)))
else:
    base_path = os.path.dirname(os.path.abspath(__file__))

lib_dir = os.path.join(base_path, 'libs')

# Prepend both so bundled packages win over anything system-installed.
sys.path.insert(0, base_path)
sys.path.insert(0, lib_dir)

from couchpotato.environment import Env
class TaskBarIcon(wx.TaskBarIcon):
    """System-tray icon for the desktop app.

    Starts in a disabled "Loading..." state; `enable()` is called once the
    CouchPotato core is up, after which the menu entries become clickable.
    """

    TBMENU_OPEN = wx.NewId()
    TBMENU_SETTINGS = wx.NewId()
    TBMENU_EXIT = wx.ID_EXIT

    closed = False      # guards against running the close handler twice
    menu = False        # lazily-built popup menu (False until first popup)
    enabled = False     # True once the core has finished loading

    def __init__(self, frame):
        wx.TaskBarIcon.__init__(self)
        self.frame = frame

        tray_icon = wx.Icon('icon.png', wx.BITMAP_TYPE_PNG)
        self.SetIcon(tray_icon)

        # Left and right click both pop the same menu.
        self.Bind(wx.EVT_TASKBAR_LEFT_UP, self.OnTaskBarClick)
        self.Bind(wx.EVT_TASKBAR_RIGHT_UP, self.OnTaskBarClick)

        for handler, menu_id in ((self.onOpen, self.TBMENU_OPEN),
                                 (self.onSettings, self.TBMENU_SETTINGS),
                                 (self.onTaskBarClose, self.TBMENU_EXIT)):
            self.Bind(wx.EVT_MENU, handler, id = menu_id)

    def OnTaskBarClick(self, evt):
        popup = self.CreatePopupMenu()
        self.PopupMenu(popup)
        popup.Destroy()

    def enable(self):
        """Mark the app as loaded and unlock the menu entries."""
        self.enabled = True

        if self.menu:
            for item in (self.open_menu, self.setting_menu):
                item.Enable(True)
            self.open_menu.SetText('Open')

    def CreatePopupMenu(self):
        # Build the menu once and reuse it on every popup.
        if self.menu:
            return self.menu

        self.menu = wx.Menu()
        self.open_menu = self.menu.Append(self.TBMENU_OPEN, 'Open')
        self.setting_menu = self.menu.Append(self.TBMENU_SETTINGS, 'About')
        self.exit_menu = self.menu.Append(self.TBMENU_EXIT, 'Quit')

        if not self.enabled:
            # Core still booting: grey out the actions until enable() runs.
            self.open_menu.Enable(False)
            self.setting_menu.Enable(False)
            self.open_menu.SetText('Loading...')

        return self.menu

    def onOpen(self, event):
        webbrowser.open(self.frame.parent.getSetting('base_url'))

    def onSettings(self, event):
        webbrowser.open(self.frame.parent.getSetting('base_url') + 'settings/about/')

    def onTaskBarClose(self, evt):
        if self.closed:
            return

        self.closed = True
        self.RemoveIcon()
        wx.CallAfter(self.frame.Close)

    def makeIcon(self, img):
        # Platform-specific tray icon sizes.
        if "wxMSW" in wx.PlatformInfo:
            img = img.Scale(16, 16)
        elif "wxGTK" in wx.PlatformInfo:
            img = img.Scale(22, 22)

        # NOTE(review): CopyFromBitmap is called with no source argument —
        # possibly meant img.ConvertToBitmap(); confirm against the wx API.
        return wx.IconFromBitmap(img.CopyFromBitmap())
class MainFrame(wx.Frame):
    """Hidden top-level frame that owns the tray icon.

    The frame never appears in the taskbar; it exists so wx has a window to
    close when the app shuts down.
    """

    def __init__(self, parent):
        super(MainFrame, self).__init__(None, style = wx.FRAME_NO_TASKBAR)

        self.parent = parent
        self.tbicon = TaskBarIcon(self)
class WorkerThread(Thread):
    """Daemon thread that boots and runs the CouchPotato core.

    Starts itself on construction. When the core returns (or raises), the
    hidden desktop frame is closed so the GUI process can exit.
    """

    def __init__(self, desktop):
        Thread.__init__(self)
        self.daemon = True
        self._desktop = desktop
        self.start()

    def run(self):
        # Get options via arg
        from couchpotato.runner import getOptions
        args = ['--quiet']
        self.options = getOptions(base_path, args)

        # Load settings
        settings = Env.get('settings')
        settings.setFile(self.options.config_file)

        # Create data dir if needed; fall back to the platform default.
        self.data_dir = os.path.expanduser(Env.setting('data_dir'))
        if self.data_dir == '':
            from couchpotato.core.helpers.variable import getDataDir
            self.data_dir = getDataDir()

        if not os.path.isdir(self.data_dir):
            os.makedirs(self.data_dir)

        # Create logging dir
        self.log_dir = os.path.join(self.data_dir, 'logs')
        if not os.path.isdir(self.log_dir):
            os.mkdir(self.log_dir)

        try:
            from couchpotato.runner import runCouchPotato
            runCouchPotato(self.options, base_path, args, data_dir = self.data_dir, log_dir = self.log_dir, Env = Env, desktop = self._desktop)
        except:
            # Previously a silent `pass`, which hid every startup failure.
            # Surface the traceback so crashes are at least debuggable.
            import traceback
            traceback.print_exc()

        # Core stopped (cleanly or not): close the frame so the app exits.
        self._desktop.frame.Close()
class CouchPotatoApp(wx.App, SoftwareUpdate):
    """wx application shell: starts the updater, the hidden frame and the
    worker thread, and routes shutdown/restart events.
    """

    settings = {}
    events = {}
    restart = False     # set by afterShutdown(); read by the __main__ block
    closing = False     # guards against double shutdown

    def OnInit(self):
        # Updater
        base_url = 'https://couchpota.to/updates/%s'
        self.InitUpdates(base_url % VERSION + '/', base_url % 'changelog.html',
            icon = wx.Icon('icon.png'))

        self.frame = MainFrame(self)
        self.frame.Bind(wx.EVT_CLOSE, self.onClose)

        # CouchPotato thread
        self.worker = WorkerThread(self)

        return True

    def onAppLoad(self):
        self.frame.tbicon.enable()

    def setSettings(self, settings = None):
        # `None` default instead of `{}`: a mutable default dict would be
        # shared between calls (and mutable through self.settings).
        self.settings = settings if settings is not None else {}

    def getSetting(self, name):
        return self.settings.get(name)

    def addEvents(self, events = None):
        # Same mutable-default fix as setSettings; dict.update replaces the
        # manual iterkeys() loop (which was Python-2-only anyway).
        if events:
            self.events.update(events)

    def onClose(self, event):
        if not self.closing:
            self.closing = True
            self.frame.tbicon.onTaskBarClose(event)

            # Guard: previously crashed with TypeError when no 'onClose'
            # event handler had been registered.
            onClose = self.events.get('onClose')
            if onClose:
                onClose(event)

    def afterShutdown(self, restart = False):
        self.frame.Destroy()
        self.restart = restart
        self.ExitMainLoop()
if __name__ == '__main__':
    app = CouchPotatoApp(redirect = False)
    app.MainLoop()

    # Give background threads a moment to wind down before a restart.
    time.sleep(1)

    if app.restart:

        def appexe_from_executable(exepath):
            """Locate the real app executable for the current platform."""
            appdir = appdir_from_executable(exepath)
            exename = os.path.basename(exepath)

            # On OS X the binary lives inside the .app bundle.
            if sys.platform == "darwin" and os.path.isdir(os.path.join(appdir, "Contents", "MacOS")):
                return os.path.join(appdir, "Contents", "MacOS", exename)

            return os.path.join(appdir, exename)

        new_exe = appexe_from_executable(sys.executable)
        os.chdir(os.path.dirname(new_exe))
        # Replace this process with a fresh instance, keeping the CLI args.
        os.execv(new_exe, [new_exe] + sys.argv[1:])

View File

@@ -1,15 +1,25 @@
#So you feel like posting a bug, sending me a pull request or just telling me how awesome I am. No problem!
## Got a issue/feature request or submitting a pull request?
##Just make sure you think of the following things:
Make sure you think of the following things:
* Search through the existing (and closed) issues first. See if you can get your answer there.
## Issue
* Search through the existing (and closed) issues first, see if you can get your answer there.
* Double check the result manually, because it could be an external issue.
* Post logs! Without seeing what is going on, I can't reproduce the error.
* What is the movie + quality you are searching for.
* What are you settings for the specific problem.
* What providers are you using. (While your logs include these, scanning through hundred of lines of log isn't my hobby).
* Give me a short step by step of how to reproduce.
* Also check the logs before submitting, obvious errors like permission or http errors are often not related to CP.
* What is the movie + quality you are searching for?
* What are your settings for the specific problem?
* What providers are you using? (While your logs include these, scanning through hundreds of lines of log isn't our hobby)
* Post the logs from config directory, please do not copy paste the UI. Use pastebin to store these logs!
* Give a short step by step of how to reproduce the error.
* What hardware / OS are you using and what are the limits? NAS can be slow and maybe have a different python installed than when you use CP on OSX or Windows for example.
* I will mark issues with the "can't reproduce" tag. Don't go asking me "why closed" if it clearly says the issue in the tag ;)
* I will mark issues with the "can't reproduce" tag. Don't go asking "why closed" if it clearly says the issue in the tag ;)
* If you're running on a NAS (QNAP, Austor etc..) with pre-made packages, make sure these are setup to use our source repo (RuudBurger/CouchPotatoServer) and nothing else!!
**If I don't get enough info, the chance of the issue getting closed is a lot bigger ;)**
## Pull Request
* Make sure your pull request is made for the develop branch (or relevant feature branch)
* Have you tested your PR? If not, why?
* Are there any limitations of your PR we should know of?
* Make sure to keep your PR up-to-date with the branch you're trying to push into.
**If we don't get enough info, the chance of the issue getting closed is a lot bigger ;)**

View File

@@ -55,6 +55,10 @@ class Core(Plugin):
if not Env.get('desktop'):
self.signalHandler()
# Set default urlopen timeout
import socket
socket.setdefaulttimeout(30)
def md5Password(self, value):
return md5(value) if value else ''

View File

@@ -34,6 +34,8 @@ class ClientScript(Plugin):
'scripts/library/question.js',
'scripts/library/scrollspy.js',
'scripts/library/spin.js',
'scripts/library/Array.stableSort.js',
'scripts/library/async.js',
'scripts/couchpotato.js',
'scripts/api.js',
'scripts/library/history.js',

View File

@@ -31,13 +31,13 @@ class Scheduler(Plugin):
pass
def doShutdown(self):
super(Scheduler, self).doShutdown()
self.stop()
return super(Scheduler, self).doShutdown()
def stop(self):
if self.started:
log.debug('Stopping scheduler')
self.sched.shutdown()
self.sched.shutdown(wait = False)
log.debug('Scheduler stopped')
self.started = False

View File

@@ -183,9 +183,6 @@ class GitUpdater(BaseUpdater):
def doUpdate(self):
try:
log.debug('Stashing local changes')
self.repo.saveStash()
log.info('Updating to latest version')
self.repo.pull()

View File

@@ -24,7 +24,7 @@ var UpdaterBase = new Class({
self.doUpdate();
else {
App.unBlockPage();
App.fireEvent('message', 'No updates available');
App.on('message', 'No updates available');
}
}
})

View File

@@ -13,6 +13,7 @@ class Downloader(Provider):
protocol = []
http_time_between_calls = 0
status_support = True
torrent_sources = [
'http://torrage.com/torrent/%s.torrent',
@@ -49,22 +50,27 @@ class Downloader(Provider):
return []
def _download(self, data = None, movie = None, manual = False, filedata = None):
if not movie: movie = {}
def _download(self, data = None, media = None, manual = False, filedata = None):
if not media: media = {}
if not data: data = {}
if self.isDisabled(manual, data):
return
return self.download(data = data, movie = movie, filedata = filedata)
return self.download(data = data, media = media, filedata = filedata)
def _getAllDownloadStatus(self):
def _getAllDownloadStatus(self, download_ids):
if self.isDisabled(manual = True, data = {}):
return
return self.getAllDownloadStatus()
ids = [download_id['id'] for download_id in download_ids if download_id['downloader'] == self.getName()]
def getAllDownloadStatus(self):
return
if ids:
return self.getAllDownloadStatus(ids)
else:
return
def getAllDownloadStatus(self, ids):
return []
def _removeFailed(self, release_download):
if self.isDisabled(manual = True, data = {}):
@@ -128,6 +134,7 @@ class Downloader(Provider):
def downloadReturnId(self, download_id):
return {
'downloader': self.getName(),
'status_support': self.status_support,
'id': download_id
}

View File

@@ -13,7 +13,7 @@ config = [{
'list': 'download_providers',
'name': 'blackhole',
'label': 'Black hole',
'description': 'Download the NZB/Torrent to a specific folder.',
'description': 'Download the NZB/Torrent to a specific folder. <em>Note: Seeding and copying/linking features do <strong>not</strong> work with Black hole</em>.',
'wizard': True,
'options': [
{

View File

@@ -11,9 +11,10 @@ log = CPLog(__name__)
class Blackhole(Downloader):
protocol = ['nzb', 'torrent', 'torrent_magnet']
status_support = False
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
@@ -33,7 +34,7 @@ class Blackhole(Downloader):
log.error('No nzb/torrent available: %s', data.get('url'))
return False
file_name = self.createFileName(data, filedata, movie)
file_name = self.createFileName(data, filedata, media)
full_path = os.path.join(directory, file_name)
if self.conf('create_subdir'):
@@ -51,10 +52,10 @@ class Blackhole(Downloader):
with open(full_path, 'wb') as f:
f.write(filedata)
os.chmod(full_path, Env.getPermission('file'))
return True
return self.downloadReturnId('')
else:
log.info('File %s already exists.', full_path)
return True
return self.downloadReturnId('')
except:
log.error('Failed to download to blackhole %s', traceback.format_exc())

View File

@@ -32,7 +32,10 @@ class Deluge(Downloader):
return self.drpc
def download(self, data, movie, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), data.get('protocol')))
if not self.connect():
@@ -73,7 +76,7 @@ class Deluge(Downloader):
if data.get('protocol') == 'torrent_magnet':
remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
else:
filename = self.createFileName(data, filedata, movie)
filename = self.createFileName(data, filedata, media)
remote_torrent = self.drpc.add_torrent_file(filename, filedata, options)
if not remote_torrent:
@@ -83,25 +86,25 @@ class Deluge(Downloader):
log.info('Torrent sent to Deluge successfully.')
return self.downloadReturnId(remote_torrent)
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking Deluge download status.')
if not self.connect():
return False
return []
release_downloads = ReleaseDownloadList(self)
queue = self.drpc.get_alltorrents()
queue = self.drpc.get_alltorrents(ids)
if not queue:
log.debug('Nothing in queue or error')
return False
return []
for torrent_id in queue:
torrent = queue[torrent_id]
log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
# Deluge has no easy way to work out if a torrent is stalled or failing.
#status = 'failed'
status = 'busy'
@@ -117,11 +120,11 @@ class Deluge(Downloader):
download_dir = sp(torrent['save_path'])
if torrent['move_on_completed']:
download_dir = torrent['move_completed_path']
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(download_dir, file_item['path'])))
release_downloads.append({
'id': torrent['hash'],
'name': torrent['name'],
@@ -205,11 +208,11 @@ class DelugeRPC(object):
return torrent_id
def get_alltorrents(self):
def get_alltorrents(self, ids):
ret = False
try:
self.connect()
ret = self.client.core.get_torrents_status({}, {}).get()
ret = self.client.core.get_torrents_status({'id': ids}, {}).get()
except Exception, err:
log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc()))
finally:

View File

@@ -25,6 +25,13 @@ config = [{
'default': 'localhost:6789',
'description': 'Hostname with port. Usually <strong>localhost:6789</strong>',
},
{
'name': 'ssl',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
},
{
'name': 'username',
'default': 'nzbget',

View File

@@ -17,10 +17,10 @@ class NZBGet(Downloader):
protocol = ['nzb']
url = 'http://%(username)s:%(password)s@%(host)s/xmlrpc'
url = '%(protocol)s://%(username)s:%(password)s@%(host)s/xmlrpc'
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
if not filedata:
@@ -29,8 +29,8 @@ class NZBGet(Downloader):
log.info('Sending "%s" to NZBGet.', data.get('name'))
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
nzb_name = ss('%s.nzb' % self.createNzbName(data, movie))
url = self.url % {'protocol': 'https' if self.conf('ssl') else 'http', 'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
nzb_name = ss('%s.nzb' % self.createNzbName(data, media))
rpc = xmlrpclib.ServerProxy(url)
try:
@@ -67,11 +67,11 @@ class NZBGet(Downloader):
log.error('NZBGet could not add %s to the queue.', nzb_name)
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking NZBGet download status.')
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
url = self.url % {'protocol': 'https' if self.conf('ssl') else 'http', 'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
rpc = xmlrpclib.ServerProxy(url)
try:
@@ -81,13 +81,13 @@ class NZBGet(Downloader):
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
return False
return []
except xmlrpclib.ProtocolError, e:
if e.errcode == 401:
log.error('Password is incorrect.')
else:
log.error('Protocol Error: %s', e)
return False
return []
# Get NZBGet data
try:
@@ -97,56 +97,59 @@ class NZBGet(Downloader):
history = rpc.history()
except:
log.error('Failed getting data: %s', traceback.format_exc(1))
return False
return []
release_downloads = ReleaseDownloadList(self)
for nzb in groups:
log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
try:
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = nzb['NZBID']
timeleft = -1
try:
if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20))
except:
pass
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
# Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
'timeleft': timeleft,
})
if nzb_id in ids:
log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
timeleft = -1
try:
if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20))
except:
pass
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
# Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
'timeleft': timeleft,
})
for nzb in queue: # 'Parameters' is not passed in rpc.postqueue
log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
release_downloads.append({
'id': nzb['NZBID'],
'name': nzb['NZBFilename'],
'original_status': nzb['Stage'],
'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
})
if nzb['NZBID'] in ids:
log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
release_downloads.append({
'id': nzb['NZBID'],
'name': nzb['NZBFilename'],
'original_status': nzb['Stage'],
'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
})
for nzb in history:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
try:
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = nzb['NZBID']
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir'])
})
if nzb_id in ids:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir'])
})
return release_downloads

View File

@@ -8,9 +8,11 @@ from uuid import uuid4
import hashlib
import httplib
import json
import os
import socket
import ssl
import sys
import time
import traceback
import urllib2
@@ -23,44 +25,46 @@ class NZBVortex(Downloader):
api_level = None
session_id = None
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
# Send the nzb
try:
nzb_filename = self.createFileName(data, filedata, movie)
self.call('nzb/add', params = {'file': (nzb_filename, filedata)}, multipart = True)
nzb_filename = self.createFileName(data, filedata, media)
self.call('nzb/add', files = {'file': (nzb_filename, filedata)})
time.sleep(10)
raw_statuses = self.call('nzb')
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if nzb['name'] == nzb_filename][0]
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if os.path.basename(item['nzbFileName']) == nzb_filename][0]
return self.downloadReturnId(nzb_id)
except:
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
raw_statuses = self.call('nzb')
release_downloads = ReleaseDownloadList(self)
for nzb in raw_statuses.get('nzbs', []):
if nzb['id'] in ids:
# Check status
status = 'busy'
if nzb['state'] == 20:
status = 'completed'
elif nzb['state'] in [21, 22, 24]:
status = 'failed'
release_downloads.append({
'id': nzb['id'],
'name': nzb['uiTitle'],
'status': status,
'original_status': nzb['state'],
'timeleft':-1,
'folder': sp(nzb['destinationPath']),
})
# Check status
status = 'busy'
if nzb['state'] == 20:
status = 'completed'
elif nzb['state'] in [21, 22, 24]:
status = 'failed'
release_downloads.append({
'id': nzb['id'],
'name': nzb['uiTitle'],
'status': status,
'original_status': nzb['state'],
'timeleft':-1,
'folder': sp(nzb['destinationPath']),
})
return release_downloads
@@ -113,10 +117,9 @@ class NZBVortex(Downloader):
params = tryUrlencode(parameters)
url = cleanHost(self.conf('host')) + 'api/' + call
url_opener = urllib2.build_opener(HTTPSHandler())
try:
data = self.urlopen('%s?%s' % (url, params), opener = url_opener, *args, **kwargs)
data = self.urlopen('%s?%s' % (url, params), *args, **kwargs)
if data:
return json.loads(data)
@@ -138,10 +141,9 @@ class NZBVortex(Downloader):
if not self.api_level:
url = cleanHost(self.conf('host')) + 'api/app/apilevel'
url_opener = urllib2.build_opener(HTTPSHandler())
try:
data = self.urlopen(url, opener = url_opener, show_error = False)
data = self.urlopen(url, show_error = False)
self.api_level = float(json.loads(data).get('apilevel'))
except URLError, e:
if hasattr(e, 'code') and e.code == 403:

View File

@@ -11,9 +11,10 @@ class Pneumatic(Downloader):
protocol = ['nzb']
strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s'
status_support = False
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
@@ -25,7 +26,7 @@ class Pneumatic(Downloader):
log.error('No nzb available!')
return False
fullPath = os.path.join(directory, self.createFileName(data, filedata, movie))
fullPath = os.path.join(directory, self.createFileName(data, filedata, media))
try:
if not os.path.isfile(fullPath):
@@ -33,7 +34,7 @@ class Pneumatic(Downloader):
with open(fullPath, 'wb') as f:
f.write(filedata)
nzb_name = self.createNzbName(data, movie)
nzb_name = self.createNzbName(data, media)
strm_path = os.path.join(directory, nzb_name)
strm_file = open(strm_path + '.strm', 'wb')
@@ -41,11 +42,11 @@ class Pneumatic(Downloader):
strm_file.write(strmContent)
strm_file.close()
return True
return self.downloadReturnId('')
else:
log.info('File %s already exists.', fullPath)
return True
return self.downloadReturnId('')
except:
log.error('Failed to download .strm: %s', traceback.format_exc())

View File

@@ -58,14 +58,6 @@ config = [{
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'append_label',
'label': 'Append Label',
'default': False,
'advanced': True,
'type': 'bool',
'description': 'Append label to download location. Requires you to set the download location above.',
},
{
'name': 'paused',
'type': 'bool',

View File

@@ -77,7 +77,10 @@ class rTorrent(Downloader):
return True
def download(self, data, movie, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.debug('Sending "%s" to rTorrent.', (data.get('name')))
if not self.connect():
@@ -125,9 +128,7 @@ class rTorrent(Downloader):
if self.conf('label'):
torrent.set_custom(1, self.conf('label'))
if self.conf('directory') and self.conf('append_label'):
torrent.set_directory(os.path.join(self.conf('directory'), self.conf('label')))
elif self.conf('directory'):
if self.conf('directory'):
torrent.set_directory(self.conf('directory'))
# Set Ratio Group
@@ -142,11 +143,11 @@ class rTorrent(Downloader):
log.error('Failed to send torrent to rTorrent: %s', err)
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking rTorrent download status.')
if not self.connect():
return False
return []
try:
torrents = self.rt.get_torrents()
@@ -154,33 +155,34 @@ class rTorrent(Downloader):
release_downloads = ReleaseDownloadList(self)
for torrent in torrents:
torrent_files = []
for file_item in torrent.get_files():
torrent_files.append(sp(os.path.join(torrent.directory, file_item.path)))
status = 'busy'
if torrent.complete:
if torrent.active:
status = 'seeding'
else:
status = 'completed'
release_downloads.append({
'id': torrent.info_hash,
'name': torrent.name,
'status': status,
'seed_ratio': torrent.ratio,
'original_status': torrent.state,
'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1,
'folder': sp(torrent.directory),
'files': '|'.join(torrent_files)
})
if torrent.info_hash in ids:
torrent_files = []
for file_item in torrent.get_files():
torrent_files.append(sp(os.path.join(torrent.directory, file_item.path)))
status = 'busy'
if torrent.complete:
if torrent.active:
status = 'seeding'
else:
status = 'completed'
release_downloads.append({
'id': torrent.info_hash,
'name': torrent.name,
'status': status,
'seed_ratio': torrent.ratio,
'original_status': torrent.state,
'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1,
'folder': sp(torrent.directory),
'files': '|'.join(torrent_files)
})
return release_downloads
except Exception, err:
log.error('Failed to get status from rTorrent: %s', err)
return False
return []
def pause(self, release_download, pause = True):
if not self.connect():

View File

@@ -16,8 +16,8 @@ class Sabnzbd(Downloader):
protocol = ['nzb']
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" to SABnzbd.', data.get('name'))
@@ -25,7 +25,7 @@ class Sabnzbd(Downloader):
req_params = {
'cat': self.conf('category'),
'mode': 'addurl',
'nzbname': self.createNzbName(data, movie),
'nzbname': self.createNzbName(data, media),
'priority': self.conf('priority'),
}
@@ -36,14 +36,14 @@ class Sabnzbd(Downloader):
return False
# If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
nzb_filename = self.createFileName(data, filedata, movie)
nzb_filename = self.createFileName(data, filedata, media)
req_params['mode'] = 'addfile'
else:
req_params['name'] = data.get('url')
try:
if nzb_filename and req_params.get('mode') is 'addfile':
sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True)
sab_data = self.call(req_params, files = {'nzbfile': (ss(nzb_filename), filedata)})
else:
sab_data = self.call(req_params)
except URLError:
@@ -64,7 +64,7 @@ class Sabnzbd(Downloader):
log.error('Error getting data from SABNZBd: %s', sab_data)
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking SABnzbd download status.')
@@ -75,7 +75,7 @@ class Sabnzbd(Downloader):
})
except:
log.error('Failed getting queue: %s', traceback.format_exc(1))
return False
return []
# Go through history items
try:
@@ -85,41 +85,42 @@ class Sabnzbd(Downloader):
})
except:
log.error('Failed getting history json: %s', traceback.format_exc(1))
return False
return []
release_downloads = ReleaseDownloadList(self)
# Get busy releases
for nzb in queue.get('slots', []):
status = 'busy'
if 'ENCRYPTED / ' in nzb['filename']:
status = 'failed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['filename'],
'status': status,
'original_status': nzb['status'],
'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
})
if nzb['nzo_id'] in ids:
status = 'busy'
if 'ENCRYPTED / ' in nzb['filename']:
status = 'failed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['filename'],
'status': status,
'original_status': nzb['status'],
'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
})
# Get old releases
for nzb in history.get('slots', []):
status = 'busy'
if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
status = 'failed'
elif nzb['status'] == 'Completed':
status = 'completed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['name'],
'status': status,
'original_status': nzb['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
})
if nzb['nzo_id'] in ids:
status = 'busy'
if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
status = 'failed'
elif nzb['status'] == 'Completed':
status = 'completed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['name'],
'status': status,
'original_status': nzb['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
})
return release_downloads

View File

@@ -11,10 +11,10 @@ log = CPLog(__name__)
class Synology(Downloader):
protocol = ['nzb', 'torrent', 'torrent_magnet']
log = CPLog(__name__)
status_support = False
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
response = False
@@ -42,7 +42,7 @@ class Synology(Downloader):
except:
log.error('Exception while adding torrent: %s', traceback.format_exc())
finally:
return response
return self.downloadReturnId('') if response else False
def getEnabledProtocol(self):
if self.conf('use_for') == 'both':

View File

@@ -31,7 +31,9 @@ class Transmission(Downloader):
return self.trpc
def download(self, data, movie, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('protocol')))
@@ -81,12 +83,12 @@ class Transmission(Downloader):
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking Transmission download status.')
if not self.connect():
return False
return []
release_downloads = ReleaseDownloadList(self)
@@ -97,34 +99,35 @@ class Transmission(Downloader):
queue = self.trpc.get_alltorrents(return_params)
if not (queue and queue.get('torrents')):
log.debug('Nothing in queue or error')
return False
return []
for torrent in queue['torrents']:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / isFinished=%s',
(torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent['eta'], torrent['uploadRatio'], torrent['isFinished']))
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(torrent['downloadDir'], file_item['name'])))
status = 'busy'
if torrent.get('isStalled') and self.conf('stalled_as_failed'):
status = 'failed'
elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] in [5, 6]:
status = 'seeding'
release_downloads.append({
'id': torrent['hashString'],
'name': torrent['name'],
'status': status,
'original_status': torrent['status'],
'seed_ratio': torrent['uploadRatio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(torrent['downloadDir'] if len(torrent_files) == 1 else os.path.join(torrent['downloadDir'], torrent['name'])),
'files': '|'.join(torrent_files)
})
if torrent['hashString'] in ids:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / isStalled=%s / eta=%s / uploadRatio=%s / isFinished=%s',
(torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent.get('isStalled', 'N/A'), torrent['eta'], torrent['uploadRatio'], torrent['isFinished']))
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(torrent['downloadDir'], file_item['name'])))
status = 'busy'
if torrent.get('isStalled') and not torrent['percentDone'] == 1 and self.conf('stalled_as_failed'):
status = 'failed'
elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] in [5, 6]:
status = 'seeding'
release_downloads.append({
'id': torrent['hashString'],
'name': torrent['name'],
'status': status,
'original_status': torrent['status'],
'seed_ratio': torrent['uploadRatio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(torrent['downloadDir'] if len(torrent_files) == 1 else os.path.join(torrent['downloadDir'], torrent['name'])),
'files': '|'.join(torrent_files)
})
return release_downloads

View File

@@ -24,6 +24,16 @@ class uTorrent(Downloader):
protocol = ['torrent', 'torrent_magnet']
utorrent_api = None
status_flags = {
'STARTED' : 1,
'CHECKING' : 2,
'CHECK-START' : 4,
'CHECKED' : 8,
'ERROR' : 16,
'PAUSED' : 32,
'QUEUED' : 64,
'LOADED' : 128
}
def connect(self):
# Load host from config and split out port.
@@ -36,11 +46,11 @@ class uTorrent(Downloader):
return self.utorrent_api
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.debug('Sending "%s" (%s) to uTorrent.', (data.get('name'), data.get('protocol')))
log.debug("Sending '%s' (%s) to uTorrent.", (data.get('name'), data.get('protocol')))
if not self.connect():
return False
@@ -75,9 +85,10 @@ class uTorrent(Downloader):
torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers)
else:
info = bdecode(filedata)["info"]
info = bdecode(filedata)['info']
torrent_hash = sha1(benc(info)).hexdigest().upper()
torrent_filename = self.createFileName(data, filedata, movie)
torrent_filename = self.createFileName(data, filedata, media)
if data.get('seed_ratio'):
torrent_params['seed_override'] = 1
@@ -104,72 +115,62 @@ class uTorrent(Downloader):
return self.downloadReturnId(torrent_hash)
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking uTorrent download status.')
if not self.connect():
return False
return []
release_downloads = ReleaseDownloadList(self)
data = self.utorrent_api.get_status()
if not data:
log.error('Error getting data from uTorrent')
return False
return []
queue = json.loads(data)
if queue.get('error'):
log.error('Error getting data from uTorrent: %s', queue.get('error'))
return False
return []
if not queue.get('torrents'):
log.debug('Nothing in queue')
return False
return []
# Get torrents
for torrent in queue['torrents']:
if torrent[0] in ids:
#Get files of the torrent
torrent_files = []
try:
torrent_files = json.loads(self.utorrent_api.get_files(torrent[0]))
torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]]
except:
log.debug('Failed getting files from torrent: %s', torrent[2])
status_flags = {
"STARTED" : 1,
"CHECKING" : 2,
"CHECK-START" : 4,
"CHECKED" : 8,
"ERROR" : 16,
"PAUSED" : 32,
"QUEUED" : 64,
"LOADED" : 128
}
status = 'busy'
if (torrent[1] & status_flags["STARTED"] or torrent[1] & status_flags["QUEUED"]) and torrent[4] == 1000:
status = 'seeding'
elif (torrent[1] & status_flags["ERROR"]):
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'
if not status == 'busy':
self.removeReadOnly(torrent_files)
release_downloads.append({
'id': torrent[0],
'name': torrent[2],
'status': status,
'seed_ratio': float(torrent[7]) / 1000,
'original_status': torrent[1],
'timeleft': str(timedelta(seconds = torrent[10])),
'folder': sp(torrent[26]),
'files': '|'.join(torrent_files)
})
#Get files of the torrent
torrent_files = []
try:
torrent_files = json.loads(self.utorrent_api.get_files(torrent[0]))
torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]]
except:
log.debug('Failed getting files from torrent: %s', torrent[2])
status = 'busy'
if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
status = 'seeding'
elif (torrent[1] & self.status_flags['ERROR']):
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'
if not status == 'busy':
self.removeReadOnly(torrent_files)
release_downloads.append({
'id': torrent[0],
'name': torrent[2],
'status': status,
'seed_ratio': float(torrent[7]) / 1000,
'original_status': torrent[1],
'timeleft': str(timedelta(seconds = torrent[10])),
'folder': sp(torrent[26]),
'files': '|'.join(torrent_files)
})
return release_downloads
@@ -222,7 +223,7 @@ class uTorrentAPI(object):
if time.time() > self.last_time + 1800:
self.last_time = time.time()
self.token = self.get_token()
request = urllib2.Request(self.url + "?token=" + self.token + "&" + action, data)
request = urllib2.Request(self.url + '?token=' + self.token + '&' + action, data)
try:
open_request = self.opener.open(request)
response = open_request.read()
@@ -242,52 +243,52 @@ class uTorrentAPI(object):
return False
def get_token(self):
request = self.opener.open(self.url + "token.html")
token = re.findall("<div.*?>(.*?)</", request.read())[0]
request = self.opener.open(self.url + 'token.html')
token = re.findall('<div.*?>(.*?)</', request.read())[0]
return token
def add_torrent_uri(self, filename, torrent, add_folder = False):
action = "action=add-url&s=%s" % urllib.quote(torrent)
action = 'action=add-url&s=%s' % urllib.quote(torrent)
if add_folder:
action += "&path=%s" % urllib.quote(filename)
action += '&path=%s' % urllib.quote(filename)
return self._request(action)
def add_torrent_file(self, filename, filedata, add_folder = False):
action = "action=add-file"
action = 'action=add-file'
if add_folder:
action += "&path=%s" % urllib.quote(filename)
return self._request(action, {"torrent_file": (ss(filename), filedata)})
action += '&path=%s' % urllib.quote(filename)
return self._request(action, {'torrent_file': (ss(filename), filedata)})
def set_torrent(self, hash, params):
action = "action=setprops&hash=%s" % hash
action = 'action=setprops&hash=%s' % hash
for k, v in params.iteritems():
action += "&s=%s&v=%s" % (k, v)
action += '&s=%s&v=%s' % (k, v)
return self._request(action)
def pause_torrent(self, hash, pause = True):
if pause:
action = "action=pause&hash=%s" % hash
action = 'action=pause&hash=%s' % hash
else:
action = "action=unpause&hash=%s" % hash
action = 'action=unpause&hash=%s' % hash
return self._request(action)
def stop_torrent(self, hash):
action = "action=stop&hash=%s" % hash
action = 'action=stop&hash=%s' % hash
return self._request(action)
def remove_torrent(self, hash, remove_data = False):
if remove_data:
action = "action=removedata&hash=%s" % hash
action = 'action=removedata&hash=%s' % hash
else:
action = "action=remove&hash=%s" % hash
action = 'action=remove&hash=%s' % hash
return self._request(action)
def get_status(self):
action = "list=1"
action = 'list=1'
return self._request(action)
def get_settings(self):
action = "action=getsettings"
action = 'action=getsettings'
settings_dict = {}
try:
utorrent_settings = json.loads(self._request(action))
@@ -319,5 +320,5 @@ class uTorrentAPI(object):
return self._request(action)
def get_files(self, hash):
action = "action=getfiles&hash=%s" % hash
action = 'action=getfiles&hash=%s' % hash
return self._request(action)

View File

@@ -49,8 +49,29 @@ def ss(original, *args):
return u_original.encode('UTF-8')
def sp(path, *args):
# Standardise encoding, normalise case, path and strip trailing '/' or '\'
return os.path.normcase(os.path.normpath(ss(path, *args))).rstrip(os.path.sep)
if not path or len(path) == 0:
return path
# convert windows path (from remote box) to *nix path
if os.path.sep == '/' and '\\' in path:
path = '/' + path.replace(':', '').replace('\\', '/')
path = os.path.normcase(os.path.normpath(ss(path, *args)))
# Remove any trailing path separators
if path != os.path.sep:
path = path.rstrip(os.path.sep)
# Add a trailing separator in case it is a root folder on windows (crashes guessit)
if len(path) == 2 and path[1] == ':':
path = path + os.path.sep
# Replace *NIX ambiguous '//' at the beginning of a path with '/' (crashes guessit)
path = re.sub('^//', '/', path)
return path
def ek(original, *args):
if isinstance(original, (str, unicode)):

View File

@@ -2,7 +2,7 @@ from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
from couchpotato.core.logger import CPLog
import collections
import hashlib
import os.path
import os
import platform
import random
import re
@@ -11,6 +11,9 @@ import sys
log = CPLog(__name__)
def fnEscape(pattern):
return pattern.replace('[','[[').replace(']','[]]').replace('[[','[[]')
def link(src, dst):
if os.name == 'nt':
import ctypes
@@ -167,7 +170,7 @@ def natcmp(a, b):
return cmp(natsortKey(a), natsortKey(b))
def toIterable(value):
if isinstance(value, collections.Iterable):
if type(value) in [list, tuple]:
return value
return [value]
@@ -216,3 +219,7 @@ def splitString(str, split_on = ',', clean = True):
def dictIsSubset(a, b):
return all([k in b and b[k] == v for k, v in a.items()])
def isSubFolder(sub_folder, base_folder):
# Returns True is sub_folder is the same as or in base_folder
return base_folder.rstrip(os.path.sep) + os.path.sep in sub_folder.rstrip(os.path.sep) + os.path.sep

View File

@@ -1,5 +1,6 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.variable import mergeDicts
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Media
@@ -17,6 +18,13 @@ class MediaBase(Plugin):
'category': {},
}
search_dict = mergeDicts({
'library': {
'related_libraries': {},
'root_library': {}
},
}, default_dict)
def initType(self):
addEvent('media.types', self.getType)
@@ -28,7 +36,7 @@ class MediaBase(Plugin):
def onComplete():
db = get_session()
media = db.query(Media).filter_by(id = id).first()
fireEventAsync('%s.searcher.single' % media.type, media.to_dict(self.default_dict), on_complete = self.createNotifyFront(id))
fireEventAsync('%s.searcher.single' % media.type, media.to_dict(self.search_dict), on_complete = self.createNotifyFront(id))
db.expire_all()
return onComplete
@@ -38,7 +46,7 @@ class MediaBase(Plugin):
def notifyFront():
db = get_session()
media = db.query(Media).filter_by(id = media_id).first()
fireEvent('notify.frontend', type = '%s.update.%s' % (media.type, media.id), data = media.to_dict(self.default_dict))
fireEvent('notify.frontend', type = '%s.update' % media.type, data = media.to_dict(self.default_dict))
db.expire_all()
return notifyFront

View File

@@ -1,13 +1,6 @@
from couchpotato.core.event import addEvent
from couchpotato.core.plugins.base import Plugin
from .main import Library
def start():
return Library()
class LibraryBase(Plugin):
_type = None
def initType(self):
addEvent('library.types', self.getType)
def getType(self):
return self._type
config = []

View File

@@ -0,0 +1,13 @@
from couchpotato.core.event import addEvent
from couchpotato.core.plugins.base import Plugin
class LibraryBase(Plugin):
_type = None
def initType(self):
addEvent('library.types', self.getType)
def getType(self):
return self._type

View File

@@ -0,0 +1,18 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.media._base.library.base import LibraryBase
class Library(LibraryBase):
def __init__(self):
addEvent('library.title', self.title)
def title(self, library):
return fireEvent(
'library.query',
library,
condense = False,
include_year = False,
include_identifier = False,
single = True
)

View File

@@ -0,0 +1,6 @@
from .main import Matcher
def start():
return Matcher()
config = []

View File

@@ -0,0 +1,84 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class MatcherBase(Plugin):
type = None
def __init__(self):
if self.type:
addEvent('%s.matcher.correct' % self.type, self.correct)
def correct(self, chain, release, media, quality):
raise NotImplementedError()
def flattenInfo(self, info):
# Flatten dictionary of matches (chain info)
if isinstance(info, dict):
return dict([(key, self.flattenInfo(value)) for key, value in info.items()])
# Flatten matches
result = None
for match in info:
if isinstance(match, dict):
if result is None:
result = {}
for key, value in match.items():
if key not in result:
result[key] = []
result[key].append(value)
else:
if result is None:
result = []
result.append(match)
return result
def constructFromRaw(self, match):
if not match:
return None
parts = [
''.join([
y for y in x[1:] if y
]) for x in match
]
return ''.join(parts)[:-1].strip()
def simplifyValue(self, value):
if not value:
return value
if isinstance(value, basestring):
return simplifyString(value)
if isinstance(value, list):
return [self.simplifyValue(x) for x in value]
raise ValueError("Unsupported value type")
def chainMatch(self, chain, group, tags):
info = self.flattenInfo(chain.info[group])
found_tags = []
for tag, accepted in tags.items():
values = [self.simplifyValue(x) for x in info.get(tag, [None])]
if any([val in accepted for val in values]):
found_tags.append(tag)
log.debug('tags found: %s, required: %s' % (found_tags, tags.keys()))
if set(tags.keys()) == set(found_tags):
return True
return all([key in found_tags for key, value in tags.items()])

View File

@@ -0,0 +1,88 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import possibleTitles
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.matcher.base import MatcherBase
from caper import Caper
log = CPLog(__name__)
class Matcher(MatcherBase):
def __init__(self):
super(Matcher, self).__init__()
self.caper = Caper()
addEvent('matcher.parse', self.parse)
addEvent('matcher.match', self.match)
addEvent('matcher.flatten_info', self.flattenInfo)
addEvent('matcher.construct_from_raw', self.constructFromRaw)
addEvent('matcher.correct_title', self.correctTitle)
addEvent('matcher.correct_quality', self.correctQuality)
def parse(self, name, parser='scene'):
return self.caper.parse(name, parser)
def match(self, release, media, quality):
match = fireEvent('matcher.parse', release['name'], single = True)
if len(match.chains) < 1:
log.info2('Wrong: %s, unable to parse release name (no chains)', release['name'])
return False
for chain in match.chains:
if fireEvent('%s.matcher.correct' % media['type'], chain, release, media, quality, single = True):
return chain
return False
def correctTitle(self, chain, media):
root_library = media['library']['root_library']
if 'show_name' not in chain.info or not len(chain.info['show_name']):
log.info('Wrong: missing show name in parsed result')
return False
# Get the lower-case parsed show name from the chain
chain_words = [x.lower() for x in chain.info['show_name']]
# Build a list of possible titles of the media we are searching for
titles = root_library['info']['titles']
# Add year suffix titles (will result in ['<name_one>', '<name_one> <suffix_one>', '<name_two>', ...])
suffixes = [None, root_library['info']['year']]
titles = [
title + ((' %s' % suffix) if suffix else '')
for title in titles
for suffix in suffixes
]
# Check show titles match
# TODO check xem names
for title in titles:
for valid_words in [x.split(' ') for x in possibleTitles(title)]:
if valid_words == chain_words:
return True
return False
def correctQuality(self, chain, quality, quality_map):
if quality['identifier'] not in quality_map:
log.info2('Wrong: unknown preferred quality %s', quality['identifier'])
return False
if 'video' not in chain.info:
log.info2('Wrong: no video tags found')
return False
video_tags = quality_map[quality['identifier']]
if not self.chainMatch(chain, 'video', video_tags):
log.info2('Wrong: %s tags not in chain', video_tags)
return False
return True

View File

@@ -1,10 +1,15 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import mergeDicts, splitString, getImdb
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from couchpotato.core.settings.model import Media
from couchpotato.core.settings.model import Library, LibraryTitle, Release, \
Media
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_, asc, not_, desc
from string import ascii_lowercase
log = CPLog(__name__)
@@ -20,7 +25,49 @@ class MediaPlugin(MediaBase):
}
})
addEvent('app.load', self.addSingleRefresh)
addApiView('media.list', self.listView, docs = {
'desc': 'List media',
'params': {
'type': {'type': 'string', 'desc': 'Media type to filter on.'},
'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
'search': {'desc': 'Search movie title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any movies returned or not,
'media': array, media found,
}"""}
})
addApiView('media.get', self.getView, docs = {
'desc': 'Get media by id',
'params': {
'id': {'desc': 'The id of the media'},
}
})
addApiView('media.delete', self.deleteView, docs = {
'desc': 'Delete a media from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'},
}
})
addApiView('media.available_chars', self.charView)
addEvent('app.load', self.addSingleRefreshView)
addEvent('app.load', self.addSingleListView)
addEvent('app.load', self.addSingleCharView)
addEvent('app.load', self.addSingleDeleteView)
addEvent('media.get', self.get)
addEvent('media.list', self.list)
addEvent('media.delete', self.delete)
addEvent('media.restatus', self.restatus)
def refresh(self, id = '', **kwargs):
db = get_session()
@@ -34,7 +81,7 @@ class MediaPlugin(MediaBase):
for title in media.library.titles:
if title.default: default_title = title.title
fireEvent('notify.frontend', type = '%s.busy.%s' % (media.type, x), data = True)
fireEvent('notify.frontend', type = '%s.busy' % media.type, data = {'id': x})
fireEventAsync('library.update.%s' % media.type, identifier = media.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))
db.expire_all()
@@ -43,7 +90,369 @@ class MediaPlugin(MediaBase):
'success': True,
}
def addSingleRefresh(self):
def addSingleRefreshView(self):
for media_type in fireEvent('media.types', merge = True):
addApiView('%s.refresh' % media_type, self.refresh)
def get(self, media_id):
db = get_session()
imdb_id = getImdb(str(media_id))
if imdb_id:
m = db.query(Media).filter(Media.library.has(identifier = imdb_id)).first()
else:
m = db.query(Media).filter_by(id = media_id).first()
results = None
if m:
results = m.to_dict(self.default_dict)
db.expire_all()
return results
def getView(self, id = None, **kwargs):
    """API handler: return one media item by id."""
    media = None
    if id:
        media = self.get(id)

    return {
        'success': media is not None,
        'media': media,
    }
def list(self, types = None, status = None, release_status = None, limit_offset = None, starts_with = None, search = None, order = None):
    """List media items matching the given filters.

    All filter parameters are optional; bare strings are normalized to
    one-element lists. Returns (total_count, movies) where movies is a
    list of media dicts with 'releases' and 'releases_count' merged in.
    """
    db = get_session()

    # Make a list from string
    if status and not isinstance(status, (list, tuple)):
        status = [status]
    if release_status and not isinstance(release_status, (list, tuple)):
        release_status = [release_status]
    if types and not isinstance(types, (list, tuple)):
        types = [types]

    # First query only the matching ids, in the requested order
    q = db.query(Media) \
        .with_entities(Media.id) \
        .group_by(Media.id)

    # Filter on movie status
    if status and len(status) > 0:
        statuses = fireEvent('status.get', status, single = len(status) > 1)
        statuses = [s.get('id') for s in statuses]
        q = q.filter(Media.status_id.in_(statuses))

    # Filter on release status
    if release_status and len(release_status) > 0:
        q = q.join(Media.releases)
        statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
        statuses = [s.get('id') for s in statuses]
        q = q.filter(Release.status_id.in_(statuses))

    # Filter on type
    if types and len(types) > 0:
        try: q = q.filter(Media.type.in_(types))
        except: pass

    # Only join the title tables when searching / ordering needs them
    if starts_with or search or order != 'release_order':
        q = q.join(Media.library, Library.titles) \
            .filter(LibraryTitle.default == True)

    # Add search filters
    filter_or = []
    if starts_with:
        starts_with = toUnicode(starts_with.lower())
        if starts_with in ascii_lowercase:
            filter_or.append(LibraryTitle.simple_title.startswith(starts_with))
        else:
            # '#' bucket: titles that don't start with any ascii letter
            ignore = []
            for letter in ascii_lowercase:
                ignore.append(LibraryTitle.simple_title.startswith(toUnicode(letter)))
            filter_or.append(not_(or_(*ignore)))

    if search:
        filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%'))

    if len(filter_or) > 0:
        q = q.filter(or_(*filter_or))

    total_count = q.count()
    if total_count == 0:
        return 0, []

    if order == 'release_order':
        q = q.order_by(desc(Release.last_edit))
    else:
        q = q.order_by(asc(LibraryTitle.simple_title))

    if limit_offset:
        splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
        limit = splt[0]
        # was "len(splt) is 1": identity test on an int, use equality
        offset = 0 if len(splt) == 1 else splt[1]
        q = q.limit(limit).offset(offset)

    # Get all media_ids in sorted order
    media_ids = [m.id for m in q.all()]

    # Collect release status/quality pairs per media id
    releases = db.query(Release) \
        .filter(Release.media_id.in_(media_ids)) \
        .all()

    release_statuses = dict((m, set()) for m in media_ids)
    releases_count = dict((m, 0) for m in media_ids)
    for release in releases:
        release_statuses[release.media_id].add('%d,%d' % (release.status_id, release.quality_id))
        releases_count[release.media_id] += 1

    # Get main movie data, eager-loading what to_dict() will touch
    q2 = db.query(Media) \
        .options(joinedload_all('library.titles')) \
        .options(joinedload_all('library.files')) \
        .options(joinedload_all('status')) \
        .options(joinedload_all('files'))
    q2 = q2.filter(Media.id.in_(media_ids))

    results = q2.all()

    # Create dict by movie id
    movie_dict = {}
    for movie in results:
        movie_dict[movie.id] = movie

    # List movies based on media_ids order
    movies = []
    for media_id in media_ids:
        releases = []
        for r in release_statuses.get(media_id):
            x = splitString(r)
            releases.append({'status_id': x[0], 'quality_id': x[1]})

        # Merge releases with movie dict
        movies.append(mergeDicts(movie_dict[media_id].to_dict({
            'library': {'titles': {}, 'files':{}},
            'files': {},
        }), {
            'releases': releases,
            'releases_count': releases_count.get(media_id),
        }))

    db.expire_all()
    return total_count, movies
def listView(self, **kwargs):
    """API handler: list media using filters from the request kwargs."""
    total_movies, movies = self.list(
        types = splitString(kwargs.get('types')),
        status = splitString(kwargs.get('status')),
        release_status = splitString(kwargs.get('release_status')),
        limit_offset = kwargs.get('limit_offset'),
        starts_with = kwargs.get('starts_with'),
        search = kwargs.get('search'),
        order = kwargs.get('order')
    )

    return {
        'success': True,
        'empty': len(movies) == 0,
        'total': total_movies,
        'movies': movies,
    }
def addSingleListView(self):
    """Register a '<type>.list' API endpoint for every known media type."""

    def makeList(media_type):
        # Bind media_type per endpoint: the original closure was
        # late-bound, so every endpoint listed the LAST media type.
        def tempList(*args, **kwargs):
            return self.listView(types = media_type, *args, **kwargs)
        return tempList

    for media_type in fireEvent('media.types', merge = True):
        addApiView('%s.list' % media_type, makeList(media_type))
def availableChars(self, types = None, status = None, release_status = None):
    """Return a sorted string of first characters ('a'-'z', '#') that
    default titles of matching media start with."""
    types = types or []
    status = status or []
    release_status = release_status or []

    db = get_session()

    # Make a list from string
    if not isinstance(status, (list, tuple)):
        status = [status]
    if release_status and not isinstance(release_status, (list, tuple)):
        release_status = [release_status]
    if types and not isinstance(types, (list, tuple)):
        types = [types]

    q = db.query(Media)

    # Filter on movie status
    if status and len(status) > 0:
        # was "single = len(release_status) > 1": copy-paste from the
        # release-status branch below; 'single' must follow 'status'
        statuses = fireEvent('status.get', status, single = len(status) > 1)
        statuses = [s.get('id') for s in statuses]
        q = q.filter(Media.status_id.in_(statuses))

    # Filter on release status
    if release_status and len(release_status) > 0:
        statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
        statuses = [s.get('id') for s in statuses]
        q = q.join(Media.releases) \
            .filter(Release.status_id.in_(statuses))

    # Filter on type
    if types and len(types) > 0:
        try: q = q.filter(Media.type.in_(types))
        except: pass

    q = q.join(Library, LibraryTitle) \
        .with_entities(LibraryTitle.simple_title) \
        .filter(LibraryTitle.default == True)

    titles = q.all()

    chars = set()
    for title in titles:
        try:
            char = title[0][0]
            char = char if char in ascii_lowercase else '#'
            chars.add(str(char))
        except:
            # was logging title.libraries_id, an attribute this
            # simple_title-only row doesn't carry (raised itself)
            log.error('Failed getting title char for %s', (title,))

        # 26 letters + '#' = 27 possible chars; stop once all seen
        # (was 25, which could exit before the set was complete)
        if len(chars) == 27:
            break

    db.expire_all()
    return ''.join(sorted(chars))
def charView(self, **kwargs):
    """API handler: list first-letter chars available for filtering."""
    # renamed local 'type' so the builtin isn't shadowed
    media_types = splitString(kwargs.get('type', 'movie'))
    statuses = splitString(kwargs.get('status', None))
    release_statuses = splitString(kwargs.get('release_status', None))

    chars = self.availableChars(media_types, statuses, release_statuses)

    return {
        'success': True,
        'empty': len(chars) == 0,
        'chars': chars,
    }
def addSingleCharView(self):
    """Register a '<type>.available_chars' API endpoint per media type."""

    def makeChar(media_type):
        # Bind media_type per endpoint: the original closure was
        # late-bound, so every endpoint used the LAST media type.
        def tempChar(*args, **kwargs):
            # charView reads the 'type' kwarg; the original passed
            # 'types', which charView ignored (always fell back to
            # its 'movie' default).
            return self.charView(type = media_type, *args, **kwargs)
        return tempChar

    for media_type in fireEvent('media.types', merge = True):
        addApiView('%s.available_chars' % media_type, makeChar(media_type))
def delete(self, media_id, delete_from = None):
    """Delete a media item, or only some of its releases.

    delete_from == 'all' removes the whole row. Wanted-style pages
    ('wanted', 'snatched', 'late') drop the not-yet-done releases and
    flag the media as 'done'; 'manage' drops the done releases and
    flags it 'active'. The media row itself is only removed when every
    release was deleted; otherwise its status/profile is updated, or
    recomputed via 'media.restatus' for unknown delete_from values.
    Always returns True, even when the id does not exist.
    """
    db = get_session()

    media = db.query(Media).filter_by(id = media_id).first()
    if media:
        deleted = False
        if delete_from == 'all':
            db.delete(media)
            db.commit()
            deleted = True
        else:
            done_status = fireEvent('status.get', 'done', single = True)

            total_releases = len(media.releases)
            total_deleted = 0
            new_movie_status = None

            for release in media.releases:
                if delete_from in ['wanted', 'snatched', 'late']:
                    # wanted-style pages: drop everything not finished
                    if release.status_id != done_status.get('id'):
                        db.delete(release)
                        total_deleted += 1
                    new_movie_status = 'done'
                elif delete_from == 'manage':
                    # manage page: drop only the finished releases
                    if release.status_id == done_status.get('id'):
                        db.delete(release)
                        total_deleted += 1
                    new_movie_status = 'active'
            db.commit()

            if total_releases == total_deleted:
                # nothing left: remove the media row as well
                db.delete(media)
                db.commit()
                deleted = True
            elif new_movie_status:
                # releases remain: move media to the opposite list
                new_status = fireEvent('status.get', new_movie_status, single = True)
                media.profile_id = None
                media.status_id = new_status.get('id')
                db.commit()
            else:
                # unknown delete_from value: recompute status instead
                fireEvent('media.restatus', media.id, single = True)

        if deleted:
            fireEvent('notify.frontend', type = 'movie.deleted', data = media.to_dict())

    db.expire_all()

    return True
def deleteView(self, id = '', **kwargs):
    """API handler: delete one or more media ids (comma separated)."""
    delete_from = kwargs.get('delete_from', 'all')
    for media_id in splitString(id):
        self.delete(media_id, delete_from = delete_from)

    return {
        'success': True,
    }
def addSingleDeleteView(self):
    """Register a '<type>.delete' API endpoint for every known media type."""

    def makeDelete(media_type):
        # Bind media_type per endpoint: the original closure was
        # late-bound, so every endpoint used the LAST media type.
        def tempDelete(*args, **kwargs):
            return self.deleteView(types = media_type, *args, **kwargs)
        return tempDelete

    for media_type in fireEvent('media.types', merge = True):
        addApiView('%s.delete' % media_type, makeDelete(media_type))
def restatus(self, media_id):
    """Recompute a media item's status from its profile and releases.

    Marks the media 'done' when it has no profile, or when a release of
    a wanted ("finish") quality is already done; otherwise 'active'.
    Returns False when the media or its titles can't be found.
    """
    active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)

    db = get_session()

    m = db.query(Media).filter_by(id = media_id).first()
    if not m or len(m.library.titles) == 0:
        log.debug('Can\'t restatus movie, doesn\'t seem to exist.')
        return False

    log.debug('Changing status for %s', m.library.titles[0].title)
    if not m.profile:
        m.status_id = done_status.get('id')
    else:
        move_to_wanted = True

        for t in m.profile.types:
            for release in m.releases:
                # was 'is' comparisons on a string identifier and an int
                # id: identity only matched through interning accidents;
                # use equality
                if t.quality.identifier == release.quality.identifier and (release.status_id == done_status.get('id') and t.finish):
                    move_to_wanted = False

        m.status_id = active_status.get('id') if move_to_wanted else done_status.get('id')

    db.commit()

    return True

View File

@@ -1,11 +1,17 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.variable import md5, getTitle, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.base import SearcherBase
from couchpotato.core.settings.model import Media, Release, ReleaseInfo
from couchpotato.environment import Env
from inspect import ismethod, isfunction
import datetime
import re
import time
import traceback
log = CPLog(__name__)
@@ -165,7 +171,7 @@ class Searcher(SearcherBase):
return False
def correctWords(self, rel_name, media):
media_title = fireEvent('searcher.get_search_title', media, single = True)
media_title = fireEvent('library.title', media['library'], single = True)
media_words = re.split('\W+', simplifyString(media_title))
rel_name = simplifyString(rel_name)

View File

@@ -2,15 +2,10 @@ from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt, \
mergeDicts
from couchpotato.core.helpers.variable import splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.core.settings.model import Library, LibraryTitle, Media, \
Release
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_, asc, not_, desc
from string import ascii_lowercase
from couchpotato.core.settings.model import Media
import time
log = CPLog(__name__)
@@ -26,33 +21,12 @@ class MovieBase(MovieTypeBase):
super(MovieBase, self).__init__()
self.initType()
addApiView('movie.list', self.listView, docs = {
'desc': 'List movies in wanted list',
'params': {
'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
'search': {'desc': 'Search movie title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any movies returned or not,
'movies': array, movies found,
}"""}
})
addApiView('movie.get', self.getView, docs = {
'desc': 'Get a movie by id',
'params': {
'id': {'desc': 'The id of the movie'},
}
})
addApiView('movie.available_chars', self.charView)
addApiView('movie.add', self.addView, docs = {
'desc': 'Add new movie to the wanted list',
'params': {
'identifier': {'desc': 'IMDB id of the movie your want to add.'},
'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
}
})
@@ -61,258 +35,12 @@ class MovieBase(MovieTypeBase):
'params': {
'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'},
'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'},
'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
'default_title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
}
})
addApiView('movie.delete', self.deleteView, docs = {
'desc': 'Delete a movie from the wanted list',
'params': {
'id': {'desc': 'Movie ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete movie from this page', 'type': 'string: all (default), wanted, manage'},
}
})
addEvent('movie.add', self.add)
addEvent('movie.delete', self.delete)
addEvent('movie.get', self.get)
addEvent('movie.list', self.list)
addEvent('movie.restatus', self.restatus)
def getView(self, id = None, **kwargs):
movie = self.get(id) if id else None
return {
'success': movie is not None,
'movie': movie,
}
def get(self, movie_id):
db = get_session()
imdb_id = getImdb(str(movie_id))
if imdb_id:
m = db.query(Media).filter(Media.library.has(identifier = imdb_id)).first()
else:
m = db.query(Media).filter_by(id = movie_id).first()
results = None
if m:
results = m.to_dict(self.default_dict)
db.expire_all()
return results
def list(self, status = None, release_status = None, limit_offset = None, starts_with = None, search = None, order = None):
db = get_session()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
# query movie ids
q = db.query(Media) \
.with_entities(Media.id) \
.group_by(Media.id)
# Filter on movie status
if status and len(status) > 0:
statuses = fireEvent('status.get', status, single = len(status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Media.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
q = q.join(Media.releases)
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Release.status_id.in_(statuses))
# Only join when searching / ordering
if starts_with or search or order != 'release_order':
q = q.join(Media.library, Library.titles) \
.filter(LibraryTitle.default == True)
# Add search filters
filter_or = []
if starts_with:
starts_with = toUnicode(starts_with.lower())
if starts_with in ascii_lowercase:
filter_or.append(LibraryTitle.simple_title.startswith(starts_with))
else:
ignore = []
for letter in ascii_lowercase:
ignore.append(LibraryTitle.simple_title.startswith(toUnicode(letter)))
filter_or.append(not_(or_(*ignore)))
if search:
filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%'))
if len(filter_or) > 0:
q = q.filter(or_(*filter_or))
total_count = q.count()
if total_count == 0:
return 0, []
if order == 'release_order':
q = q.order_by(desc(Release.last_edit))
else:
q = q.order_by(asc(LibraryTitle.simple_title))
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = splt[0]
offset = 0 if len(splt) is 1 else splt[1]
q = q.limit(limit).offset(offset)
# Get all movie_ids in sorted order
movie_ids = [m.id for m in q.all()]
# List release statuses
releases = db.query(Release) \
.filter(Release.movie_id.in_(movie_ids)) \
.all()
release_statuses = dict((m, set()) for m in movie_ids)
releases_count = dict((m, 0) for m in movie_ids)
for release in releases:
release_statuses[release.movie_id].add('%d,%d' % (release.status_id, release.quality_id))
releases_count[release.movie_id] += 1
# Get main movie data
q2 = db.query(Media) \
.options(joinedload_all('library.titles')) \
.options(joinedload_all('library.files')) \
.options(joinedload_all('status')) \
.options(joinedload_all('files'))
q2 = q2.filter(Media.id.in_(movie_ids))
results = q2.all()
# Create dict by movie id
movie_dict = {}
for movie in results:
movie_dict[movie.id] = movie
# List movies based on movie_ids order
movies = []
for movie_id in movie_ids:
releases = []
for r in release_statuses.get(movie_id):
x = splitString(r)
releases.append({'status_id': x[0], 'quality_id': x[1]})
# Merge releases with movie dict
movies.append(mergeDicts(movie_dict[movie_id].to_dict({
'library': {'titles': {}, 'files':{}},
'files': {},
}), {
'releases': releases,
'releases_count': releases_count.get(movie_id),
}))
db.expire_all()
return total_count, movies
def availableChars(self, status = None, release_status = None):
status = status or []
release_status = release_status or []
db = get_session()
# Make a list from string
if not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
q = db.query(Media)
# Filter on movie status
if status and len(status) > 0:
statuses = fireEvent('status.get', status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Media.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.join(Media.releases) \
.filter(Release.status_id.in_(statuses))
q = q.join(Library, LibraryTitle) \
.with_entities(LibraryTitle.simple_title) \
.filter(LibraryTitle.default == True)
titles = q.all()
chars = set()
for title in titles:
try:
char = title[0][0]
char = char if char in ascii_lowercase else '#'
chars.add(str(char))
except:
log.error('Failed getting title for %s', title.libraries_id)
if len(chars) == 25:
break
db.expire_all()
return ''.join(sorted(chars))
def listView(self, **kwargs):
status = splitString(kwargs.get('status'))
release_status = splitString(kwargs.get('release_status'))
limit_offset = kwargs.get('limit_offset')
starts_with = kwargs.get('starts_with')
search = kwargs.get('search')
order = kwargs.get('order')
total_movies, movies = self.list(
status = status,
release_status = release_status,
limit_offset = limit_offset,
starts_with = starts_with,
search = search,
order = order
)
return {
'success': True,
'empty': len(movies) == 0,
'total': total_movies,
'movies': movies,
}
def charView(self, **kwargs):
status = splitString(kwargs.get('status', None))
release_status = splitString(kwargs.get('release_status', None))
chars = self.availableChars(status, release_status)
return {
'success': True,
'empty': len(chars) == 0,
'chars': chars,
}
def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None):
if not params: params = {}
@@ -421,9 +149,9 @@ class MovieBase(MovieTypeBase):
available_status = fireEvent('status.get', 'available', single = True)
ids = splitString(id)
for movie_id in ids:
for media_id in ids:
m = db.query(Media).filter_by(id = movie_id).first()
m = db.query(Media).filter_by(id = media_id).first()
if not m:
continue
@@ -446,98 +174,12 @@ class MovieBase(MovieTypeBase):
db.commit()
fireEvent('movie.restatus', m.id)
fireEvent('media.restatus', m.id)
movie_dict = m.to_dict(self.default_dict)
fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(movie_id))
movie_dict = m.to_dict(self.search_dict)
fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id))
db.expire_all()
return {
'success': True,
}
def deleteView(self, id = '', **kwargs):
ids = splitString(id)
for movie_id in ids:
self.delete(movie_id, delete_from = kwargs.get('delete_from', 'all'))
return {
'success': True,
}
def delete(self, movie_id, delete_from = None):
db = get_session()
movie = db.query(Media).filter_by(id = movie_id).first()
if movie:
deleted = False
if delete_from == 'all':
db.delete(movie)
db.commit()
deleted = True
else:
done_status = fireEvent('status.get', 'done', single = True)
total_releases = len(movie.releases)
total_deleted = 0
new_movie_status = None
for release in movie.releases:
if delete_from in ['wanted', 'snatched', 'late']:
if release.status_id != done_status.get('id'):
db.delete(release)
total_deleted += 1
new_movie_status = 'done'
elif delete_from == 'manage':
if release.status_id == done_status.get('id'):
db.delete(release)
total_deleted += 1
new_movie_status = 'active'
db.commit()
if total_releases == total_deleted:
db.delete(movie)
db.commit()
deleted = True
elif new_movie_status:
new_status = fireEvent('status.get', new_movie_status, single = True)
movie.profile_id = None
movie.status_id = new_status.get('id')
db.commit()
else:
fireEvent('movie.restatus', movie.id, single = True)
if deleted:
fireEvent('notify.frontend', type = 'movie.deleted', data = movie.to_dict())
db.expire_all()
return True
def restatus(self, movie_id):
active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)
db = get_session()
m = db.query(Media).filter_by(id = movie_id).first()
if not m or len(m.library.titles) == 0:
log.debug('Can\'t restatus movie, doesn\'t seem to exist.')
return False
log.debug('Changing status for %s', m.library.titles[0].title)
if not m.profile:
m.status_id = done_status.get('id')
else:
move_to_wanted = True
for t in m.profile.types:
for release in m.releases:
if t.quality.identifier is release.quality.identifier and (release.status_id is done_status.get('id') and t.finish):
move_to_wanted = False
m.status_id = active_status.get('id') if move_to_wanted else done_status.get('id')
db.commit()
return True

View File

@@ -52,8 +52,8 @@ var MovieList = new Class({
self.getMovies();
App.addEvent('movie.added', self.movieAdded.bind(self))
App.addEvent('movie.deleted', self.movieDeleted.bind(self))
App.on('movie.added', self.movieAdded.bind(self))
App.on('movie.deleted', self.movieDeleted.bind(self))
},
movieDeleted: function(notification){
@@ -65,6 +65,7 @@ var MovieList = new Class({
movie.destroy();
delete self.movies_added[notification.data.id];
self.setCounter(self.counter_count-1);
self.total_movies--;
}
})
}
@@ -75,6 +76,7 @@ var MovieList = new Class({
movieAdded: function(notification){
var self = this;
self.fireEvent('movieAdded', notification);
if(self.options.add_new && !self.movies_added[notification.data.id] && notification.data.status.identifier == self.options.status){
window.scroll(0,0);
self.createMovie(notification.data, 'top');
@@ -279,7 +281,7 @@ var MovieList = new Class({
// Get available chars and highlight
if(!available_chars && (self.navigation.isDisplayed() || self.navigation.isVisible()))
Api.request('movie.available_chars', {
Api.request('media.available_chars', {
'data': Object.merge({
'status': self.options.status
}, self.filter),
@@ -370,7 +372,7 @@ var MovieList = new Class({
'click': function(e){
(e).preventDefault();
this.set('text', 'Deleting..')
Api.request('movie.delete', {
Api.request('media.delete', {
'data': {
'id': ids.join(','),
'delete_from': self.options.identifier
@@ -390,6 +392,7 @@ var MovieList = new Class({
self.movies.erase(movie);
movie.destroy();
self.setCounter(self.counter_count-1);
self.total_movies--;
});
self.calculateSelected();
@@ -547,8 +550,9 @@ var MovieList = new Class({
}
Api.request(self.options.api_call || 'movie.list', {
Api.request(self.options.api_call || 'media.list', {
'data': Object.merge({
'type': 'movie',
'status': self.options.status,
'limit_offset': self.options.limit ? self.options.limit + ',' + self.offset : null
}, self.filter),

View File

@@ -126,7 +126,9 @@ MA.Release = new Class({
else
self.showHelper();
App.addEvent('movie.searcher.ended.'+self.movie.data.id, function(notification){
App.on('movie.searcher.ended', function(notification){
if(self.movie.data.id != notification.data.id) return;
self.releases = null;
if(self.options_container){
self.options_container.destroy();
@@ -250,12 +252,14 @@ MA.Release = new Class({
else if(!self.next_release && status.identifier == 'available'){
self.next_release = release;
}
var update_handle = function(notification) {
var q = self.movie.quality.getElement('.q_id' + release.quality_id),
if(notification.data.id != release.id) return;
var q = self.movie.quality.getElement('.q_id' + release.quality_id),
status = Status.get(release.status_id),
new_status = Status.get(notification.data);
new_status = Status.get(notification.data.status_id);
release.status_id = new_status.id
release.el.set('class', 'item ' + new_status.identifier);
@@ -272,7 +276,7 @@ MA.Release = new Class({
}
}
App.addEvent('release.update_status.' + release.id, update_handle);
App.on('release.update_status', update_handle);
});
@@ -285,7 +289,7 @@ MA.Release = new Class({
if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){
self.trynext_container = new Element('div.buttons.try_container').inject(self.release_container, 'top');
var nr = self.next_release,
lr = self.last_release;
@@ -427,7 +431,7 @@ MA.Release = new Class({
markMovieDone: function(){
var self = this;
Api.request('movie.delete', {
Api.request('media.delete', {
'data': {
'id': self.movie.get('id'),
'delete_from': 'wanted'
@@ -446,7 +450,7 @@ MA.Release = new Class({
},
tryNextRelease: function(movie_id){
tryNextRelease: function(){
var self = this;
Api.request('movie.searcher.try_next', {
@@ -817,7 +821,7 @@ MA.Delete = new Class({
self.callChain();
},
function(){
Api.request('movie.delete', {
Api.request('media.delete', {
'data': {
'id': self.movie.get('id'),
'delete_from': self.movie.list.options.identifier

View File

@@ -1036,7 +1036,7 @@
text-overflow: ellipsis;
overflow: hidden;
width: 85%;
direction: rtl;
direction: ltr;
vertical-align: middle;
}

View File

@@ -23,23 +23,49 @@ var Movie = new Class({
addEvents: function(){
var self = this;
App.addEvent('movie.update.'+self.data.id, function(notification){
self.global_events = {}
// Do refresh with new data
self.global_events['movie.update'] = function(notification){
if(self.data.id != notification.data.id) return;
self.busy(false);
self.removeView();
self.update.delay(2000, self, notification);
});
}
App.on('movie.update', self.global_events['movie.update']);
// Add spinner on load / search
['movie.busy', 'movie.searcher.started'].each(function(listener){
App.addEvent(listener+'.'+self.data.id, function(notification){
if(notification.data)
self.global_events[listener] = function(notification){
if(notification.data && self.data.id == notification.data.id)
self.busy(true)
});
}
App.on(listener, self.global_events[listener]);
})
App.addEvent('movie.searcher.ended.'+self.data.id, function(notification){
if(notification.data)
// Remove spinner
self.global_events['movie.searcher.ended'] = function(notification){
if(notification.data && self.data.id == notification.data.id)
self.busy(false)
});
}
App.on('movie.searcher.ended', self.global_events['movie.searcher.ended']);
// Reload when releases have updated
self.global_events['release.update_status'] = function(notification){
var data = notification.data
if(data && self.data.id == data.media_id){
if(!self.data.releases)
self.data.releases = [];
self.data.releases.push({'quality_id': data.quality_id, 'status_id': data.status_id});
self.updateReleases();
}
}
App.on('release.update_status', self.global_events['release.update_status']);
},
destroy: function(){
@@ -52,10 +78,9 @@ var Movie = new Class({
self.list.checkIfEmpty();
// Remove events
App.removeEvents('movie.update.'+self.data.id);
['movie.busy', 'movie.searcher.started'].each(function(listener){
App.removeEvents(listener+'.'+self.data.id);
})
Object.each(self.global_events, function(handle, listener){
App.off(listener, handle);
});
},
busy: function(set_busy, timeout){
@@ -179,21 +204,7 @@ var Movie = new Class({
});
// Add releases
if(self.data.releases)
self.data.releases.each(function(release){
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}
});
self.updateReleases();
Object.each(self.options.actions, function(action, key){
self.action[key.toLowerCase()] = action = new self.options.actions[key](self)
@@ -203,6 +214,26 @@ var Movie = new Class({
},
updateReleases: function(){
var self = this;
if(!self.data.releases || self.data.releases.length == 0) return;
self.data.releases.each(function(release){
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}
});
},
addQuality: function(quality_id){
var self = this;

View File

@@ -181,7 +181,7 @@ Block.Search.MovieItem = new Class({
if(categories.length == 0)
self.category_select.hide();
else {
self.category_select.show();
self.category_select.movie();
categories.each(function(category){
new Element('option', {
'value': category.data.id,

View File

@@ -2,7 +2,7 @@ from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.library import LibraryBase
from couchpotato.core.media._base.library.base import LibraryBase
from couchpotato.core.settings.model import Library, LibraryTitle, File
from string import ascii_letters
import time
@@ -16,27 +16,46 @@ class MovieLibraryPlugin(LibraryBase):
default_dict = {'titles': {}, 'files':{}}
def __init__(self):
addEvent('library.query', self.query)
addEvent('library.add.movie', self.add)
addEvent('library.update.movie', self.update)
addEvent('library.update.movie.release_date', self.updateReleaseDate)
def add(self, attrs = None, update_after = True):
if not attrs: attrs = {}
def query(self, library, first = True, include_year = True, **kwargs):
    """Build search title(s) from a movie library dict.

    Returns None for non-movie libraries. Otherwise returns the first
    title (or None when there are no titles) when `first` is set, else
    the full list; `include_year` appends the library year to each.
    """
    if library.get('type') != 'movie':
        return

    titles = []
    for entry in library['titles']:
        name = entry['title']
        if include_year:
            name = name + (' %s' % str(library['year']))
        titles.append(name)

    if first:
        if not titles:
            return None
        return titles[0]

    return titles
def add(self, attrs = {}, update_after = True):
# movies don't yet contain these, so lets make sure to set defaults
type = attrs.get('type', 'movie')
primary_provider = attrs.get('primary_provider', 'imdb')
db = get_session()
l = db.query(Library).filter_by(identifier = attrs.get('identifier')).first()
l = db.query(Library).filter_by(type = type, identifier = attrs.get('identifier')).first()
if not l:
status = fireEvent('status.get', 'needs_update', single = True)
l = Library(
type = type,
primary_provider = primary_provider,
year = attrs.get('year'),
identifier = attrs.get('identifier'),
plot = toUnicode(attrs.get('plot')),
tagline = toUnicode(attrs.get('tagline')),
status_id = status.get('id'),
info = {}
info = {},
parent = None
)
title = LibraryTitle(

View File

@@ -30,7 +30,6 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
addEvent('movie.searcher.try_next_release', self.tryNextRelease)
addEvent('movie.searcher.could_be_released', self.couldBeReleased)
addEvent('searcher.correct_release', self.correctRelease)
addEvent('searcher.get_search_title', self.getSearchTitle)
addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = {
'desc': 'Marks the snatched results as ignored and try the next best release',
@@ -145,10 +144,10 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
default_title = getTitle(movie['library'])
if not default_title:
log.error('No proper info found for movie, removing it from library to cause it from having more issues.')
fireEvent('movie.delete', movie['id'], single = True)
fireEvent('media.delete', movie['id'], single = True)
return
fireEvent('notify.frontend', type = 'movie.searcher.started.%s' % movie['id'], data = True, message = 'Searching for "%s"' % default_title)
fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'id': movie['id']}, message = 'Searching for "%s"' % default_title)
ret = False
@@ -192,7 +191,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
else:
log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title))
fireEvent('movie.restatus', movie['id'])
fireEvent('media.restatus', movie['id'])
break
# Break if CP wants to shut down
@@ -202,7 +201,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
if len(too_early_to_search) > 0:
log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
fireEvent('notify.frontend', type = 'movie.searcher.ended.%s' % movie['id'], data = True)
fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'id': movie['id']})
return ret
@@ -210,7 +209,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
if media.get('type') != 'movie': return
media_title = fireEvent('searcher.get_search_title', media, single = True)
media_title = fireEvent('library.title', media['library'], single = True)
imdb_results = kwargs.get('imdb_results', False)
retention = Env.setting('retention', section = 'nzb')
@@ -284,6 +283,10 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
return True
else:
# Don't allow movies with years to far in the future
if year is not None and year > now_year + 1:
return False
# For movies before 1972
if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
return True
@@ -318,14 +321,14 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
'success': trynext
}
def tryNextRelease(self, movie_id, manual = False):
def tryNextRelease(self, media_id, manual = False):
snatched_status, done_status, ignored_status = fireEvent('status.get', ['snatched', 'done', 'ignored'], single = True)
try:
db = get_session()
rels = db.query(Release) \
.filter_by(movie_id = movie_id) \
.filter_by(media_id = media_id) \
.filter(Release.status_id.in_([snatched_status.get('id'), done_status.get('id')])) \
.all()
@@ -333,7 +336,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
rel.status_id = ignored_status.get('id')
db.commit()
movie_dict = fireEvent('movie.get', movie_id, single = True)
movie_dict = fireEvent('media.get', media_id = media_id, single = True)
log.info('Trying next release for: %s', getTitle(movie_dict['library']))
fireEvent('movie.searcher.single', movie_dict, manual = manual)
@@ -343,9 +346,5 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
log.error('Failed searching for next release: %s', traceback.format_exc())
return False
def getSearchTitle(self, media):
if media['type'] == 'movie':
return getTitle(media['library'])
class SearchSetupError(Exception):
    """Raised when the movie searcher cannot be set up properly."""
    pass

View File

View File

@@ -0,0 +1,6 @@
from .main import ShowBase

# Plugin entry point: CouchPotato calls start() to instantiate the plugin.
def start():
    return ShowBase()

# No user-configurable settings for this plugin.
config = []

View File

@@ -0,0 +1,239 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from couchpotato.core.settings.model import Media
import time
log = CPLog(__name__)
class ShowBase(MediaBase):
    """Media handler for TV shows.

    Registers the 'show.add' API endpoint and event, and knows how to
    add a show — together with all of its seasons and episodes — to the
    database via the type-specific 'library.add.*' handlers.
    """

    _type = 'show'

    def __init__(self):
        super(ShowBase, self).__init__()

        addApiView('show.add', self.addView, docs = {
            'desc': 'Add new movie to the wanted list',
            'params': {
                'identifier': {'desc': 'IMDB id of the movie your want to add.'},
                'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
                'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
            }
        })

        addEvent('show.add', self.add)

    def addView(self, **kwargs):
        """API handler for 'show.add'; wraps add() in a success envelope."""
        add_dict = self.add(params = kwargs)

        return {
            'success': True if add_dict else False,
            'show': add_dict,
        }

    def add(self, params = {}, force_readd = True, search_after = True, update_library = False, status_id = None):
        """Add a show plus all of its seasons/episodes.

        NOTE(review): mutable default `params = {}` is shared across
        calls — safe only while nothing mutates it.

        params
        {'category_id': u'-1',
         'identifier': u'tt1519931',
         'profile_id': u'12',
         'thetvdb_id': u'158661',
         'title': u'Haven'}
        """
        log.debug("show.add")

        # Add show parent to db first; need to update library so maps will be in place (if any)
        parent = self.addToDatabase(params = params, update_library = True, type = 'show')

        # TODO: add by airdate

        # Add by Season/Episode numbers
        self.addBySeasonEpisode(parent,
            params = params,
            force_readd = force_readd,
            search_after = search_after,
            update_library = update_library,
            status_id = status_id
        )

    def addBySeasonEpisode(self, parent, params = {}, force_readd = True, search_after = True, update_library = False, status_id = None):
        """Walk the tvdb season/episode structure and add every season
        and episode to the database under `parent` (the show dict
        returned by addToDatabase). Returns `parent`.
        """
        # NOTE(review): reads params['id'] while addToDatabase reads
        # params['identifier'] — confirm callers supply both keys.
        identifier = params.get('id')

        # 'tvdb' will always be the master for our purpose. All mapped data can be mapped
        # to another source for downloading, but it will always be remapped back to tvdb numbering
        # when renamed so media can be used in media players that use tvdb for info provider
        #
        # This currently means the episode must actually exist in tvdb in order to be found but
        # the numbering can be different

        #master = 'tvdb'
        #destination = 'scene'
        #destination = 'anidb'
        #destination = 'rage'
        #destination = 'trakt'

        # TODO: auto mode. if anime exists use it. if scene exists use it else use tvdb
        # XXX: We should abort adding show, etc if either tvdb or xem is down or we will have incorrent mappings
        # I think if tvdb gets error we wont have anydata anyway, but we must make sure XEM returns!!!!

        # Only the master should return results here; all other info providers should just return False
        # since we are just interested in the structure at this point.
        seasons = fireEvent('season.info', merge = True, identifier = identifier)
        if seasons is not None:
            for season in seasons:
                # Make sure we are only dealing with 'tvdb' responses at this point
                if season.get('primary_provider', None) != 'thetvdb':
                    continue

                season_id = season.get('id', None)
                if season_id is None: continue

                season_params = {'season_identifier': season_id}
                # Calling all info providers; merge your info now for individual season
                single_season = fireEvent('season.info', merge = True, identifier = identifier, params = season_params)
                single_season['category_id'] = params.get('category_id')
                single_season['profile_id'] = params.get('profile_id')
                single_season['title'] = single_season.get('original_title', None)
                single_season['identifier'] = season_id
                single_season['parent_identifier'] = identifier
                log.info("Adding Season %s" % season_id)
                s = self.addToDatabase(params = single_season, type = "season")

                episode_params = {'season_identifier': season_id}
                episodes = fireEvent('episode.info', merge = True, identifier = identifier, params = episode_params)
                if episodes is not None:
                    for episode in episodes:
                        # Make sure we are only dealing with 'tvdb' responses at this point
                        if episode.get('primary_provider', None) != 'thetvdb':
                            continue

                        episode_id = episode.get('id', None)
                        if episode_id is None: continue

                        # Skip episodes without a parseable episode number.
                        try:
                            episode_number = int(episode.get('episodenumber', None))
                        except (ValueError, TypeError):
                            continue
                        # Absolute numbering is optional (mainly anime).
                        try:
                            absolute_number = int(episode.get('absolute_number', None))
                        except (ValueError, TypeError):
                            absolute_number = None

                        episode_params = {'season_identifier': season_id,
                                          'episode_identifier': episode_id,
                                          'episode': episode_number}
                        if absolute_number:
                            episode_params['absolute'] = absolute_number

                        # Calling all info providers; merge your info now for individual episode
                        single_episode = fireEvent('episode.info', merge = True, identifier = identifier, params = episode_params)
                        single_episode['category_id'] = params.get('category_id')
                        single_episode['profile_id'] = params.get('profile_id')
                        single_episode['title'] = single_episode.get('original_title', None)
                        single_episode['identifier'] = episode_id
                        single_episode['parent_identifier'] = single_season['identifier']
                        log.info("Adding [%sx%s] %s - %s" % (season_id,
                                                            episode_number,
                                                            params['title'],
                                                            single_episode.get('original_title', '')))
                        e = self.addToDatabase(params = single_episode, type = "episode")

        # Start searching now that all the media has been added
        if search_after:
            onComplete = self.createOnComplete(parent['id'])
            onComplete()

        return parent

    def addToDatabase(self, params = {}, type = "show", force_readd = True, search_after = False, update_library = False, status_id = None):
        """Insert (or re-add) a single show/season/episode Media row.

        Returns the media dict on success, False when no identifier was
        given or the library row could not be created.
        """
        log.debug("show.addToDatabase")

        if not params.get('identifier'):
            msg = 'Can\'t add show without imdb identifier.'
            log.error(msg)
            fireEvent('notify.frontend', type = 'show.is_tvshow', message = msg)
            return False
        #else:
            #try:
                #is_show = fireEvent('movie.is_show', identifier = params.get('identifier'), single = True)
                #if not is_show:
                    #msg = 'Can\'t add show, seems to be a TV show.'
                    #log.error(msg)
                    #fireEvent('notify.frontend', type = 'show.is_tvshow', message = msg)
                    #return False
            #except:
                #pass

        # Create or fetch the library row through the type-specific handler.
        library = fireEvent('library.add.%s' % type, single = True, attrs = params, update_after = update_library)
        if not library:
            return False

        # Status
        status_active, snatched_status, ignored_status, done_status, downloaded_status = \
            fireEvent('status.get', ['active', 'snatched', 'ignored', 'done', 'downloaded'], single = True)

        default_profile = fireEvent('profile.default', single = True)
        cat_id = params.get('category_id', None)

        db = get_session()

        m = db.query(Media).filter_by(library_id = library.get('id')).first()
        added = True
        do_search = False
        if not m:
            # New media row: the async library update triggers the search
            # (via on_complete) once the info has been fetched.
            m = Media(
                type = type,
                library_id = library.get('id'),
                profile_id = params.get('profile_id', default_profile.get('id')),
                status_id = status_id if status_id else status_active.get('id'),
                category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None,
            )
            db.add(m)
            db.commit()

            onComplete = None
            if search_after:
                onComplete = self.createOnComplete(m.id)

            fireEventAsync('library.update.%s' % type, params.get('identifier'), default_title = params.get('title', ''), on_complete = onComplete)
            search_after = False
        elif force_readd:
            # Clean snatched history
            for release in m.releases:
                if release.status_id in [downloaded_status.get('id'), snatched_status.get('id'), done_status.get('id')]:
                    if params.get('ignore_previous', False):
                        release.status_id = ignored_status.get('id')
                    else:
                        fireEvent('release.delete', release.id, single = True)

            m.profile_id = params.get('profile_id', default_profile.get('id'))
            m.category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None
        else:
            log.debug('Show already exists, not updating: %s', params)
            added = False

        if force_readd:
            m.status_id = status_id if status_id else status_active.get('id')
            m.last_edit = int(time.time())
            do_search = True

        db.commit()

        # Remove releases
        # NOTE(review): `is` compares identity, not equality — only works
        # for small ints in CPython; `==` is likely what was meant.
        available_status = fireEvent('status.get', 'available', single = True)
        for rel in m.releases:
            if rel.status_id is available_status.get('id'):
                db.delete(rel)
                db.commit()

        show_dict = m.to_dict(self.default_dict)

        # Search now only for re-adds; fresh rows search after the async
        # library update completes (see above).
        if do_search and search_after:
            onComplete = self.createOnComplete(m.id)
            onComplete()

        if added:
            fireEvent('notify.frontend', type = 'show.added', data = show_dict, message = 'Successfully added "%s" to your wanted list.' % params.get('title', ''))

        db.expire_all()
        return show_dict

View File

@@ -0,0 +1,232 @@
/**
 * Search result item for a TV show (MooTools class).
 *
 * Renders a single show in the search results and provides the options
 * panel (title/profile/category selects) that submits to the
 * 'show.add' API.
 */
Block.Search.ShowItem = new Class({

	Implements: [Options, Events],

	initialize: function(info, options){
		var self = this;

		self.setOptions(options);

		self.info = info;
		self.alternative_titles = [];

		self.create();
	},

	// Build the result row: poster thumbnail, clickable data column and
	// the (initially empty) options panel.
	create: function(){
		var self = this,
			info = self.info;

		self.el = new Element('div.media_result', {
			'id': info.id
		}).adopt(
			self.thumbnail = info.images && info.images.poster.length > 0 ? new Element('img.thumbnail', {
				'src': info.images.poster[0],
				'height': null,
				'width': null
			}) : null,
			self.options_el = new Element('div.options.inlay'),
			self.data_container = new Element('div.data', {
				'events': {
					'click': self.showOptions.bind(self)
				}
			}).adopt(
				self.info_container = new Element('div.info').adopt(
					new Element('h2').adopt(
						self.title = new Element('span.title', {
							'text': info.titles && info.titles.length > 0 ? info.titles[0] : 'Unknown'
						}),
						self.year = info.year ? new Element('span.year', {
							'text': info.year
						}) : null
					)
				)
			)
		)

		// Register every known title as a selectable alternative.
		if(info.titles)
			info.titles.each(function(title){
				self.alternativeTitle({
					'title': title
				});
			})
	},

	alternativeTitle: function(alternative){
		var self = this;

		self.alternative_titles.include(alternative);
	},

	// Preferred display title; falls back to 'Unknown' when info is empty.
	getTitle: function(){
		var self = this;
		try {
			return self.info.original_title ? self.info.original_title : self.info.titles[0];
		}
		catch(e){
			return 'Unknown';
		}
	},

	get: function(key){
		return this.info[key]
	},

	showOptions: function(){
		var self = this;

		self.createOptions();

		self.data_container.addClass('open');
		self.el.addEvent('outerClick', self.closeOptions.bind(self))
	},

	closeOptions: function(){
		var self = this;

		self.data_container.removeClass('open');
		self.el.removeEvents('outerClick')
	},

	// Submit the selected title/profile/category to the 'show.add' API.
	add: function(e){
		var self = this;

		if(e)
			(e).preventDefault();

		self.loadingMask();

		Api.request('show.add', {
			'data': {
				'identifier': self.info.id,
				'id': self.info.id,
				'type': self.info.type,
				'primary_provider': self.info.primary_provider,
				'title': self.title_select.get('value'),
				'profile_id': self.profile_select.get('value'),
				'category_id': self.category_select.get('value')
			},
			'onComplete': function(json){
				self.options_el.empty();
				self.options_el.adopt(
					new Element('div.message', {
						'text': json.added ? 'Show successfully added.' : 'Show didn\'t add properly. Check logs'
					})
				);
				self.mask.fade('out');

				self.fireEvent('added');
			},
			'onFailure': function(){
				self.options_el.empty();
				self.options_el.adopt(
					new Element('div.message', {
						'text': 'Something went wrong, check the logs for more info.'
					})
				);
				self.mask.fade('out');
			}
		});
	},

	// Lazily build the options panel the first time it is shown.
	createOptions: function(){
		var self = this,
			info = self.info;

		if(!self.options_el.hasClass('set')){

			if(self.info.in_library){
				var in_library = [];
				self.info.in_library.releases.each(function(release){
					in_library.include(release.quality.label)
				});
			}

			self.options_el.grab(
				new Element('div', {
					'class': self.info.in_wanted && self.info.in_wanted.profile_id || in_library ? 'in_library_wanted' : ''
				}).adopt(
					self.info.in_wanted && self.info.in_wanted.profile_id ? new Element('span.in_wanted', {
						'text': 'Already in wanted list: ' + Quality.getProfile(self.info.in_wanted.profile_id).get('label')
					}) : (in_library ? new Element('span.in_library', {
						'text': 'Already in library: ' + in_library.join(', ')
					}) : null),
					self.title_select = new Element('select', {
						'name': 'title'
					}),
					self.profile_select = new Element('select', {
						'name': 'profile'
					}),
					self.category_select = new Element('select', {
						'name': 'category'
					}).grab(
						new Element('option', {'value': -1, 'text': 'None'})
					),
					self.add_button = new Element('a.button', {
						'text': 'Add',
						'events': {
							'click': self.add.bind(self)
						}
					})
				)
			);

			Array.each(self.alternative_titles, function(alt){
				new Element('option', {
					'text': alt.title
				}).inject(self.title_select)
			})

			// Fill categories
			var categories = CategoryList.getAll();

			if(categories.length == 0)
				self.category_select.hide();
			else {
				self.category_select.show();
				categories.each(function(category){
					new Element('option', {
						'value': category.data.id,
						'text': category.data.label
					}).inject(self.category_select);
				});
			}

			// Fill profiles
			var profiles = Quality.getActiveProfiles();
			if(profiles.length == 1)
				self.profile_select.hide();

			profiles.each(function(profile){
				new Element('option', {
					'value': profile.id ? profile.id : profile.data.id,
					'text': profile.label ? profile.label : profile.data.label
				}).inject(self.profile_select)
			});

			self.options_el.addClass('set');

			// Auto-add when there is nothing for the user to choose and
			// the show isn't already wanted or in the library.
			if(categories.length == 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
					!(self.info.in_wanted && self.info.in_wanted.profile_id || in_library))
				self.add();

		}
	},

	// Dim the row with a spinner while the API request is in flight.
	loadingMask: function(){
		var self = this;

		self.mask = new Element('div.mask').inject(self.el).fade('hide')

		createSpinner(self.mask)

		self.mask.fade('in')
	},

	toElement: function(){
		return this.el
	}

});

View File

@@ -0,0 +1,6 @@
from .main import EpisodeLibraryPlugin

# Plugin entry point: CouchPotato calls start() to instantiate the plugin.
def start():
    return EpisodeLibraryPlugin()

# No user-configurable settings for this plugin.
config = []

View File

@@ -0,0 +1,266 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.settings.model import EpisodeLibrary, SeasonLibrary, LibraryTitle, File
from couchpotato.core.media._base.library.base import LibraryBase
from couchpotato.core.helpers.variable import tryInt
from string import ascii_letters
import time
import traceback
log = CPLog(__name__)
class EpisodeLibraryPlugin(LibraryBase):
    """Library handler for single TV episodes.

    Registers 'library.*' event handlers for episode libraries: search
    query building, season/episode identifier resolution, and database
    add/update.
    """

    # Relations included when serializing a library row via to_dict().
    default_dict = {'titles': {}, 'files':{}}

    def __init__(self):
        addEvent('library.query', self.query)
        addEvent('library.identifier', self.identifier)

        addEvent('library.add.episode', self.add)
        addEvent('library.update.episode', self.update)
        addEvent('library.update.episode_release_date', self.updateReleaseDate)

    def query(self, library, first = True, condense = True, include_identifier = True, **kwargs):
        """Build search title(s) for an episode by asking the parent
        season for its titles and appending 'E<nn>'.
        """
        # NOTE(review): `library is list` compares against the type
        # object and is always False — likely meant isinstance(library, list).
        if library is list or library.get('type') != 'episode':
            return

        # Get the titles of the season
        if not library.get('related_libraries', {}).get('season', []):
            log.warning('Invalid library, unable to determine title.')
            return

        titles = fireEvent(
            'library.query',
            library['related_libraries']['season'][0],

            first=False,
            include_identifier=include_identifier,
            condense=condense,

            single=True
        )

        identifier = fireEvent('library.identifier', library, single = True)

        # Add episode identifier to titles
        # (season titles already end in ' S<nn>', so this yields 'S<nn>E<nn>')
        if include_identifier and identifier.get('episode'):
            titles = [title + ('E%02d' % identifier['episode']) for title in titles]

        if first:
            return titles[0] if titles else None

        return titles

    def identifier(self, library):
        """Resolve the {'season': int, 'episode': int} identifier,
        preferring scene mappings over the plain tvdb numbering.
        """
        if library.get('type') != 'episode':
            return

        identifier = {
            'season': None,
            'episode': None
        }

        scene_map = library['info'].get('map_episode', {}).get('scene')

        if scene_map:
            # Use scene mappings if they are available
            identifier['season'] = scene_map.get('season')
            identifier['episode'] = scene_map.get('episode')
        else:
            # Fallback to normal season/episode numbers
            identifier['season'] = library.get('season_number')
            identifier['episode'] = library.get('episode_number')

        # Cast identifiers to integers
        # TODO this will need changing to support identifiers with trailing 'a', 'b' characters
        identifier['season'] = tryInt(identifier['season'], None)
        identifier['episode'] = tryInt(identifier['episode'], None)

        return identifier

    def add(self, attrs = {}, update_after = True):
        """Get-or-create the EpisodeLibrary row (linked to its parent
        SeasonLibrary) and optionally trigger a library update.
        Returns the library dict.
        """
        type = attrs.get('type', 'episode')
        primary_provider = attrs.get('primary_provider', 'thetvdb')

        db = get_session()

        parent_identifier = attrs.get('parent_identifier', None)
        parent = None
        if parent_identifier:
            parent = db.query(SeasonLibrary).filter_by(primary_provider = primary_provider, identifier = attrs.get('parent_identifier')).first()

        l = db.query(EpisodeLibrary).filter_by(type = type, identifier = attrs.get('identifier')).first()
        if not l:
            status = fireEvent('status.get', 'needs_update', single = True)
            l = EpisodeLibrary(
                type = type,
                primary_provider = primary_provider,
                year = attrs.get('year'),
                identifier = attrs.get('identifier'),
                plot = toUnicode(attrs.get('plot')),
                tagline = toUnicode(attrs.get('tagline')),
                status_id = status.get('id'),
                info = {},
                parent = parent,
                season_number = tryInt(attrs.get('seasonnumber', None)),
                episode_number = tryInt(attrs.get('episodenumber', None)),
                absolute_number = tryInt(attrs.get('absolute_number', None))
            )

            title = LibraryTitle(
                title = toUnicode(attrs.get('title')),
                simple_title = self.simplifyTitle(attrs.get('title')),
            )

            l.titles.append(title)

            db.add(l)
            db.commit()

        # Update library info
        if update_after is not False:
            handle = fireEventAsync if update_after is 'async' else fireEvent
            handle('library.update.episode', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))

        library_dict = l.to_dict(self.default_dict)

        db.expire_all()
        return library_dict

    def update(self, identifier, default_title = '', force = False):
        """Refresh the episode's info, titles and poster from the info
        providers; returns the (updated) library dict, or False when no
        info could be fetched.
        """
        if self.shuttingDown():
            return

        db = get_session()
        library = db.query(EpisodeLibrary).filter_by(identifier = identifier).first()
        done_status = fireEvent('status.get', 'done', single = True)

        # NOTE(review): guard looks incomplete — a missing row is still
        # dereferenced below (library.parent) and would raise.
        if library:
            library_dict = library.to_dict(self.default_dict)

        do_update = True

        parent_identifier = None
        if library.parent is not None:
            parent_identifier = library.parent.identifier

        if library.status_id == done_status.get('id') and not force:
            do_update = False

        episode_params = {'season_identifier': parent_identifier,
                          'episode_identifier': identifier,
                          'episode': library.episode_number,
                          'absolute': library.absolute_number,}
        info = fireEvent('episode.info', merge = True, params = episode_params)

        # Don't need those here
        try: del info['in_wanted']
        except: pass
        try: del info['in_library']
        except: pass

        if not info or len(info) == 0:
            log.error('Could not update, no movie info to work with: %s', identifier)
            return False

        # Main info
        if do_update:
            library.plot = toUnicode(info.get('plot', ''))
            library.tagline = toUnicode(info.get('tagline', ''))
            library.year = info.get('year', 0)
            library.status_id = done_status.get('id')
            library.season_number = tryInt(info.get('seasonnumber', None))
            library.episode_number = tryInt(info.get('episodenumber', None))
            library.absolute_number = tryInt(info.get('absolute_number', None))
            # Fall back to "now" when the provider has no update stamp.
            try:
                library.last_updated = int(info.get('lastupdated'))
            except:
                library.last_updated = int(time.time())
            library.info.update(info)

            db.commit()

            # Titles: replace the stored set with the freshly fetched one.
            [db.delete(title) for title in library.titles]
            db.commit()

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)
            counter = 0
            for title in titles:
                if not title:
                    continue
                title = toUnicode(title)
                t = LibraryTitle(
                    title = title,
                    simple_title = self.simplifyTitle(title),
                    default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
                )
                library.titles.append(t)
                counter += 1

            db.commit()

            # Files: download the first usable poster and attach it.
            images = info.get('images', [])
            for image_type in ['poster']:
                for image in images.get(image_type, []):
                    if not isinstance(image, (str, unicode)):
                        continue
                    file_path = fireEvent('file.download', url = image, single = True)
                    if file_path:
                        file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
                        try:
                            file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
                            library.files.append(file_obj)
                            db.commit()
                            break
                        except:
                            log.debug('Failed to attach to library: %s', traceback.format_exc())

        library_dict = library.to_dict(self.default_dict)

        db.expire_all()
        return library_dict

    def updateReleaseDate(self, identifier):
        '''XXX: Not sure what this is for yet in relation to an episode'''
        pass
        #db = get_session()
        #library = db.query(EpisodeLibrary).filter_by(identifier = identifier).first()

        #if not library.info:
            #library_dict = self.update(identifier, force = True)
            #dates = library_dict.get('info', {}).get('release_date')
        #else:
            #dates = library.info.get('release_date')

        #if dates and dates.get('expires', 0) < time.time() or not dates:
            #dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
            #library.info.update({'release_date': dates })
            #db.commit()

        #db.expire_all()
        #return dates

    #TODO: Add to base class
    def simplifyTitle(self, title):
        """Lower/strip a title for fuzzy matching; keeps a '#' marker
        for titles that don't start with a letter, drops a leading 'the '.
        """
        title = toUnicode(title)

        nr_prefix = '' if title[0] in ascii_letters else '#'
        title = simplifyString(title)

        for prefix in ['the ']:
            if prefix == title[:len(prefix)]:
                title = title[len(prefix):]
                break

        return nr_prefix + title

View File

@@ -0,0 +1,6 @@
from .main import SeasonLibraryPlugin

# Plugin entry point: CouchPotato calls start() to instantiate the plugin.
def start():
    return SeasonLibraryPlugin()

# No user-configurable settings for this plugin.
config = []

View File

@@ -0,0 +1,242 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.settings.model import SeasonLibrary, ShowLibrary, LibraryTitle, File
from couchpotato.core.media._base.library.base import LibraryBase
from couchpotato.core.helpers.variable import tryInt
from string import ascii_letters
import time
import traceback
log = CPLog(__name__)
class SeasonLibraryPlugin(LibraryBase):
    """Library handler for TV show seasons.

    Registers 'library.*' event handlers for season libraries: search
    query building, season identifier resolution, and database
    add/update.
    """

    # Relations included when serializing a library row via to_dict().
    default_dict = {'titles': {}, 'files':{}}

    def __init__(self):
        addEvent('library.query', self.query)
        addEvent('library.identifier', self.identifier)

        addEvent('library.add.season', self.add)
        addEvent('library.update.season', self.update)
        addEvent('library.update.season_release_date', self.updateReleaseDate)

    def query(self, library, first = True, condense = True, include_identifier = True, **kwargs):
        """Build search title(s) for a season by asking the parent show
        for its titles, merging scene name maps, and appending ' S<nn>'.
        """
        # NOTE(review): `library is list` compares against the type
        # object and is always False — likely meant isinstance(library, list).
        if library is list or library.get('type') != 'season':
            return

        # Get the titles of the show
        if not library.get('related_libraries', {}).get('show', []):
            log.warning('Invalid library, unable to determine title.')
            return

        titles = fireEvent(
            'library.query',
            library['related_libraries']['show'][0],

            first=False,
            condense=condense,

            single=True
        )

        # Add season map_names if they exist
        if 'map_names' in library['info']:
            season_names = library['info']['map_names'].get(str(library['season_number']), {})

            # Add titles from all locations
            # TODO only add name maps from a specific location
            for location, names in season_names.items():
                titles += [name for name in names if name and name not in titles]

        identifier = fireEvent('library.identifier', library, single = True)

        # Add season identifier to titles
        if include_identifier and identifier.get('season') is not None:
            titles = [title + (' S%02d' % identifier['season']) for title in titles]

        if first:
            return titles[0] if titles else None

        return titles

    def identifier(self, library):
        """Return {'season': int} for a season library (None otherwise)."""
        if library.get('type') != 'season':
            return

        return {
            'season': tryInt(library['season_number'], None)
        }

    def add(self, attrs = {}, update_after = True):
        """Get-or-create the SeasonLibrary row (linked to its parent
        ShowLibrary) and optionally trigger a library update.
        Returns the library dict.
        """
        type = attrs.get('type', 'season')
        primary_provider = attrs.get('primary_provider', 'thetvdb')

        db = get_session()

        parent_identifier = attrs.get('parent_identifier', None)
        parent = None
        if parent_identifier:
            parent = db.query(ShowLibrary).filter_by(primary_provider = primary_provider, identifier = attrs.get('parent_identifier')).first()

        l = db.query(SeasonLibrary).filter_by(type = type, identifier = attrs.get('identifier')).first()
        if not l:
            status = fireEvent('status.get', 'needs_update', single = True)
            l = SeasonLibrary(
                type = type,
                primary_provider = primary_provider,
                year = attrs.get('year'),
                identifier = attrs.get('identifier'),
                plot = toUnicode(attrs.get('plot')),
                tagline = toUnicode(attrs.get('tagline')),
                status_id = status.get('id'),
                info = {},
                parent = parent,
            )

            title = LibraryTitle(
                title = toUnicode(attrs.get('title')),
                simple_title = self.simplifyTitle(attrs.get('title')),
            )

            l.titles.append(title)

            db.add(l)
            db.commit()

        # Update library info
        if update_after is not False:
            handle = fireEventAsync if update_after is 'async' else fireEvent
            handle('library.update.season', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))

        library_dict = l.to_dict(self.default_dict)

        db.expire_all()
        return library_dict

    def update(self, identifier, default_title = '', force = False):
        """Refresh the season's info, titles and poster from the info
        providers; returns the (updated) library dict, or False when no
        info could be fetched.
        """
        if self.shuttingDown():
            return

        db = get_session()
        library = db.query(SeasonLibrary).filter_by(identifier = identifier).first()
        done_status = fireEvent('status.get', 'done', single = True)

        # NOTE(review): guard looks incomplete — a missing row is still
        # dereferenced below (library.parent) and would raise.
        if library:
            library_dict = library.to_dict(self.default_dict)

        do_update = True

        parent_identifier = None
        if library.parent is not None:
            parent_identifier = library.parent.identifier

        if library.status_id == done_status.get('id') and not force:
            do_update = False

        season_params = {'season_identifier': identifier}
        info = fireEvent('season.info', merge = True, identifier = parent_identifier, params = season_params)

        # Don't need those here
        try: del info['in_wanted']
        except: pass
        try: del info['in_library']
        except: pass

        if not info or len(info) == 0:
            log.error('Could not update, no movie info to work with: %s', identifier)
            return False

        # Main info
        if do_update:
            library.plot = toUnicode(info.get('plot', ''))
            library.tagline = toUnicode(info.get('tagline', ''))
            library.year = info.get('year', 0)
            library.status_id = done_status.get('id')
            library.season_number = tryInt(info.get('seasonnumber', None))
            library.info.update(info)

            db.commit()

            # Titles: replace the stored set with the freshly fetched one.
            [db.delete(title) for title in library.titles]
            db.commit()

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)
            counter = 0
            for title in titles:
                if not title:
                    continue
                title = toUnicode(title)
                t = LibraryTitle(
                    title = title,
                    simple_title = self.simplifyTitle(title),
                    # XXX: default was None; so added a quick hack since we don't really need titiles for seasons anyway
                    #default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
                    default = True,
                )
                library.titles.append(t)
                counter += 1

            db.commit()

            # Files: download the first usable poster and attach it.
            images = info.get('images', [])
            for image_type in ['poster']:
                for image in images.get(image_type, []):
                    if not isinstance(image, (str, unicode)):
                        continue
                    file_path = fireEvent('file.download', url = image, single = True)
                    if file_path:
                        file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
                        try:
                            file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
                            library.files.append(file_obj)
                            db.commit()
                            break
                        except:
                            log.debug('Failed to attach to library: %s', traceback.format_exc())

        library_dict = library.to_dict(self.default_dict)

        db.expire_all()
        return library_dict

    def updateReleaseDate(self, identifier):
        '''XXX: Not sure what this is for yet in relation to a tvshow'''
        pass
        #db = get_session()
        #library = db.query(SeasonLibrary).filter_by(identifier = identifier).first()

        #if not library.info:
            #library_dict = self.update(identifier, force = True)
            #dates = library_dict.get('info', {}).get('release_date')
        #else:
            #dates = library.info.get('release_date')

        #if dates and dates.get('expires', 0) < time.time() or not dates:
            #dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
            #library.info.update({'release_date': dates })
            #db.commit()

        #db.expire_all()
        #return dates

    #TODO: Add to base class
    def simplifyTitle(self, title):
        """Lower/strip a title for fuzzy matching; keeps a '#' marker
        for titles that don't start with a letter, drops a leading 'the '.
        """
        title = toUnicode(title)

        nr_prefix = '' if title[0] in ascii_letters else '#'
        title = simplifyString(title)

        for prefix in ['the ']:
            if prefix == title[:len(prefix)]:
                title = title[len(prefix):]
                break

        return nr_prefix + title

View File

@@ -0,0 +1,6 @@
from .main import ShowLibraryPlugin

# Plugin entry point: CouchPotato calls start() to instantiate the plugin.
def start():
    return ShowLibraryPlugin()

# No user-configurable settings for this plugin.
config = []

View File

@@ -0,0 +1,229 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.settings.model import ShowLibrary, LibraryTitle, File
from couchpotato.core.media._base.library.base import LibraryBase
from qcond.helpers import simplify
from qcond import QueryCondenser
from string import ascii_letters
import time
import traceback
log = CPLog(__name__)
class ShowLibraryPlugin(LibraryBase):
default_dict = {'titles': {}, 'files':{}}
    def __init__(self):
        # QueryCondenser (third-party 'qcond' package) reduces a list of
        # titles to a minimal distinct set of search queries.
        self.query_condenser = QueryCondenser()

        addEvent('library.query', self.query)

        addEvent('library.add.show', self.add)
        addEvent('library.update.show', self.update)
        addEvent('library.update.show_release_date', self.updateReleaseDate)
def query(self, library, first = True, condense = True, **kwargs):
if library is list or library.get('type') != 'show':
return
titles = [title['title'] for title in library['titles']]
if condense:
# Use QueryCondenser to build a list of optimal search titles
condensed_titles = self.query_condenser.distinct(titles)
if condensed_titles:
# Use condensed titles if we got a valid result
titles = condensed_titles
else:
# Fallback to simplifying titles
titles = [simplify(title) for title in titles]
if first:
return titles[0] if titles else None
return titles
def add(self, attrs = {}, update_after = True):
type = attrs.get('type', 'show')
primary_provider = attrs.get('primary_provider', 'thetvdb')
db = get_session()
l = db.query(ShowLibrary).filter_by(type = type, identifier = attrs.get('identifier')).first()
if not l:
status = fireEvent('status.get', 'needs_update', single = True)
l = ShowLibrary(
type = type,
primary_provider = primary_provider,
year = attrs.get('year'),
identifier = attrs.get('identifier'),
plot = toUnicode(attrs.get('plot')),
tagline = toUnicode(attrs.get('tagline')),
status_id = status.get('id'),
info = {},
parent = None,
)
title = LibraryTitle(
title = toUnicode(attrs.get('title')),
simple_title = self.simplifyTitle(attrs.get('title')),
)
l.titles.append(title)
db.add(l)
db.commit()
# Update library info
if update_after is not False:
handle = fireEventAsync if update_after is 'async' else fireEvent
handle('library.update.show', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))
library_dict = l.to_dict(self.default_dict)
db.expire_all()
return library_dict
def update(self, identifier, default_title = '', force = False):
if self.shuttingDown():
return
db = get_session()
library = db.query(ShowLibrary).filter_by(identifier = identifier).first()
done_status = fireEvent('status.get', 'done', single = True)
if library:
library_dict = library.to_dict(self.default_dict)
do_update = True
info = fireEvent('show.info', merge = True, identifier = identifier)
# Don't need those here
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
if not info or len(info) == 0:
log.error('Could not update, no show info to work with: %s', identifier)
return False
# Main info
if do_update:
library.plot = toUnicode(info.get('plot', ''))
library.tagline = toUnicode(info.get('tagline', ''))
library.year = info.get('year', 0)
library.status_id = done_status.get('id')
library.show_status = toUnicode(info.get('status', '').lower())
library.airs_time = info.get('airs_time', None)
# Bits
days_of_week_map = {
u'Monday': 1,
u'Tuesday': 2,
u'Wednesday': 4,
u'Thursday': 8,
u'Friday': 16,
u'Saturday': 32,
u'Sunday': 64,
u'Daily': 127,
}
try:
library.airs_dayofweek = days_of_week_map.get(info.get('airs_dayofweek'))
except:
library.airs_dayofweek = 0
try:
library.last_updated = int(info.get('lastupdated'))
except:
library.last_updated = int(time.time())
library.info.update(info)
db.commit()
# Titles
[db.delete(title) for title in library.titles]
db.commit()
titles = info.get('titles', [])
log.debug('Adding titles: %s', titles)
counter = 0
for title in titles:
if not title:
continue
title = toUnicode(title)
t = LibraryTitle(
title = title,
simple_title = self.simplifyTitle(title),
default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
)
library.titles.append(t)
counter += 1
db.commit()
# Files
images = info.get('images', [])
for image_type in ['poster']:
for image in images.get(image_type, []):
if not isinstance(image, (str, unicode)):
continue
file_path = fireEvent('file.download', url = image, single = True)
if file_path:
file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
try:
file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
library.files.append(file_obj)
db.commit()
break
except:
log.debug('Failed to attach to library: %s', traceback.format_exc())
library_dict = library.to_dict(self.default_dict)
db.expire_all()
return library_dict
def updateReleaseDate(self, identifier):
'''XXX: Not sure what this is for yet in relation to a show'''
pass
#db = get_session()
#library = db.query(ShowLibrary).filter_by(identifier = identifier).first()
#if not library.info:
#library_dict = self.update(identifier, force = True)
#dates = library_dict.get('info', {}).get('release_date')
#else:
#dates = library.info.get('release_date')
#if dates and dates.get('expires', 0) < time.time() or not dates:
#dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
#library.info.update({'release_date': dates })
#db.commit()
#db.expire_all()
#return dates
#TODO: Add to base class
def simplifyTitle(self, title):
title = toUnicode(title)
nr_prefix = '' if title[0] in ascii_letters else '#'
title = simplifyString(title)
for prefix in ['the ']:
if prefix == title[:len(prefix)]:
title = title[len(prefix):]
break
return nr_prefix + title

View File

@@ -0,0 +1,6 @@
from .main import ShowMatcher
def start():
return ShowMatcher()
config = []

View File

@@ -0,0 +1,127 @@
from couchpotato import CPLog
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import dictIsSubset, tryInt, toIterable
from couchpotato.core.media._base.matcher.base import MatcherBase
from couchpotato.core.providers.base import MultiProvider
log = CPLog(__name__)
class ShowMatcher(MultiProvider):
    """Provider wrapper that bundles the season and episode matchers."""

    def getTypes(self):
        # Expose both concrete matcher implementations.
        matcher_types = [Season, Episode]
        return matcher_types
class Base(MatcherBase):
    """Shared release-matching logic for season/episode media.

    Subclasses set `type` and implement `correctIdentifier` to compare the
    release's parsed identifier against the wanted media.
    """

    # TODO come back to this later, think this could be handled better, this is starting to get out of hand....
    # Maps internal quality identifiers to the resolution/source tags the
    # name parser may produce for them.
    quality_map = {
        'bluray_1080p': {'resolution': ['1080p'], 'source': ['bluray']},
        'bluray_720p': {'resolution': ['720p'], 'source': ['bluray']},

        'bdrip_1080p': {'resolution': ['1080p'], 'source': ['BDRip']},
        'bdrip_720p': {'resolution': ['720p'], 'source': ['BDRip']},

        'brrip_1080p': {'resolution': ['1080p'], 'source': ['BRRip']},
        'brrip_720p': {'resolution': ['720p'], 'source': ['BRRip']},

        'webdl_1080p': {'resolution': ['1080p'], 'source': ['webdl', ['web', 'dl']]},
        'webdl_720p': {'resolution': ['720p'], 'source': ['webdl', ['web', 'dl']]},
        'webdl_480p': {'resolution': ['480p'], 'source': ['webdl', ['web', 'dl']]},

        'hdtv_720p': {'resolution': ['720p'], 'source': ['hdtv']},
        'hdtv_sd': {'resolution': ['480p', None], 'source': ['hdtv']},
    }

    def __init__(self):
        super(Base, self).__init__()

        # Register the identifier check under the concrete type
        # ('season' or 'episode') provided by the subclass.
        addEvent('%s.matcher.correct_identifier' % self.type, self.correctIdentifier)

    def correct(self, chain, release, media, quality):
        """Return True when the parsed release chain matches the wanted
        media and quality, logging the reason for any rejection."""
        log.info("Checking if '%s' is valid", release['name'])
        log.info2('Release parsed as: %s', chain.info)

        if not fireEvent('matcher.correct_quality', chain, quality, self.quality_map, single = True):
            log.info('Wrong: %s, quality does not match', release['name'])
            return False

        if not fireEvent('%s.matcher.correct_identifier' % self.type, chain, media):
            log.info('Wrong: %s, identifier does not match', release['name'])
            return False

        if not fireEvent('matcher.correct_title', chain, media):
            log.info("Wrong: '%s', undetermined naming.", (' '.join(chain.info['show_name'])))
            return False

        return True

    def correctIdentifier(self, chain, media):
        # Must be implemented by the Season/Episode subclasses.
        raise NotImplementedError()

    def getChainIdentifier(self, chain):
        """Flatten the chain's parsed identifier and cast values to int.

        :return: dict of identifier values, or None when no identifier was
                 parsed or a key holds multiple values (multi-season or
                 multi-episode releases are unsupported).
        """
        if 'identifier' not in chain.info:
            return None

        identifier = self.flattenInfo(chain.info['identifier'])

        # Try cast values to integers
        for key, value in identifier.items():
            if isinstance(value, list):
                if len(value) <= 1:
                    value = value[0]
                else:
                    log.warning('Wrong: identifier contains multiple season or episode values, unsupported')
                    return None

            identifier[key] = tryInt(value, value)

        return identifier
class Episode(Base):
    """Matcher for single-episode releases."""

    type = 'episode'

    def correctIdentifier(self, chain, media):
        """Return True when the release's parsed season/episode identifier
        equals the identifier required by the wanted episode."""
        identifier = self.getChainIdentifier(chain)
        if not identifier:
            log.info2('Wrong: release identifier is not valid (unsupported or missing identifier)')
            return False

        # TODO - Parse episode ranges from identifier to determine if they are multi-part episodes
        if any([x in identifier for x in ['episode_from', 'episode_to']]):
            log.info2('Wrong: releases with identifier ranges are not supported yet')
            return False

        required = fireEvent('library.identifier', media['library'], single = True)

        # TODO - Support air by date episodes
        # TODO - Support episode parts

        if identifier != required:
            log.info2('Wrong: required identifier (%s) does not match release identifier (%s)', (required, identifier))
            return False

        return True
class Season(Base):
    """Matcher for full-season (season pack) releases."""

    type = 'season'

    def correctIdentifier(self, chain, media):
        """Return True when the release's parsed season identifier equals
        the identifier required by the wanted season."""
        release_identifier = self.getChainIdentifier(chain)

        if not release_identifier:
            log.info2('Wrong: release identifier is not valid (unsupported or missing identifier)')
            return False

        # TODO - Parse episode ranges from identifier to determine if they are season packs
        has_range = any(key in release_identifier for key in ('episode_from', 'episode_to'))
        if has_range:
            log.info2('Wrong: releases with identifier ranges are not supported yet')
            return False

        wanted_identifier = fireEvent('library.identifier', media['library'], single = True)

        if release_identifier == wanted_identifier:
            return True

        log.info2('Wrong: required identifier (%s) does not match release identifier (%s)', (wanted_identifier, release_identifier))
        return False

View File

@@ -0,0 +1,7 @@
from .main import ShowSearcher
import random
def start():
    """Plugin entry point used by the loader."""
    searcher = ShowSearcher()
    return searcher

# No user-configurable options for this plugin.
config = []

View File

@@ -0,0 +1,189 @@
from couchpotato import Env, get_session
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import getTitle, toIterable
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.main import SearchSetupError
from couchpotato.core.media.show._base import ShowBase
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Media
from qcond import QueryCondenser
from qcond.helpers import simplify
log = CPLog(__name__)
class ShowSearcher(Plugin):
    """Searcher for shows, seasons and episodes.

    A 'show' search fans out into one search per season; a 'season' search
    falls back to per-episode searches when no season pack is found.
    """

    # Media types this searcher handles; a '.searcher.single' event is
    # registered for each one.
    type = ['show', 'season', 'episode']

    in_progress = False

    def __init__(self):
        super(ShowSearcher, self).__init__()

        self.query_condenser = QueryCondenser()

        for type in toIterable(self.type):
            addEvent('%s.searcher.single' % type, self.single)

        addEvent('searcher.correct_release', self.correctRelease)

    def single(self, media, search_protocols = None, manual = False):
        """Search all providers for a single media item.

        :param media: media dict of type 'show', 'season' or 'episode'
        :param search_protocols: protocols to search; resolved when None
        :param manual: True when triggered by the user
        :return: True when a result was snatched (None for show fan-out)
        """
        show, season, episode = self.getLibraries(media['library'])

        db = get_session()

        # A show search only fans out into a search per season.
        if media['type'] == 'show':
            for library in season:
                # TODO ideally we shouldn't need to fetch the media for each season library here
                m = db.query(Media).filter_by(library_id = library['library_id']).first()

                fireEvent('season.searcher.single', m.to_dict(ShowBase.search_dict))

            return

        # Find out search type
        try:
            if not search_protocols:
                search_protocols = fireEvent('searcher.protocols', single = True)
        except SearchSetupError:
            return

        done_status, available_status, ignored_status, failed_status = fireEvent('status.get', ['done', 'available', 'ignored', 'failed'], single = True)

        if not media['profile'] or media['status_id'] == done_status.get('id'):
            log.debug('Episode doesn\'t have a profile or already done, assuming in manage tab.')
            return

        #pre_releases = fireEvent('quality.pre_releases', single = True)

        found_releases = []
        too_early_to_search = []

        default_title = fireEvent('library.query', media['library'], condense = False, single = True)
        if not default_title:
            log.error('No proper info found for episode, removing it from library to cause it from having more issues.')
            #fireEvent('episode.delete', episode['id'], single = True)
            return

        if not show or not season:
            log.error('Unable to find show or season library in database, missing required data for searching')
            return

        fireEvent('notify.frontend', type = 'show.searcher.started.%s' % media['id'], data = True, message = 'Searching for "%s"' % default_title)

        ret = False
        has_better_quality = None

        for quality_type in media['profile']['types']:
            # TODO check air date?
            #if not self.conf('always_search') and not self.couldBeReleased(quality_type['quality']['identifier'] in pre_releases, release_dates, movie['library']['year']):
            #    too_early_to_search.append(quality_type['quality']['identifier'])
            #    continue

            has_better_quality = 0

            # See if better quality is available
            for release in media['releases']:
                if release['quality']['order'] <= quality_type['quality']['order'] and release['status_id'] not in [available_status.get('id'), ignored_status.get('id'), failed_status.get('id')]:
                    has_better_quality += 1

            # Don't search for quality lower then already available.
            # BUGFIX: compare with == instead of identity (`is 0`), which is
            # only reliable for interned small ints in CPython.
            if has_better_quality == 0:

                log.info('Search for %s S%02d%s in %s', (
                    getTitle(show),
                    season['season_number'],
                    "E%02d" % episode['episode_number'] if episode and len(episode) == 1 else "",
                    quality_type['quality']['label'])
                )
                quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)

                results = fireEvent('searcher.search', search_protocols, media, quality, single = True)
                if len(results) == 0:
                    log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))

                # Check if movie isn't deleted while searching
                if not db.query(Media).filter_by(id = media.get('id')).first():
                    break

                # Add them to this movie releases list
                found_releases += fireEvent('release.create_from_search', results, media, quality_type, single = True)

                # Try find a valid result and download it
                if fireEvent('release.try_download_result', results, media, quality_type, manual, single = True):
                    ret = True

                # Remove releases that aren't found anymore
                for release in media.get('releases', []):
                    if release.get('status_id') == available_status.get('id') and release.get('identifier') not in found_releases:
                        fireEvent('release.delete', release.get('id'), single = True)

            else:
                log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title))
                fireEvent('media.restatus', media['id'])
                break

            # Break if CP wants to shut down
            if self.shuttingDown() or ret:
                break

        if len(too_early_to_search) > 0:
            log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
        elif media['type'] == 'season' and not ret and has_better_quality == 0:
            # If nothing was found, start searching for episodes individually
            log.info('No season pack found, starting individual episode search')

            for library in episode:
                # TODO ideally we shouldn't need to fetch the media for each episode library here
                m = db.query(Media).filter_by(library_id = library['library_id']).first()

                fireEvent('episode.searcher.single', m.to_dict(ShowBase.search_dict))

        fireEvent('notify.frontend', type = 'show.searcher.ended.%s' % media['id'], data = True)

        return ret

    def correctRelease(self, release = None, media = None, quality = None, **kwargs):
        """Score a release for season/episode media.

        :return: match weight when valid, False when wrong, None for media
                 types this searcher does not handle
        """
        if media.get('type') not in ['season', 'episode']: return

        retention = Env.setting('retention', section = 'nzb')

        # Only nzb results carry an age; torrents have seeders instead.
        if release.get('seeders') is None and 0 < retention < release.get('age', 0):
            log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (release['age'], retention, release['name']))
            return False

        # Check for required and ignored words
        if not fireEvent('searcher.correct_words', release['name'], media, single = True):
            return False

        # TODO Matching is quite costly, maybe we should be caching release matches somehow? (also look at caper optimizations)
        match = fireEvent('matcher.match', release, media, quality, single = True)
        if match:
            return match.weight

        return False

    def getLibraries(self, library):
        """Split a library's 'related_libraries' into (show, season, episode).

        Show always collapses to a single dict; season/episode collapse to a
        dict only when the subject is at (or below) that level, otherwise
        they remain lists.
        """
        if 'related_libraries' not in library:
            log.warning("'related_libraries' missing from media library, unable to continue searching")
            return None, None, None

        libraries = library['related_libraries']

        # Show always collapses as there can never be any multiples
        show = libraries.get('show', [])
        show = show[0] if len(show) else None

        # Season collapses if the subject is a season or episode
        season = libraries.get('season', [])
        if library['type'] in ['season', 'episode']:
            season = season[0] if len(season) else None

        # Episode collapses if the subject is a episode
        episode = libraries.get('episode', [])
        if library['type'] == 'episode':
            episode = episode[0] if len(episode) else None

        return show, season, episode

View File

@@ -17,7 +17,7 @@ class Notification(Provider):
listen_to = [
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message',
'core.message.important',
]
dont_listen_to = []

View File

@@ -16,14 +16,14 @@ class Boxcar(Notification):
try:
message = message.strip()
params = {
data = {
'email': self.conf('email'),
'notification[from_screen_name]': self.default_title,
'notification[message]': toUnicode(message),
'notification[from_remote_service_id]': int(time.time()),
}
self.urlopen(self.url, params = params)
self.urlopen(self.url, data = data)
except:
log.error('Check your email and added services on boxcar.io')
return False

View File

@@ -21,6 +21,12 @@ class CoreNotifier(Notification):
m_lock = None
listen_to = [
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message', 'core.message.important',
]
def __init__(self):
super(CoreNotifier, self).__init__()
@@ -121,7 +127,10 @@ class CoreNotifier(Notification):
for message in messages:
if message.get('time') > last_check:
fireEvent('core.message', message = message.get('message'), data = message)
message['sticky'] = True # Always sticky core messages
message_type = 'core.message.important' if message.get('important') else 'core.message'
fireEvent(message_type, message = message.get('message'), data = message)
if last_check < message.get('time'):
last_check = message.get('time')

View File

@@ -10,8 +10,8 @@ var NotificationBase = new Class({
// Listener
App.addEvent('unload', self.stopPoll.bind(self));
App.addEvent('reload', self.startInterval.bind(self, [true]));
App.addEvent('notification', self.notify.bind(self));
App.addEvent('message', self.showMessage.bind(self));
App.on('notification', self.notify.bind(self));
App.on('message', self.showMessage.bind(self));
// Add test buttons to settings page
App.addEvent('load', self.addTestButtons.bind(self));
@@ -50,9 +50,9 @@ var NotificationBase = new Class({
, 'top');
self.notifications.include(result);
if(result.data.important !== undefined && !result.read){
if((result.data.important !== undefined || result.data.sticky !== undefined) && !result.read){
var sticky = true
App.fireEvent('message', [result.message, sticky, result])
App.trigger('message', [result.message, sticky, result])
}
else if(!result.read){
self.setBadge(self.notifications.filter(function(n){ return !n.read}).length)
@@ -147,7 +147,7 @@ var NotificationBase = new Class({
// Process data
if(json){
Array.each(json.result, function(result){
App.fireEvent(result.type, result);
App.trigger(result.type, result);
if(result.message && result.read === undefined)
self.showMessage(result.message);
})

View File

@@ -4,6 +4,7 @@ from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from couchpotato.environment import Env
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid
import smtplib
import traceback
@@ -30,6 +31,8 @@ class Email(Notification):
message['Subject'] = self.default_title
message['From'] = from_address
message['To'] = to_address
message['Date'] = formatdate(localtime = 1)
message['Message-ID'] = make_msgid()
try:
# Open the SMTP connection, via SSL if requested

View File

@@ -26,7 +26,7 @@ class Prowl(Notification):
}
try:
self.urlopen(self.urls['api'], headers = headers, params = data, multipart = True, show_error = False)
self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False)
log.info('Prowl notifications sent.')
return True
except:

View File

@@ -29,7 +29,7 @@ class Pushalot(Notification):
}
try:
self.urlopen(self.urls['api'], headers = headers, params = data, multipart = True, show_error = False)
self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False)
return True
except:
log.error('PushAlot failed: %s', traceback.format_exc())

View File

@@ -0,0 +1,39 @@
from .main import Pushbullet
def start():
    """Plugin entry point used by the loader."""
    notifier = Pushbullet()
    return notifier

# Settings rendered on the notifications tab.
config = [{
    'name': 'pushbullet',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'pushbullet',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'api_key',
                    'label': 'User API Key'
                },
                {
                    'name': 'devices',
                    'default': '',
                    'advanced': True,
                    'description': 'IDs of devices to send notifications to, empty = all devices'
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]

View File

@@ -0,0 +1,86 @@
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import base64
import json
log = CPLog(__name__)
class Pushbullet(Notification):
    """Send notifications through the Pushbullet API."""

    # API endpoint template; %s is the method name ('devices', 'pushes', ...).
    url = 'https://api.pushbullet.com/api/%s'

    def notify(self, message = '', data = None, listener = None):
        """Push *message* as a note to every configured (or known) device.

        :return: True only when every device was notified successfully
        """
        if not data: data = {}

        devices = self.getDevices()
        if devices is None:
            return False

        # Get all the device IDs linked to this user
        if not len(devices):
            response = self.request('devices')
            if not response:
                return False

            devices += [device.get('id') for device in response['devices']]

        successful = 0
        for device in devices:
            response = self.request(
                'pushes',
                cache = False,
                device_id = device,
                type = 'note',
                title = self.default_title,
                body = toUnicode(message)
            )

            if response:
                successful += 1
            else:
                # Use lazy logger formatting instead of eager %-interpolation.
                log.error('Unable to push notification to Pushbullet device with ID %s', device)

        return successful == len(devices)

    def getDevices(self):
        """Parse the configured comma-separated device IDs.

        :return: list of integer device IDs, [] when none are configured,
                 or None when any configured ID is invalid
        """
        devices = [d.strip() for d in self.conf('devices').split(',')]

        # Remove empty items
        devices = [d for d in devices if len(d)]

        # Break on any ids that aren't integers
        valid_devices = []
        for device_id in devices:
            d = tryInt(device_id, None)

            # BUGFIX: compare against None explicitly; the original `if not d`
            # also rejected a valid (falsy) device ID of 0.
            if d is None:
                log.error('Device ID "%s" is not valid', device_id)
                return None

            valid_devices.append(d)

        return valid_devices

    def request(self, method, cache = True, **kwargs):
        """Call a Pushbullet API *method* with HTTP basic auth.

        :param cache: use the cached JSON helper when True, a raw request
                      (with manual JSON decode) otherwise
        :return: decoded JSON response, or None on failure
        """
        try:
            base64string = base64.encodestring('%s:' % self.conf('api_key'))[:-1]

            headers = {
                "Authorization": "Basic %s" % base64string
            }

            if cache:
                return self.getJsonData(self.url % method, headers = headers, data = kwargs)
            else:
                data = self.urlopen(self.url % method, headers = headers, data = kwargs)
                return json.loads(data)
        except Exception as ex:
            log.error('Pushbullet request failed')
            log.debug(ex)

        return None

View File

@@ -35,7 +35,7 @@ class Trakt(Notification):
def call(self, method_url, post_data):
try:
response = self.getJsonData(self.urls['base'] % method_url, params = post_data, cache_timeout = 1)
response = self.getJsonData(self.urls['base'] % method_url, data = post_data, cache_timeout = 1)
if response:
if response.get('status') == "success":
log.info('Successfully called Trakt')

View File

@@ -46,6 +46,14 @@ config = [{
'advanced': True,
'description': 'Only scan new movie folder at remote XBMC servers. Works if movie location is the same.',
},
{
'name': 'force_full_scan',
'label': 'Always do a full scan',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Do a full scan instead of only the new movie. Useful if the XBMC path is different from the path CPS uses.',
},
{
'name': 'on_snatch',
'default': 0,

View File

@@ -7,6 +7,7 @@ import json
import socket
import traceback
import urllib
import requests
log = CPLog(__name__)
@@ -36,7 +37,7 @@ class XBMC(Notification):
if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
param = {}
if self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0]):
if not self.conf('force_full_scan') and (self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0])):
param = {'directory': data['destination_dir']}
calls.append(('VideoLibrary.Scan', param))
@@ -167,22 +168,18 @@ class XBMC(Notification):
# manually fake expected response array
return [{'result': 'Error'}]
except URLError, e:
if isinstance(e.reason, socket.timeout):
log.info('Couldn\'t send request to XBMC, assuming it\'s turned off')
return [{'result': 'Error'}]
else:
log.error('Failed sending non-JSON-type request to XBMC: %s', traceback.format_exc())
return [{'result': 'Error'}]
except requests.exceptions.Timeout:
log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off')
return [{'result': 'Error'}]
except:
log.error('Failed sending non-JSON-type request to XBMC: %s', traceback.format_exc())
return [{'result': 'Error'}]
def request(self, host, requests):
def request(self, host, do_requests):
server = 'http://%s/jsonrpc' % host
data = []
for req in requests:
for req in do_requests:
method, kwargs = req
data.append({
'method': method,
@@ -202,17 +199,13 @@ class XBMC(Notification):
try:
log.debug('Sending request to %s: %s', (host, data))
response = self.getJsonData(server, headers = headers, params = data, timeout = 3, show_error = False)
response = self.getJsonData(server, headers = headers, data = data, timeout = 3, show_error = False)
log.debug('Returned from request %s: %s', (host, response))
return response
except URLError, e:
if isinstance(e.reason, socket.timeout):
log.info('Couldn\'t send request to XBMC, assuming it\'s turned off')
return []
else:
log.error('Failed sending request to XBMC: %s', traceback.format_exc())
return []
except requests.exceptions.Timeout:
log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off')
return []
except:
log.error('Failed sending request to XBMC: %s', traceback.format_exc())
return []

View File

@@ -41,7 +41,7 @@ config = [{
'label': 'Required Genres',
'default': '',
'placeholder': 'Example: Action, Crime & Drama',
'description': 'Ignore movies that don\'t contain at least one set of genres. Sets are separated by "," and each word within a set must be separated with "&"'
'description': ('Ignore movies that don\'t contain at least one set of genres.', 'Sets are separated by "," and each word within a set must be separated with "&"')
},
{
'name': 'ignored_genres',

View File

@@ -43,7 +43,7 @@ class Automation(Plugin):
if self.shuttingDown():
break
movie_dict = fireEvent('movie.get', movie_id, single = True)
movie_dict = fireEvent('media.get', movie_id, single = True)
fireEvent('movie.searcher.single', movie_dict)
return True
return True

View File

@@ -1,19 +1,15 @@
from StringIO import StringIO
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString, \
from couchpotato.core.helpers.encoding import ss, toSafeString, \
toUnicode, sp
from couchpotato.core.helpers.variable import getExt, md5, isLocalIP
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from multipartpost import MultipartPostHandler
import requests
from tornado import template
from tornado.web import StaticFileHandler
from urlparse import urlparse
import cookielib
import glob
import gzip
import inspect
import math
import os.path
import re
import time
@@ -39,6 +35,7 @@ class Plugin(object):
http_time_between_calls = 0
http_failed_request = {}
http_failed_disabled = {}
http_opener = requests.Session()
def __new__(typ, *args, **kwargs):
new_plugin = super(Plugin, typ).__new__(typ)
@@ -106,7 +103,9 @@ class Plugin(object):
f.close()
os.chmod(path, Env.getPermission('file'))
except Exception, e:
log.error('Unable writing to file "%s": %s', (path, e))
log.error('Unable writing to file "%s": %s', (path, traceback.format_exc()))
if os.path.isfile(path):
os.remove(path)
def makeDir(self, path):
path = ss(path)
@@ -120,11 +119,11 @@ class Plugin(object):
return False
# http request
def urlopen(self, url, timeout = 30, params = None, headers = None, opener = None, multipart = False, show_error = True):
def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, return_raw = False):
url = urllib2.quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
if not headers: headers = {}
if not params: params = {}
if not data: data = {}
# Fill in some headers
parsed_url = urlparse(url)
@@ -137,6 +136,8 @@ class Plugin(object):
headers['Connection'] = headers.get('Connection', 'keep-alive')
headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
r = self.http_opener
# Don't try for failed requests
if self.http_failed_disabled.get(host, 0) > 0:
if self.http_failed_disabled[host] > (time.time() - 900):
@@ -152,45 +153,18 @@ class Plugin(object):
self.wait(host)
try:
# Make sure opener has the correct headers
if opener:
opener.add_headers = headers
kwargs = {
'headers': headers,
'data': data if len(data) > 0 else None,
'timeout': timeout,
'files': files,
}
method = 'post' if len(data) > 0 or files else 'get'
if multipart:
log.info('Opening multipart url: %s, params: %s', (url, [x for x in params.iterkeys()] if isinstance(params, dict) else 'with data'))
request = urllib2.Request(url, params, headers)
log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.iterkeys()] if isinstance(data, dict) else 'with data'))
response = r.request(method, url, verify = False, **kwargs)
if opener:
opener.add_handler(MultipartPostHandler())
else:
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler)
response = opener.open(request, timeout = timeout)
else:
log.info('Opening url: %s, params: %s', (url, [x for x in params.iterkeys()] if isinstance(params, dict) else 'with data'))
if isinstance(params, (str, unicode)) and len(params) > 0:
data = params
else:
data = tryUrlencode(params) if len(params) > 0 else None
request = urllib2.Request(url, data, headers)
if opener:
response = opener.open(request, timeout = timeout)
else:
response = urllib2.urlopen(request, timeout = timeout)
# unzip if needed
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj = buf)
data = f.read()
f.close()
else:
data = response.read()
response.close()
data = response.content if return_raw else response.text
self.http_failed_request[host] = 0
except IOError:
@@ -218,15 +192,19 @@ class Plugin(object):
return data
def wait(self, host = ''):
if self.http_time_between_calls == 0:
return
now = time.time()
last_use = self.http_last_use.get(host, 0)
if last_use > 0:
wait = math.ceil(last_use - now + self.http_time_between_calls)
wait = (last_use - now) + self.http_time_between_calls
if wait > 0:
log.debug('Waiting for %s, %d seconds', (self.getName(), wait))
time.sleep(last_use - now + self.http_time_between_calls)
if wait > 0:
log.debug('Waiting for %s, %d seconds', (self.getName(), wait))
time.sleep(wait)
def beforeCall(self, handler):
self.isRunning('%s.%s' % (self.getName(), handler.__name__))
@@ -269,18 +247,19 @@ class Plugin(object):
try:
cache_timeout = 300
if kwargs.get('cache_timeout'):
if kwargs.has_key('cache_timeout'):
cache_timeout = kwargs.get('cache_timeout')
del kwargs['cache_timeout']
data = self.urlopen(url, **kwargs)
if data:
if data and cache_timeout > 0:
self.setCache(cache_key, data, timeout = cache_timeout)
return data
except:
if not kwargs.get('show_error', True):
raise
log.error('Failed getting cache: %s', (traceback.format_exc()))
return ''
def setCache(self, cache_key, value, timeout = 300):
@@ -289,19 +268,19 @@ class Plugin(object):
Env.get('cache').set(cache_key_md5, value, timeout)
return value
def createNzbName(self, data, movie):
tag = self.cpTag(movie)
def createNzbName(self, data, media):
tag = self.cpTag(media)
return '%s%s' % (toSafeString(toUnicode(data.get('name'))[:127 - len(tag)]), tag)
def createFileName(self, data, filedata, movie):
name = sp(os.path.join(self.createNzbName(data, movie)))
def createFileName(self, data, filedata, media):
name = sp(os.path.join(self.createNzbName(data, media)))
if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
return '%s.%s' % (name, 'rar')
return '%s.%s' % (name, data.get('protocol'))
def cpTag(self, movie):
def cpTag(self, media):
if Env.setting('enabled', 'renamer'):
return '.cp(' + movie['library'].get('identifier') + ')' if movie['library'].get('identifier') else ''
return '.cp(' + media['library'].get('identifier') + ')' if media['library'].get('identifier') else ''
return ''

View File

@@ -93,7 +93,7 @@ class FileManager(Plugin):
return dest
try:
filedata = self.urlopen(url, **urlopen_kwargs)
filedata = self.urlopen(url, return_raw = True, **urlopen_kwargs)
except:
log.error('Failed downloading file %s: %s', (url, traceback.format_exc()))
return False

View File

@@ -1,6 +1,6 @@
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent, fireEventAsync
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import splitString, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@@ -79,6 +79,7 @@ class Manage(Plugin):
try:
directories = self.directories()
directories.sort()
added_identifiers = []
# Add some progress
@@ -111,22 +112,20 @@ class Manage(Plugin):
if self.conf('cleanup') and full and not self.shuttingDown():
# Get movies with done status
total_movies, done_movies = fireEvent('movie.list', status = 'done', single = True)
total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', single = True)
for done_movie in done_movies:
if done_movie['library']['identifier'] not in added_identifiers:
fireEvent('movie.delete', movie_id = done_movie['id'], delete_from = 'all')
fireEvent('media.delete', media_id = done_movie['id'], delete_from = 'all')
else:
releases = fireEvent('release.for_movie', id = done_movie.get('id'), single = True)
for release in releases:
if len(release.get('files', [])) == 0:
fireEvent('release.delete', release['id'])
else:
if len(release.get('files', [])) > 0:
for release_file in release.get('files', []):
# Remove release not available anymore
if not os.path.isfile(ss(release_file['path'])):
if not os.path.isfile(sp(release_file['path'])):
fireEvent('release.clean', release['id'])
break
@@ -201,7 +200,7 @@ class Manage(Plugin):
self.in_progress[folder]['to_go'] -= 1
total = self.in_progress[folder]['total']
movie_dict = fireEvent('movie.get', identifier, single = True)
movie_dict = fireEvent('media.get', identifier, single = True)
fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' % getTitle(movie_dict['library']))

View File

@@ -2,7 +2,7 @@ from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode, ss
from couchpotato.core.helpers.variable import mergeDicts, md5, getExt
from couchpotato.core.helpers.variable import mergeDicts, getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Quality, Profile, ProfileType
@@ -19,14 +19,32 @@ class QualityPlugin(Plugin):
{'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]},
{'identifier': '1080p', 'hd': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts', 'x264', 'h264']},
{'identifier': '720p', 'hd': True, 'size': (3000, 10000), 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':['avi'], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':[], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': ['br2dvd'], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r')]},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':['avi', 'mpg', 'mpeg'], 'tags': ['webrip', ('web', 'rip')]},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']}
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [], 'allow': [], 'ext':[], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': ['webrip', ('web', 'rip')]},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':[]},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':[]},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':[]},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':[]},
# TODO come back to this later, think this could be handled better, this is starting to get out of hand....
# BluRay
{'identifier': 'bluray_1080p', 'hd': True, 'size': (800, 5000), 'label': 'BluRay - 1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv']},
{'identifier': 'bluray_720p', 'hd': True, 'size': (800, 5000), 'label': 'BluRay - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
# BDRip
{'identifier': 'bdrip_1080p', 'hd': True, 'size': (800, 5000), 'label': 'BDRip - 1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv']},
{'identifier': 'bdrip_720p', 'hd': True, 'size': (800, 5000), 'label': 'BDRip - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
# BRRip
{'identifier': 'brrip_1080p', 'hd': True, 'size': (800, 5000), 'label': 'BRRip - 1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv']},
{'identifier': 'brrip_720p', 'hd': True, 'size': (800, 5000), 'label': 'BRRip - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
# WEB-DL
{'identifier': 'webdl_1080p', 'hd': True, 'size': (800, 5000), 'label': 'WEB-DL - 1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv']},
{'identifier': 'webdl_720p', 'hd': True, 'size': (800, 5000), 'label': 'WEB-DL - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
{'identifier': 'webdl_480p', 'hd': True, 'size': (100, 5000), 'label': 'WEB-DL - 480p', 'width': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
# HDTV
{'identifier': 'hdtv_720p', 'hd': True, 'size': (800, 5000), 'label': 'HDTV - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
{'identifier': 'hdtv_sd', 'hd': False, 'size': (100, 1000), 'label': 'HDTV - SD', 'width': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'mp4', 'avi']},
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
@@ -50,6 +68,8 @@ class QualityPlugin(Plugin):
addEvent('app.initialize', self.fill, priority = 10)
addEvent('app.test', self.doTest)
def preReleases(self):
return self.pre_releases
@@ -165,9 +185,10 @@ class QualityPlugin(Plugin):
if not extra: extra = {}
# Create hash for cache
cache_key = md5(str([f.replace('.' + getExt(f), '') for f in files]))
cache_key = str([f.replace('.' + getExt(f), '') if len(getExt(f)) < 4 else f for f in files])
cached = self.getCache(cache_key)
if cached and len(extra) == 0: return cached
if cached and len(extra) == 0:
return cached
qualities = self.all()
@@ -228,11 +249,6 @@ class QualityPlugin(Plugin):
if len(set(words) & set(alt)) == len(alt):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type)
elif len(set(words) & set(alt)) > 0:
partial = list(set(words) & set(alt))[0]
if len(partial) > 2:
log.debug('Found %s via partial %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type) / 3
if (isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower()):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
@@ -285,3 +301,34 @@ class QualityPlugin(Plugin):
if add_score != 0:
for allow in quality.get('allow', []):
score[allow] -= 40 if self.cached_order[allow] < self.cached_order[quality['identifier']] else 5
def doTest(self):
tests = {
'Movie Name (1999)-DVD-Rip.avi': 'dvdrip',
'Movie Name 1999 720p Bluray.mkv': '720p',
'Movie Name 1999 BR-Rip 720p.avi': 'brrip',
'Movie Name 1999 720p Web Rip.avi': 'scr',
'Movie Name 1999 Web DL.avi': 'brrip',
'Movie.Name.1999.1080p.WEBRip.H264-Group': 'scr',
'Movie.Name.1999.DVDRip-Group': 'dvdrip',
'Movie.Name.1999.DVD-Rip-Group': 'dvdrip',
'Movie.Name.1999.DVD-R-Group': 'dvdr',
'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': '720p',
}
correct = 0
for name in tests:
success = self.guess([name]).get('identifier') == tests[name]
if not success:
log.error('%s failed check, thinks it\'s %s', (name, self.guess([name]).get('identifier')))
correct += success
if correct == len(tests):
log.info('Quality test successful')
return True
else:
log.error('Quality test failed: %s out of %s succeeded', (correct, len(tests)))

View File

@@ -100,14 +100,14 @@ class Release(Plugin):
done_status, snatched_status = fireEvent('status.get', ['done', 'snatched'], single = True)
# Add movie
movie = db.query(Media).filter_by(library_id = group['library'].get('id')).first()
if not movie:
movie = Media(
media = db.query(Media).filter_by(library_id = group['library'].get('id')).first()
if not media:
media = Media(
library_id = group['library'].get('id'),
profile_id = 0,
status_id = done_status.get('id')
)
db.add(movie)
db.add(media)
db.commit()
# Add Release
@@ -120,7 +120,7 @@ class Release(Plugin):
if not rel:
rel = Relea(
identifier = identifier,
movie = movie,
media = media,
quality_id = group['meta_data']['quality'].get('id'),
status_id = done_status.get('id')
)
@@ -142,7 +142,7 @@ class Release(Plugin):
except:
log.debug('Failed to attach "%s" to release: %s', (added_files, traceback.format_exc()))
fireEvent('movie.restatus', movie.id)
fireEvent('media.restatus', media.id)
return True
@@ -211,119 +211,136 @@ class Release(Plugin):
db = get_session()
rel = db.query(Relea).filter_by(id = id).first()
if rel:
item = {}
for info in rel.info:
item[info.identifier] = info.value
if not rel:
log.error('Couldn\'t find release with id: %s', id)
return {
'success': False
}
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
item = {}
for info in rel.info:
item[info.identifier] = info.value
# Get matching provider
provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
if not item.get('protocol'):
item['protocol'] = item['type']
item['type'] = 'movie'
# Get matching provider
provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
if item.get('protocol') != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
# Backwards compatibility code
if not item.get('protocol'):
item['protocol'] = item['type']
item['type'] = 'movie'
success = self.download(data = item, media = rel.movie.to_dict({
success = self.download(data = item, media = rel.media.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
}), manual = True)
if item.get('protocol') != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
if success:
db.expunge_all()
rel = db.query(Relea).filter_by(id = id).first() # Get release again @RuudBurger why do we need to get it again??
success = self.download(data = item, media = rel.movie.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
}), manual = True)
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
return {
'success': success
}
else:
log.error('Couldn\'t find release with id: %s', id)
if success == True:
db.expunge_all()
rel = db.query(Relea).filter_by(id = id).first() # Get release again @RuudBurger why do we need to get it again??
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
return {
'success': False
'success': success == True
}
def download(self, data, media, manual = False):
# Backwards compatibility code
if not data.get('protocol'):
data['protocol'] = data['type']
data['type'] = 'movie'
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if not downloader_enabled:
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol'))
return False
if downloader_enabled:
snatched_status, done_status, active_status = fireEvent('status.get', ['snatched', 'done', 'active'], single = True)
# Download NZB or torrent file
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
log.info('Tried to download, but the "%s" provider gave an error', data.get('protocol'))
if filedata == 'try_next':
return filedata
# Download release to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
# Send NZB or torrent file to downloader
download_result = fireEvent('download', data = data, media = media, manual = manual, filedata = filedata, single = True)
if not download_result:
log.info('Tried to download, but the "%s" downloader gave an error', data.get('protocol'))
return False
log.debug('Downloader result: %s', download_result)
download_result = fireEvent('download', data = data, movie = media, manual = manual, filedata = filedata, single = True)
log.debug('Downloader result: %s', download_result)
snatched_status, done_status, downloaded_status, active_status = fireEvent('status.get', ['snatched', 'done', 'downloaded', 'active'], single = True)
if download_result:
try:
# Mark release as snatched
db = get_session()
rls = db.query(Relea).filter_by(identifier = md5(data['url'])).first()
if rls:
renamer_enabled = Env.setting('enabled', 'renamer')
try:
db = get_session()
rls = db.query(Relea).filter_by(identifier = md5(data['url'])).first()
if not rls:
log.error('No release found to store download information in')
return False
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
renamer_enabled = Env.setting('enabled', 'renamer')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(media['library']), media['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls.to_dict())
# Mark release as snatched
if renamer_enabled:
self.updateStatus(rls.id, status = snatched_status)
# If renamer isn't used, mark media done if finished or release downloaded
else:
if media['status_id'] == active_status.get('id'):
finished = next((True for profile_type in media['profile']['types'] if \
profile_type['quality_id'] == rls.quality.id and profile_type['finish']), False)
if finished:
log.info('Renamer disabled, marking media as finished: %s', log_movie)
# Mark release done
self.updateStatus(rls.id, status = done_status)
# Mark media done
mdia = db.query(Media).filter_by(id = media['id']).first()
mdia.status_id = done_status.get('id')
mdia.last_edit = int(time.time())
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(media['library']), media['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls.to_dict())
return True
# If renamer isn't used, mark media done
if not renamer_enabled:
try:
if media['status_id'] == active_status.get('id'):
for profile_type in media['profile']['types']:
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking media as finished: %s', log_movie)
# Assume release downloaded
self.updateStatus(rls.id, status = downloaded_status)
# Mark release done
self.updateStatus(rls.id, status = done_status)
except:
log.error('Failed storing download status: %s', traceback.format_exc())
return False
# Mark media done
mdia = db.query(Media).filter_by(id = media['id']).first()
mdia.status_id = done_status.get('id')
mdia.last_edit = int(time.time())
db.commit()
except:
log.error('Failed marking media finished, renamer disabled: %s', traceback.format_exc())
else:
self.updateStatus(rls.id, status = snatched_status)
except:
log.error('Failed marking media finished: %s', traceback.format_exc())
return True
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('protocol')))
return False
return True
def tryDownloadResult(self, results, media, quality_type, manual = False):
ignored_status, failed_status = fireEvent('status.get', ['ignored', 'failed'], single = True)
@@ -365,8 +382,7 @@ class Release(Plugin):
if not rls:
rls = Relea(
identifier = rel_identifier,
movie_id = media.get('id'),
#media_id = media.get('id'),
media_id = media.get('id'),
quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id')
)
@@ -403,7 +419,7 @@ class Release(Plugin):
releases_raw = db.query(Relea) \
.options(joinedload_all('info')) \
.options(joinedload_all('files')) \
.filter(Relea.movie_id == id) \
.filter(Relea.media_id == id) \
.all()
releases = [r.to_dict({'info':{}, 'files':{}}) for r in releases_raw]
@@ -446,6 +462,6 @@ class Release(Plugin):
db.commit()
#Update all movie info as there is no release update function
fireEvent('notify.frontend', type = 'release.update_status.%s' % rel.id, data = status.get('id'))
fireEvent('notify.frontend', type = 'release.update_status', data = rel.to_dict())
return True

View File

@@ -93,7 +93,7 @@ config = [{
'default': 1,
'type': 'int',
'unit': 'min(s)',
'description': 'Detect movie status every X minutes. Will start the renamer if movie is <strong>completed</strong> or handle <strong>failed</strong> download if these options are enabled',
'description': ('Detect movie status every X minutes.', 'Will start the renamer if movie is <strong>completed</strong> or handle <strong>failed</strong> download if these options are enabled'),
},
{
'advanced': True,
@@ -122,13 +122,13 @@ config = [{
'advanced': True,
'name': 'separator',
'label': 'File-Separator',
'description': 'Replace all the spaces with a character. Example: ".", "-" (without quotes). Leave empty to use spaces.',
'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
},
{
'advanced': True,
'name': 'foldersep',
'label': 'Folder-Separator',
'description': 'Replace all the spaces with a character. Example: ".", "-" (without quotes). Leave empty to use spaces.',
'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
},
{
'name': 'file_action',
@@ -136,7 +136,7 @@ config = [{
'default': 'link',
'type': 'dropdown',
'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
'description': '<strong>Link</strong> or <strong>Copy</strong> after downloading completed (and allow for seeding), or <strong>Move</strong> after seeding completed. Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link">sym link</a> and falls back to Copy.',
'description': ('<strong>Link</strong>, <strong>Copy</strong> or <strong>Move</strong> after download completed.', 'Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link">sym link</a> and falls back to Copy. It is perfered to use link when downloading torrents as it will save you space, while still beeing able to seed.'),
'advanced': True,
},
{

View File

@@ -3,7 +3,7 @@ from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode, ss, sp
from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
getImdb, link, symlink, tryInt, splitString
getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Library, File, Profile, Release, \
@@ -30,10 +30,11 @@ class Renamer(Plugin):
'desc': 'For the renamer to check for new files to rename in a folder',
'params': {
'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'},
'movie_folder': {'desc': 'Optional: The folder of the movie to scan. Keep empty for default renamer folder.'},
'files': {'desc': 'Optional: Provide the release files if more releases are in the same movie_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'},
'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'},
'downloader' : {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'},
'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in movie_folder. \'downloader\' is required with this option.'},
'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in media_folder. \'downloader\' is required with this option.'},
'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''},
},
})
@@ -64,25 +65,33 @@ class Renamer(Plugin):
def scanView(self, **kwargs):
async = tryInt(kwargs.get('async', 0))
movie_folder = sp(kwargs.get('movie_folder'))
base_folder = kwargs.get('base_folder')
media_folder = sp(kwargs.get('media_folder'))
# Backwards compatibility, to be removed after a few versions :)
if not media_folder:
media_folder = sp(kwargs.get('movie_folder'))
downloader = kwargs.get('downloader')
download_id = kwargs.get('download_id')
files = '|'.join([sp(filename) for filename in splitString(kwargs.get('files'), '|')])
status = kwargs.get('status', 'completed')
release_download = {'folder': movie_folder} if movie_folder else None
if release_download:
release_download = None
if not base_folder and media_folder:
release_download = {'folder': media_folder}
release_download.update({'id': download_id, 'downloader': downloader, 'status': status, 'files': files} if download_id else {})
fire_handle = fireEvent if not async else fireEventAsync
fire_handle('renamer.scan', release_download)
fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download)
return {
'success': True
}
def scan(self, release_download = None):
def scan(self, base_folder = None, release_download = None):
if not release_download: release_download = {}
if self.isDisabled():
return
@@ -91,11 +100,14 @@ class Renamer(Plugin):
log.info('Renamer is already running, if you see this often, check the logs above for errors.')
return
if not base_folder:
base_folder = self.conf('from')
from_folder = sp(self.conf('from'))
to_folder = sp(self.conf('to'))
# Get movie folder to process
movie_folder = release_download and release_download.get('folder')
# Get media folder to process
media_folder = release_download.get('folder')
# Get all folders that should not be processed
no_process = [to_folder]
@@ -108,73 +120,73 @@ class Renamer(Plugin):
pass
# Check to see if the no_process folders are inside the "from" folder.
if not os.path.isdir(from_folder) or not os.path.isdir(to_folder):
log.error('Both the "To" and "From" have to exist.')
if not os.path.isdir(base_folder) or not os.path.isdir(to_folder):
log.error('Both the "To" and "From" folder have to exist.')
return
else:
for item in no_process:
if from_folder in item:
log.error('To protect your data, the movie libraries can\'t be inside of or the same as the "from" folder.')
if isSubFolder(item, base_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder.')
return
# Check to see if the no_process folders are inside the provided movie_folder
if movie_folder and not os.path.isdir(movie_folder):
log.debug('The provided movie folder %s does not exist. Trying to find it in the \'from\' folder.', movie_folder)
# Check to see if the no_process folders are inside the provided media_folder
if media_folder and not os.path.isdir(media_folder):
log.debug('The provided media folder %s does not exist. Trying to find it in the \'from\' folder.', media_folder)
# Update to the from folder
if len(release_download.get('files')) == 1:
new_movie_folder = from_folder
if len(splitString(release_download.get('files'), '|')) == 1:
new_media_folder = from_folder
else:
new_movie_folder = os.path.join(from_folder, os.path.basename(movie_folder))
new_media_folder = os.path.join(from_folder, os.path.basename(media_folder))
if not os.path.isdir(new_movie_folder):
log.error('The provided movie folder %s does not exist and could also not be found in the \'from\' folder.', movie_folder)
if not os.path.isdir(new_media_folder):
log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder)
return
# Update the files
new_files = [os.path.join(new_movie_folder, os.path.relpath(filename, movie_folder)) for filename in splitString(release_download.get('files'), '|')]
new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in splitString(release_download.get('files'), '|')]
if new_files and not os.path.isfile(new_files[0]):
log.error('The provided movie folder %s does not exist and its files could also not be found in the \'from\' folder.', movie_folder)
log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder)
return
# Update release_download info to the from folder
log.debug('Release %s found in the \'from\' folder.', movie_folder)
release_download['folder'] = new_movie_folder
log.debug('Release %s found in the \'from\' folder.', media_folder)
release_download['folder'] = new_media_folder
release_download['files'] = '|'.join(new_files)
movie_folder = new_movie_folder
media_folder = new_media_folder
if movie_folder:
if media_folder:
for item in no_process:
if movie_folder in item:
log.error('To protect your data, the movie libraries can\'t be inside of or the same as the provided movie folder.')
if isSubFolder(item, media_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder.')
return
# Make sure a checkSnatched marked all downloads/seeds as such
if not release_download and self.conf('run_every') > 0:
fireEvent('renamer.check_snatched')
self.checkSnatched(fire_scan = False)
self.renaming_started = True
# make sure the movie folder name is included in the search
# make sure the media folder name is included in the search
folder = None
files = []
if movie_folder:
log.info('Scanning movie folder %s...', movie_folder)
folder = os.path.dirname(movie_folder)
if media_folder:
log.info('Scanning media folder %s...', media_folder)
folder = os.path.dirname(media_folder)
if release_download.get('files', ''):
files = splitString(release_download['files'], '|')
# If there is only one file in the torrent, the downloader did not create a subfolder
if len(files) == 1:
folder = movie_folder
folder = media_folder
else:
# Get all files from the specified folder
try:
for root, folders, names in os.walk(movie_folder):
for root, folders, names in os.walk(media_folder):
files.extend([sp(os.path.join(root, name)) for name in names])
except:
log.error('Failed getting files from %s: %s', (movie_folder, traceback.format_exc()))
log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc()))
db = get_session()
@@ -184,10 +196,10 @@ class Renamer(Plugin):
# Unpack any archives
extr_files = None
if self.conf('unrar'):
folder, movie_folder, files, extr_files = self.extractFiles(folder = folder, movie_folder = movie_folder, files = files,
folder, media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files,
cleanup = self.conf('cleanup') and not self.downloadIsTorrent(release_download))
groups = fireEvent('scanner.scan', folder = folder if folder else from_folder,
groups = fireEvent('scanner.scan', folder = folder if folder else base_folder,
files = files, release_download = release_download, return_ignored = False, single = True) or []
folder_name = self.conf('folder_name')
@@ -228,7 +240,7 @@ class Renamer(Plugin):
# Overwrite destination when set in category
destination = to_folder
category_label = ''
for movie in library_ent.movies:
for movie in library_ent.media:
if movie.category and movie.category.label:
category_label = movie.category.label
@@ -402,13 +414,13 @@ class Renamer(Plugin):
remove_leftovers = True
# Add it to the wanted list before we continue
if len(library_ent.movies) == 0:
if len(library_ent.media) == 0:
profile = db.query(Profile).filter_by(core = True, label = group['meta_data']['quality']['label']).first()
fireEvent('movie.add', params = {'identifier': group['library']['identifier'], 'profile_id': profile.id}, search_after = False)
db.expire_all()
library_ent = db.query(Library).filter_by(identifier = group['library']['identifier']).first()
for movie in library_ent.movies:
for movie in library_ent.media:
# Mark movie "done" once it's found the quality with the finish check
try:
@@ -496,7 +508,10 @@ class Renamer(Plugin):
os.remove(src)
parent_dir = os.path.dirname(src)
if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and not parent_dir in [destination, movie_folder] and not from_folder in parent_dir:
if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and \
not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \
not isSubFolder(parent_dir, base_folder):
delete_folders.append(parent_dir)
except:
@@ -528,7 +543,7 @@ class Renamer(Plugin):
self.tagRelease(group = group, tag = 'failed_rename')
# Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent
if self.movieInFromFolder(movie_folder) and self.downloadIsTorrent(release_download):
if self.movieInFromFolder(media_folder) and self.downloadIsTorrent(release_download):
self.tagRelease(group = group, tag = 'renamed_already')
# Remove matching releases
@@ -540,12 +555,12 @@ class Renamer(Plugin):
log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc()))
if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(release_download):
if movie_folder:
if media_folder:
# Delete the movie folder
group_folder = movie_folder
group_folder = media_folder
else:
# Delete the first empty subfolder in the tree relative to the 'from' folder
group_folder = sp(os.path.join(from_folder, os.path.relpath(group['parentdir'], from_folder).split(os.path.sep)[0]))
group_folder = sp(os.path.join(base_folder, os.path.relpath(group['parentdir'], base_folder).split(os.path.sep)[0]))
try:
log.info('Deleting folder: %s', group_folder)
@@ -563,7 +578,7 @@ class Renamer(Plugin):
# Break if CP wants to shut down
if self.shuttingDown():
break
self.renaming_started = False
def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False):
@@ -613,6 +628,11 @@ Remove it if you want it to be renamed (again, or at least let it try again)
tag_files.extend([os.path.join(root, name) for name in names])
for filename in tag_files:
# Dont tag .ignore files
if os.path.splitext(filename)[1] == '.ignore':
continue
tag_filename = '%s.%s.ignore' % (os.path.splitext(filename)[0], tag)
if not os.path.isfile(tag_filename):
self.createFile(tag_filename, text)
@@ -643,7 +663,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Match all found ignore files with the tag_files and delete if found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, '%s.%s.ignore' % (re.escape(os.path.splitext(tag_file)[0]), tag if tag else '*'))
ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
for filename in ignore_file:
try:
os.remove(filename)
@@ -676,7 +696,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Match all found ignore files with the tag_files and return True found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, '%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))
ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
if ignore_file:
return True
@@ -789,7 +809,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
except:
loge('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))
def checkSnatched(self):
def checkSnatched(self, fire_scan = True):
if self.checking_snatched:
log.debug('Already checking snatched')
@@ -805,126 +825,168 @@ Remove it if you want it to be renamed (again, or at least let it try again)
Release.status_id.in_([snatched_status.get('id'), seeding_status.get('id'), missing_status.get('id')])
).all()
if not rels:
#No releases found that need status checking
self.checking_snatched = False
return True
# Collect all download information with the download IDs from the releases
download_ids = []
no_status_support = []
try:
for rel in rels:
rel_dict = rel.to_dict({'info': {}})
if rel_dict['info'].get('download_id') and rel_dict['info'].get('download_downloader'):
download_ids.append({'id': rel_dict['info']['download_id'], 'downloader': rel_dict['info']['download_downloader']})
ds = rel_dict['info'].get('download_status_support')
if ds == False or ds == 'False':
no_status_support.append(ss(rel_dict['info'].get('download_downloader')))
except:
log.error('Error getting download IDs from database')
self.checking_snatched = False
return False
release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else []
if len(no_status_support) > 0:
log.debug('Download status functionality is not implemented for one of the active downloaders: %s', no_status_support)
if not release_downloads:
if fire_scan:
self.scan()
self.checking_snatched = False
return True
scan_releases = []
scan_required = False
if rels:
log.debug('Checking status snatched releases...')
log.debug('Checking status snatched releases...')
release_downloads = fireEvent('download.status', merge = True)
if not release_downloads:
log.debug('Download status functionality is not implemented for active downloaders.')
scan_required = True
else:
try:
for rel in rels:
rel_dict = rel.to_dict({'info': {}})
movie_dict = fireEvent('movie.get', rel.movie_id, single = True)
try:
for rel in rels:
rel_dict = rel.to_dict({'info': {}})
movie_dict = fireEvent('media.get', media_id = rel.media_id, single = True)
if not isinstance(rel_dict['info'], (dict)):
log.error('Faulty release found without any info, ignoring.')
if not isinstance(rel_dict['info'], (dict)):
log.error('Faulty release found without any info, ignoring.')
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
continue
# Check if download ID is available
if not rel_dict['info'].get('download_id') or not rel_dict['info'].get('download_downloader'):
log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (rel_dict['info'].get('download_downloader', 'unknown'), rel_dict['info']['name']))
scan_required = True
# Continue with next release
continue
# Find release in downloaders
nzbname = self.createNzbName(rel_dict['info'], movie_dict)
for release_download in release_downloads:
found_release = False
if rel_dict['info'].get('download_id'):
if release_download['id'] == rel_dict['info']['download_id'] and release_download['downloader'] == rel_dict['info']['download_downloader']:
log.debug('Found release by id: %s', release_download['id'])
found_release = True
break
else:
if release_download['name'] == nzbname or rel_dict['info']['name'] in release_download['name'] or getImdb(release_download['name']) == movie_dict['library']['identifier']:
log.debug('Found release by release name or imdb ID: %s', release_download['name'])
found_release = True
break
if not found_release:
log.info('%s not found in downloaders', nzbname)
#Check status if already missing and for how long, if > 1 week, set to ignored else to missing
if rel.status_id == missing_status.get('id'):
if rel.last_edit < int(time.time()) - 7 * 24 * 60 * 60:
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
continue
else:
# Set the release to missing
fireEvent('release.update_status', rel.id, status = missing_status, single = True)
# check status
nzbname = self.createNzbName(rel_dict['info'], movie_dict)
# Continue with next release
continue
found = False
for release_download in release_downloads:
found_release = False
if rel_dict['info'].get('download_id'):
if release_download['id'] == rel_dict['info']['download_id'] and release_download['downloader'] == rel_dict['info']['download_downloader']:
log.debug('Found release by id: %s', release_download['id'])
found_release = True
# Log that we found the release
timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))
# Check status of release
if release_download['status'] == 'busy':
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if self.movieInFromFolder(release_download['folder']):
self.tagRelease(release_download = release_download, tag = 'downloading')
elif release_download['status'] == 'seeding':
#If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.status_id == seeding_status.get('id') and self.statusInfoComplete(release_download):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and set the torrent to paused if required
release_download.update({'pause': True, 'scan': True, 'process_complete': False})
scan_releases.append(release_download)
else:
#let it seed
log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))
# Set the release to seeding
fireEvent('release.update_status', rel.id, status = seeding_status, single = True)
elif release_download['status'] == 'failed':
# Set the release to failed
fireEvent('release.update_status', rel.id, status = failed_status, single = True)
fireEvent('download.remove_failed', release_download, single = True)
if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', media_id = rel.media_id)
elif release_download['status'] == 'completed':
log.info('Download of %s completed!', release_download['name'])
#Make sure the downloader sent over a path to look in
if self.statusInfoComplete(release_download):
# If the release has been seeding, process now the seeding is done
if rel.status_id == seeding_status.get('id'):
if self.conf('file_action') != 'move':
# Set the release to done as the movie has already been renamed
fireEvent('release.update_status', rel.id, status = downloaded_status, single = True)
# Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': False, 'process_complete': True})
scan_releases.append(release_download)
else:
if release_download['name'] == nzbname or rel_dict['info']['name'] in release_download['name'] or getImdb(release_download['name']) == movie_dict['library']['identifier']:
found_release = True
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
if found_release:
timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))
else:
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
if release_download['status'] == 'busy':
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if self.movieInFromFolder(release_download['folder']):
self.tagRelease(release_download = release_download, tag = 'downloading')
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
scan_required = True
elif release_download['status'] == 'seeding':
#If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.status_id == seeding_status.get('id') and self.statusInfoComplete(release_download):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and set the torrent to paused if required
release_download.update({'pause': True, 'scan': True, 'process_complete': False})
scan_releases.append(release_download)
else:
#let it seed
log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))
# Set the release to seeding
fireEvent('release.update_status', rel.id, status = seeding_status, single = True)
elif release_download['status'] == 'failed':
# Set the release to failed
fireEvent('release.update_status', rel.id, status = failed_status, single = True)
fireEvent('download.remove_failed', release_download, single = True)
if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', movie_id = rel.movie_id)
elif release_download['status'] == 'completed':
log.info('Download of %s completed!', release_download['name'])
if self.statusInfoComplete(release_download):
# If the release has been seeding, process now the seeding is done
if rel.status_id == seeding_status.get('id'):
if self.conf('file_action') != 'move':
# Set the release to done as the movie has already been renamed
fireEvent('release.update_status', rel.id, status = downloaded_status, single = True)
# Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': False, 'process_complete': True})
scan_releases.append(release_download)
else:
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
scan_required = True
found = True
break
if not found:
log.info('%s not found in downloaders', nzbname)
#Check status if already missing and for how long, if > 1 week, set to ignored else to missing
if rel.status_id == missing_status.get('id'):
if rel.last_edit < int(time.time()) - 7 * 24 * 60 * 60:
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
else:
# Set the release to missing
fireEvent('release.update_status', rel.id, status = missing_status, single = True)
except:
log.error('Failed checking for release in downloader: %s', traceback.format_exc())
except:
log.error('Failed checking for release in downloader: %s', traceback.format_exc())
# The following can either be done here, or inside the scanner if we pass it scan_items in one go
for release_download in scan_releases:
@@ -932,7 +994,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if release_download['scan']:
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = True, single = True)
fireEvent('renamer.scan', release_download = release_download)
self.scan(release_download = release_download)
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = False, single = True)
if release_download['process_complete']:
@@ -943,11 +1005,10 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Ask the downloader to process the item
fireEvent('download.process_complete', release_download = release_download, single = True)
if scan_required:
fireEvent('renamer.scan')
if fire_scan and (scan_required or len(no_status_support) > 0):
self.scan()
self.checking_snatched = False
return True
def extendReleaseDownload(self, release_download):
@@ -994,10 +1055,10 @@ Remove it if you want it to be renamed (again, or at least let it try again)
def statusInfoComplete(self, release_download):
return release_download['id'] and release_download['downloader'] and release_download['folder']
def movieInFromFolder(self, movie_folder):
return movie_folder and sp(self.conf('from')) in sp(movie_folder) or not movie_folder
def movieInFromFolder(self, media_folder):
return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder
def extractFiles(self, folder = None, movie_folder = None, files = None, cleanup = False):
def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False):
if not files: files = []
# RegEx for finding rar files
@@ -1012,7 +1073,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
folder = from_folder
check_file_date = True
if movie_folder:
if media_folder:
check_file_date = False
if not files:
@@ -1108,18 +1169,18 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if cleanup:
# Remove all left over folders
log.debug('Removing old movie folder %s...', movie_folder)
self.deleteEmptyFolder(movie_folder)
log.debug('Removing old movie folder %s...', media_folder)
self.deleteEmptyFolder(media_folder)
movie_folder = os.path.join(from_folder, os.path.relpath(movie_folder, folder))
media_folder = os.path.join(from_folder, os.path.relpath(media_folder, folder))
folder = from_folder
if extr_files:
files.extend(extr_files)
# Cleanup files and folder if movie_folder was not provided
if not movie_folder:
# Cleanup files and folder if media_folder was not provided
if not media_folder:
files = []
folder = None
return folder, movie_folder, files, extr_files
return folder, media_folder, files, extr_files

View File

@@ -80,7 +80,8 @@ class Scanner(Plugin):
'hdtv': ['hdtv']
}
clean = '[ _\,\.\(\)\[\]\-](extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
clean = '[ _\,\.\(\)\[\]\-]?(extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \
'|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
multipart_regex = [
'[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
'[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1
@@ -454,7 +455,7 @@ class Scanner(Plugin):
data['resolution_width'] = meta.get('resolution_width', 720)
data['resolution_height'] = meta.get('resolution_height', 480)
data['audio_channels'] = meta.get('audio_channels', 2.0)
data['aspect'] = meta.get('resolution_width', 720) / meta.get('resolution_height', 480)
data['aspect'] = round(float(meta.get('resolution_width', 720)) / meta.get('resolution_height', 480), 2)
except:
log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc()))
pass

View File

@@ -20,7 +20,7 @@ config = [{
},
{
'name': 'languages',
'description': 'Comma separated, 2 letter country code. Example: en, nl. See the codes at <a href="http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes">on Wikipedia</a>',
'description': ('Comma separated, 2 letter country code.', 'Example: en, nl. See the codes at <a href="http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes">on Wikipedia</a>'),
},
# {
# 'name': 'automatic',

View File

@@ -27,7 +27,7 @@ class Subtitle(Plugin):
library = db.query(Library).all()
done_status = fireEvent('status.get', 'done', single = True)
for movie in library.movies:
for movie in library.media:
for release in movie.releases:

View File

@@ -59,7 +59,7 @@ config = [{
{
'name': 'automation_charts_boxoffice',
'type': 'bool',
'label': 'Box offce TOP 10',
'label': 'Box office TOP 10',
'description': 'IMDB Box office <a href="http://www.imdb.com/chart/">TOP 10</a> chart',
'default': True,
},

View File

@@ -1,16 +1,14 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
possibleTitles, getTitle
possibleTitles, toIterable
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from urlparse import urlparse
import cookielib
import json
import re
import time
import traceback
import urllib2
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@@ -95,7 +93,7 @@ class Provider(Plugin):
def getHTMLData(self, url, **kwargs):
cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {})))
cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('data', {})))
return self.getCache(cache_key, url, **kwargs)
@@ -111,13 +109,14 @@ class YarrProvider(Provider):
sizeMb = ['mb', 'mib']
sizeKb = ['kb', 'kib']
login_opener = None
last_login_check = 0
last_login_check = None
def __init__(self):
addEvent('provider.enabled_protocols', self.getEnabledProtocol)
addEvent('provider.belongs_to', self.belongsTo)
addEvent('provider.search.%s.%s' % (self.protocol, self.type), self.search)
for type in toIterable(self.type):
addEvent('provider.search.%s.%s' % (self.protocol, type), self.search)
def getEnabledProtocol(self):
if self.isEnabled():
@@ -129,35 +128,30 @@ class YarrProvider(Provider):
# Check if we are still logged in every hour
now = time.time()
if self.login_opener and self.last_login_check < (now - 3600):
if self.last_login_check and self.last_login_check < (now - 3600):
try:
output = self.urlopen(self.urls['login_check'], opener = self.login_opener)
output = self.urlopen(self.urls['login_check'])
if self.loginCheckSuccess(output):
self.last_login_check = now
return True
else:
self.login_opener = None
except:
self.login_opener = None
except: pass
self.last_login_check = None
if self.login_opener:
if self.last_login_check:
return True
try:
cookiejar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
output = self.urlopen(self.urls['login'], params = self.getLoginParams(), opener = opener)
output = self.urlopen(self.urls['login'], data = self.getLoginParams())
if self.loginSuccess(output):
self.last_login_check = now
self.login_opener = opener
return True
error = 'unknown'
except:
error = traceback.format_exc()
self.login_opener = None
self.last_login_check = None
log.error('Failed to login %s: %s', (self.getName(), error))
return False
@@ -171,12 +165,12 @@ class YarrProvider(Provider):
try:
if not self.login():
log.error('Failed downloading from %s', self.getName())
return self.urlopen(url, opener = self.login_opener)
return self.urlopen(url)
except:
log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return ''
return {}
def download(self, url = '', nzb_id = ''):
try:
@@ -186,7 +180,7 @@ class YarrProvider(Provider):
return 'try_next'
def search(self, movie, quality):
def search(self, media, quality):
if self.isDisabled():
return []
@@ -198,15 +192,17 @@ class YarrProvider(Provider):
# Create result container
imdb_results = hasattr(self, '_search')
results = ResultList(self, movie, quality, imdb_results = imdb_results)
results = ResultList(self, media, quality, imdb_results = imdb_results)
# Do search based on imdb id
if imdb_results:
self._search(movie, quality, results)
self._search(media, quality, results)
# Search possible titles
else:
for title in possibleTitles(getTitle(movie['library'])):
self._searchOnTitle(title, movie, quality, results)
media_title = fireEvent('library.query', media['library'], single = True)
for title in possibleTitles(media_title):
self._searchOnTitle(title, media, quality, results)
return results
@@ -249,8 +245,7 @@ class YarrProvider(Provider):
def getCatId(self, identifier):
for cats in self.cat_ids:
ids, qualities = cats
for ids, qualities in self.cat_ids:
if identifier in qualities:
return ids
@@ -264,14 +259,14 @@ class ResultList(list):
result_ids = None
provider = None
movie = None
media = None
quality = None
def __init__(self, provider, movie, quality, **kwargs):
def __init__(self, provider, media, quality, **kwargs):
self.result_ids = []
self.provider = provider
self.movie = movie
self.media = media
self.quality = quality
self.kwargs = kwargs
@@ -285,13 +280,13 @@ class ResultList(list):
new_result = self.fillResult(result)
is_correct = fireEvent('searcher.correct_release', new_result, self.movie, self.quality,
is_correct = fireEvent('searcher.correct_release', new_result, self.media, self.quality,
imdb_results = self.kwargs.get('imdb_results', False), single = True)
if is_correct and new_result['id'] not in self.result_ids:
is_correct_weight = float(is_correct)
new_result['score'] += fireEvent('score.calculate', new_result, self.movie, single = True)
new_result['score'] += fireEvent('score.calculate', new_result, self.media, single = True)
old_score = new_result['score']
new_result['score'] = int(old_score * is_correct_weight)

View File

@@ -1,7 +1,7 @@
from .main import MovieResultModifier
from .main import InfoResultModifier
def start():
return MovieResultModifier()
return InfoResultModifier()
config = []

View File

@@ -3,6 +3,7 @@ from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import mergeDicts, randomString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.providers.base import MultiProvider
from couchpotato.core.settings.model import Library
import copy
import traceback
@@ -10,7 +11,17 @@ import traceback
log = CPLog(__name__)
class MovieResultModifier(Plugin):
class InfoResultModifier(MultiProvider):
def getTypes(self):
return [Movie, Show]
class ModifierBase(Plugin):
pass
class Movie(ModifierBase):
default_info = {
'tmdb_id': 0,
@@ -21,14 +32,17 @@ class MovieResultModifier(Plugin):
'poster': [],
'backdrop': [],
'poster_original': [],
'backdrop_original': []
'backdrop_original': [],
'actors': {}
},
'runtime': 0,
'plot': '',
'tagline': '',
'imdb': '',
'genres': [],
'mpaa': None
'mpaa': None,
'actors': [],
'actor_roles': {}
}
def __init__(self):
@@ -91,13 +105,13 @@ class MovieResultModifier(Plugin):
# Statuses
active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)
for movie in l.movies:
for movie in l.media:
if movie.status_id == active_status['id']:
temp['in_wanted'] = fireEvent('movie.get', movie.id, single = True)
temp['in_wanted'] = fireEvent('media.get', movie.id, single = True)
for release in movie.releases:
if release.status_id == done_status['id']:
temp['in_library'] = fireEvent('movie.get', movie.id, single = True)
temp['in_library'] = fireEvent('media.get', movie.id, single = True)
except:
log.error('Tried getting more info on searched movies: %s', traceback.format_exc())
@@ -110,3 +124,7 @@ class MovieResultModifier(Plugin):
if result and result.get('imdb'):
return mergeDicts(result, self.getLibraryTags(result['imdb']))
return result
class Show(ModifierBase):
pass

View File

@@ -3,3 +3,15 @@ from couchpotato.core.providers.base import Provider
class MovieProvider(Provider):
type = 'movie'
class ShowProvider(Provider):
type = 'show'
class SeasonProvider(Provider):
type = 'season'
class EpisodeProvider(Provider):
type = 'episode'

View File

@@ -74,7 +74,7 @@ class CouchPotatoApi(MovieProvider):
return True
def getInfo(self, identifier = None):
def getInfo(self, identifier = None, **kwargs):
if not identifier:
return
@@ -97,7 +97,7 @@ class CouchPotatoApi(MovieProvider):
if not ignore: ignore = []
if not movies: movies = []
suggestions = self.getJsonData(self.urls['suggest'], params = {
suggestions = self.getJsonData(self.urls['suggest'], data = {
'movies': ','.join(movies),
'ignore': ','.join(ignore),
}, headers = self.getRequestHeaders())

View File

@@ -46,7 +46,7 @@ class OMDBAPI(MovieProvider):
return []
def getInfo(self, identifier = None):
def getInfo(self, identifier = None, **kwargs):
if not identifier:
return {}
@@ -82,6 +82,10 @@ class OMDBAPI(MovieProvider):
if tmp_movie.get(key).lower() == 'n/a':
del movie[key]
# Ignore series from omdbapi for now, should we use this in the future?
if movie.get('Type') == "series":
return
year = tryInt(movie.get('Year', ''))
movie_data = {

View File

@@ -11,8 +11,8 @@ log = CPLog(__name__)
class TheMovieDb(MovieProvider):
def __init__(self):
addEvent('info.search', self.search, priority = 2)
addEvent('movie.search', self.search, priority = 2)
#addEvent('info.search', self.search, priority = 2)
#addEvent('movie.search', self.search, priority = 2)
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info_by_tmdb', self.getInfo)
@@ -45,7 +45,7 @@ class TheMovieDb(MovieProvider):
nr = 0
for movie in raw:
results.append(self.parseMovie(movie, with_titles = False))
results.append(self.parseMovie(movie, extended = False))
nr += 1
if nr == limit:
@@ -61,7 +61,7 @@ class TheMovieDb(MovieProvider):
return results
def getInfo(self, identifier = None):
def getInfo(self, identifier = None, extended = True):
if not identifier:
return {}
@@ -73,14 +73,14 @@ class TheMovieDb(MovieProvider):
try:
log.debug('Getting info: %s', cache_key)
movie = tmdb3.Movie(identifier)
result = self.parseMovie(movie)
result = self.parseMovie(movie, extended = extended)
self.setCache(cache_key, result)
except:
pass
return result
def parseMovie(self, movie, with_titles = True):
def parseMovie(self, movie, extended = True):
cache_key = 'tmdb.cache.%s' % movie.id
movie_data = self.getCache(cache_key)
@@ -92,6 +92,14 @@ class TheMovieDb(MovieProvider):
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {}
}
# Genres
try:
genres = [genre.name for genre in movie.genres]
@@ -103,18 +111,23 @@ class TheMovieDb(MovieProvider):
if not movie.releasedate or year == '1900' or year.lower() == 'none':
year = None
# Gather actors data
actors = {}
if extended:
for cast_item in movie.cast:
try:
actors[toUnicode(cast_item.name)] = toUnicode(cast_item.character)
images['actors'][toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original')
except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
movie_data = {
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.id,
'titles': [toUnicode(movie.title)],
'original_title': movie.originaltitle,
'images': {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
},
'images': images,
'imdb': movie.imdb,
'runtime': movie.runtime,
'released': str(movie.releasedate),
@@ -122,12 +135,13 @@ class TheMovieDb(MovieProvider):
'plot': movie.overview,
'genres': genres,
'collection': getattr(movie.collection, 'name', None),
'actor_roles': actors
}
movie_data = dict((k, v) for k, v in movie_data.iteritems() if v)
# Add alternative names
if with_titles:
if extended:
movie_data['titles'].append(movie.originaltitle)
for alt in movie.alternate_titles:
alt_name = alt.title
@@ -145,7 +159,7 @@ class TheMovieDb(MovieProvider):
try:
image_url = getattr(movie, type).geturl(size = 'original')
except:
log.debug('Failed getting %s.%s for "%s"', (type, size, movie.title))
log.debug('Failed getting %s.%s for "%s"', (type, size, movie))
return image_url

View File

@@ -0,0 +1,24 @@
from .main import TheTVDb
def start():
    """Plugin entry point: create and return the TheTVDb provider instance."""
    return TheTVDb()
# Settings definition for the TheTVDB info provider (hidden: api key only).
config = [{
    'name': 'thetvdb',
    'groups': [
        {
            'tab': 'providers',
            # Was 'tmdb' — a copy-paste from the TheMovieDB provider config.
            # Use the provider's own name so the group id matches the section.
            'name': 'thetvdb',
            'label': 'TheTVDB',
            'hidden': True,
            'description': 'Used for all calls to TheTVDB.',
            'options': [
                {
                    'name': 'api_key',
                    'default': '7966C02F860586D2',
                    'label': 'Api Key',
                },
            ],
        },
    ],
}]

View File

@@ -0,0 +1,468 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.info.base import ShowProvider
from couchpotato.environment import Env
from tvdb_api import tvdb_api, tvdb_exceptions
from datetime import datetime
import traceback
import os
log = CPLog(__name__)
# TODO: Consider grabbing zips to put less strain on tvdb
# TODO: Unicode stuff (check)
# TODO: Notify frontend on error (tvdb down at the moment)
# TODO: Expose apikey in setting so it can be changed by user
class TheTVDb(ShowProvider):
def __init__(self):
addEvent('info.search', self.search, priority = 1)
addEvent('show.search', self.search, priority = 1)
addEvent('show.info', self.getShowInfo, priority = 1)
addEvent('season.info', self.getSeasonInfo, priority = 1)
addEvent('episode.info', self.getEpisodeInfo, priority = 1)
self.tvdb_api_parms = {
'apikey': self.conf('api_key'),
'banners': True,
'language': 'en',
'cache': os.path.join(Env.get('cache_dir'), 'thetvdb_api'),
}
self._setup()
def _setup(self):
self.tvdb = tvdb_api.Tvdb(**self.tvdb_api_parms)
self.valid_languages = self.tvdb.config['valid_languages']
def search(self, q, limit = 12, language = 'en'):
''' Find show by name
show = { 'id': 74713,
'language': 'en',
'lid': 7,
'seriesid': '74713',
'seriesname': u'Breaking Bad',}
'''
if self.isDisabled():
return False
if language != self.tvdb_api_parms['language'] and language in self.valid_languages:
self.tvdb_api_parms['language'] = language
self._setup()
search_string = simplifyString(q)
cache_key = 'thetvdb.cache.%s.%s' % (search_string, limit)
results = self.getCache(cache_key)
if not results:
log.debug('Searching for show: %s', q)
raw = None
try:
raw = self.tvdb.search(search_string)
except (tvdb_exceptions.tvdb_error, IOError), e:
log.error('Failed searching TheTVDB for "%s": %s', (search_string, traceback.format_exc()))
return False
results = []
if raw:
try:
nr = 0
for show_info in raw:
show = self.tvdb[int(show_info['id'])]
results.append(self._parseShow(show))
nr += 1
if nr == limit:
break
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
self.setCache(cache_key, results)
return results
except (tvdb_exceptions.tvdb_error, IOError), e:
log.error('Failed parsing TheTVDB for "%s": %s', (show, traceback.format_exc()))
return False
return results
def getShow(self, identifier = None):
show = None
try:
log.debug('Getting show: %s', identifier)
show = self.tvdb[int(identifier)]
except (tvdb_exceptions.tvdb_error, IOError), e:
log.error('Failed to getShowInfo for show id "%s": %s', (identifier, traceback.format_exc()))
return None
return show
def getShowInfo(self, identifier = None):
if not identifier:
return None
cache_key = 'thetvdb.cache.%s' % identifier
log.debug('Getting showInfo: %s', cache_key)
result = self.getCache(cache_key) or {}
if result:
return result
show = self.getShow(identifier = identifier)
if show:
result = self._parseShow(show)
self.setCache(cache_key, result)
return result
def getSeasonInfo(self, identifier = None, params = {}):
"""Either return a list of all seasons or a single season by number.
identifier is the show 'id'
"""
if not identifier:
return False
season_identifier = params.get('season_identifier', None)
# season_identifier must contain the 'show id : season number' since there is no tvdb id
# for season and we need a reference to both the show id and season number
if season_identifier:
try: season_identifier = int(season_identifier.split(':')[1])
except: return False
cache_key = 'thetvdb.cache.%s.%s' % (identifier, season_identifier)
log.debug('Getting SeasonInfo: %s', cache_key)
result = self.getCache(cache_key) or {}
if result:
return result
try:
show = self.tvdb[int(identifier)]
except (tvdb_exceptions.tvdb_error, IOError), e:
log.error('Failed parsing TheTVDB SeasonInfo for "%s" id "%s": %s', (show, identifier, traceback.format_exc()))
return False
result = []
for number, season in show.items():
if season_identifier is not None and number == season_identifier:
result = self._parseSeason(show, (number, season))
self.setCache(cache_key, result)
return result
else:
result.append(self._parseSeason(show, (number, season)))
self.setCache(cache_key, result)
return result
def getEpisodeInfo(self, identifier = None, params = {}):
"""Either return a list of all episodes or a single episode.
If episode_identifer contains an episode number to search for
"""
season_identifier = params.get('season_identifier', None)
episode_identifier = params.get('episode_identifier', None)
if not identifier and season_identifier is None:
return False
# season_identifier must contain the 'show id : season number' since there is no tvdb id
# for season and we need a reference to both the show id and season number
if season_identifier:
try:
identifier, season_identifier = season_identifier.split(':')
season_identifier = int(season_identifier)
except: return None
cache_key = 'thetvdb.cache.%s.%s.%s' % (identifier, episode_identifier, season_identifier)
log.debug('Getting EpisodeInfo: %s', cache_key)
result = self.getCache(cache_key) or {}
if result:
return result
try:
show = self.tvdb[int(identifier)]
except (tvdb_exceptions.tvdb_error, IOError), e:
log.error('Failed parsing TheTVDB EpisodeInfo for "%s" id "%s": %s', (show, identifier, traceback.format_exc()))
return False
result = []
for number, season in show.items():
if season_identifier is not None and number != season_identifier:
continue
for episode in season.values():
if episode_identifier is not None and episode['id'] == toUnicode(episode_identifier):
result = self._parseEpisode(show, episode)
self.setCache(cache_key, result)
return result
else:
result.append(self._parseEpisode(show, episode))
self.setCache(cache_key, result)
return result
def _parseShow(self, show):
"""
'actors': u'|Bryan Cranston|Aaron Paul|Dean Norris|RJ Mitte|Betsy Brandt|Anna Gunn|Laura Fraser|Jesse Plemons|Christopher Cousins|Steven Michael Quezada|Jonathan Banks|Giancarlo Esposito|Bob Odenkirk|',
'added': None,
'addedby': None,
'airs_dayofweek': u'Sunday',
'airs_time': u'9:00 PM',
'banner': u'http://thetvdb.com/banners/graphical/81189-g13.jpg',
'contentrating': u'TV-MA',
'fanart': u'http://thetvdb.com/banners/fanart/original/81189-28.jpg',
'firstaired': u'2008-01-20',
'genre': u'|Crime|Drama|Suspense|',
'id': u'81189',
'imdb_id': u'tt0903747',
'language': u'en',
'lastupdated': u'1376620212',
'network': u'AMC',
'networkid': None,
'overview': u"Walter White, a struggling high school chemistry teacher is diagnosed with advanced lung cancer. He turns to a life of crime, producing and selling methamphetamine accompanied by a former student, Jesse Pinkman with the aim of securing his family's financial future before he dies.",
'poster': u'http://thetvdb.com/banners/posters/81189-22.jpg',
'rating': u'9.3',
'ratingcount': u'473',
'runtime': u'60',
'seriesid': u'74713',
'seriesname': u'Breaking Bad',
'status': u'Continuing',
'zap2it_id': u'SH01009396'
"""
#
# NOTE: show object only allows direct access via
# show['id'], not show.get('id')
#
# TODO: Make sure we have a valid show id, not '' or None
#if len (show['id']) is 0:
# return None
## Images
poster = show['poster'] or None
backdrop = show['fanart'] or None
genres = [] if show['genre'] is None else show['genre'].strip('|').split('|')
if show['firstaired'] is not None:
try: year = datetime.strptime(show['firstaired'], '%Y-%m-%d').year
except: year = None
else:
year = None
try:
id = int(show['id'])
except:
id = None
show_data = {
'id': id,
'type': 'show',
'primary_provider': 'thetvdb',
'titles': [show['seriesname'] or u'', ],
'original_title': show['seriesname'] or u'',
'images': {
'poster': [poster] if poster else [],
'backdrop': [backdrop] if backdrop else [],
'poster_original': [],
'backdrop_original': [],
},
'year': year,
'genres': genres,
'imdb': show['imdb_id'] or None,
'zap2it_id': show['zap2it_id'] or None,
'seriesid': show['seriesid'] or None,
'network': show['network'] or None,
'networkid': show['networkid'] or None,
'airs_dayofweek': show['airs_dayofweek'] or None,
'airs_time': show['airs_time'] or None,
'firstaired': show['firstaired'] or None,
'released': show['firstaired'] or None,
'runtime': show['runtime'] or None,
'contentrating': show['contentrating'] or None,
'rating': show['rating'] or None,
'ratingcount': show['ratingcount'] or None,
'actors': show['actors'] or None,
'lastupdated': show['lastupdated'] or None,
'status': show['status'] or None,
'language': show['language'] or None,
}
show_data = dict((k, v) for k, v in show_data.iteritems() if v)
# Add alternative titles
try:
raw = self.tvdb.search(show['seriesname'])
if raw:
for show_info in raw:
if show_info['id'] == show_data['id'] and show_info.get('aliasnames', None):
for alt_name in show_info['aliasnames'].split('|'):
show_data['titles'].append(toUnicode(alt_name))
except (tvdb_exceptions.tvdb_error, IOError), e:
log.error('Failed searching TheTVDB for "%s": %s', (show['seriesname'], traceback.format_exc()))
return show_data
def _parseSeason(self, show, season_tuple):
"""
contains no data
"""
number, season = season_tuple
title = toUnicode('%s - Season %s' % (show['seriesname'] or u'', str(number)))
poster = []
try:
for id, data in show.data['_banners']['season']['season'].items():
if data.get('season', None) == str(number) and data['bannertype'] == 'season' and data['bannertype2'] == 'season':
poster.append(data.get('_bannerpath'))
break # Only really need one
except:
pass
try:
id = (show['id'] + ':' + str(number))
except:
id = None
# XXX: work on title; added defualt_title to fix an error
season_data = {
'id': id,
'type': 'season',
'primary_provider': 'thetvdb',
'titles': [title, ],
'original_title': title,
'via_thetvdb': True,
'parent_identifier': show['id'] or None,
'seasonnumber': str(number),
'images': {
'poster': poster,
'backdrop': [],
'poster_original': [],
'backdrop_original': [],
},
'year': None,
'genres': None,
'imdb': None,
}
season_data = dict((k, v) for k, v in season_data.iteritems() if v)
return season_data
def _parseEpisode(self, show, episode):
"""
('episodenumber', u'1'),
('thumb_added', None),
('rating', u'7.7'),
('overview',
u'Experienced waitress Max Black meets her new co-worker, former rich-girl Caroline Channing, and puts her skills to the test at an old but re-emerging Brooklyn diner. Despite her initial distaste for Caroline, Max eventually softens and the two team up for a new business venture.'),
('dvd_episodenumber', None),
('dvd_discid', None),
('combined_episodenumber', u'1'),
('epimgflag', u'7'),
('id', u'4099506'),
('seasonid', u'465948'),
('thumb_height', u'225'),
('tms_export', u'1374789754'),
('seasonnumber', u'1'),
('writer', u'|Michael Patrick King|Whitney Cummings|'),
('lastupdated', u'1371420338'),
('filename', u'http://thetvdb.com/banners/episodes/248741/4099506.jpg'),
('absolute_number', u'1'),
('ratingcount', u'102'),
('combined_season', u'1'),
('thumb_width', u'400'),
('imdb_id', u'tt1980319'),
('director', u'James Burrows'),
('dvd_chapter', None),
('dvd_season', None),
('gueststars',
u'|Brooke Lyons|Noah Mills|Shoshana Bush|Cale Hartmann|Adam Korson|Alex Enriquez|Matt Cook|Bill Parks|Eugene Shaw|Sergey Brusilovsky|Greg Lewis|Cocoa Brown|Nick Jameson|'),
('seriesid', u'248741'),
('language', u'en'),
('productioncode', u'296793'),
('firstaired', u'2011-09-19'),
('episodename', u'Pilot')]
"""
poster = episode.get('filename', [])
backdrop = []
genres = []
plot = "%s - %sx%s - %s" % (show['seriesname'] or u'',
episode.get('seasonnumber', u'?'),
episode.get('episodenumber', u'?'),
episode.get('overview', u''))
if episode.get('firstaired', None) is not None:
try: year = datetime.strptime(episode['firstaired'], '%Y-%m-%d').year
except: year = None
else:
year = None
try:
id = int(episode['id'])
except:
id = None
episode_data = {
'id': id,
'type': 'episode',
'primary_provider': 'thetvdb',
'via_thetvdb': True,
'thetvdb_id': id,
'titles': [episode.get('episodename', u''), ],
'original_title': episode.get('episodename', u'') ,
'images': {
'poster': [poster] if poster else [],
'backdrop': [backdrop] if backdrop else [],
'poster_original': [],
'backdrop_original': [],
},
'imdb': episode.get('imdb_id', None),
'runtime': None,
'released': episode.get('firstaired', None),
'year': year,
'plot': plot,
'genres': genres,
'parent_identifier': show['id'] or None,
'seasonnumber': episode.get('seasonnumber', None),
'episodenumber': episode.get('episodenumber', None),
'combined_episodenumber': episode.get('combined_episodenumber', None),
'absolute_number': episode.get('absolute_number', None),
'combined_season': episode.get('combined_season', None),
'productioncode': episode.get('productioncode', None),
'seriesid': episode.get('seriesid', None),
'seasonid': episode.get('seasonid', None),
'firstaired': episode.get('firstaired', None),
'thumb_added': episode.get('thumb_added', None),
'thumb_height': episode.get('thumb_height', None),
'thumb_width': episode.get('thumb_width', None),
'rating': episode.get('rating', None),
'ratingcount': episode.get('ratingcount', None),
'epimgflag': episode.get('epimgflag', None),
'dvd_episodenumber': episode.get('dvd_episodenumber', None),
'dvd_discid': episode.get('dvd_discid', None),
'dvd_chapter': episode.get('dvd_chapter', None),
'dvd_season': episode.get('dvd_season', None),
'tms_export': episode.get('tms_export', None),
'writer': episode.get('writer', None),
'director': episode.get('director', None),
'gueststars': episode.get('gueststars', None),
'lastupdated': episode.get('lastupdated', None),
'language': episode.get('language', None),
}
episode_data = dict((k, v) for k, v in episode_data.iteritems() if v)
return episode_data
#def getImage(self, show, type = 'poster', size = 'cover'):
#""""""
## XXX: Need to implement size
#image_url = ''
#for res, res_data in show['_banners'].get(type, {}).items():
#for bid, banner_info in res_data.items():
#image_url = banner_info.get('_bannerpath', '')
#break
#return image_url
def isDisabled(self):
if self.conf('api_key') == '':
log.error('No API key provided.')
True
else:
False

View File

@@ -0,0 +1,24 @@
from .main import Xem
def start():
    """Plugin entry point: create and return the Xem provider instance."""
    return Xem()
# Settings definition for the TheXem mapping provider. The group is hidden:
# the only option is the on/off switch used internally.
config = [{
    'name': 'xem',
    'groups': [
        {
            'name': 'xem',
            'tab': 'providers',
            'label': 'TheXem',
            'description': 'Used for all calls to TheXem.',
            'hidden': True,
            'options': [
                {
                    'name': 'enabled',
                    'label': 'Enabled',
                    'default': True,
                },
            ],
        },
    ],
}]

View File

@@ -0,0 +1,184 @@
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.info.base import ShowProvider
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
import traceback
log = CPLog(__name__)
class Xem(ShowProvider):
    '''Provider for TheXem (thexem.de) episode/season/absolute number mappings.

    Mapping Information
    ===================

    Single
    ------
    You will need the id / identifier of the show e.g. tvdb-id for American Dad! is 73141
    the origin is the name of the site/entity the episode, season (and/or absolute) numbers are based on

    http://thexem.de/map/single?id=&origin=&episode=&season=&absolute=

    episode, season and absolute are all optional but it wont work if you don't provide either episode and season OR absolute in
    addition you can provide destination as the name of the wished destination, if not provided it will output all available

    When a destination has two or more addresses another entry will be added as _ ... for now the second address gets the index "2"
    (the first index is omitted) and so on

    http://thexem.de/map/single?id=7529&origin=anidb&season=1&episode=2&destination=trakt
    {
        "result":"success",
        "data":{
            "trakt":  {"season":1,"episode":3,"absolute":3},
            "trakt_2":{"season":1,"episode":4,"absolute":4}
        },
        "message":"single mapping for 7529 on anidb."
    }

    All
    ---
    Basically same as "single" just a little easier
    The origin address is added into the output too!!

    http://thexem.de/map/all?id=7529&origin=anidb

    All Names
    ---------
    Get all names xem has to offer
    non optional params: origin(an entity string like 'tvdb')
    optional params: season, language
    - season: a season number or a list like: 1,3,5 or a compare operator like ne,gt,ge,lt,le,eq and a season number. default would
      return all
    - language: a language string like 'us' or 'jp' default is all
    - defaultNames: 1(yes) or 0(no) should the default names be added to the list ? default is 0(no)

    http://thexem.de/map/allNames?origin=tvdb&season=le1
    {
        "result": "success",
        "data": {
            "248812": ["Dont Trust the Bitch in Apartment 23", "Don't Trust the Bitch in Apartment 23"],
            "257571": ["Nazo no Kanojo X"],
            "257875": ["Lupin III - Mine Fujiko to Iu Onna", "Lupin III Fujiko to Iu Onna", "Lupin the Third - Mine Fujiko to Iu Onna"]
        },
        "message": ""
    }
    '''

    def __init__(self):
        addEvent('show.info', self.getShowInfo, priority = 5)
        addEvent('episode.info', self.getEpisodeInfo, priority = 5)

        self.config = {}
        self.config['base_url'] = "http://thexem.de"
        self.config['url_single'] = u"%(base_url)s/map/single?" % self.config
        self.config['url_all'] = u"%(base_url)s/map/all?" % self.config
        self.config['url_names'] = u"%(base_url)s/map/names?" % self.config
        self.config['url_all_names'] = u"%(base_url)s/map/allNames?" % self.config

    # TODO: Also get show aliases (store as titles)
    def getShowInfo(self, identifier = None):
        """Return xem mappings for a show: 'map_episode', 'map_absolute' and
        'map_names' keys (any of which may be missing when a call fails)."""
        if self.isDisabled():
            return {}
        if not identifier:
            # Guard: without an id the requests below would query 'id=None'.
            return {}

        cache_key = 'xem.cache.%s' % identifier
        log.debug('Getting showInfo: %s', cache_key)
        result = self.getCache(cache_key) or {}
        if result:
            return result

        # Create season/episode and absolute mappings
        url = self.config['url_all'] + "id=%s&origin=tvdb" % tryUrlencode(identifier)
        response = self.getJsonData(url)
        if response and response.get('result') == 'success':
            result = self._parse(response.get('data', None))

        # Create name alias mappings
        url = self.config['url_names'] + "id=%s&origin=tvdb" % tryUrlencode(identifier)
        response = self.getJsonData(url)
        if response and response.get('result') == 'success':
            result.update({'map_names': response.get('data', None)})

        self.setCache(cache_key, result)
        return result

    def getEpisodeInfo(self, identifier = None, params = None):
        """Return the xem mappings for one episode (and optionally its
        absolute number). params needs 'episode' and 'season_identifier'
        ('<show id>:<season number>'); 'absolute' is optional."""
        params = params or {}  # avoid the mutable-default-argument pitfall

        episode = params.get('episode', None)
        if episode is None:
            return False

        season_identifier = params.get('season_identifier', None)
        if season_identifier is None:
            return False

        absolute = params.get('absolute', None)

        # season_identifier must contain the 'show id : season number' since
        # there is no tvdb id for seasons; it also provides the show id.
        # (The original left 'season' unbound for falsy non-None values.)
        try:
            identifier, season_part = season_identifier.split(':')
            season = int(season_part)
        except: return False

        result = self.getShowInfo(identifier)
        mapping = {}
        if result:
            map_episode = result.get('map_episode', {}).get(season, {}).get(episode, {})
            if map_episode:
                mapping.update({'map_episode': map_episode})

            if absolute:
                map_absolute = result.get('map_absolute', {}).get(absolute, {})
                if map_absolute:
                    mapping.update({'map_absolute': map_absolute})

            map_names = result.get('map_names', {}).get(toUnicode(season), {})
            if map_names:
                mapping.update({'map_names': map_names})

        return mapping

    def _parse(self, data, master = 'tvdb'):
        '''Parse a xem "all" map list into a dict keyed by the master numbering.

        To retrieve the scene mapping for e.g. season 1, episode 1:
            if 'scene' in mapping['map_episode'][1][1]:
                print mapping['map_episode'][1][1]['scene']['season']
        '''
        if not isinstance(data, list):
            return {}

        # 'mapping' instead of 'map' so the builtin isn't shadowed.
        mapping = {'map_episode': {}, 'map_absolute': {}}
        for entry in data:
            origin = entry.pop(master, None)
            if origin is None:
                continue  # No master origin to map to
            mapping['map_episode'].setdefault(origin['season'], {}).setdefault(origin['episode'], entry.copy())
            mapping['map_absolute'].setdefault(origin['absolute'], entry.copy())

        return mapping

    def isDisabled(self):
        """False (enabled) when the 'enabled' setting is on, True otherwise."""
        # Always enabled when run directly, for the debug main() below.
        if __name__ == '__main__':
            return False
        if self.conf('enabled'):
            return False
        else:
            return True
#XXX: REMOVE, just for degugging
def main():
"""Simple example of using xem
"""
xem_instance = Xem()
print xem_instance.getShowInfo(identifier=73141) # (American Dad)
if __name__ == '__main__':
main()

View File

@@ -65,7 +65,7 @@ class XBMC(MetaDataBase):
name = type
try:
if data['library'].get(type):
if movie_info.get(type):
el = SubElement(nfoxml, name)
el.text = toUnicode(movie_info.get(type, ''))
except:
@@ -89,10 +89,18 @@ class XBMC(MetaDataBase):
genres.text = toUnicode(genre)
# Actors
for actor in movie_info.get('actors', []):
actors = SubElement(nfoxml, 'actor')
name = SubElement(actors, 'name')
name.text = toUnicode(actor)
for actor_name in movie_info.get('actor_roles', {}):
role_name = movie_info['actor_roles'][actor_name]
actor = SubElement(nfoxml, 'actor')
name = SubElement(actor, 'name')
name.text = toUnicode(actor_name)
if role_name:
role = SubElement(actor, 'role')
role.text = toUnicode(role_name)
if movie_info['images']['actors'].get(actor_name):
thumb = SubElement(actor, 'thumb')
thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name))
# Directors
for director_name in movie_info.get('directors', []):
@@ -112,6 +120,51 @@ class XBMC(MetaDataBase):
sorttitle = SubElement(nfoxml, 'sorttitle')
sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year'))
# Images
for image_url in movie_info['images']['poster_original']:
image = SubElement(nfoxml, 'thumb')
image.text = toUnicode(image_url)
fanart = SubElement(nfoxml, 'fanart')
for image_url in movie_info['images']['backdrop_original']:
image = SubElement(fanart, 'thumb')
image.text = toUnicode(image_url)
# Add trailer if found
trailer_found = False
if data.get('renamed_files'):
for filename in data.get('renamed_files'):
if 'trailer' in filename:
trailer = SubElement(nfoxml, 'trailer')
trailer.text = toUnicode(filename)
trailer_found = True
if not trailer_found and data['files'].get('trailer'):
trailer = SubElement(nfoxml, 'trailer')
trailer.text = toUnicode(data['files']['trailer'][0])
# Add file metadata
fileinfo = SubElement(nfoxml, 'fileinfo')
streamdetails = SubElement(fileinfo, 'streamdetails')
# Video data
if data['meta_data'].get('video'):
video = SubElement(streamdetails, 'video')
codec = SubElement(video, 'codec')
codec.text = toUnicode(data['meta_data']['video'])
aspect = SubElement(video, 'aspect')
aspect.text = str(data['meta_data']['aspect'])
width = SubElement(video, 'width')
width.text = str(data['meta_data']['resolution_width'])
height = SubElement(video, 'height')
height.text = str(data['meta_data']['resolution_height'])
# Audio data
if data['meta_data'].get('audio'):
audio = SubElement(streamdetails, 'audio')
codec = SubElement(audio, 'codec')
codec.text = toUnicode(data['meta_data'].get('audio'))
channels = SubElement(audio, 'channels')
channels.text = toUnicode(data['meta_data'].get('audio_channels'))
# Clean up the xml and return it
nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))
xml_string = nfoxml.toprettyxml(indent = ' ')

View File

@@ -2,6 +2,9 @@ from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.providers.base import MultiProvider
from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
import re
@@ -9,8 +12,12 @@ import traceback
log = CPLog(__name__)
class BinSearch(MultiProvider):
class BinSearch(NZBProvider):
def getTypes(self):
return [Movie, Season, Episode]
class Base(NZBProvider):
urls = {
'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s',
@@ -20,21 +27,9 @@ class BinSearch(NZBProvider):
http_time_between_calls = 4 # Seconds
def _search(self, movie, quality, results):
def _search(self, media, quality, results):
arguments = tryUrlencode({
'q': movie['library']['identifier'],
'm': 'n',
'max': 400,
'adv_age': Env.setting('retention', 'nzb'),
'adv_sort': 'date',
'adv_col': 'on',
'adv_nfo': 'on',
'minsize': quality.get('size_min'),
'maxsize': quality.get('size_max'),
})
data = self.getHTMLData(self.urls['search'] % arguments)
data = self.getHTMLData(self.urls['search'] % self.buildUrl(media, quality))
if data:
try:
@@ -90,15 +85,62 @@ class BinSearch(NZBProvider):
def download(self, url = '', nzb_id = ''):
params = {
data = {
'action': 'nzb',
nzb_id: 'on'
}
try:
return self.urlopen(url, params = params, show_error = False)
return self.urlopen(url, data = data, show_error = False)
except:
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
return 'try_next'
class Movie(MovieProvider, Base):

    def buildUrl(self, media, quality):
        """Build the BinSearch query string for a movie search."""
        params = {
            'q': media['library']['identifier'],  # TODO should this use library.title?
            'm': 'n',
            'max': 400,
            'adv_age': Env.setting('retention', 'nzb'),
            'adv_sort': 'date',
            'adv_col': 'on',
            'adv_nfo': 'on',
            'minsize': quality.get('size_min'),
            'maxsize': quality.get('size_max'),
        }
        return tryUrlencode(params)
class Season(SeasonProvider, Base):

    def buildUrl(self, media, quality):
        """Build the BinSearch query string for a season search."""
        title = fireEvent('library.query', media['library'], single = True)
        params = {
            'q': title,
            'm': 'n',
            'max': 400,
            'adv_age': Env.setting('retention', 'nzb'),
            'adv_sort': 'date',
            'adv_col': 'on',
            'adv_nfo': 'on',
            'minsize': quality.get('size_min'),
            'maxsize': quality.get('size_max'),
        }
        return tryUrlencode(params)
class Episode(EpisodeProvider, Base):

    def buildUrl(self, media, quality):
        """Build the BinSearch query string for an episode search."""
        title = fireEvent('library.query', media['library'], single = True)
        params = {
            'q': title,
            'm': 'n',
            'max': 400,
            'adv_age': Env.setting('retention', 'nzb'),
            'adv_sort': 'date',
            'adv_col': 'on',
            'adv_nfo': 'on',
            'minsize': quality.get('size_min'),
            'maxsize': quality.get('size_max'),
        }
        return tryUrlencode(params)

View File

@@ -1,8 +1,10 @@
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import ResultList
from couchpotato.core.providers.base import MultiProvider, ResultList
from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
@@ -10,43 +12,44 @@ from urllib2 import HTTPError
from urlparse import urlparse
import time
import traceback
import urllib2
log = CPLog(__name__)
class Newznab(MultiProvider):
class Newznab(NZBProvider, RSS):
def getTypes(self):
return [Movie, Season, Episode]
class Base(NZBProvider, RSS):
urls = {
'download': 'get&id=%s',
'detail': 'details&id=%s',
'search': 'movie',
'download': 't=get&id=%s'
}
limits_reached = {}
http_time_between_calls = 1 # Seconds
def search(self, movie, quality):
def search(self, media, quality):
hosts = self.getHosts()
results = ResultList(self, movie, quality, imdb_results = True)
results = ResultList(self, media, quality, imdb_results = True)
for host in hosts:
if self.isDisabled(host):
continue
self._searchOnHost(host, movie, quality, results)
self._searchOnHost(host, media, quality, results)
return results
def _searchOnHost(self, host, movie, quality, results):
def _searchOnHost(self, host, media, quality, results):
arguments = tryUrlencode({
'imdbid': movie['library']['identifier'].replace('tt', ''),
'apikey': host['api_key'],
'extended': 1
})
url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments)
query = self.buildUrl(media, host['api_key'])
url = '%s&%s' % (self.getUrl(host['host']), query)
nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
@@ -87,7 +90,7 @@ class Newznab(NZBProvider, RSS):
'name_extra': name_extra,
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
'url': (self.getUrl(host['host'], self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)),
'content': self.getTextElement(nzb, 'description'),
'score': host['extra_score'],
@@ -127,11 +130,11 @@ class Newznab(NZBProvider, RSS):
if result:
return result
def getUrl(self, host, type):
def getUrl(self, host):
if '?page=newznabapi' in host:
return cleanHost(host)[:-1] + '&t=' + type
return cleanHost(host)[:-1] + '&'
return cleanHost(host) + 'api?t=' + type
return cleanHost(host) + 'api?'
def isDisabled(self, host = None):
return not self.isEnabled(host)
@@ -159,7 +162,16 @@ class Newznab(NZBProvider, RSS):
return 'try_next'
try:
data = self.urlopen(url, show_error = False)
# Get final redirected url
log.debug('Checking %s for redirects.', url)
req = urllib2.Request(url)
req.add_header('User-Agent', self.user_agent)
res = urllib2.urlopen(req)
finalurl = res.geturl()
if finalurl != url:
log.debug('Redirect url used: %s', finalurl)
data = self.urlopen(finalurl, show_error = False)
self.limits_reached[host] = False
return data
except HTTPError, e:
@@ -174,3 +186,45 @@ class Newznab(NZBProvider, RSS):
log.error('Failed download from %s: %s', (host, traceback.format_exc()))
return 'try_next'
class Movie(MovieProvider, Base):

    def buildUrl(self, media, api_key):
        """Build a newznab movie query (t=movie) keyed by imdb id."""
        params = {
            't': 'movie',
            'imdbid': media['library']['identifier'].replace('tt', ''),
            'apikey': api_key,
            'extended': 1
        }
        return tryUrlencode(params)
class Season(SeasonProvider, Base):

    def buildUrl(self, media, api_key):
        """Build a newznab tvsearch query for a whole season."""
        title = fireEvent('library.query', media['library'], include_identifier = False, single = True)
        identifier = fireEvent('library.identifier', media['library'], single = True)
        params = {
            't': 'tvsearch',
            'q': title,
            'season': identifier['season'],
            'apikey': api_key,
            'extended': 1
        }
        return tryUrlencode(params)
class Episode(EpisodeProvider, Base):

    def buildUrl(self, media, api_key):
        """Build a newznab tvsearch query for a single episode."""
        title = fireEvent('library.query', media['library'], include_identifier = False, single = True)
        identifier = fireEvent('library.identifier', media['library'], single = True)
        params = {
            't': 'tvsearch',
            'q': title,
            'season': identifier['season'],
            'ep': identifier['episode'],
            'apikey': api_key,
            'extended': 1
        }
        return tryUrlencode(params)

View File

@@ -3,14 +3,22 @@ from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.providers.base import MultiProvider
from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
from couchpotato.core.providers.nzb.base import NZBProvider
from dateutil.parser import parse
import time
log = CPLog(__name__)
class NZBClub(MultiProvider):
class NZBClub(NZBProvider, RSS):
def getTypes(self):
return [Movie, Season, Episode]
class Base(NZBProvider, RSS):
urls = {
'search': 'http://www.nzbclub.com/nzbfeed.aspx?%s',
@@ -18,20 +26,9 @@ class NZBClub(NZBProvider, RSS):
http_time_between_calls = 4 #seconds
def _searchOnTitle(self, title, movie, quality, results):
def _search(self, media, quality, results):
q = '"%s %s"' % (title, movie['library']['year'])
params = tryUrlencode({
'q': q,
'ig': 1,
'rpp': 200,
'st': 5,
'sp': 1,
'ns': 1,
})
nzbs = self.getRSSData(self.urls['search'] % params)
nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media))
for nzb in nzbs:
@@ -78,3 +75,42 @@ class NZBClub(NZBProvider, RSS):
return False
return True
class Movie(MovieProvider, Base):

    def buildUrl(self, media):
        """Build the NZBClub RSS search query for a movie.

        The movie title is wrapped in double quotes for an exact-phrase
        search; the remaining parameters mirror the site's feed options.
        """
        title = fireEvent('library.query', media['library'], single = True)
        return tryUrlencode({
            'q': '"%s"' % title,
            'ig': 1,
            'rpp': 200,
            'st': 5,
            'sp': 1,
            'ns': 1,
        })
class Season(SeasonProvider, Base):

    def buildUrl(self, media):
        """Build the NZBClub RSS search query for a full season.

        Unlike Movie.buildUrl, the query string is not quoted.
        """
        params = {
            'q': fireEvent('library.query', media['library'], single = True),
            'ig': 1,
            'rpp': 200,
            'st': 5,
            'sp': 1,
            'ns': 1,
        }
        return tryUrlencode(params)
class Episode(EpisodeProvider, Base):

    def buildUrl(self, media):
        """Build the NZBClub RSS search query for a single episode.

        The library query (which includes the episode identifier) is used
        as-is, with the same feed options as the other search types.
        """
        search = fireEvent('library.query', media['library'], single = True)
        params = {
            'q': search,
            'ig': 1,
            'rpp': 200,
            'st': 5,
            'sp': 1,
            'ns': 1,
        }
        return tryUrlencode(params)

View File

@@ -3,6 +3,9 @@ from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.providers.base import MultiProvider
from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
@@ -11,8 +14,13 @@ import time
log = CPLog(__name__)
class NzbIndex(MultiProvider):
class NzbIndex(NZBProvider, RSS):
def getTypes(self):
return [Movie, Season, Episode]
class Base(NZBProvider, RSS):
urls = {
'download': 'https://www.nzbindex.com/download/',
@@ -21,28 +29,44 @@ class NzbIndex(NZBProvider, RSS):
http_time_between_calls = 1 # Seconds
def _searchOnTitle(self, title, movie, quality, results):
def _search(self, media, quality, results):
q = '"%s %s" | "%s (%s)"' % (title, movie['library']['year'], title, movie['library']['year'])
arguments = tryUrlencode({
'q': q,
'age': Env.setting('retention', 'nzb'),
'sort': 'agedesc',
'minsize': quality.get('size_min'),
'maxsize': quality.get('size_max'),
'rating': 1,
'max': 250,
'more': 1,
'complete': 1,
})
nzbs = self.getRSSData(self.urls['search'] % arguments)
nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media, quality))
for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])
title = self.getTextElement(nzb, "title")
match = fireEvent('matcher.parse', title, parser='usenet', single = True)
if not match.chains:
log.info('Unable to parse release with title "%s"', title)
continue
# TODO should we consider other lower-weight chains here?
info = fireEvent('matcher.flatten_info', match.chains[0].info, single = True)
release_name = fireEvent('matcher.construct_from_raw', info.get('release_name'), single = True)
file_name = info.get('detail', {}).get('file_name')
file_name = file_name[0] if file_name else None
title = release_name or file_name
# Strip extension from parsed title (if one exists)
ext_pos = title.rfind('.')
# Assume extension if smaller than 4 characters
# TODO this should probably be done a better way
if len(title[ext_pos + 1:]) <= 4:
title = title[:ext_pos]
if not title:
log.info('Unable to find release name from match')
continue
try:
description = self.getTextElement(nzb, "description")
except:
@@ -57,7 +81,7 @@ class NzbIndex(NZBProvider, RSS):
results.append({
'id': nzbindex_id,
'name': self.getTextElement(nzb, "title"),
'name': title,
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'],
@@ -77,3 +101,53 @@ class NzbIndex(NZBProvider, RSS):
except:
pass
class Movie(MovieProvider, Base):

    def buildUrl(self, media, quality):
        """Build the NzbIndex search query for a movie.

        Matches both common release naming styles — '<title> <year>' and
        '<title> (<year>)' — via an OR ('|') phrase search, constrained by
        the configured retention and the quality's size bounds.
        """
        title = fireEvent('library.query', media['library'], include_year = False, single = True)
        year = media['library']['year']
        search = '"%s %s" | "%s (%s)"' % (title, year, title, year)
        return tryUrlencode({
            'q': search,
            'age': Env.setting('retention', 'nzb'),
            'sort': 'agedesc',
            'minsize': quality.get('size_min'),
            'maxsize': quality.get('size_max'),
            'rating': 1,
            'max': 250,
            'more': 1,
            'complete': 1,
        })
class Season(SeasonProvider, Base):

    def buildUrl(self, media, quality):
        """Build the NzbIndex search query for a season pack.

        Uses the plain library query, constrained by retention and the
        quality's size bounds.
        """
        params = {
            'q': fireEvent('library.query', media['library'], single = True),
            'age': Env.setting('retention', 'nzb'),
            'sort': 'agedesc',
            'minsize': quality.get('size_min'),
            'maxsize': quality.get('size_max'),
            'rating': 1,
            'max': 250,
            'more': 1,
            'complete': 1,
        }
        return tryUrlencode(params)
class Episode(EpisodeProvider, Base):

    def buildUrl(self, media, quality):
        """Build the NzbIndex search query for a single episode.

        Identical to Season.buildUrl except the library query here includes
        the episode identifier.
        """
        search = fireEvent('library.query', media['library'], single = True)
        return tryUrlencode({
            'q': search,
            'age': Env.setting('retention', 'nzb'),
            'sort': 'agedesc',
            'minsize': quality.get('size_min'),
            'maxsize': quality.get('size_max'),
            'rating': 1,
            'max': 250,
            'more': 1,
            'complete': 1,
        })

View File

@@ -2,12 +2,20 @@ from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.providers.base import MultiProvider
from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
log = CPLog(__name__)
class BiTHDTV(TorrentProvider):
class BiTHDTV(MultiProvider):
def getTypes(self):
return [Movie, Season, Episode]
class Base(TorrentProvider):
urls = {
'test' : 'http://www.bit-hdtv.com/',
@@ -18,20 +26,15 @@ class BiTHDTV(TorrentProvider):
}
# Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
cat_id_movies = 7
http_time_between_calls = 1 #seconds
def _searchOnTitle(self, title, movie, quality, results):
def _search(self, media, quality, results):
arguments = tryUrlencode({
'search': '%s %s' % (title.replace(':', ''), movie['library']['year']),
'cat': self.cat_id_movies
})
query = self.buildUrl(media)
url = "%s&%s" % (self.urls['search'], arguments)
url = "%s&%s" % (self.urls['search'], query)
data = self.getHTMLData(url, opener = self.login_opener)
data = self.getHTMLData(url)
if data:
# Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
@@ -68,10 +71,10 @@ class BiTHDTV(TorrentProvider):
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
})
}
def getMoreInfo(self, item):
full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
@@ -86,3 +89,31 @@ class BiTHDTV(TorrentProvider):
return 'logout.php' in output.lower()
loginCheckSuccess = loginSuccess
# Only searches BiT-HDTV's main category; the subcategory and resolution search filters appear to be broken
class Movie(MovieProvider, Base):

    def buildUrl(self, media):
        """Build the BiT-HDTV search query string for the movie category."""
        search = fireEvent('library.query', media['library'], single = True)
        return tryUrlencode({
            'search': search,
            'cat': 7  # Movie cat
        })
class Season(SeasonProvider, Base):

    def buildUrl(self, media):
        """Build the BiT-HDTV search query string for the season category."""
        search = fireEvent('library.query', media['library'], single = True)
        return tryUrlencode({
            'search': search,
            'cat': 12  # Season cat
        })
class Episode(EpisodeProvider, Base):

    def buildUrl(self, media):
        """Build the BiT-HDTV search query string for the episode category."""
        search = fireEvent('library.query', media['library'], single = True)
        return tryUrlencode({
            'search': search,
            'cat': 10  # Episode cat
        })

View File

@@ -2,39 +2,46 @@ from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.providers.base import MultiProvider
from couchpotato.core.providers.info.base import EpisodeProvider, SeasonProvider, MovieProvider
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
log = CPLog(__name__)
class Bitsoup(TorrentProvider):
class Bitsoup(MultiProvider):
def getTypes(self):
return [Movie, Season, Episode]
class Base(TorrentProvider):
urls = {
'test': 'https://www.bitsoup.me/',
'login' : 'https://www.bitsoup.me/takelogin.php',
'login_check': 'https://www.bitsoup.me/my.php',
'search': 'https://www.bitsoup.me/browse.php?',
'search': 'https://www.bitsoup.me/browse.php?%s',
'baseurl': 'https://www.bitsoup.me/%s',
}
http_time_between_calls = 1 #seconds
def _searchOnTitle(self, title, movie, quality, results):
def _search(self, media, quality, results):
q = '"%s" %s' % (simplifyString(title), movie['library']['year'])
arguments = tryUrlencode({
'search': q,
})
url = "%s&%s" % (self.urls['search'], arguments)
data = self.getHTMLData(url, opener = self.login_opener)
url = self.urls['search'] % self.buildUrl(media, quality)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
html = BeautifulSoup(data, "html.parser")
try:
result_table = html.find('table', attrs = {'class': 'koptekst'})
if not result_table or 'nothing found!' in data.lower():
return
entries = result_table.find_all('tr')
for result in entries[1:]:
@@ -82,3 +89,57 @@ class Bitsoup(TorrentProvider):
loginCheckSuccess = loginSuccess
# Bitsoup Categories
# Movies
# Movies/3D - 17 (unused)
# Movies/DVD-R - 20
# Movies/Packs - 27 (unused)
# Movies/XviD - 19
# The site doesn't have HD Movie categories, they bundle HD under x264
# x264 - 41
# TV
# TV-HDx264 - 42
# TV-Packs - 45
# TV-SDx264 - 49
# TV-XVID - 7 (unused)
class Movie(MovieProvider, Base):

    # Quality identifier -> Bitsoup category id mapping, resolved via getCatId().
    cat_ids = [
        ([41], ['720p', '1080p']),
        ([20], ['dvdr']),
        ([19], ['brrip', 'dvdrip']),
    ]
    cat_backup_id = 0

    def buildUrl(self, media, quality):
        """Build the Bitsoup browse query for a movie.

        Searches for the exact-quoted title followed by the year, filtered
        to the category matching the requested quality.
        """
        title = fireEvent('library.query', media['library'], include_year = False, single = True)
        year = media['library']['year']
        return tryUrlencode({
            'search': '"%s" %s' % (title, year),
            'cat': self.getCatId(quality['identifier'])[0],
        })
class Season(SeasonProvider, Base):

    def buildUrl(self, media, quality):
        """Build the Bitsoup browse query for a season pack.

        For season bundles, bitsoup currently only has one category, so the
        quality argument is accepted for interface parity but not used.
        """
        search = fireEvent('library.query', media['library'], single = True)
        return tryUrlencode({
            'search': search,
            'cat': 45  # TV-Packs Category
        })
class Episode(EpisodeProvider, Base):

    # Quality identifier -> Bitsoup category id mapping, resolved via getCatId().
    cat_ids = [
        ([42], ['hdtv_720p', 'webdl_720p', 'webdl_1080p', 'bdrip_1080p', 'bdrip_720p', 'brrip_1080p', 'brrip_720p']),
        ([49], ['hdtv_sd', 'webdl_480p'])
    ]
    cat_backup_id = 0

    def buildUrl(self, media, quality):
        """Build the Bitsoup browse query for a single episode, filtered to
        the category matching the requested quality."""
        params = {
            'search': fireEvent('library.query', media['library'], single = True),
            'cat': self.getCatId(quality['identifier'])[0],
        }
        return tryUrlencode(params)

View File

@@ -1,5 +1,4 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
@@ -23,7 +22,7 @@ class HDBits(TorrentProvider):
def _search(self, movie, quality, results):
data = self.getJsonData(self.urls['search'] % movie['library']['identifier'], opener = self.login_opener)
data = self.getJsonData(self.urls['search'] % movie['library']['identifier'])
if data:
try:
@@ -42,15 +41,17 @@ class HDBits(TorrentProvider):
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
data = self.getHTMLData('https://hdbits.org/login')
data = self.getHTMLData('https://hdbits.org/login', cache_timeout = 0)
bs = BeautifulSoup(data)
secret = bs.find('input', attrs = {'name': 'lol'})['value']
return tryUrlencode({
return {
'uname': self.conf('username'),
'password': self.conf('password'),
'returnto': '/',
'lol': secret
})
}
def loginSuccess(self, output):
return '/logout.php' in output.lower()

View File

@@ -42,7 +42,7 @@ class ILoveTorrents(TorrentProvider):
search_url = self.urls['search'] % (movieTitle, page, cats[0])
page += 1
data = self.getHTMLData(search_url, opener = self.login_opener)
data = self.getHTMLData(search_url)
if data:
try:
soup = BeautifulSoup(data)
@@ -96,11 +96,11 @@ class ILoveTorrents(TorrentProvider):
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'submit': 'Welcome to ILT',
})
}
def getMoreInfo(self, item):
cache_key = 'ilt.%s' % item['id']
@@ -109,7 +109,7 @@ class ILoveTorrents(TorrentProvider):
if not description:
try:
full_description = self.getHTMLData(item['detail_url'], opener = self.login_opener)
full_description = self.getHTMLData(item['detail_url'])
html = BeautifulSoup(full_description)
nfo_pre = html.find('td', attrs = {'class':'main'}).findAll('table')[1]
description = toUnicode(nfo_pre.text) if nfo_pre else ''

Some files were not shown because too many files have changed in this diff Show More