Compare commits

...

287 Commits

Author SHA1 Message Date
Ruud
f79fcda27f Small one up 2013-11-17 21:22:24 +01:00
Ruud
cdbcad2238 Merge branch 'refs/heads/develop' into desktop 2013-11-17 21:20:30 +01:00
Ruud
da760db340 Merge branch 'refs/heads/mano3m-develop_fixes' into develop 2013-11-17 21:18:37 +01:00
Ruud
4242a5cedb Merge branch 'develop_fixes' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_fixes 2013-11-17 21:16:35 +01:00
mano3m
8c41046836 more fixes 2013-11-17 21:10:07 +01:00
Ruud
5d913e87c3 One up! 2013-11-17 20:20:18 +01:00
Ruud
16f02bda27 Merge branch 'refs/heads/develop' into desktop 2013-11-17 20:03:22 +01:00
mano3m
c5e6ce0e48 Several sp fixes 2013-11-17 18:26:01 +01:00
Ruud
3ad527eb62 Allow 1080p in webrip quality 2013-11-17 00:24:09 +01:00
Ruud
af2a6bf031 Force ETA data not to be to far in the future 2013-11-16 23:58:58 +01:00
Ruud
731419b61f Better error logging for syno downloader. close #2464 2013-11-16 23:33:23 +01:00
Ruud
0fafd83d76 Do some scoring with scene / nuked. fix #2009 2013-11-16 23:26:46 +01:00
Ruud
003b78a66e Scene validation 2013-11-16 23:24:05 +01:00
Ruud
4ade857f01 Better string regex between brackets 2013-11-16 23:23:10 +01:00
Ruud
658596659f Deluge wrong sp wrap. fix #2463 2013-11-16 17:23:51 +01:00
Ruud
59e6d68416 Use correct config name for bithdtv 2013-11-16 14:39:51 +01:00
Ruud
db4f7a216a SP function wrapping whole variables 2013-11-16 13:32:00 +01:00
Ruud
3f8b97feb9 Merge branch 'refs/heads/mano3m-develop_clean_path' into develop 2013-11-16 12:57:35 +01:00
Ruud
a27673eaa4 Merge branch 'develop_clean_path' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_clean_path 2013-11-16 12:57:21 +01:00
Ruud
8e3291a1b0 bithdtv, Import correct functions 2013-11-16 12:49:39 +01:00
Ruud
89c04902e8 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-11-16 12:47:57 +01:00
Ruud
e29b100374 Don't try to unicode None object 2013-11-16 12:47:18 +01:00
Shatil Rafiullah
941d4414ce Changed to using getattr() so films lacking sets/collections are also handled. 2013-11-16 12:43:07 +01:00
Shatil Rafiullah
dc830324ae Added XBMC collection (set) categorization capability. 2013-11-16 12:43:02 +01:00
Ruud
3f37fc1e11 Move proxy getter to global torrent provider 2013-11-16 12:39:43 +01:00
Ruud
3442129610 Merge branch 'refs/heads/cptjhmiller-develop' into develop 2013-11-16 12:16:42 +01:00
Ruud
e9d29f10c1 Cleanup KAT import 2013-11-16 12:16:37 +01:00
Joel Kåberg
8996dd34c2 fix ident in bithdtv 2013-11-16 12:01:29 +01:00
Ruud
e2c5be0fcd Merge branch 'develop' of git://github.com/cptjhmiller/CouchPotatoServer into cptjhmiller-develop 2013-11-16 11:57:27 +01:00
Ruud
3d42c55560 Merge branch 'refs/heads/techmunk-develop' into develop 2013-11-16 11:56:35 +01:00
Ruud
9d287f140b Reorder deluge import 2013-11-16 11:56:29 +01:00
Jamie
5a8f28764d Fix to help find working proxy 2013-11-16 02:30:31 +00:00
Joel Kåberg
a2c5074d66 fixed bithdtv provider 2013-11-16 02:58:29 +01:00
Joel Kåberg
6acc125d4f bithdtv provider
thanks to @lansinghd ,
https://github.com/RuudBurger/CouchPotatoServer/pull/2460
2013-11-16 02:57:06 +01:00
Techmunk
7b9ebc2f34 Fixed issue https://github.com/RuudBurger/CouchPotatoServer/issues/2440, by returning a 'True' status when an existing torrent in deluge is added from CP. 2013-11-15 21:25:19 +10:00
Ruud
4e0d6ec980 Merge branch 'refs/heads/clinton-hall-develop' into develop 2013-11-14 22:36:17 +01:00
Ruud
c1944c987d Add some more double char replacements 2013-11-14 22:35:13 +01:00
Ruud
cdb889a985 Merge branch 'develop' of git://github.com/clinton-hall/CouchPotatoServer into clinton-hall-develop 2013-11-14 21:56:31 +01:00
Jamie
f6281c6dcc Update __init__.py 2013-11-14 14:51:27 +00:00
Jamie
c832a9e2b2 Added proxy support 2013-11-14 14:50:50 +00:00
Ruud
0c4851e436 Escape filename before using it in a regex. fixes #2430 2013-11-13 19:32:59 +01:00
Ruud
ce1b205993 Allow 720p tag for screener 2013-11-13 19:22:19 +01:00
Clinton Hall
b771aa303f replace multiple separators. fixes #2448 2013-11-13 21:41:29 +10:30
Ruud Burger
81178b4c8b Merge pull request #2438 from fuzeman/feature/dev_rtorrent
rTorrent: Delete Torrent Directories
2013-11-10 08:28:29 -08:00
Dean Gardiner
0317681597 Added directory removal to the rtorrent downloader 2013-11-11 03:21:00 +13:00
Ruud
ddba0e318f Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-11-09 23:42:15 +01:00
Ruud
3ef9591abd Cleanup import 2013-11-09 23:42:08 +01:00
Joel Kåberg
0d3c0c4077 use delete_files 2013-11-09 22:42:09 +01:00
Ruud
22b32364b6 Missing ) 2013-11-09 21:41:03 +01:00
Ruud Burger
db8fd20d67 Merge pull request #2436 from jkaberg/develop
rTorrent: remove only files, not folder
2013-11-09 09:46:15 -08:00
Joel Kåberg
3c061095e9 remove only files, not folder
(or in worst case the download clients root folder and anything in it)
2013-11-09 18:33:21 +01:00
Ruud
05853bca89 Don't put plot over trailer z-index 2013-11-06 22:16:02 +01:00
Ruud
aa489bb709 Force name as string 2013-11-05 23:42:44 +01:00
Ruud
0b70465578 Add Flickcharts to userscript 2013-11-05 23:35:47 +01:00
Ruud
5c64ba3c9e Add box office top10 to IMDB automation. closes #2427 2013-11-05 22:45:25 +01:00
Ruud
e119020016 Ignore releases without any info. 2013-11-05 22:16:26 +01:00
Ruud
9b92a3d396 Make sure the ignored files get used. fix #2425 2013-11-05 21:24:47 +01:00
Ruud
c73dc10aeb Add a bit of padding to plot 2013-11-04 22:47:20 +01:00
Ruud
c5ee0a576e Merge branch 'refs/heads/jerbob92-suggestdescription' into develop 2013-11-04 22:26:14 +01:00
Ruud
3e2ede585a Animate plot to show more text 2013-11-04 22:26:08 +01:00
Ruud
ba3dd263ac Merge branch 'suggestdescription' of git://github.com/jerbob92/CouchPotatoServer into jerbob92-suggestdescription
# Please enter a commit message to explain why this merge is necessary,
# especially if it merges an updated upstream into a topic branch.
#
# Lines starting with '#' will be ignored, and an empty message aborts
# the commit.
2013-11-03 21:59:39 +01:00
Ruud
7c955ecc80 XMPP notification support
thanks @wernight
2013-11-03 17:17:59 +01:00
Ruud Burger
48193b38c5 Merge pull request #2415 from mano3m/develop_fix_scanner
Cleanup file size code in scanner
2013-11-03 07:44:30 -08:00
Ruud Burger
2f5a233e63 Merge pull request #2416 from mano3m/develop_remote
Default movie_folder to from folder
2013-11-02 11:56:55 -07:00
mano3m
7b86fe5587 Default movie_folder to from folder
In case remote downloaders return a path that does not exist locally,
the movie_folder and files are updated to the from folder. Fixes #2412,
#1762, #1667, #1047
2013-11-02 11:20:34 +01:00
mano3m
5396343940 Cleanup file size code in scanner 2013-11-02 10:43:22 +01:00
mano3m
fa1baa73e8 Introduce path cleaning
A new function sp is introduced. It does the same as ss but also cleans
the path.
2013-11-02 10:15:50 +01:00
Ruud
9fa62de6dd Wrong variable logged in email notification 2013-10-30 23:09:45 +01:00
Adrien RAFFIN
7c5748ac87 Add support for starttls and allow modification of SMTP server port 2013-10-30 23:06:49 +01:00
Ruud
47de84259d Cleanup searcher PR 2013-10-30 22:51:26 +01:00
Ruud
f2b483b16e Merge branch 'refs/heads/fuzeman-dev_searcher' into develop 2013-10-30 22:09:10 +01:00
Ruud
98efe89833 Merge branch 'dev_searcher' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-dev_searcher 2013-10-30 22:08:59 +01:00
Ruud
f8872e2803 Use getter to prevent keyerror. fix #2410 2013-10-30 22:04:51 +01:00
Ruud
a1fd581bca Add HD quality tags 2013-10-29 21:31:02 +01:00
Ruud
6a4bc1eb08 Don't add tags twice for dvd-r quality 2013-10-29 21:16:32 +01:00
Ruud
94d1f99315 Add ignored group 2013-10-29 21:14:53 +01:00
Ruud
7c51bdbdaf Allow par3 files in binsearch validation 2013-10-27 20:21:02 +01:00
Ruud
d275dfd8cc Add br2dvd as DVD alternative. fix #1604 2013-10-27 20:16:03 +01:00
Ruud
82b879fbb4 Add proper detail url for OMGWTF 2013-10-27 19:50:26 +01:00
Ruud
cc32bd7050 OMGWTF https url 2013-10-27 19:22:58 +01:00
Ruud
4f4ba470e0 Prevent files keyerror for release_download files. fix #2392 2013-10-26 15:26:19 +02:00
Ruud
ce47429701 Only show n/a if undefined 2013-10-26 15:12:54 +02:00
Ruud
550051b3f6 Use order for quality allow calculation. fix #2396 2013-10-26 15:09:30 +02:00
Ruud
b149528406 Cleanup older releases calling the wrong function 2013-10-22 14:11:13 +02:00
Ruud
22c257618d Remove unused movie.search function 2013-10-21 00:00:13 +02:00
Ruud
e1c3c334d9 Use new provider named events for search. fix #2379 2013-10-20 23:56:31 +02:00
Ruud
c5e7159952 Don't add identifier score double when scoring 2013-10-20 23:40:16 +02:00
Ruud
fe8946e3b5 Cache qualities.all 2013-10-20 23:29:36 +02:00
Ruud
c354d3c6d5 Guess qualities based on score. fix #2373 2013-10-20 22:47:18 +02:00
Ruud
53cd907db1 Code cleanup 2013-10-20 17:43:30 +02:00
Ruud
605f340be5 Merge branch 'refs/heads/mano3m-develop_torrent_files' into develop 2013-10-20 17:39:36 +02:00
Ruud
e014ce7a47 Merge branch 'develop_torrent_files' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_torrent_files 2013-10-20 17:39:13 +02:00
mano3m
579c1fa53c Fix categories error 2013-10-20 13:24:01 +02:00
mano3m
4bfb5c6397 Make sure Transmission folders are 'normpath'-ed 2013-10-20 02:41:48 +02:00
mano3m
639d635913 Implement better folder checking
Fixes #2360, thanks @clinton_hall
2013-10-20 02:41:48 +02:00
mano3m
37e5f2c48b Fix SabNZBd folder bug
If only one file is extracted the storage key contains the extracted
file instead of the folder. This leads to CPS skipping the renamer. This
check fixes that.
2013-10-20 01:50:06 +02:00
mano3m
583bb1d0d9 Fix debug message 2013-10-19 00:14:04 +02:00
Jeroen Bobbeldijk
d0cffb5863 Fix up tabs 2013-10-18 23:35:20 +02:00
Jeroen Bobbeldijk
548686ebfe Added pilot to suggestion 2013-10-18 23:32:59 +02:00
Ruud
0635c571e4 Remove Notifo 2013-10-18 17:57:44 +02:00
Ruud
4764925ae6 Only skip data dir paths when updating source 2013-10-18 17:13:06 +02:00
mano3m
80e9831c03 Make uTorrent language independent
Fixes #2341
2013-10-18 00:48:42 +02:00
Dean Gardiner
f7e1fa1406 'release.download' renamed to 'release.manual_download', Moved 'searcher.download' and 'searcher.try_download_result' to 'release.*'. 2013-10-17 23:27:24 +13:00
Dean Gardiner
dc73e5c58f Added back migration code in 'searcher.download' 2013-10-17 22:53:44 +13:00
mano3m
526d383929 Fix for release.update
The done release has no release info. This is fixed by doing it in the
same way as the interface.
2013-10-16 22:17:22 +02:00
mano3m
89f7cfb896 tagging fixes 2013-10-16 22:17:21 +02:00
mano3m
6abc4cc549 Upgrade tagging
Havent tested this yet, but it should work with both one filed torrents
and folders. Everything mixed, let's go crazy!!
2013-10-16 22:17:21 +02:00
mano3m
6aa7cfc0fe Wrong use of "is" 2013-10-16 22:17:20 +02:00
mano3m
345d0b8211 Add status to renamer.scan api call
This allows for scripts to send the seeding status with the scan
2013-10-16 22:17:20 +02:00
mano3m
eb17afc368 Fixed bug where it didnt do anything... 2013-10-16 22:17:19 +02:00
mano3m
c12b189f5f Fixed variables in scanner 2013-10-16 22:17:19 +02:00
mano3m
5edc745727 Typo 2013-10-16 22:17:18 +02:00
mano3m
bc877df513 Cleanup variable naming
Use release_download variable for all item/status/download_info
variables (which are by now all the same thing)
2013-10-16 22:17:18 +02:00
mano3m
57cb22c9aa Fix type of torrent_files 2013-10-16 22:17:18 +02:00
mano3m
719aca88b7 Clean-up read only files uTorrent 2013-10-16 22:17:17 +02:00
rbfblk
b1e66478f0 Fixing an issue which strips all read bits from utorrent downloaded files on Linux 2013-10-16 22:17:17 +02:00
Dean Gardiner
25f0462c15 Added files for rTorrent 2013-10-16 22:17:16 +02:00
mano3m
caded0694c include files for Transmission 2013-10-16 22:17:16 +02:00
mano3m
39190495be Correct path for one file torrent 2013-10-16 22:17:15 +02:00
Techmunk
1cc998bc95 Include files for renamer in Deluge downloader. 2013-10-16 22:17:15 +02:00
mano3m
54c7aad57a Include files from downloader in renamer 2013-10-16 22:17:14 +02:00
Dean Gardiner
1c8fed5457 Minor cleanup to Searcher and Matcher
Conflicts:

	couchpotato/core/plugins/matcher/main.py
2013-10-16 15:46:46 +13:00
Dean Gardiner
8e51513ee0 Moved 'searcher.create_releases' from Searcher to Release.
Conflicts:

	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/media/show/searcher/main.py
2013-10-16 15:46:24 +13:00
Dean Gardiner
1788440a5c Cleaned up usage of helper functions
Conflicts:

	couchpotato/core/media/show/searcher/main.py
	couchpotato/core/plugins/matcher/main.py
2013-10-16 15:40:25 +13:00
Dean Gardiner
f467e4d75a Fix to Provider getCatId when returning the cet_backup_id 2013-10-16 15:38:41 +13:00
Dean Gardiner
1e3f8410c0 Added 'searcher.get_media_searcher_id' event, Cleaned up some 'status.get' calls, Renamed some references of 'nzb' to 'rel'.
Conflicts:

	couchpotato/core/media/_base/searcher/main.py
2013-10-16 15:37:52 +13:00
Dean Gardiner
cbb7b96391 'searcher.correct_release' can now return a float indicating the weight/accuracy which is used to scale the score. Fix to IPT _buildUrl method.
Conflicts:

	couchpotato/core/providers/torrent/iptorrents/main.py
2013-10-16 15:34:08 +13:00
Dean Gardiner
5f24338bd2 Renamed 'movie' -> 'media' in 'searcher.download'
Conflicts:

	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/plugins/release/main.py
2013-10-16 15:32:25 +13:00
Dean Gardiner
56f049cd7d Created 'searcher.try_download_result' event from section in MovieSearcher.single 2013-10-16 15:04:10 +13:00
Ruud Burger
a09e8b63ae Merge pull request #2350 from einartryggvi/develop
Make ubuntu init script executable so it can be symlinked to /etc/init.d
2013-10-14 13:29:53 -07:00
Einar Tryggvi Leifsson
400643cbcd Make ubuntu init script executable so it can be symlinked to /etc/init.d 2013-10-14 20:27:21 +00:00
Ruud
ce68a37441 Zero fill imdb ids found 2013-10-14 22:24:23 +02:00
Ruud
1377b6315c Allow imdb id with int of 4-7 2013-10-14 22:05:32 +02:00
Ruud
0e18dcb8a1 Use success when adding movies 2013-10-14 21:13:31 +02:00
Ruud
7277ef3bd8 Remove SceneHD as we can't login with captcha. fix #2146 2013-10-14 21:07:37 +02:00
Ruud
5bf3b929a2 Detect Windows 8 tablets as touchdevice also. 2013-10-14 00:01:38 +02:00
Ruud
66967f8326 Whatever! #2283
@clinton ;)
2013-10-13 22:37:15 +02:00
Ruud
e9abf982fe Flixter decode json before parsing. closes #2305 2013-10-13 22:21:32 +02:00
Ruud
3535f44db9 No need to use disable check in automation 2013-10-13 22:12:27 +02:00
Ruud
c772758683 Add category to renamer replacements. fix #2283 2013-10-13 22:12:15 +02:00
Ruud
2fc097c0e8 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-10-13 21:47:21 +02:00
Ruud
c9d7418899 Force unicode name for newznab. fix #2347 2013-10-13 21:46:16 +02:00
Ruud Burger
1317a4c6b7 Merge pull request #2346 from mano3m/develop_fix_dashboard
Move and fix cleanreleases
2013-10-13 12:17:49 -07:00
mano3m
4b0a5bdd9b Move and fix cleanreleases 2013-10-13 16:53:45 +02:00
Ruud
2b57bdcd03 Revert "Make sure to untag downloading dir if it's completed. fix #2341"
This reverts commit 65f039e9ed.
2013-10-13 15:17:39 +02:00
Ruud
65f039e9ed Make sure to untag downloading dir if it's completed. fix #2341 2013-10-13 14:25:50 +02:00
Ruud
3be6389fbf Use json in flixter 2013-10-13 14:16:59 +02:00
Ruud
9bf01e3a0b Plex endless loop when no clients connected 2013-10-13 14:01:18 +02:00
Ruud
1305327564 Merge branch 'refs/heads/fuzeman-feature/dev_plex' into develop 2013-10-13 13:56:38 +02:00
Ruud
97b6cf013f Merge branch 'feature/dev_plex' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-feature/dev_plex 2013-10-13 13:56:26 +02:00
Ruud
e1a6b813a5 Merge branch 'refs/heads/mano3m-develop_fix_dashboard' into develop 2013-10-13 13:45:45 +02:00
Ruud
b0e30921ae Merge branch 'develop_fix_dashboard' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_fix_dashboard 2013-10-13 13:45:27 +02:00
Ruud
f4c4f013da Cleanup searcher and release checking 2013-10-13 13:44:26 +02:00
Ruud
43ef982d95 Merge branch 'refs/heads/fuzeman-dev_searcher' into develop 2013-10-13 12:57:43 +02:00
Ruud
d930bc4afd Merge branch 'dev_searcher' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-dev_searcher 2013-10-13 12:57:22 +02:00
Kevin Carter
6dbdd4c0be Load lsb init-functions so that status_of_proc is available 2013-10-13 12:50:28 +02:00
Ruud
93bd75acc8 Make iframe https 2013-10-12 23:12:45 +02:00
Dean Gardiner
bdeace8a68 New clients added that aren't in the current client cache now trigger a reload if the list isn't "stale" yet. 2013-10-13 03:00:52 +13:00
Dean Gardiner
efdf70acb2 When notifications fail to send the client list is automatically reloaded in case the client address has changed. 2013-10-13 02:52:55 +13:00
Dean Gardiner
d31ca2677e Cleaned up Plex notifications plugin. 2013-10-13 02:26:35 +13:00
mano3m
3a117b6077 Make sure movies are removed from dashboard 2013-10-12 13:42:48 +02:00
mano3m
6d2889f88d Fix releases missing from Snatched&Available
Fixes #1958
2013-10-12 13:42:30 +02:00
Ruud Burger
213b03589a Merge pull request #2339 from cicavey/develop
Changed MIME type of JSONP requests to text/javascript
2013-10-12 04:26:08 -07:00
cicavey
79fd5fe332 Changed MIME type of JSONP requests to text/javascript 2013-10-12 07:11:37 -04:00
Ruud Burger
25a5b72d26 Merge pull request #2331 from fuzeman/feature/dev_rtorrent
rTorrent Downloader - fixes to scgi on Python 2.6
2013-10-12 03:07:31 -07:00
Dean Gardiner
8970e7fbba Fix to Searcher.createReleases (media_id doesn't exist yet) 2013-10-12 15:24:06 +13:00
Dean Gardiner
e96724beaf Fix to MovieSearcher.single to set default media type as types aren't in develop yet. 2013-10-12 15:11:46 +13:00
Dean Gardiner
73d7d01ae4 Fixed ResultList.append call to 'movie.searcher.correct_movie' instead of 'searcher.correct_release' 2013-10-12 15:10:26 +13:00
Dean Gardiner
34c69786de Merge base/movie searcher changes from branch 'tv' into develop 2013-10-12 14:25:00 +13:00
Dean Gardiner
8587b9b780 Updated rTorrent library - MethodError exceptions when calling group methods should be fixed. 2013-10-11 13:33:20 +13:00
Dean Gardiner
b9f88f431b Updated rTorrent library and fixed call to MethodError.message (should be MethodError.msg) in _update_provider_group 2013-10-11 04:12:36 +13:00
Dean Gardiner
df90ee0a55 Updated rtorrent library - scgi fix for Python 2.6 2013-10-10 15:58:35 +13:00
Ruud Burger
32a4075979 Merge pull request #2326 from fuzeman/feature/dev_rtorrent
rTorrent Downloader - scgi support
2013-10-09 08:08:04 -07:00
Ruud
99606e22d6 Make YIFY a imdbid search. fix #2323 2013-10-09 16:45:45 +02:00
Ruud
5fd0253089 Import Media, not Movie. fix #2320 2013-10-09 16:37:16 +02:00
Ruud
a46241bb9f Better year name guessing. #2323 2013-10-09 16:36:13 +02:00
Dean Gardiner
a8087c8ce9 Updated rTorrent downloader options 2013-10-09 23:07:14 +13:00
Dean Gardiner
0a90ad5db7 Updated rtorrent library to current master - scgi:// support 2013-10-09 22:24:22 +13:00
Ruud
75bda46f64 Userscript styling fixes 2013-10-08 21:53:03 +02:00
Ruud
a0d2a64e57 Userscript didn't load properly 2013-10-08 21:51:34 +02:00
Ruud
d1c3f0c241 Use Media for all Movie db actions 2013-10-08 09:57:36 +02:00
Ruud
107606ce65 Add tv branch column aliases 2013-10-08 09:57:17 +02:00
Ruud
32646d0608 Use movie instaid of media model 2013-10-08 09:22:05 +02:00
Ruud
eabd2b6c41 Rename mediaplugin 2013-10-08 09:21:53 +02:00
Ruud
b8ac093182 Remove refresh from movie media
Conflicts:
	couchpotato/core/media/movie/_base/main.py
2013-10-08 09:15:41 +02:00
Ruud
bac3055726 Move media refresh to media plugin 2013-10-08 08:46:32 +02:00
Ruud
955814397a Revert "TorrentBytes login url change. fix #2317"
This reverts commit 95d0dacd28.
2013-10-07 23:38:53 +02:00
Ruud
10fe175ff5 Move suggestions to movie folder 2013-10-07 22:52:05 +02:00
Ruud
bca4a2e241 Move search item to movie folder 2013-10-07 22:51:23 +02:00
Ruud
3925d4c215 Make search work for multiple media types 2013-10-07 21:23:09 +02:00
Ruud
8ca5c62575 YIFY use IMDB id for search. fix #2313 2013-10-07 15:52:25 +02:00
Ruud
95d0dacd28 TorrentBytes login url change. fix #2317 2013-10-07 09:20:01 +02:00
Ruud
b6f850dc27 in_ needs list.. 2013-10-03 08:30:13 +02:00
Ruud
38ce63795c Check snatched with single query 2013-10-03 08:26:02 +02:00
Ruud
bbf42da875 ILoveTorrents cleanup 2013-09-30 22:18:36 +02:00
salfab
8df0ecc223 disabled by default 2013-09-30 21:55:33 +02:00
salfab
c37bf12c8a improve resilience to retrieve description in get_more_info 2013-09-30 21:55:29 +02:00
salfab
83051b2576 support getting more info. 2013-09-30 21:55:24 +02:00
salfab
75360f734c use a proper name, instead of the link 2013-09-30 21:55:20 +02:00
salfab
87754047fa torrents are found and appended to the results argument 2013-09-30 21:55:16 +02:00
salfab
f121db059e add new provider for ILT. 2013-09-30 21:55:08 +02:00
Ruud
c9e693287c Merge branch 'refs/heads/mano3m-develop_release' into develop 2013-09-30 20:52:51 +02:00
Ruud
0876d1ff8e Rename release.update to update_status 2013-09-30 20:52:04 +02:00
Ruud
6bbcc5af77 Merge branch 'develop_release' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_release 2013-09-30 20:31:50 +02:00
Ruud Burger
6a9f6a6fc8 Merge pull request #2099 from mano3m/develop_folder
Remove all empty folders after rename
2013-09-30 11:24:21 -07:00
Ruud Burger
1da3546f2d Merge pull request #2270 from fuzeman/feature/dev_rtorrent
rTorrent Downloader fixes
2013-09-30 11:23:00 -07:00
Ruud Burger
d233425a77 Merge pull request #2272 from fuzeman/feature/dev_plex
Fixed Plex notifications on latest PHT
2013-09-30 11:22:26 -07:00
Ruud
8883d505ba Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-09-30 20:12:31 +02:00
Ruud Burger
c51d806840 Merge pull request #2282 from mano3m/develop_encryptedasfailed
Consider encrypted as failed fix #2260
2013-09-30 11:12:08 -07:00
Ruud
13a0c4607d Merge branch 'refs/heads/jkaberg-develop' into develop 2013-09-30 20:11:51 +02:00
mano3m
fd8e50b533 [SabNZBd] Consider encrypted as failed 2013-09-30 20:05:34 +02:00
Ruud Burger
682216dcf4 Merge pull request #2281 from mano3m/develop_seedfix
Fix seeding status check #2278
2013-09-30 10:44:43 -07:00
mano3m
6bda5f5b03 Don't use movie done status to check seeding
Fixes #2278
2013-09-30 19:34:12 +02:00
mano3m
6174f121c8 fix log message 2013-09-30 19:27:11 +02:00
mano3m
89daa836e7 Remove all empty folders
Quite often there is a subfolder in the movie folder after extraction.
This folder is deleted but the actual movie folder remains behind. This
update fixes that in both cases: move_folder is known, or we work in the
'from' folder.
2013-09-30 19:24:46 +02:00
mano3m
7c5616cc79 fix colour order 2013-09-30 19:16:19 +02:00
mano3m
27fdbff619 Set missing to ignored after 1 week 2013-09-30 19:16:13 +02:00
mano3m
516447a104 Remove movie_dict 2013-09-30 19:16:08 +02:00
mano3m
0c6c172d6a Update movie quality status colour and text
It isnt perfect this way. I think we need to add a sepperate function to
do this and call that from both when CPS is loading the page and when it
updates a release (e.g. just rebuild the icons)
2013-09-30 19:16:01 +02:00
mano3m
d11f9d26c0 Add missing status 2013-09-30 19:15:51 +02:00
mano3m
a2cb0ec8ad frontend release.update 2013-09-30 19:15:44 +02:00
mano3m
1bddadf3a4 clean-up searcher 2013-09-30 19:15:30 +02:00
mano3m
f0f843f746 Add release.update event
Proof of concept commit.

It updates the database and calls movie.update.id to refresh the entire movie in the frontend. It would be better to crease a static js file in the release folder and add release functionality there including updating one release only.
2013-09-30 19:15:10 +02:00
Joel Kåberg
317a1f119b not needed 2013-09-29 18:03:52 +02:00
Joel Kåberg
b128ef17c9 Added directory option
and an option to append label to directory path
2013-09-29 15:32:23 +02:00
Ruud
cc4350b0f9 NZBGet missing in wizard. fix #2262 2013-09-29 14:05:28 +02:00
Dean Gardiner
0b00f2d9e6 Fixed Plex notifications on latest PHT (protocol renamed to 'plex') 2013-09-30 00:49:00 +13:00
Ruud
e7aa91b3e1 Don't try to use custom_plugins when folder doesn't exist 2013-09-29 13:44:52 +02:00
Ruud
333abd2486 Custom plugin folder outside source. fix #2076 2013-09-29 13:25:10 +02:00
Dean Gardiner
226835e3d0 Added a check to ensure a torrent has been loaded (and found). 2013-09-29 23:32:03 +13:00
Dean Gardiner
48db4c8b8e Updated rtorrent-python library 2013-09-29 23:21:53 +13:00
Ruud
ae4e15286a Don't try to loop over None. fix #2268 2013-09-29 12:17:09 +02:00
Ruud
1b96489656 Merge branch 'refs/heads/jkaberg-develop' into develop 2013-09-29 10:06:22 +02:00
Ruud
99c899ea3a Proper variable naming 2013-09-29 10:06:12 +02:00
Ruud
8f76dd7a2e Merge branch 'develop' of git://github.com/jkaberg/CouchPotatoServer into jkaberg-develop 2013-09-29 09:57:17 +02:00
Ruud
1f2c2269e6 Ignore thumbs.db files and don't fail on single path split. fix #2265 2013-09-29 09:54:37 +02:00
Joel Kåberg
201185f7e7 better english damnit! 2013-09-29 01:49:51 +02:00
Joel Kåberg
e38d68c019 actual code 2013-09-29 01:45:50 +02:00
Joel Kåberg
91332e06e5 add option to create sub directory 2013-09-29 01:45:24 +02:00
Ruud
96b4af1fea Hide first item in combined table 2013-09-29 00:08:26 +02:00
Ruud
b4bccc9be2 Flixter automation support
Thanks @mikedm139
2013-09-28 23:41:15 +02:00
Ruud
d6ddee236a Merge branch 'refs/heads/mano3m-develop_bluray' into develop 2013-09-28 23:17:42 +02:00
Ruud
364e355114 Also try to load the root module for each path 2013-09-28 21:25:25 +02:00
Ruud
7d4f9d60b1 Code formating 2013-09-28 19:17:41 +02:00
Ruud
116bc839fc Make description more clear 2013-09-28 19:12:05 +02:00
Ruud
153d4b2b1d Merge branch 'develop_bluray' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_bluray 2013-09-28 18:20:47 +02:00
Ruud
2f4f140662 Don't overwrite data variable in utorrent download. fix #2222 2013-09-28 18:19:17 +02:00
Ruud
475ac1bb9c Only use filename for identification when possible. fix #2233 & #954 2013-09-28 18:06:45 +02:00
Ruud
49015b7d64 Be sure to ss quality alt in guess 2013-09-28 17:45:32 +02:00
Ruud
99efcce4d0 Merge branch 'refs/heads/techmunk-2235' into develop 2013-09-28 17:04:00 +02:00
Ruud
c3c971db23 Merge branch '2235' of git://github.com/techmunk/CouchPotatoServer into techmunk-2235 2013-09-28 17:03:27 +02:00
Ruud
8011634b7a Use correct encoding for emails. fix #2254 2013-09-28 16:39:31 +02:00
Ruud
ededfcb822 Escape spaces for each request. fix #2256 2013-09-28 16:28:46 +02:00
Ruud
92a0af5ce3 Use label for quality guess also. closes #2237 2013-09-28 15:23:45 +02:00
Ruud
ffaffbc66f Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2013-09-28 14:31:47 +02:00
Ruud
2596bbe2bc Merge branch 'refs/heads/saxicek-tsh_scene_only' into develop 2013-09-28 14:30:27 +02:00
Ruud
3310bdf551 Don't use quotes for torrentshack 2013-09-28 14:30:20 +02:00
Ruud Burger
19d357b866 Merge pull request #2261 from mano3m/develop_transmission
[Transmission] Fix  #2168
2013-09-28 04:49:31 -07:00
mano3m
871aecb689 Fix transmission #2168 2013-09-28 13:35:26 +02:00
mano3m
00bb055474 set backlog to False after backlog search 2013-09-28 12:36:43 +02:00
mano3m
f10d182468 Added Blu-ray.com backlog automation
I missed a few movies, so I added backlog functionality to Blu-ray.com

If you want to add all Blu-rays that ever came out to the wanted list,
you can use this. Be careful with what you wish for :D
2013-09-28 12:36:43 +02:00
Techmunk
74a4e7d19d Indenting on deluge auth fix was incorrect. 2013-09-27 14:59:03 +10:00
sax
c7c64c6002 Changed implementation of "scene_only" parameter to use filter criteria instead of parsing the information from query result. 2013-09-25 14:05:16 +02:00
Techmunk
8474d0d95d Fix the way the client auth file is found and processed to match the defaults in the deluge clients. 2013-09-25 21:44:05 +10:00
Ruud
4a5c878c36 Wrong config name for plex host 2013-09-24 22:44:14 +02:00
Ruud
2b0a70355a Merge branch 'refs/heads/fuzeman-feature/dev_plex' into develop 2013-09-24 22:37:46 +02:00
Ruud
9b5166826f Cleanup Plex notification 2013-09-24 22:37:40 +02:00
Ruud
3b1efb2c30 Merge branch 'feature/dev_plex' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-feature/dev_plex
Conflicts:
	couchpotato/core/notifications/plex/main.py
2013-09-24 21:35:36 +02:00
Ruud
8d108b92bf One Up 2013-09-23 21:48:12 +02:00
Ruud
46783028b1 Merge branch 'refs/heads/develop' into desktop 2013-09-23 21:36:45 +02:00
Ruud
b5d2a41d60 Enable NewzNab bij default 2013-09-23 21:35:40 +02:00
Ruud
cc3aad49ed Remove FTDWorld 2013-09-23 21:35:29 +02:00
Ruud
2365e1859f Don't show suggestions if there aren't any. fix #2153 2013-09-22 10:47:13 +02:00
Ruud
03700e0a04 Userscript image didn't show 2013-09-22 00:43:50 +02:00
Ruud
1ff4901846 Make sure to remove listener, even after fail 2013-09-21 22:29:15 +02:00
Ruud
d70a71a12e Make nonblock debug message 2013-09-21 22:17:01 +02:00
Ruud
866d9621cb Create new listener list 2013-09-21 22:16:44 +02:00
Ruud
2d3fc03a00 Revert back to UTF8 when ss encoding fails. fix #2220 2013-09-21 13:56:17 +02:00
Ruud
19f782e4a5 Don't try to change elements that don't exist. fix #2219 2013-09-21 12:41:06 +02:00
Ruud
fdd851d29a Binsearch age parse failed for release new than 1 day. fix #2217 2013-09-21 12:14:40 +02:00
Ruud
6cd38a3469 Providers missing in wizard 2013-09-21 11:20:53 +02:00
Ruud
bfa3b87188 Only show soon and late with no releases 2013-09-21 11:07:16 +02:00
Ruud
69a9fa1193 Simplify string before checking on imdb 2013-09-20 18:08:27 +02:00
Ruud
9e0805ec89 Hide IE clear button on search 2013-09-20 18:08:12 +02:00
Dean Gardiner
b824ef93bd Fix plex notifications test method. 2013-08-04 15:39:02 +12:00
Dean Gardiner
c92aa91aa7 Corrected notify() force parameter default. 2013-08-02 02:43:55 +12:00
Dean Gardiner
a6c32a7e30 Fixed Plex notifications
Conflicts:

	couchpotato/core/notifications/plex/main.py
2013-08-02 02:43:37 +12:00
140 changed files with 8286 additions and 2083 deletions

View File

@@ -44,12 +44,13 @@ class NonBlockHandler(RequestHandler):
def onNewMessage(self, response):
if self.request.connection.stream.closed():
self.on_connection_close()
return
try:
self.finish(response)
except:
log.error('Failed doing nonblock request: %s', (traceback.format_exc()))
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
@@ -109,6 +110,7 @@ class ApiHandler(RequestHandler):
if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.set_header("Content-Type", "text/javascript")
elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1])
else:

View File

@@ -298,6 +298,7 @@ class SourceUpdater(BaseUpdater):
def replaceWith(self, path):
app_dir = ss(Env.get('app_dir'))
data_dir = ss(Env.get('data_dir'))
# Get list of files we want to overwrite
self.deletePyc()
@@ -329,12 +330,15 @@ class SourceUpdater(BaseUpdater):
log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc()))
return False
if Env.get('app_dir') not in Env.get('data_dir'):
for still_exists in existing_files:
try:
os.remove(still_exists)
except:
log.error('Failed removing non-used file: %s', traceback.format_exc())
for still_exists in existing_files:
if data_dir in still_exists:
continue
try:
os.remove(still_exists)
except:
log.error('Failed removing non-used file: %s', traceback.format_exc())
return True

View File

@@ -1,4 +1,4 @@
config = {
config = [{
'name': 'download_providers',
'groups': [
{
@@ -10,4 +10,4 @@ config = {
'options': [],
},
],
}
}]

View File

@@ -66,36 +66,36 @@ class Downloader(Provider):
def getAllDownloadStatus(self):
return
def _removeFailed(self, item):
def _removeFailed(self, release_download):
if self.isDisabled(manual = True, data = {}):
return
if item and item.get('downloader') == self.getName():
if release_download and release_download.get('downloader') == self.getName():
if self.conf('delete_failed'):
return self.removeFailed(item)
return self.removeFailed(release_download)
return False
return
def removeFailed(self, item):
def removeFailed(self, release_download):
return
def _processComplete(self, item):
def _processComplete(self, release_download):
if self.isDisabled(manual = True, data = {}):
return
if item and item.get('downloader') == self.getName():
if release_download and release_download.get('downloader') == self.getName():
if self.conf('remove_complete', default = False):
return self.processComplete(item = item, delete_files = self.conf('delete_files', default = False))
return self.processComplete(release_download = release_download, delete_files = self.conf('delete_files', default = False))
return False
return
def processComplete(self, item, delete_files):
def processComplete(self, release_download, delete_files):
return
def isCorrectProtocol(self, item_protocol):
is_correct = item_protocol in self.protocol
def isCorrectProtocol(self, protocol):
is_correct = protocol in self.protocol
if not is_correct:
log.debug("Downloader doesn't support this protocol")
@@ -151,20 +151,20 @@ class Downloader(Provider):
(d_manual and manual or d_manual is False) and \
(not data or self.isCorrectProtocol(data.get('protocol')))
def _pause(self, item, pause = True):
def _pause(self, release_download, pause = True):
if self.isDisabled(manual = True, data = {}):
return
if item and item.get('downloader') == self.getName():
self.pause(item, pause)
if release_download and release_download.get('downloader') == self.getName():
self.pause(release_download, pause)
return True
return False
def pause(self, item, pause):
def pause(self, release_download, pause):
return
class StatusList(list):
class ReleaseDownloadList(list):
provider = None
@@ -173,7 +173,7 @@ class StatusList(list):
self.provider = provider
self.kwargs = kwargs
super(StatusList, self).__init__()
super(ReleaseDownloadList, self).__init__()
def extend(self, results):
for r in results:
@@ -181,7 +181,7 @@ class StatusList(list):
def append(self, result):
new_result = self.fillResult(result)
super(StatusList, self).append(new_result)
super(ReleaseDownloadList, self).append(new_result)
def fillResult(self, result):
@@ -190,6 +190,7 @@ class StatusList(list):
'status': 'busy',
'downloader': self.provider.getName(),
'folder': '',
'files': '',
}
return mergeDicts(defaults, result)

View File

@@ -35,6 +35,13 @@ config = [{
'type': 'dropdown',
'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
},
{
'name': 'create_subdir',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Create a sub directory when saving the .nzb (or .torrent).',
},
{
'name': 'manual',
'default': 0,

View File

@@ -33,17 +33,27 @@ class Blackhole(Downloader):
log.error('No nzb/torrent available: %s', data.get('url'))
return False
fullPath = os.path.join(directory, self.createFileName(data, filedata, movie))
file_name = self.createFileName(data, filedata, movie)
full_path = os.path.join(directory, file_name)
if self.conf('create_subdir'):
try:
new_path = os.path.splitext(full_path)[0]
if not os.path.exists(new_path):
os.makedirs(new_path)
full_path = os.path.join(new_path, file_name)
except:
log.error('Couldnt create sub dir, reverting to old one: %s', full_path)
try:
if not os.path.isfile(fullPath):
log.info('Downloading %s to %s.', (data.get('protocol'), fullPath))
with open(fullPath, 'wb') as f:
if not os.path.isfile(full_path):
log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
with open(full_path, 'wb') as f:
f.write(filedata)
os.chmod(fullPath, Env.getPermission('file'))
os.chmod(full_path, Env.getPermission('file'))
return True
else:
log.info('File %s already exists.', fullPath)
log.info('File %s already exists.', full_path)
return True
except:

View File

@@ -1,12 +1,14 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt, ss
from base64 import b64encode, b16encode, b32decode
from bencode import bencode as benc, bdecode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryFloat
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
from hashlib import sha1
from synchronousdeluge import DelugeClient
import os.path
import re
import traceback
log = CPLog(__name__)
@@ -72,7 +74,7 @@ class Deluge(Downloader):
remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
else:
filename = self.createFileName(data, filedata, movie)
remote_torrent = self.drpc.add_torrent_file(filename, b64encode(filedata), options)
remote_torrent = self.drpc.add_torrent_file(filename, filedata, options)
if not remote_torrent:
log.error('Failed sending torrent to Deluge')
@@ -85,14 +87,10 @@ class Deluge(Downloader):
log.debug('Checking Deluge download status.')
if not os.path.isdir(Env.setting('from', 'renamer')):
log.error('Renamer "from" folder doesn\'t to exist.')
return
if not self.connect():
return False
statuses = StatusList(self)
release_downloads = ReleaseDownloadList(self)
queue = self.drpc.get_alltorrents()
@@ -101,50 +99,55 @@ class Deluge(Downloader):
return False
for torrent_id in queue:
item = queue[torrent_id]
log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (item['name'], item['hash'], item['save_path'], item['move_completed_path'], item['hash'], item['progress'], item['state'], item['eta'], item['ratio'], item['stop_ratio'], item['is_seed'], item['is_finished'], item['paused']))
torrent = queue[torrent_id]
log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
# Deluge has no easy way to work out if a torrent is stalled or failing.
#status = 'failed'
status = 'busy'
if item['is_seed'] and tryFloat(item['ratio']) < tryFloat(item['stop_ratio']):
# We have item['seeding_time'] to work out what the seeding time is, but we do not
if torrent['is_seed'] and tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio']):
# We have torrent['seeding_time'] to work out what the seeding time is, but we do not
# have access to the downloader seed_time, as with deluge we have no way to pass it
# when the torrent is added. So Deluge will only look at the ratio.
# See above comment in download().
status = 'seeding'
elif item['is_seed'] and item['is_finished'] and item['paused'] and item['state'] == 'Paused':
elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused':
status = 'completed'
download_dir = item['save_path']
if item['move_on_completed']:
download_dir = item['move_completed_path']
download_dir = sp(torrent['save_path'])
if torrent['move_on_completed']:
download_dir = torrent['move_completed_path']
statuses.append({
'id': item['hash'],
'name': item['name'],
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(download_dir, file_item['path'])))
release_downloads.append({
'id': torrent['hash'],
'name': torrent['name'],
'status': status,
'original_status': item['state'],
'seed_ratio': item['ratio'],
'timeleft': str(timedelta(seconds = item['eta'])),
'folder': ss(os.path.join(download_dir, item['name'])),
'original_status': torrent['state'],
'seed_ratio': torrent['ratio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])),
'files': '|'.join(torrent_files),
})
return statuses
return release_downloads
def pause(self, item, pause = True):
def pause(self, release_download, pause = True):
if pause:
return self.drpc.pause_torrent([item['id']])
return self.drpc.pause_torrent([release_download['id']])
else:
return self.drpc.resume_torrent([item['id']])
return self.drpc.resume_torrent([release_download['id']])
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
return self.drpc.remove_torrent(item['id'], True)
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
return self.drpc.remove_torrent(release_download['id'], True)
def processComplete(self, item, delete_files = False):
log.debug('Requesting Deluge to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.drpc.remove_torrent(item['id'], remove_local_data = delete_files)
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting Deluge to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.drpc.remove_torrent(release_download['id'], remove_local_data = delete_files)
class DelugeRPC(object):
@@ -171,7 +174,10 @@ class DelugeRPC(object):
try:
self.connect()
torrent_id = self.client.core.add_torrent_magnet(torrent, options).get()
if options['label']:
if not torrent_id:
torrent_id = self._check_torrent(True, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label']).get()
except Exception, err:
log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc()))
@@ -185,8 +191,11 @@ class DelugeRPC(object):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_file(filename, torrent, options).get()
if options['label']:
torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options).get()
if not torrent_id:
torrent_id = self._check_torrent(False, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label']).get()
except Exception, err:
log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc()))
@@ -242,3 +251,22 @@ class DelugeRPC(object):
def disconnect(self):
self.client.disconnect()
def _check_torrent(self, magnet, torrent):
# Torrent not added, check if it already existed.
if magnet:
torrent_hash = re.findall('urn:btih:([\w]{32,40})', torrent)[0]
else:
info = bdecode(torrent)["info"]
torrent_hash = sha1(benc(info)).hexdigest()
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
torrent_hash = torrent_hash.lower()
torrent_check = self.client.core.get_torrent_status(torrent_hash, {}).get()
if torrent_check['hash']:
return torrent_hash
return False

View File

@@ -12,6 +12,7 @@ config = [{
'name': 'nzbget',
'label': 'NZBGet',
'description': 'Use <a href="http://nzbget.sourceforge.net/Main_Page" target="_blank">NZBGet</a> to download NZBs.',
'wizard': True,
'options': [
{
'name': 'enabled',

View File

@@ -1,6 +1,6 @@
from base64 import standard_b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import ss, sp
from couchpotato.core.helpers.variable import tryInt, md5
from couchpotato.core.logger import CPLog
from datetime import timedelta
@@ -99,60 +99,60 @@ class NZBGet(Downloader):
log.error('Failed getting data: %s', traceback.format_exc(1))
return False
statuses = StatusList(self)
release_downloads = ReleaseDownloadList(self)
for item in groups:
log.debug('Found %s in NZBGet download queue', item['NZBFilename'])
for nzb in groups:
log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
try:
nzb_id = [param['Value'] for param in item['Parameters'] if param['Name'] == 'couchpotato'][0]
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = item['NZBID']
nzb_id = nzb['NZBID']
timeleft = -1
try:
if item['ActiveDownloads'] > 0 and item['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
timeleft = str(timedelta(seconds = item['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20))
if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20))
except:
pass
statuses.append({
release_downloads.append({
'id': nzb_id,
'name': item['NZBFilename'],
'original_status': 'DOWNLOADING' if item['ActiveDownloads'] > 0 else 'QUEUED',
'name': nzb['NZBFilename'],
'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
# Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
'timeleft': timeleft,
})
for item in queue: # 'Parameters' is not passed in rpc.postqueue
log.debug('Found %s in NZBGet postprocessing queue', item['NZBFilename'])
statuses.append({
'id': item['NZBID'],
'name': item['NZBFilename'],
'original_status': item['Stage'],
for nzb in queue: # 'Parameters' is not passed in rpc.postqueue
log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
release_downloads.append({
'id': nzb['NZBID'],
'name': nzb['NZBFilename'],
'original_status': nzb['Stage'],
'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
})
for item in history:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (item['NZBFilename'] , item['ParStatus'], item['ScriptStatus'] , item['Log']))
for nzb in history:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
try:
nzb_id = [param['Value'] for param in item['Parameters'] if param['Name'] == 'couchpotato'][0]
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = item['NZBID']
statuses.append({
nzb_id = nzb['NZBID']
release_downloads.append({
'id': nzb_id,
'name': item['NZBFilename'],
'status': 'completed' if item['ParStatus'] in ['SUCCESS','NONE'] and item['ScriptStatus'] in ['SUCCESS','NONE'] else 'failed',
'original_status': item['ParStatus'] + ', ' + item['ScriptStatus'],
'name': nzb['NZBFilename'],
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)),
'folder': ss(item['DestDir'])
'folder': sp(nzb['DestDir'])
})
return statuses
return release_downloads
def removeFailed(self, item):
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name'])
log.info('%s failed downloading, deleting...', release_download['name'])
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
@@ -179,9 +179,9 @@ class NZBGet(Downloader):
for hist in history:
for param in hist['Parameters']:
if param['Name'] == 'couchpotato' and param['Value'] == item['id']:
if param['Name'] == 'couchpotato' and param['Value'] == release_download['id']:
nzb_id = hist['ID']
path = hist['DestDir']
path = hist['DestDir']
if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]):
shutil.rmtree(path, True)

View File

@@ -1,6 +1,6 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, sp
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from urllib2 import URLError
@@ -30,10 +30,10 @@ class NZBVortex(Downloader):
# Send the nzb
try:
nzb_filename = self.createFileName(data, filedata, movie)
self.call('nzb/add', params = {'file': (ss(nzb_filename), filedata)}, multipart = True)
self.call('nzb/add', params = {'file': (nzb_filename, filedata)}, multipart = True)
raw_statuses = self.call('nzb')
nzb_id = [item['id'] for item in raw_statuses.get('nzbs', []) if item['name'] == nzb_filename][0]
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if nzb['name'] == nzb_filename][0]
return self.downloadReturnId(nzb_id)
except:
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
@@ -43,33 +43,33 @@ class NZBVortex(Downloader):
raw_statuses = self.call('nzb')
statuses = StatusList(self)
for item in raw_statuses.get('nzbs', []):
release_downloads = ReleaseDownloadList(self)
for nzb in raw_statuses.get('nzbs', []):
# Check status
status = 'busy'
if item['state'] == 20:
if nzb['state'] == 20:
status = 'completed'
elif item['state'] in [21, 22, 24]:
elif nzb['state'] in [21, 22, 24]:
status = 'failed'
statuses.append({
'id': item['id'],
'name': item['uiTitle'],
release_downloads.append({
'id': nzb['id'],
'name': nzb['uiTitle'],
'status': status,
'original_status': item['state'],
'original_status': nzb['state'],
'timeleft':-1,
'folder': ss(item['destinationPath']),
'folder': sp(nzb['destinationPath']),
})
return statuses
return release_downloads
def removeFailed(self, item):
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name'])
log.info('%s failed downloading, deleting...', release_download['name'])
try:
self.call('nzb/%s/cancel' % item['id'])
self.call('nzb/%s/cancel' % release_download['id'])
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False

View File

@@ -23,6 +23,8 @@ config = [{
{
'name': 'url',
'default': 'http://localhost:80/RPC2',
'description': 'XML-RPC Endpoint URI. Usually <strong>scgi://localhost:5000</strong> '
'or <strong>http://localhost:80/RPC2</strong>'
},
{
'name': 'username',
@@ -35,6 +37,11 @@ config = [{
'name': 'label',
'description': 'Label to apply on added torrents.',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Download to this directory. Keep empty for default rTorrent download directory.',
},
{
'name': 'remove_complete',
'label': 'Remove torrent',
@@ -51,6 +58,14 @@ config = [{
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'append_label',
'label': 'Append Label',
'default': False,
'advanced': True,
'type': 'bool',
'description': 'Append label to download location. Requires you to set the download location above.',
},
{
'name': 'paused',
'type': 'bool',

View File

@@ -1,13 +1,13 @@
from base64 import b16encode, b32decode
from bencode import bencode, bdecode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.logger import CPLog
from datetime import timedelta
from hashlib import sha1
from rtorrent import RTorrent
from rtorrent.err import MethodError
import shutil
import os
log = CPLog(__name__)
@@ -71,7 +71,7 @@ class rTorrent(Downloader):
group.set_command()
group.disable()
except MethodError, err:
log.error('Unable to set group options: %s', err.message)
log.error('Unable to set group options: %s', err.msg)
return False
return True
@@ -91,6 +91,7 @@ class rTorrent(Downloader):
if self.conf('label'):
torrent_params['label'] = self.conf('label')
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
@@ -116,10 +117,19 @@ class rTorrent(Downloader):
# Send torrent to rTorrent
torrent = self.rt.load_torrent(filedata)
if not torrent:
log.error('Unable to find the torrent, did it fail to load?')
return False
# Set label
if self.conf('label'):
torrent.set_custom(1, self.conf('label'))
if self.conf('directory') and self.conf('append_label'):
torrent.set_directory(os.path.join(self.conf('directory'), self.conf('label')))
elif self.conf('directory'):
torrent.set_directory(self.conf('directory'))
# Set Ratio Group
torrent.set_visible(group_name)
@@ -141,37 +151,42 @@ class rTorrent(Downloader):
try:
torrents = self.rt.get_torrents()
statuses = StatusList(self)
release_downloads = ReleaseDownloadList(self)
for torrent in torrents:
torrent_files = []
for file_item in torrent.get_files():
torrent_files.append(sp(os.path.join(torrent.directory, file_item.path)))
for item in torrents:
status = 'busy'
if item.complete:
if item.active:
if torrent.complete:
if torrent.active:
status = 'seeding'
else:
status = 'completed'
statuses.append({
'id': item.info_hash,
'name': item.name,
release_downloads.append({
'id': torrent.info_hash,
'name': torrent.name,
'status': status,
'seed_ratio': item.ratio,
'original_status': item.state,
'timeleft': str(timedelta(seconds = float(item.left_bytes) / item.down_rate)) if item.down_rate > 0 else -1,
'folder': ss(item.directory)
'seed_ratio': torrent.ratio,
'original_status': torrent.state,
'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1,
'folder': sp(torrent.directory),
'files': '|'.join(torrent_files)
})
return statuses
return release_downloads
except Exception, err:
log.error('Failed to get status from rTorrent: %s', err)
return False
def pause(self, download_info, pause = True):
def pause(self, release_download, pause = True):
if not self.connect():
return False
torrent = self.rt.find_torrent(download_info['id'])
torrent = self.rt.find_torrent(release_download['id'])
if torrent is None:
return False
@@ -179,23 +194,34 @@ class rTorrent(Downloader):
return torrent.pause()
return torrent.resume()
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
return self.processComplete(item, delete_files = True)
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
return self.processComplete(release_download, delete_files = True)
def processComplete(self, item, delete_files):
def processComplete(self, release_download, delete_files):
log.debug('Requesting rTorrent to remove the torrent %s%s.',
(item['name'], ' and cleanup the downloaded files' if delete_files else ''))
(release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
if not self.connect():
return False
torrent = self.rt.find_torrent(item['id'])
torrent = self.rt.find_torrent(release_download['id'])
if torrent is None:
return False
if delete_files:
for file_item in torrent.get_files(): # will only delete files, not dir/sub-dir
os.unlink(os.path.join(torrent.directory, file_item.path))
if torrent.is_multi_file() and torrent.directory.endswith(torrent.name):
# Remove empty directories bottom up
try:
for path, _, _ in os.walk(torrent.directory, topdown = False):
os.rmdir(path)
except OSError:
log.info('Directory "%s" contains extra files, unable to remove', torrent.directory)
torrent.erase() # just removes the torrent, doesn't delete data
if delete_files:
shutil.rmtree(item['folder'], True)
return True

View File

@@ -1,11 +1,12 @@
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, ss, sp
from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
from urllib2 import URLError
import json
import os
import traceback
log = CPLog(__name__)
@@ -86,47 +87,58 @@ class Sabnzbd(Downloader):
log.error('Failed getting history json: %s', traceback.format_exc(1))
return False
statuses = StatusList(self)
release_downloads = ReleaseDownloadList(self)
# Get busy releases
for item in queue.get('slots', []):
statuses.append({
'id': item['nzo_id'],
'name': item['filename'],
'original_status': item['status'],
'timeleft': item['timeleft'] if not queue['paused'] else -1,
for nzb in queue.get('slots', []):
status = 'busy'
if 'ENCRYPTED / ' in nzb['filename']:
status = 'failed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['filename'],
'status': status,
'original_status': nzb['status'],
'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
})
# Get old releases
for item in history.get('slots', []):
for nzb in history.get('slots', []):
status = 'busy'
if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()):
if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
status = 'failed'
elif item['status'] == 'Completed':
elif nzb['status'] == 'Completed':
status = 'completed'
statuses.append({
'id': item['nzo_id'],
'name': item['name'],
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['name'],
'status': status,
'original_status': item['status'],
'original_status': nzb['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': ss(item['storage']),
'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
})
return statuses
return release_downloads
def removeFailed(self, item):
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name'])
log.info('%s failed downloading, deleting...', release_download['name'])
try:
self.call({
'mode': 'queue',
'name': 'delete',
'del_files': '1',
'value': release_download['id']
}, use_json = False)
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '1',
'value': item['id']
'value': release_download['id']
}, use_json = False)
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
@@ -134,15 +146,15 @@ class Sabnzbd(Downloader):
return True
def processComplete(self, item, delete_files = False):
log.debug('Requesting SabNZBd to remove the NZB %s.', item['name'])
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting SabNZBd to remove the NZB %s.', release_download['name'])
try:
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '0',
'value': item['id']
'value': release_download['id']
}, use_json = False)
except:
log.error('Failed removing: %s', traceback.format_exc(0))

View File

@@ -3,6 +3,7 @@ from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.logger import CPLog
import json
import requests
import traceback
log = CPLog(__name__)
@@ -34,12 +35,12 @@ class Synology(Downloader):
elif data['protocol'] in ['nzb', 'torrent']:
log.info('Adding %s' % data['protocol'])
if not filedata:
log.error('No %s data found' % data['protocol'])
log.error('No %s data found', data['protocol'])
else:
filename = data['name'] + '.' + data['protocol']
response = srpc.create_task(filename = filename, filedata = filedata)
except Exception, err:
log.error('Exception while adding torrent: %s', err)
except:
log.error('Exception while adding torrent: %s', traceback.format_exc())
finally:
return response

View File

@@ -1,9 +1,8 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt, ss
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
import httplib
import json
@@ -89,10 +88,10 @@ class Transmission(Downloader):
if not self.connect():
return False
statuses = StatusList(self)
release_downloads = ReleaseDownloadList(self)
return_params = {
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit']
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files']
}
queue = self.trpc.get_alltorrents(return_params)
@@ -100,47 +99,48 @@ class Transmission(Downloader):
log.debug('Nothing in queue or error')
return False
for item in queue['torrents']:
for torrent in queue['torrents']:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / isFinished=%s',
(item['name'], item['id'], item['downloadDir'], item['hashString'], item['percentDone'], item['status'], item['eta'], item['uploadRatio'], item['isFinished']))
(torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent['eta'], torrent['uploadRatio'], torrent['isFinished']))
if not os.path.isdir(Env.setting('from', 'renamer')):
log.error('Renamer "from" folder doesn\'t to exist.')
return
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(torrent['downloadDir'], file_item['name'])))
status = 'busy'
if item['isStalled'] and self.conf('stalled_as_failed'):
if torrent.get('isStalled') and self.conf('stalled_as_failed'):
status = 'failed'
elif item['status'] == 0 and item['percentDone'] == 1:
elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed'
elif item['status'] in [5, 6]:
elif torrent['status'] in [5, 6]:
status = 'seeding'
statuses.append({
'id': item['hashString'],
'name': item['name'],
release_downloads.append({
'id': torrent['hashString'],
'name': torrent['name'],
'status': status,
'original_status': item['status'],
'seed_ratio': item['uploadRatio'],
'timeleft': str(timedelta(seconds = item['eta'])),
'folder': ss(os.path.join(item['downloadDir'], item['name'])),
'original_status': torrent['status'],
'seed_ratio': torrent['uploadRatio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(torrent['downloadDir'] if len(torrent_files) == 1 else os.path.join(torrent['downloadDir'], torrent['name'])),
'files': '|'.join(torrent_files)
})
return statuses
return release_downloads
def pause(self, item, pause = True):
def pause(self, release_download, pause = True):
if pause:
return self.trpc.stop_torrent(item['id'])
return self.trpc.stop_torrent(release_download['id'])
else:
return self.trpc.start_torrent(item['id'])
return self.trpc.start_torrent(release_download['id'])
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
return self.trpc.remove_torrent(item['hashString'], True)
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
return self.trpc.remove_torrent(release_download['id'], True)
def processComplete(self, item, delete_files = False):
log.debug('Requesting Transmission to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.trpc.remove_torrent(item['hashString'], delete_files)
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting Transmission to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.trpc.remove_torrent(release_download['id'], delete_files)
class TransmissionRPC(object):

View File

@@ -1,7 +1,7 @@
from base64 import b16encode, b32decode
from bencode import bencode as benc, bdecode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt, ss
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, ss, sp
from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.logger import CPLog
from datetime import timedelta
@@ -102,39 +102,6 @@ class uTorrent(Downloader):
if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash)
count = 0
while True:
count += 1
# Check if torrent is saved in subfolder of torrent name
data = self.utorrent_api.get_files(torrent_hash)
torrent_files = json.loads(data)
if torrent_files.get('error'):
log.error('Error getting data from uTorrent: %s', torrent_files.get('error'))
return False
if (torrent_files.get('files') and len(torrent_files['files'][1]) > 0) or count > 60:
break
time.sleep(1)
# Torrent has only one file, so uTorrent wont create a folder for it
if len(torrent_files['files'][1]) == 1:
# Remove torrent and try again
self.utorrent_api.remove_torrent(torrent_hash, remove_data = True)
# Send request to uTorrent
if data.get('protocol') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url'), add_folder = True)
else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata, add_folder = True)
# Change settings of added torrent
self.utorrent_api.set_torrent(torrent_hash, torrent_params)
if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash)
return self.downloadReturnId(torrent_hash)
def getAllDownloadStatus(self):
@@ -144,7 +111,7 @@ class uTorrent(Downloader):
if not self.connect():
return False
statuses = StatusList(self)
release_downloads = ReleaseDownloadList(self)
data = self.utorrent_api.get_status()
if not data:
@@ -161,52 +128,74 @@ class uTorrent(Downloader):
return False
# Get torrents
for item in queue['torrents']:
for torrent in queue['torrents']:
#Get files of the torrent
torrent_files = []
try:
torrent_files = json.loads(self.utorrent_api.get_files(torrent[0]))
torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]]
except:
log.debug('Failed getting files from torrent: %s', torrent[2])
status_flags = {
"STARTED" : 1,
"CHECKING" : 2,
"CHECK-START" : 4,
"CHECKED" : 8,
"ERROR" : 16,
"PAUSED" : 32,
"QUEUED" : 64,
"LOADED" : 128
}
# item[21] = Paused | Downloading | Seeding | Finished
status = 'busy'
if 'Finished' in item[21]:
status = 'completed'
self.removeReadOnly(item[26])
elif 'Seeding' in item[21]:
if (torrent[1] & status_flags["STARTED"] or torrent[1] & status_flags["QUEUED"]) and torrent[4] == 1000:
status = 'seeding'
self.removeReadOnly(item[26])
elif (torrent[1] & status_flags["ERROR"]):
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'
statuses.append({
'id': item[0],
'name': item[2],
'status': status,
'seed_ratio': float(item[7]) / 1000,
'original_status': item[1],
'timeleft': str(timedelta(seconds = item[10])),
'folder': ss(item[26]),
if not status == 'busy':
self.removeReadOnly(torrent_files)
release_downloads.append({
'id': torrent[0],
'name': torrent[2],
'status': status,
'seed_ratio': float(torrent[7]) / 1000,
'original_status': torrent[1],
'timeleft': str(timedelta(seconds = torrent[10])),
'folder': sp(torrent[26]),
'files': '|'.join(torrent_files)
})
return statuses
return release_downloads
def pause(self, item, pause = True):
def pause(self, release_download, pause = True):
if not self.connect():
return False
return self.utorrent_api.pause_torrent(item['id'], pause)
return self.utorrent_api.pause_torrent(release_download['id'], pause)
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
if not self.connect():
return False
return self.utorrent_api.remove_torrent(item['id'], remove_data = True)
return self.utorrent_api.remove_torrent(release_download['id'], remove_data = True)
def processComplete(self, item, delete_files = False):
log.debug('Requesting uTorrent to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else ''))
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting uTorrent to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
if not self.connect():
return False
return self.utorrent_api.remove_torrent(item['id'], remove_data = delete_files)
def removeReadOnly(self, folder):
#Removes all read-only flags in a folder
if folder and os.path.isdir(folder):
for root, folders, filenames in os.walk(folder):
for filename in filenames:
os.chmod(os.path.join(root, filename), stat.S_IWRITE)
return self.utorrent_api.remove_torrent(release_download['id'], remove_data = delete_files)
def removeReadOnly(self, files):
    """Clear the read-only flag on every existing file in `files`.

    files -- iterable of file paths (paths that are not regular files
             are silently skipped)
    """
    # Removes the read-only flag for all given files
    for filepath in files:
        if os.path.isfile(filepath):
            # Windows only needs S_IWRITE, but we bitwise-or with current perms to preserve other permission bits on Linux
            os.chmod(filepath, stat.S_IWRITE | os.stat(filepath).st_mode)
class uTorrentAPI(object):
@@ -304,13 +293,13 @@ class uTorrentAPI(object):
utorrent_settings = json.loads(self._request(action))
# Create settings dict
for item in utorrent_settings['settings']:
if item[1] == 0: # int
settings_dict[item[0]] = int(item[2] if not item[2].strip() == '' else '0')
elif item[1] == 1: # bool
settings_dict[item[0]] = True if item[2] == 'true' else False
elif item[1] == 2: # string
settings_dict[item[0]] = item[2]
for setting in utorrent_settings['settings']:
if setting[1] == 0: # int
settings_dict[setting[0]] = int(setting[2] if not setting[2].strip() == '' else '0')
elif setting[1] == 1: # bool
settings_dict[setting[0]] = True if setting[2] == 'true' else False
elif setting[1] == 2: # string
settings_dict[setting[0]] = setting[2]
#log.debug('uTorrent settings: %s', settings_dict)

View File

@@ -1,6 +1,7 @@
from couchpotato.core.logger import CPLog
from string import ascii_letters, digits
from urllib import quote_plus
import os
import re
import traceback
import unicodedata
@@ -38,8 +39,18 @@ def toUnicode(original, *args):
return toUnicode(ascii_text)
def ss(original, *args):
from couchpotato.environment import Env
return toUnicode(original, *args).encode(Env.get('encoding'))
u_original = toUnicode(original, *args)
try:
from couchpotato.environment import Env
return u_original.encode(Env.get('encoding'))
except Exception, e:
log.debug('Failed ss encoding char, force UTF8: %s', e)
return u_original.encode('UTF-8')
def sp(path, *args):
    """Standardise a filesystem path.

    Encodes via ss(), normalises case and separators, and strips a
    trailing '/' or '\\' so equal paths compare equal.

    Returns the standardised byte-string path.
    """
    # Standardise encoding, normalise case, path and strip trailing '/' or '\'
    clean = os.path.normcase(os.path.normpath(ss(path, *args))).rstrip(os.path.sep)

    # BUG FIX: stripping the separator from a bare root path ('/') left an
    # empty string, which is not a usable path — keep the root separator.
    return clean if clean else os.path.sep
def ek(original, *args):
if isinstance(original, (str, unicode)):

View File

@@ -1,5 +1,6 @@
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
from couchpotato.core.logger import CPLog
import collections
import hashlib
import os.path
import platform
@@ -136,18 +137,20 @@ def getImdb(txt, check_inside = False, multiple = False):
output.close()
try:
ids = re.findall('(tt\d{7})', txt)
ids = re.findall('(tt\d{4,7})', txt)
if multiple:
return list(set(ids)) if len(ids) > 0 else []
return ids[0]
return list(set(['tt%07d' % tryInt(x[2:]) for x in ids])) if len(ids) > 0 else []
return 'tt%07d' % tryInt(ids[0][2:])
except IndexError:
pass
return False
def tryInt(s):
def tryInt(s, default = 0):
try: return int(s)
except: return 0
except: return default
def tryFloat(s):
try:
@@ -163,6 +166,11 @@ def natsortKey(s):
def natcmp(a, b):
return cmp(natsortKey(a), natsortKey(b))
def toIterable(value):
    # Return `value` unchanged when it is already iterable, otherwise wrap it
    # in a single-element list so callers can always loop over the result.
    # NOTE(review): strings are themselves iterable and come back unwrapped —
    # confirm callers expect that.
    if isinstance(value, collections.Iterable):
        return value
    return [value]
def getTitle(library_dict):
try:
try:
@@ -205,3 +213,6 @@ def randomString(size = 8, chars = string.ascii_uppercase + string.digits):
def splitString(str, split_on = ',', clean = True):
list = [x.strip() for x in str.split(split_on)] if str else []
return filter(None, list) if clean else list
def dictIsSubset(a, b):
    # True when every key of `a` is present in `b` with an equal value
    # (i.e. a's items form a subset of b's items).
    for key, value in a.items():
        if key not in b or b[key] != value:
            return False
    return True

View File

@@ -1,7 +1,8 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.logger import CPLog
import glob
from importlib import import_module
import os
import sys
import traceback
log = CPLog(__name__)
@@ -12,17 +13,6 @@ class Loader(object):
providers = {}
modules = {}
def addPath(self, root, base_path, priority, recursive = False):
for filename in os.listdir(os.path.join(root, *base_path)):
path = os.path.join(os.path.join(root, *base_path), filename)
if os.path.isdir(path) and filename[:2] != '__':
if u'__init__.py' in os.listdir(path):
new_base_path = ''.join(s + '.' for s in base_path) + filename
self.paths[new_base_path.replace('.', '_')] = (priority, new_base_path, path)
if recursive:
self.addPath(root, base_path + [filename], priority, recursive = True)
def preload(self, root = ''):
core = os.path.join(root, 'couchpotato', 'core')
@@ -39,6 +29,14 @@ class Loader(object):
# Add media to loader
self.addPath(root, ['couchpotato', 'core', 'media'], 25, recursive = True)
# Add custom plugin folder
from couchpotato.environment import Env
custom_plugin_dir = os.path.join(Env.get('data_dir'), 'custom_plugins')
if os.path.isdir(custom_plugin_dir):
sys.path.insert(0, custom_plugin_dir)
self.paths['custom_plugins'] = (30, '', custom_plugin_dir)
# Loop over all paths and add to module list
for plugin_type, plugin_tuple in self.paths.iteritems():
priority, module, dir_name = plugin_tuple
self.addFromDir(plugin_type, priority, module, dir_name)
@@ -46,8 +44,9 @@ class Loader(object):
def run(self):
did_save = 0
for priority in self.modules:
for priority in sorted(self.modules):
for module_name, plugin in sorted(self.modules[priority].iteritems()):
# Load module
try:
if plugin.get('name')[:2] == '__':
@@ -56,7 +55,6 @@ class Loader(object):
m = self.loadModule(module_name)
if m is None:
continue
m = getattr(m, plugin.get('name'))
log.info('Loading %s: %s', (plugin['type'], plugin['name']))
@@ -78,20 +76,26 @@ class Loader(object):
if did_save:
fireEvent('settings.save')
def addPath(self, root, base_path, priority, recursive = False):
root_path = os.path.join(root, *base_path)
for filename in os.listdir(root_path):
path = os.path.join(root_path, filename)
if os.path.isdir(path) and filename[:2] != '__':
if u'__init__.py' in os.listdir(path):
new_base_path = ''.join(s + '.' for s in base_path) + filename
self.paths[new_base_path.replace('.', '_')] = (priority, new_base_path, path)
if recursive:
self.addPath(root, base_path + [filename], priority, recursive = True)
def addFromDir(self, plugin_type, priority, module, dir_name):
# Load dir module
try:
m = __import__(module)
splitted = module.split('.')
for sub in splitted[1:]:
m = getattr(m, sub)
except:
raise
if module and len(module) > 0:
self.addModule(priority, plugin_type, module, os.path.basename(dir_name))
for cur_file in glob.glob(os.path.join(dir_name, '*')):
name = os.path.basename(cur_file)
if os.path.isdir(os.path.join(dir_name, name)) and name != 'static' and os.path.isfile(os.path.join(cur_file, '__init__.py')):
for name in os.listdir(dir_name):
if os.path.isdir(os.path.join(dir_name, name)) and name != 'static' and os.path.isfile(os.path.join(dir_name, name, '__init__.py')):
module_name = '%s.%s' % (module, name)
self.addModule(priority, plugin_type, module_name, name)
@@ -131,6 +135,7 @@ class Loader(object):
if not self.modules.get(priority):
self.modules[priority] = {}
module = module.lstrip('.')
self.modules[priority][module] = {
'priority': priority,
'module': module,
@@ -140,11 +145,7 @@ class Loader(object):
def loadModule(self, name):
try:
m = __import__(name)
splitted = name.split('.')
for sub in splitted[1:-1]:
m = getattr(m, sub)
return m
return import_module(name)
except ImportError:
log.debug('Skip loading module plugin %s: %s', (name, traceback.format_exc()))
return None

View File

@@ -1,13 +1,44 @@
from couchpotato.core.event import addEvent
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Media
class MediaBase(Plugin):
_type = None
default_dict = {
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}, 'files':{}, 'info': {}},
'library': {'titles': {}, 'files':{}},
'files': {},
'status': {},
'category': {},
}
def initType(self):
addEvent('media.types', self.getType)
def getType(self):
return self._type
def createOnComplete(self, id):
def onComplete():
db = get_session()
media = db.query(Media).filter_by(id = id).first()
fireEventAsync('%s.searcher.single' % media.type, media.to_dict(self.default_dict), on_complete = self.createNotifyFront(id))
db.expire_all()
return onComplete
def createNotifyFront(self, media_id):
def notifyFront():
db = get_session()
media = db.query(Media).filter_by(id = media_id).first()
fireEvent('notify.frontend', type = '%s.update.%s' % (media.type, media.id), data = media.to_dict(self.default_dict))
db.expire_all()
return notifyFront

View File

@@ -0,0 +1,6 @@
from .main import MediaPlugin
def start():
    # Plugin-loader entry point: instantiate the media plugin
    return MediaPlugin()
config = []

View File

@@ -0,0 +1,49 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from couchpotato.core.settings.model import Media
log = CPLog(__name__)
class MediaPlugin(MediaBase):
    """Generic media API plugin: exposes a `media.refresh` endpoint shared by
    all media types, plus a per-type `<type>.refresh` alias."""

    def __init__(self):
        addApiView('media.refresh', self.refresh, docs = {
            'desc': 'Refresh a any media type by ID',
            'params': {
                'id': {'desc': 'Movie, Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'},
            }
        })

        addEvent('app.load', self.addSingleRefresh)

    def refresh(self, id = '', **kwargs):
        """Fire an async library update for each media ID in the
        comma-separated `id` string; returns a success dict immediately."""
        db = get_session()

        for x in splitString(id):
            media = db.query(Media).filter_by(id = x).first()

            if media:
                # Get current selected title
                default_title = ''
                for title in media.library.titles:
                    if title.default: default_title = title.title

                # Mark the item busy on the frontend, then update in the background
                fireEvent('notify.frontend', type = '%s.busy.%s' % (media.type, x), data = True)
                fireEventAsync('library.update.%s' % media.type, identifier = media.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))

        db.expire_all()

        return {
            'success': True,
        }

    def addSingleRefresh(self):
        # Register a '<type>.refresh' API view for every registered media type
        for media_type in fireEvent('media.types', merge = True):
            addApiView('%s.refresh' % media_type, self.refresh)

View File

@@ -0,0 +1,6 @@
from .main import Search
def start():
    # Plugin-loader entry point: instantiate the search plugin
    return Search()
config = []

View File

@@ -0,0 +1,59 @@
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class Search(Plugin):
    """Global search API: one `search` endpoint that fans out to the
    registered media-type searchers, plus a `<type>.search` endpoint per
    media type."""

    def __init__(self):
        addApiView('search', self.search, docs = {
            'desc': 'Search the info in providers for a movie',
            'params': {
                'q': {'desc': 'The (partial) movie name you want to search for'},
                'type': {'desc': 'Search for a specific media type. Leave empty to search all.'},
            },
            'return': {'type': 'object', 'example': """{
    'success': True,
    'movies': array,
    'show': array,
    etc
}"""}
        })

        addEvent('app.load', self.addSingleSearches)

    def search(self, q = '', types = None, **kwargs):
        """Search providers for `q`.

        types -- None/empty searches all media types; a string or a
                 list/tuple/set of strings restricts to those types.
        Returns a dict with 'success' merged over the per-type results.
        """
        # Make sure types is the correct instance
        if isinstance(types, (str, unicode)):
            types = [types]
        elif isinstance(types, (list, tuple, set)):
            types = list(types)

        if not types:
            result = fireEvent('info.search', q = q, merge = True)
        else:
            result = {}
            for media_type in types:
                result[media_type] = fireEvent('%s.search' % media_type)

        return mergeDicts({
            'success': True,
        }, result)

    def createSingleSearch(self, media_type):
        """Build a search handler bound to a single media type."""

        def singleSearch(q, **kwargs):
            # BUG FIX: was `type = media_type`, which search() never reads
            # (its parameter is `types`), so per-type endpoints searched all
            # types instead of only their own.
            return self.search(q, types = media_type, **kwargs)

        return singleSearch

    def addSingleSearches(self):
        # Register a '<type>.search' API view for every registered media type
        for media_type in fireEvent('media.types', merge = True):
            addApiView('%s.search' % media_type, self.createSingleSearch(media_type))

View File

@@ -129,13 +129,13 @@
overflow-x: hidden;
}
.movie_result {
.media_result {
overflow: hidden;
height: 50px;
position: relative;
}
.movie_result .options {
.media_result .options {
position: absolute;
height: 100%;
top: 0;
@@ -147,48 +147,48 @@
border-radius: 0;
box-shadow: inset 0 1px 8px rgba(0,0,0,0.25);
}
.movie_result .options > .in_library_wanted {
.media_result .options > .in_library_wanted {
margin-top: -7px;
}
.movie_result .options > div {
.media_result .options > div {
border: 0;
}
.movie_result .options .thumbnail {
.media_result .options .thumbnail {
vertical-align: middle;
}
.movie_result .options select {
.media_result .options select {
vertical-align: middle;
display: inline-block;
margin-right: 10px;
}
.movie_result .options select[name=title] { width: 170px; }
.movie_result .options select[name=profile] { width: 90px; }
.movie_result .options select[name=category] { width: 80px; }
.media_result .options select[name=title] { width: 170px; }
.media_result .options select[name=profile] { width: 90px; }
.media_result .options select[name=category] { width: 80px; }
@media all and (max-width: 480px) {
.movie_result .options select[name=title] { width: 90px; }
.movie_result .options select[name=profile] { width: 50px; }
.movie_result .options select[name=category] { width: 50px; }
.media_result .options select[name=title] { width: 90px; }
.media_result .options select[name=profile] { width: 50px; }
.media_result .options select[name=category] { width: 50px; }
}
.movie_result .options .button {
.media_result .options .button {
vertical-align: middle;
display: inline-block;
}
.movie_result .options .message {
.media_result .options .message {
height: 100%;
font-size: 20px;
color: #fff;
line-height: 20px;
}
.movie_result .data {
.media_result .data {
position: absolute;
height: 100%;
top: 0;
@@ -199,20 +199,20 @@
border-top: 1px solid rgba(255,255,255, 0.08);
transition: all .4s cubic-bezier(0.9,0,0.1,1);
}
.movie_result .data.open {
.media_result .data.open {
left: 100% !important;
}
.movie_result:last-child .data { border-bottom: 0; }
.media_result:last-child .data { border-bottom: 0; }
.movie_result .in_wanted, .movie_result .in_library {
.media_result .in_wanted, .media_result .in_library {
position: absolute;
bottom: 2px;
left: 14px;
font-size: 11px;
}
.movie_result .thumbnail {
.media_result .thumbnail {
width: 34px;
min-height: 100%;
display: block;
@@ -220,7 +220,7 @@
vertical-align: top;
}
.movie_result .info {
.media_result .info {
position: absolute;
top: 20%;
left: 15px;
@@ -228,7 +228,7 @@
vertical-align: middle;
}
.movie_result .info h2 {
.media_result .info h2 {
margin: 0;
font-weight: normal;
font-size: 20px;
@@ -240,7 +240,7 @@
width: 100%;
}
.movie_result .info h2 .title {
.media_result .info h2 .title {
display: block;
margin: 0;
text-overflow: ellipsis;
@@ -253,7 +253,7 @@
width: 88%;
}
.movie_result .info h2 .year {
.media_result .info h2 .year {
padding: 0 5px;
text-align: center;
position: absolute;
@@ -271,7 +271,7 @@
}
.search_form .mask,
.movie_result .mask {
.media_result .mask {
position: absolute;
height: 100%;
width: 100%;

View File

@@ -0,0 +1,188 @@
Block.Search = new Class({
Extends: BlockBase,
cache: {},
create: function(){
var self = this;
var focus_timer = 0;
self.el = new Element('div.search_form').adopt(
new Element('div.input').adopt(
self.input = new Element('input', {
'placeholder': 'Search & add a new media',
'events': {
'keyup': self.keyup.bind(self),
'focus': function(){
if(focus_timer) clearTimeout(focus_timer);
self.el.addClass('focused')
if(this.get('value'))
self.hideResults(false)
},
'blur': function(){
focus_timer = (function(){
self.el.removeClass('focused')
}).delay(100);
}
}
}),
new Element('a.icon2', {
'events': {
'click': self.clear.bind(self),
'touchend': self.clear.bind(self)
}
})
),
self.result_container = new Element('div.results_container', {
'tween': {
'duration': 200
},
'events': {
'mousewheel': function(e){
(e).stopPropagation();
}
}
}).adopt(
self.results = new Element('div.results')
)
);
self.mask = new Element('div.mask').inject(self.result_container).fade('hide');
},
clear: function(e){
var self = this;
(e).preventDefault();
if(self.last_q === ''){
self.input.blur()
self.last_q = null;
}
else {
self.last_q = '';
self.input.set('value', '');
self.input.focus()
self.media = {}
self.results.empty()
self.el.removeClass('filled')
}
},
hideResults: function(bool){
var self = this;
if(self.hidden == bool) return;
self.el[bool ? 'removeClass' : 'addClass']('shown');
if(bool){
History.removeEvent('change', self.hideResults.bind(self, !bool));
self.el.removeEvent('outerClick', self.hideResults.bind(self, !bool));
}
else {
History.addEvent('change', self.hideResults.bind(self, !bool));
self.el.addEvent('outerClick', self.hideResults.bind(self, !bool));
}
self.hidden = bool;
},
keyup: function(e){
var self = this;
self.el[self.q() ? 'addClass' : 'removeClass']('filled')
if(self.q() != self.last_q){
if(self.api_request && self.api_request.isRunning())
self.api_request.cancel();
if(self.autocomplete_timer) clearTimeout(self.autocomplete_timer)
self.autocomplete_timer = self.autocomplete.delay(300, self)
}
},
autocomplete: function(){
var self = this;
if(!self.q()){
self.hideResults(true)
return
}
self.list()
},
list: function(){
var self = this,
q = self.q(),
cache = self.cache[q];
self.hideResults(false);
if(!cache){
self.mask.fade('in');
if(!self.spinner)
self.spinner = createSpinner(self.mask);
self.api_request = Api.request('search', {
'data': {
'q': q
},
'onComplete': self.fill.bind(self, q)
})
}
else
self.fill(q, cache)
self.last_q = q;
},
fill: function(q, json){
var self = this;
self.cache[q] = json
self.media = {}
self.results.empty()
Object.each(json, function(media, type){
if(typeOf(media) == 'array'){
Object.each(media, function(m){
var m = new Block.Search[m.type.capitalize() + 'Item'](m);
$(m).inject(self.results)
self.media[m.imdb || 'r-'+Math.floor(Math.random()*10000)] = m
if(q == m.imdb)
m.showOptions()
});
}
})
// Calculate result heights
var w = window.getSize(),
rc = self.result_container.getCoordinates();
self.results.setStyle('max-height', (w.y - rc.top - 50) + 'px')
self.mask.fade('out')
},
loading: function(bool){
this.el[bool ? 'addClass' : 'removeClass']('loading')
},
q: function(){
return this.input.get('value').trim();
}
});

View File

@@ -47,7 +47,7 @@ config = [{
{
'name': 'ignored_words',
'label': 'Ignored',
'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs',
'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs, vain',
'description': 'Ignores releases that match any of these sets. (Works like explained above)'
},
],

View File

@@ -1,17 +1,11 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.variable import md5, getTitle
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.base import SearcherBase
from couchpotato.core.settings.model import Movie, Release, ReleaseInfo
from couchpotato.environment import Env
from inspect import ismethod, isfunction
import datetime
import re
import time
import traceback
log = CPLog(__name__)
@@ -23,7 +17,8 @@ class Searcher(SearcherBase):
addEvent('searcher.contains_other_quality', self.containsOtherQuality)
addEvent('searcher.correct_year', self.correctYear)
addEvent('searcher.correct_name', self.correctName)
addEvent('searcher.download', self.download)
addEvent('searcher.correct_words', self.correctWords)
addEvent('searcher.search', self.search)
addApiView('searcher.full_search', self.searchAllView, docs = {
'desc': 'Starts a full search for all media',
@@ -49,86 +44,21 @@ class Searcher(SearcherBase):
progress = fireEvent('searcher.progress', merge = True)
return progress
def download(self, data, movie, manual = False):
def search(self, protocols, media, quality):
results = []
if not data.get('protocol'):
data['protocol'] = data['type']
data['type'] = 'movie'
for search_protocol in protocols:
protocol_results = fireEvent('provider.search.%s.%s' % (search_protocol, media['type']), media, quality, merge = True)
if protocol_results:
results += protocol_results
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
if downloader_enabled:
download_preference = self.conf('preferred_method', section = 'searcher')
if download_preference != 'both':
sorted_results = sorted(sorted_results, key = lambda k: k['protocol'][:3], reverse = (download_preference == 'torrent'))
snatched_status = fireEvent('status.get', 'snatched', single = True)
# Download movie to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
download_result = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True)
log.debug('Downloader result: %s', download_result)
if download_result:
try:
# Mark release as snatched
db = get_session()
rls = db.query(Release).filter_by(identifier = md5(data['url'])).first()
if rls:
renamer_enabled = Env.setting('enabled', 'renamer')
done_status = fireEvent('status.get', 'done', single = True)
rls.status_id = done_status.get('id') if not renamer_enabled else snatched_status.get('id')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict())
# If renamer isn't used, mark movie done
if not renamer_enabled:
active_status = fireEvent('status.get', 'active', single = True)
done_status = fireEvent('status.get', 'done', single = True)
try:
if movie['status_id'] == active_status.get('id'):
for profile_type in movie['profile']['types']:
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking movie as finished: %s', log_movie)
# Mark release done
rls.status_id = done_status.get('id')
rls.last_edit = int(time.time())
db.commit()
# Mark movie done
mvie = db.query(Movie).filter_by(id = movie['id']).first()
mvie.status_id = done_status.get('id')
mvie.last_edit = int(time.time())
db.commit()
except:
log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc())
except:
log.error('Failed marking movie finished: %s', traceback.format_exc())
return True
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('protocol')))
return False
return sorted_results
def getSearchProtocols(self):
@@ -217,7 +147,7 @@ class Searcher(SearcherBase):
except: pass
# Match longest name between []
try: check_names.append(max(check_name.split('['), key = len))
try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', check_name), key = len).strip())
except: pass
for check_name in list(set(check_names)):
@@ -234,5 +164,49 @@ class Searcher(SearcherBase):
return False
def correctWords(self, rel_name, media):
    """Check a release name against the configured required/ignored word
    sets (plus the media's category overrides) and a built-in porn filter.

    rel_name -- release name string
    media    -- media dict; media['category'] may add 'required'/'ignored'
                comma-separated word sets
    Returns True when the release passes all filters, False otherwise.
    """
    media_title = fireEvent('searcher.get_search_title', media, single = True)
    media_words = re.split('\W+', simplifyString(media_title))

    rel_name = simplifyString(rel_name)
    rel_words = re.split('\W+', rel_name)

    # Make sure it has required words
    required_words = splitString(self.conf('required_words', section = 'searcher').lower())
    try: required_words = list(set(required_words + splitString(media['category']['required'].lower())))
    except: pass

    req_match = 0
    for req_set in required_words:
        # Words joined with '&' inside one set must ALL appear in the release
        req = splitString(req_set, '&')
        req_match += len(list(set(rel_words) & set(req))) == len(req)

    if len(required_words) > 0 and req_match == 0:
        log.info2('Wrong: Required word missing: %s', rel_name)
        return False

    # Ignore releases
    ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
    try: ignored_words = list(set(ignored_words + splitString(media['category']['ignored'].lower())))
    except: pass

    ignored_match = 0
    for ignored_set in ignored_words:
        # Same '&' AND-semantics as required sets
        ignored = splitString(ignored_set, '&')
        ignored_match += len(list(set(rel_words) & set(ignored))) == len(ignored)

    if len(ignored_words) > 0 and ignored_match:
        log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
        return False

    # Ignore porn stuff; words that are part of the media title itself are exempt
    pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic', 'cock', 'dick']
    pron_words = list(set(rel_words) & set(pron_tags) - set(media_words))
    if pron_words:
        log.info('Wrong: %s, probably pr0n', rel_name)
        return False

    return True
class SearchSetupError(Exception):
pass

View File

@@ -1,14 +1,13 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt, \
mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.core.settings.model import Library, LibraryTitle, Movie, \
from couchpotato.core.settings.model import Library, LibraryTitle, Media, \
Release
from couchpotato.environment import Env
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_, asc, not_, desc
from string import ascii_lowercase
@@ -19,14 +18,7 @@ log = CPLog(__name__)
class MovieBase(MovieTypeBase):
default_dict = {
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}, 'files':{}, 'info': {}},
'library': {'titles': {}, 'files':{}},
'files': {},
'status': {},
'category': {},
}
_type = 'movie'
def __init__(self):
@@ -34,17 +26,6 @@ class MovieBase(MovieTypeBase):
super(MovieBase, self).__init__()
self.initType()
addApiView('movie.search', self.search, docs = {
'desc': 'Search the movie providers for a movie',
'params': {
'q': {'desc': 'The (partial) movie name you want to search for'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any movies returned or not,
'movies': array, movies found,
}"""}
})
addApiView('movie.list', self.listView, docs = {
'desc': 'List movies in wanted list',
'params': {
@@ -66,12 +47,6 @@ class MovieBase(MovieTypeBase):
'id': {'desc': 'The id of the movie'},
}
})
addApiView('movie.refresh', self.refresh, docs = {
'desc': 'Refresh a movie by id',
'params': {
'id': {'desc': 'Movie ID(s) you want to refresh.', 'type': 'int (comma separated)'},
}
})
addApiView('movie.available_chars', self.charView)
addApiView('movie.add', self.addView, docs = {
'desc': 'Add new movie to the wanted list',
@@ -103,34 +78,6 @@ class MovieBase(MovieTypeBase):
addEvent('movie.list', self.list)
addEvent('movie.restatus', self.restatus)
# Clean releases that didn't have activity in the last week
addEvent('app.load', self.cleanReleases)
fireEvent('schedule.interval', 'movie.clean_releases', self.cleanReleases, hours = 4)
def cleanReleases(self):
log.debug('Removing releases from dashboard')
now = time.time()
week = 262080
done_status, available_status, snatched_status = \
fireEvent('status.get', ['done', 'available', 'snatched'], single = True)
db = get_session()
# get movies last_edit more than a week ago
movies = db.query(Movie) \
.filter(Movie.status_id == done_status.get('id'), Movie.last_edit < (now - week)) \
.all()
for movie in movies:
for rel in movie.releases:
if rel.status_id in [available_status.get('id'), snatched_status.get('id')]:
fireEvent('release.delete', id = rel.id, single = True)
db.expire_all()
def getView(self, id = None, **kwargs):
movie = self.get(id) if id else None
@@ -147,9 +94,9 @@ class MovieBase(MovieTypeBase):
imdb_id = getImdb(str(movie_id))
if imdb_id:
m = db.query(Movie).filter(Movie.library.has(identifier = imdb_id)).first()
m = db.query(Media).filter(Media.library.has(identifier = imdb_id)).first()
else:
m = db.query(Movie).filter_by(id = movie_id).first()
m = db.query(Media).filter_by(id = movie_id).first()
results = None
if m:
@@ -169,20 +116,20 @@ class MovieBase(MovieTypeBase):
release_status = [release_status]
# query movie ids
q = db.query(Movie) \
.with_entities(Movie.id) \
.group_by(Movie.id)
q = db.query(Media) \
.with_entities(Media.id) \
.group_by(Media.id)
# Filter on movie status
if status and len(status) > 0:
statuses = fireEvent('status.get', status, single = len(status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Movie.status_id.in_(statuses))
q = q.filter(Media.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
q = q.join(Movie.releases)
q = q.join(Media.releases)
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
@@ -191,7 +138,7 @@ class MovieBase(MovieTypeBase):
# Only join when searching / ordering
if starts_with or search or order != 'release_order':
q = q.join(Movie.library, Library.titles) \
q = q.join(Media.library, Library.titles) \
.filter(LibraryTitle.default == True)
# Add search filters
@@ -242,13 +189,13 @@ class MovieBase(MovieTypeBase):
releases_count[release.movie_id] += 1
# Get main movie data
q2 = db.query(Movie) \
q2 = db.query(Media) \
.options(joinedload_all('library.titles')) \
.options(joinedload_all('library.files')) \
.options(joinedload_all('status')) \
.options(joinedload_all('files'))
q2 = q2.filter(Movie.id.in_(movie_ids))
q2 = q2.filter(Media.id.in_(movie_ids))
results = q2.all()
@@ -291,14 +238,14 @@ class MovieBase(MovieTypeBase):
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
q = db.query(Movie)
q = db.query(Media)
# Filter on movie status
if status and len(status) > 0:
statuses = fireEvent('status.get', status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Movie.status_id.in_(statuses))
q = q.filter(Media.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
@@ -306,7 +253,7 @@ class MovieBase(MovieTypeBase):
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.join(Movie.releases) \
q = q.join(Media.releases) \
.filter(Release.status_id.in_(statuses))
q = q.join(Library, LibraryTitle) \
@@ -367,47 +314,6 @@ class MovieBase(MovieTypeBase):
'chars': chars,
}
def refresh(self, id = '', **kwargs):
db = get_session()
for x in splitString(id):
movie = db.query(Movie).filter_by(id = x).first()
if movie:
# Get current selected title
default_title = ''
for title in movie.library.titles:
if title.default: default_title = title.title
fireEvent('notify.frontend', type = 'movie.busy.%s' % x, data = True)
fireEventAsync('library.update.movie', identifier = movie.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))
db.expire_all()
return {
'success': True,
}
def search(self, q = '', **kwargs):
cache_key = u'%s/%s' % (__name__, simplifyString(q))
movies = Env.get('cache').get(cache_key)
if not movies:
if getImdb(q):
movies = [fireEvent('movie.info', identifier = q, merge = True)]
else:
movies = fireEvent('movie.search', q = q, merge = True)
Env.get('cache').set(cache_key, movies)
return {
'success': True,
'empty': len(movies) == 0 if movies else 0,
'movies': movies,
}
def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None):
if not params: params = {}
@@ -438,12 +344,12 @@ class MovieBase(MovieTypeBase):
cat_id = params.get('category_id')
db = get_session()
m = db.query(Movie).filter_by(library_id = library.get('id')).first()
m = db.query(Media).filter_by(library_id = library.get('id')).first()
added = True
do_search = False
search_after = search_after and self.conf('search_on_add', section = 'moviesearcher')
if not m:
m = Movie(
m = Media(
library_id = library.get('id'),
profile_id = params.get('profile_id', default_profile.get('id')),
status_id = status_id if status_id else status_active.get('id'),
@@ -500,15 +406,12 @@ class MovieBase(MovieTypeBase):
db.expire_all()
return movie_dict
def addView(self, **kwargs):
movie_dict = self.add(params = kwargs)
add_dict = self.add(params = kwargs)
return {
'success': True,
'added': True if movie_dict else False,
'movie': movie_dict,
'success': True if add_dict else False,
'movie': add_dict,
}
def edit(self, id = '', **kwargs):
@@ -520,7 +423,7 @@ class MovieBase(MovieTypeBase):
ids = splitString(id)
for movie_id in ids:
m = db.query(Movie).filter_by(id = movie_id).first()
m = db.query(Media).filter_by(id = movie_id).first()
if not m:
continue
@@ -567,7 +470,7 @@ class MovieBase(MovieTypeBase):
db = get_session()
movie = db.query(Movie).filter_by(id = movie_id).first()
movie = db.query(Media).filter_by(id = movie_id).first()
if movie:
deleted = False
if delete_from == 'all':
@@ -617,7 +520,7 @@ class MovieBase(MovieTypeBase):
db = get_session()
m = db.query(Movie).filter_by(id = movie_id).first()
m = db.query(Media).filter_by(id = movie_id).first()
if not m or len(m.library.titles) == 0:
log.debug('Can\'t restatus movie, doesn\'t seem to exist.')
return False
@@ -638,24 +541,3 @@ class MovieBase(MovieTypeBase):
db.commit()
return True
def createOnComplete(self, movie_id):
def onComplete():
db = get_session()
movie = db.query(Movie).filter_by(id = movie_id).first()
fireEventAsync('movie.searcher.single', movie.to_dict(self.default_dict), on_complete = self.createNotifyFront(movie_id))
db.expire_all()
return onComplete
def createNotifyFront(self, movie_id):
def notifyFront():
db = get_session()
movie = db.query(Movie).filter_by(id = movie_id).first()
fireEvent('notify.frontend', type = 'movie.update.%s' % movie.id, data = movie.to_dict(self.default_dict))
db.expire_all()
return notifyFront

View File

@@ -422,7 +422,7 @@ var MovieList = new Class({
var self = this;
var ids = self.getSelectedMovies()
Api.request('movie.refresh', {
Api.request('media.refresh', {
'data': {
'id': ids.join(','),
}

View File

@@ -18,11 +18,13 @@ var MovieAction = new Class({
create: function(){},
disable: function(){
this.el.addClass('disable')
if(this.el)
this.el.addClass('disable')
},
enable: function(){
this.el.removeClass('disable')
if(this.el)
this.el.removeClass('disable')
},
getTitle: function(){
@@ -239,7 +241,6 @@ MA.Release = new Class({
}
})
).inject(self.release_container);
release['el'] = item;
if(status.identifier == 'ignored' || status.identifier == 'failed' || status.identifier == 'snatched'){
@@ -249,13 +250,37 @@ MA.Release = new Class({
else if(!self.next_release && status.identifier == 'available'){
self.next_release = release;
}
var update_handle = function(notification) {
var q = self.movie.quality.getElement('.q_id' + release.quality_id),
status = Status.get(release.status_id),
new_status = Status.get(notification.data);
release.status_id = new_status.id
release.el.set('class', 'item ' + new_status.identifier);
var status_el = release.el.getElement('.release_status');
status_el.set('class', 'release_status ' + new_status.identifier);
status_el.set('text', new_status.identifier);
if(!q && (new_status.identifier == 'snatched' || new_status.identifier == 'seeding' || new_status.identifier == 'done'))
var q = self.addQuality(release.quality_id);
if(new_status && q && !q.hasClass(new_status.identifier)) {
q.removeClass(status.identifier).addClass(new_status.identifier);
q.set('title', q.get('title').replace(status.label, new_status.label));
}
}
App.addEvent('release.update_status.' + release.id, update_handle);
});
if(self.last_release)
self.release_container.getElement('#release_'+self.last_release.id).addClass('last_release');
self.release_container.getElements('#release_'+self.last_release.id).addClass('last_release');
if(self.next_release)
self.release_container.getElement('#release_'+self.next_release.id).addClass('next_release');
self.release_container.getElements('#release_'+self.next_release.id).addClass('next_release');
if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){
@@ -356,7 +381,7 @@ MA.Release = new Class({
},
get: function(release, type){
return release.info[type] || 'n/a'
return release.info[type] !== undefined ? release.info[type] : 'n/a'
},
download: function(release){
@@ -365,21 +390,25 @@ MA.Release = new Class({
var release_el = self.release_container.getElement('#release_'+release.id),
icon = release_el.getElement('.download.icon2');
icon.addClass('icon spinner').removeClass('download');
if(icon)
icon.addClass('icon spinner').removeClass('download');
Api.request('release.download', {
Api.request('release.manual_download', {
'data': {
'id': release.id
},
'onComplete': function(json){
icon.removeClass('icon spinner');
if(icon)
icon.removeClass('icon spinner');
if(json.success){
icon.addClass('completed');
if(icon)
icon.addClass('completed');
release_el.getElement('.release_status').set('text', 'snatched');
}
else
icon.addClass('attention').set('title', 'Something went wrong when downloading, please check logs.');
if(icon)
icon.addClass('attention').set('title', 'Something went wrong when downloading, please check logs.');
}
});
},
@@ -391,17 +420,6 @@ MA.Release = new Class({
'data': {
'id': release.id
},
'onComplete': function(){
var el = release.el;
if(el.hasClass('failed') || el.hasClass('ignored')){
el.removeClass('failed').removeClass('ignored');
el.getElement('.release_status').set('text', 'available');
}
else {
el.addClass('ignored');
el.getElement('.release_status').set('text', 'ignored');
}
}
})
},
@@ -688,7 +706,7 @@ MA.Refresh = new Class({
var self = this;
(e).preventDefault();
Api.request('movie.refresh', {
Api.request('media.refresh', {
'data': {
'id': self.movie.get('id')
}

View File

@@ -419,22 +419,25 @@
}
.movies .data .quality .available,
.movies .data .quality .snatched {
.movies .data .quality .snatched,
.movies .data .quality .seeding {
opacity: 1;
cursor: pointer;
}
.movies .data .quality .available { background-color: #578bc3; }
.movies .data .quality .failed { background-color: #a43d34; }
.movies .data .quality .failed,
.movies .data .quality .missing,
.movies .data .quality .ignored { background-color: #a43d34; }
.movies .data .quality .snatched { background-color: #a2a232; }
.movies .data .quality .seeding { background-color: #0a6819; }
.movies .data .quality .done {
background-color: #369545;
opacity: 1;
}
.movies .data .quality .seeding { background-color: #0a6819; }
.movies .data .quality .finish {
background-image: url('../images/sprite.png');
background-repeat: no-repeat;
background-image: url('../../images/sprite.png');
background-repeat: no-repeat;
background-position: 0 2px;
padding-left: 14px;
background-size: 14px
@@ -646,7 +649,7 @@
margin-top: 25px;
}
}
.trailer_container.hide {
height: 0 !important;
}
@@ -989,7 +992,7 @@
}
.movies .empty_wanted {
background-image: url('../images/emptylist.png');
background-image: url('../../images/emptylist.png');
background-position: 80% 0;
height: 750px;
width: 100%;
@@ -1029,7 +1032,7 @@
.movies .progress > div .folder {
display: inline-block;
padding: 5px 20px 5px 0;
white-space: nowrap;
white-space: nowrap;
text-overflow: ellipsis;
overflow: hidden;
width: 85%;

View File

@@ -181,18 +181,18 @@ var Movie = new Class({
// Add releases
if(self.data.releases)
self.data.releases.each(function(release){
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'done'))
if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}
});
Object.each(self.options.actions, function(action, key){
@@ -256,7 +256,8 @@ var Movie = new Class({
self.el.removeEvents('outerClick')
setTimeout(function(){
self.el.getElements('> :not(.data):not(.poster):not(.movie_container)').hide();
if(self.el)
self.el.getElements('> :not(.data):not(.poster):not(.movie_container)').hide();
}, 600);
self.data_container.removeClass('hide_right');
@@ -266,9 +267,10 @@ var Movie = new Class({
changeView: function(new_view){
var self = this;
self.el
.removeClass(self.view+'_view')
.addClass(new_view+'_view')
if(self.el)
self.el
.removeClass(self.view+'_view')
.addClass(new_view+'_view')
self.view = new_view;
},

View File

@@ -1,189 +1,4 @@
Block.Search = new Class({
Extends: BlockBase,
cache: {},
create: function(){
var self = this;
var focus_timer = 0;
self.el = new Element('div.search_form').adopt(
new Element('div.input').adopt(
self.input = new Element('input', {
'placeholder': 'Search & add a new movie',
'events': {
'keyup': self.keyup.bind(self),
'focus': function(){
if(focus_timer) clearTimeout(focus_timer);
self.el.addClass('focused')
if(this.get('value'))
self.hideResults(false)
},
'blur': function(){
focus_timer = (function(){
self.el.removeClass('focused')
}).delay(100);
}
}
}),
new Element('a.icon2', {
'events': {
'click': self.clear.bind(self),
'touchend': self.clear.bind(self)
}
})
),
self.result_container = new Element('div.results_container', {
'tween': {
'duration': 200
},
'events': {
'mousewheel': function(e){
(e).stopPropagation();
}
}
}).adopt(
self.results = new Element('div.results')
)
);
self.mask = new Element('div.mask').inject(self.result_container).fade('hide');
},
clear: function(e){
var self = this;
(e).preventDefault();
if(self.last_q === ''){
self.input.blur()
self.last_q = null;
}
else {
self.last_q = '';
self.input.set('value', '');
self.input.focus()
self.movies = []
self.results.empty()
self.el.removeClass('filled')
}
},
hideResults: function(bool){
var self = this;
if(self.hidden == bool) return;
self.el[bool ? 'removeClass' : 'addClass']('shown');
if(bool){
History.removeEvent('change', self.hideResults.bind(self, !bool));
self.el.removeEvent('outerClick', self.hideResults.bind(self, !bool));
}
else {
History.addEvent('change', self.hideResults.bind(self, !bool));
self.el.addEvent('outerClick', self.hideResults.bind(self, !bool));
}
self.hidden = bool;
},
keyup: function(e){
var self = this;
self.el[self.q() ? 'addClass' : 'removeClass']('filled')
if(self.q() != self.last_q){
if(self.api_request && self.api_request.isRunning())
self.api_request.cancel();
if(self.autocomplete_timer) clearTimeout(self.autocomplete_timer)
self.autocomplete_timer = self.autocomplete.delay(300, self)
}
},
autocomplete: function(){
var self = this;
if(!self.q()){
self.hideResults(true)
return
}
self.list()
},
list: function(){
var self = this,
q = self.q(),
cache = self.cache[q];
self.hideResults(false);
if(!cache){
self.mask.fade('in');
if(!self.spinner)
self.spinner = createSpinner(self.mask);
self.api_request = Api.request('movie.search', {
'data': {
'q': q
},
'onComplete': self.fill.bind(self, q)
})
}
else
self.fill(q, cache)
self.last_q = q;
},
fill: function(q, json){
var self = this;
self.cache[q] = json
self.movies = {}
self.results.empty()
Object.each(json.movies, function(movie){
var m = new Block.Search.Item(movie);
$(m).inject(self.results)
self.movies[movie.imdb || 'r-'+Math.floor(Math.random()*10000)] = m
if(q == movie.imdb)
m.showOptions()
});
// Calculate result heights
var w = window.getSize(),
rc = self.result_container.getCoordinates();
self.results.setStyle('max-height', (w.y - rc.top - 50) + 'px')
self.mask.fade('out')
},
loading: function(bool){
this.el[bool ? 'addClass' : 'removeClass']('loading')
},
q: function(){
return this.input.get('value').trim();
}
});
Block.Search.Item = new Class({
Block.Search.MovieItem = new Class({
Implements: [Options, Events],
@@ -201,7 +16,7 @@ Block.Search.Item = new Class({
var self = this,
info = self.info;
self.el = new Element('div.movie_result', {
self.el = new Element('div.media_result', {
'id': info.imdb
}).adopt(
self.thumbnail = info.images && info.images.poster.length > 0 ? new Element('img.thumbnail', {
@@ -292,7 +107,7 @@ Block.Search.Item = new Class({
self.options_el.empty();
self.options_el.adopt(
new Element('div.message', {
'text': json.added ? 'Movie successfully added.' : 'Movie didn\'t add properly. Check logs'
'text': json.success ? 'Movie successfully added.' : 'Movie didn\'t add properly. Check logs'
})
);
self.mask.fade('out');

View File

@@ -151,7 +151,7 @@ class MovieLibraryPlugin(LibraryBase):
else:
dates = library.info.get('release_date')
if dates and dates.get('expires', 0) < time.time() or not dates:
if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates:
dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
library.info.update({'release_date': dates })
db.commit()

View File

@@ -1,16 +1,14 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss
from couchpotato.core.helpers.variable import md5, getTitle, splitString, \
possibleTitles, getImdb
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import getTitle, possibleTitles, getImdb
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.base import SearcherBase
from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.core.settings.model import Movie, Release, ReleaseInfo
from couchpotato.core.settings.model import Media, Release
from couchpotato.environment import Env
from datetime import date
from sqlalchemy.exc import InterfaceError
import random
import re
import time
@@ -29,9 +27,10 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
addEvent('movie.searcher.all', self.searchAll)
addEvent('movie.searcher.all_view', self.searchAllView)
addEvent('movie.searcher.single', self.single)
addEvent('movie.searcher.correct_movie', self.correctMovie)
addEvent('movie.searcher.try_next_release', self.tryNextRelease)
addEvent('movie.searcher.could_be_released', self.couldBeReleased)
addEvent('searcher.correct_release', self.correctRelease)
addEvent('searcher.get_search_title', self.getSearchTitle)
addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = {
'desc': 'Marks the snatched results as ignored and try the next best release',
@@ -74,8 +73,8 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
db = get_session()
movies = db.query(Movie).filter(
Movie.status.has(identifier = 'active')
movies = db.query(Media).filter(
Media.status.has(identifier = 'active')
).all()
random.shuffle(movies)
@@ -117,6 +116,10 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
def single(self, movie, search_protocols = None, manual = False):
# movies don't contain 'type' yet, so just set to default here
if not movie.has_key('type'):
movie['type'] = 'movie'
# Find out search type
try:
if not search_protocols:
@@ -167,82 +170,20 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
log.info('Search for %s in %s', (default_title, quality_type['quality']['label']))
quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)
results = []
for search_protocol in search_protocols:
protocol_results = fireEvent('provider.search.%s.movie' % search_protocol, movie, quality, merge = True)
if protocol_results:
results += protocol_results
sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
if len(sorted_results) == 0:
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
if len(results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))
download_preference = self.conf('preferred_method', section = 'searcher')
if download_preference != 'both':
sorted_results = sorted(sorted_results, key = lambda k: k['protocol'][:3], reverse = (download_preference == 'torrent'))
# Check if movie isn't deleted while searching
if not db.query(Movie).filter_by(id = movie.get('id')).first():
if not db.query(Media).filter_by(id = movie.get('id')).first():
break
# Add them to this movie releases list
for nzb in sorted_results:
found_releases += fireEvent('release.create_from_search', results, movie, quality_type, single = True)
nzb_identifier = md5(nzb['url'])
found_releases.append(nzb_identifier)
rls = db.query(Release).filter_by(identifier = nzb_identifier).first()
if not rls:
rls = Release(
identifier = nzb_identifier,
movie_id = movie.get('id'),
quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id')
)
db.add(rls)
else:
[db.delete(old_info) for old_info in rls.info]
rls.last_edit = int(time.time())
db.commit()
for info in nzb:
try:
if not isinstance(nzb[info], (str, unicode, int, long, float)):
continue
rls_info = ReleaseInfo(
identifier = info,
value = toUnicode(nzb[info])
)
rls.info.append(rls_info)
except InterfaceError:
log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc()))
db.commit()
nzb['status_id'] = rls.status_id
for nzb in sorted_results:
if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and nzb.get('age') <= quality_type.get('wait_for', 0):
log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), nzb['name']))
continue
if nzb['status_id'] in [ignored_status.get('id'), failed_status.get('id')]:
log.info('Ignored: %s', nzb['name'])
continue
if nzb['score'] <= 0:
log.info('Ignored, score to low: %s', nzb['name'])
continue
downloaded = fireEvent('searcher.download', data = nzb, movie = movie, manual = manual, single = True)
if downloaded is True:
ret = True
break
elif downloaded != 'try_next':
break
# Try find a valid result and download it
if fireEvent('release.try_download_result', results, movie, quality_type, manual, single = True):
ret = True
# Remove releases that aren't found anymore
for release in movie.get('releases', []):
@@ -265,7 +206,11 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
return ret
def correctMovie(self, nzb = None, movie = None, quality = None, **kwargs):
def correctRelease(self, nzb = None, media = None, quality = None, **kwargs):
if media.get('type') != 'movie': return
media_title = fireEvent('searcher.get_search_title', media, single = True)
imdb_results = kwargs.get('imdb_results', False)
retention = Env.setting('retention', section = 'nzb')
@@ -274,50 +219,14 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
return False
movie_name = getTitle(movie['library'])
movie_words = re.split('\W+', simplifyString(movie_name))
nzb_name = simplifyString(nzb['name'])
nzb_words = re.split('\W+', nzb_name)
# Make sure it has required words
required_words = splitString(self.conf('required_words', section = 'searcher').lower())
try: required_words = list(set(required_words + splitString(movie['category']['required'].lower())))
except: pass
req_match = 0
for req_set in required_words:
req = splitString(req_set, '&')
req_match += len(list(set(nzb_words) & set(req))) == len(req)
if len(required_words) > 0 and req_match == 0:
log.info2('Wrong: Required word missing: %s', nzb['name'])
return False
# Ignore releases
ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
try: ignored_words = list(set(ignored_words + splitString(movie['category']['ignored'].lower())))
except: pass
ignored_match = 0
for ignored_set in ignored_words:
ignored = splitString(ignored_set, '&')
ignored_match += len(list(set(nzb_words) & set(ignored))) == len(ignored)
if len(ignored_words) > 0 and ignored_match:
log.info2("Wrong: '%s' contains 'ignored words'", (nzb['name']))
return False
# Ignore porn stuff
pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic', 'cock', 'dick']
pron_words = list(set(nzb_words) & set(pron_tags) - set(movie_words))
if pron_words:
log.info('Wrong: %s, probably pr0n', (nzb['name']))
# Check for required and ignored words
if not fireEvent('searcher.correct_words', nzb['name'], media, single = True):
return False
preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)
# Contains lower quality string
if fireEvent('searcher.contains_other_quality', nzb, movie_year = movie['library']['year'], preferred_quality = preferred_quality, single = True):
if fireEvent('searcher.contains_other_quality', nzb, movie_year = media['library']['year'], preferred_quality = preferred_quality, single = True):
log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
return False
@@ -347,23 +256,23 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
return True
# Check if nzb contains imdb link
if getImdb(nzb.get('description', '')) == movie['library']['identifier']:
if getImdb(nzb.get('description', '')) == media['library']['identifier']:
return True
for raw_title in movie['library']['titles']:
for raw_title in media['library']['titles']:
for movie_title in possibleTitles(raw_title['title']):
movie_words = re.split('\W+', simplifyString(movie_title))
if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True):
# if no IMDB link, at least check year range 1
if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], movie['library']['year'], 1, single = True):
if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 1, single = True):
return True
# if no IMDB link, at least check year
if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], movie['library']['year'], 0, single = True):
if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 0, single = True):
return True
log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], movie_name, movie['library']['year']))
log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['library']['year']))
return False
def couldBeReleased(self, is_pre_release, dates, year = None):
@@ -434,5 +343,9 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
log.error('Failed searching for next release: %s', traceback.format_exc())
return False
def getSearchTitle(self, media):
if media['type'] == 'movie':
return getTitle(media['library'])
class SearchSetupError(Exception):
pass

View File

@@ -3,7 +3,7 @@ from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie, Library
from couchpotato.core.settings.model import Media, Library
from couchpotato.environment import Env
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_
@@ -29,9 +29,9 @@ class Suggestion(Plugin):
if not movies or len(movies) == 0:
db = get_session()
active_movies = db.query(Movie) \
active_movies = db.query(Media) \
.options(joinedload_all('library')) \
.filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
.filter(or_(*[Media.status.has(identifier = s) for s in ['active', 'done']])).all()
movies = [x.library.identifier for x in active_movies]
if not ignored or len(ignored) == 0:
@@ -89,10 +89,10 @@ class Suggestion(Plugin):
active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)
db = get_session()
active_movies = db.query(Movie) \
active_movies = db.query(Media) \
.join(Library) \
.with_entities(Library.identifier) \
.filter(Movie.status_id.in_([active_status.get('id'), done_status.get('id')])).all()
.filter(Media.status_id.in_([active_status.get('id'), done_status.get('id')])).all()
movies = [x[0] for x in active_movies]
movies.extend(seen)
@@ -102,6 +102,6 @@ class Suggestion(Plugin):
if suggestions:
new_suggestions.extend(suggestions)
self.setCache('suggestion_cached', new_suggestions, timeout = 6048000)
self.setCache('suggestion_cached', new_suggestions, timeout = 3024000)
return new_suggestions

View File

@@ -0,0 +1,160 @@
.suggestions {
}
.suggestions > h2 {
height: 40px;
}
.suggestions .media_result {
display: inline-block;
width: 33.333%;
height: 150px;
}
@media all and (max-width: 960px) {
.suggestions .media_result {
width: 50%;
}
}
@media all and (max-width: 600px) {
.suggestions .media_result {
width: 100%;
}
}
.suggestions .media_result .data {
left: 100px;
background: #4e5969;
border: none;
}
.suggestions .media_result .data .info {
top: 10px;
left: 15px;
right: 15px;
bottom: 10px;
overflow: hidden;
}
.suggestions .media_result .data .info h2 {
white-space: normal;
max-height: 120px;
font-size: 18px;
line-height: 18px;
}
.suggestions .media_result .data .info .rating,
.suggestions .media_result .data .info .genres,
.suggestions .media_result .data .info .year {
position: static;
display: block;
padding: 0;
opacity: .6;
}
.suggestions .media_result .data .info .year {
margin: 10px 0 0;
}
.suggestions .media_result .data .info .rating {
font-size: 20px;
float: right;
margin-top: -20px;
}
.suggestions .media_result .data .info .rating:before {
content: "\e031";
font-family: 'Elusive-Icons';
font-size: 14px;
margin: 0 5px 0 0;
vertical-align: bottom;
}
.suggestions .media_result .data .info .genres {
font-size: 11px;
font-style: italic;
text-align: right;
}
.suggestions .media_result .data .info .plot {
display: block;
font-size: 11px;
overflow: hidden;
text-align: justify;
height: 100%;
z-index: 2;
top: 64px;
position: absolute;
background: #4e5969;
cursor: pointer;
transition: all .4s ease-in-out;
padding: 0 3px 10px 0;
}
.suggestions .media_result .data:before {
bottom: 0;
content: '';
display: block;
height: 10px;
right: 0;
left: 0;
bottom: 10px;
position: absolute;
background: linear-gradient(
0deg,
rgba(78, 89, 105, 1) 0%,
rgba(78, 89, 105, 0) 100%
);
z-index: 3;
pointer-events: none;
}
.suggestions .media_result .data .info .plot.full {
top: 0;
overflow: auto;
}
.suggestions .media_result .data {
cursor: default;
}
.suggestions .media_result .options {
left: 100px;
}
.suggestions .media_result .options select[name=title] { width: 100%; }
.suggestions .media_result .options select[name=profile] { width: 100%; }
.suggestions .media_result .options select[name=category] { width: 100%; }
.suggestions .media_result .button {
position: absolute;
margin: 2px 0 0 0;
right: 15px;
bottom: 15px;
}
.suggestions .media_result .thumbnail {
width: 100px;
}
.suggestions .media_result .actions {
position: absolute;
top: 10px;
right: 10px;
display: none;
width: 140px;
}
.suggestions .media_result:hover .actions {
display: block;
}
.suggestions .media_result:hover h2 .title {
opacity: 0;
}
.suggestions .media_result .data.open .actions {
display: none;
}
.suggestions .media_result .actions a {
margin-left: 10px;
vertical-align: middle;
}

View File

@@ -0,0 +1,153 @@
// Dashboard panel showing movie suggestions ("You might like these").
// Fetches suggestions from the CouchPotato API and renders each movie as
// a search-result card with add / IMDB / trailer / ignore / seen actions.
// Depends on MooTools (Class, Element, Options, Events) and the global
// Api, Block.Search.MovieItem and MA.* helpers.
var SuggestList = new Class({

	Implements: [Options, Events],

	initialize: function(options){
		var self = this;
		self.setOptions(options);

		self.create();
	},

	// Build the container element, wire delegated click handlers for the
	// per-movie action icons, and kick off the initial API request.
	create: function(){
		var self = this;

		self.el = new Element('div.suggestions', {
			'events': {
				// "Don't suggest again": remove the card and tell the API
				// to ignore this IMDB id; refill when the call completes.
				'click:relay(a.delete)': function(e, el){
					(e).stop();
					$(el).getParent('.media_result').destroy();
					Api.request('suggestion.ignore', {
						'data': {
							'imdb': el.get('data-ignore')
						},
						'onComplete': self.fill.bind(self)
					});
				},
				// "Seen it": same as ignore, but also marks the movie seen.
				'click:relay(a.eye-open)': function(e, el){
					(e).stop();
					$(el).getParent('.media_result').destroy();
					Api.request('suggestion.ignore', {
						'data': {
							'imdb': el.get('data-seen'),
							'mark_seen': 1
						},
						'onComplete': self.fill.bind(self)
					});
				}
			}
		}).grab(
			new Element('h2', {
				'text': 'You might like these'
			})
		);

		// Initial load of the suggestion list.
		self.api_request = Api.request('suggestion.view', {
			'onComplete': self.fill.bind(self)
		});
	},

	// Render the suggestions returned by the API. Hides the whole panel
	// when the response is empty, and fires 'loaded' either way.
	fill: function(json){

		var self = this;

		if(!json || json.count == 0){
			self.el.hide();
		}
		else {

			Object.each(json.suggestions, function(movie){

				var m = new Block.Search.MovieItem(movie, {
					'onAdded': function(){
						self.afterAdded(m, movie)
					}
				});
				// Attach the action icon strip to the card.
				m.data_container.grab(
					new Element('div.actions').adopt(
						new Element('a.add.icon2', {
							'title': 'Add movie with your default quality',
							'data-add': movie.imdb,
							'events': {
								'click': m.showOptions.bind(m)
							}
						}),
						$(new MA.IMDB(m)),
						$(new MA.Trailer(m, {
							'height': 150
						})),
						new Element('a.delete.icon2', {
							'title': 'Don\'t suggest this movie again',
							'data-ignore': movie.imdb
						}),
						new Element('a.eye-open.icon2', {
							'title': 'Seen it, like it, don\'t add',
							'data-seen': movie.imdb
						})
					)
				);
				// Suggestions shouldn't open on card click like search results.
				m.data_container.removeEvents('click');

				var plot = false;
				if(m.info.plot && m.info.plot.length > 0)
					plot = m.info.plot;

				// Add rating (only when IMDB rating data looks valid),
				// first 3 genres, and a click-to-expand plot element.
				m.info_container.adopt(
					m.rating = m.info.rating && m.info.rating.imdb.length == 2 && parseFloat(m.info.rating.imdb[0]) > 0 ? new Element('span.rating', {
						'text': parseFloat(m.info.rating.imdb[0]),
						'title': parseInt(m.info.rating.imdb[1]) + ' votes'
					}) : null,
					m.genre = m.info.genres && m.info.genres.length > 0 ? new Element('span.genres', {
						'text': m.info.genres.slice(0, 3).join(', ')
					}) : null,
					m.plot = plot ? new Element('span.plot', {
						'text': plot,
						'events': {
							// Toggle truncated/full plot view (see .plot.full CSS).
							'click': function(){
								this.toggleClass('full')
							}
						}
					}) : null
				)

				$(m).inject(self.el);

			});

		}

		self.fireEvent('loaded');

	},

	// After a movie is added, leave the card visible for 3s so the user
	// sees the state change, then remove it and drop it from future
	// suggestions (remove_only: don't mark it ignored or seen).
	afterAdded: function(m, movie){
		var self = this;

		setTimeout(function(){
			$(m).destroy();
			Api.request('suggestion.ignore', {
				'data': {
					'imdb': movie.imdb,
					'remove_only': true
				},
				'onComplete': self.fill.bind(self)
			});
		}, 3000);

	},

	toElement: function(){
		return this.el;
	}

})

View File

@@ -1,4 +1,4 @@
config = {
config = [{
'name': 'notification_providers',
'groups': [
{
@@ -10,4 +10,4 @@ config = {
'options': [],
},
],
}
}]

View File

@@ -198,13 +198,16 @@ class CoreNotifier(Notification):
def removeListener(self, callback):
self.m_lock.acquire()
new_listeners = []
for list_tuple in self.listeners:
try:
listener, last_id = list_tuple
if listener == callback:
self.listeners.remove(list_tuple)
if listener != callback:
new_listeners.append(list_tuple)
except:
log.debug('Failed removing listener: %s', traceback.format_exc())
self.listeners = new_listeners
self.m_lock.release()
def cleanMessages(self):

View File

@@ -28,12 +28,23 @@ config = [{
'name': 'smtp_server',
'label': 'SMTP server',
},
{ 'name': 'smtp_port',
'label': 'SMTP server port',
'default': '25',
'type': 'int',
},
{
'name': 'ssl',
'label': 'Enable SSL',
'default': 0,
'type': 'bool',
},
{
'name': 'starttls',
'label': 'Enable StartTLS',
'default': 0,
'type': 'bool',
},
{
'name': 'smtp_user',
'label': 'SMTP user',

View File

@@ -2,6 +2,7 @@ from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from couchpotato.environment import Env
from email.mime.text import MIMEText
import smtplib
import traceback
@@ -21,18 +22,28 @@ class Email(Notification):
smtp_server = self.conf('smtp_server')
smtp_user = self.conf('smtp_user')
smtp_pass = self.conf('smtp_pass')
smtp_port = self.conf('smtp_port')
starttls = self.conf('starttls')
# Make the basic message
message = MIMEText(toUnicode(message))
message = MIMEText(toUnicode(message), _charset = Env.get('encoding'))
message['Subject'] = self.default_title
message['From'] = from_address
message['To'] = to_address
try:
# Open the SMTP connection, via SSL if requested
log.debug("Connecting to host %s on port %s" % (smtp_server, smtp_port))
log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled"))
mailserver = smtplib.SMTP_SSL(smtp_server) if ssl == 1 else smtplib.SMTP(smtp_server)
if (starttls):
log.debug("Using StartTLS to initiate the connection with the SMTP server")
mailserver.starttls()
# Say hello to the server
mailserver.ehlo()
# Check too see if an login attempt should be attempted
if len(smtp_user) > 0:
log.debug("Logging on to SMTP server using username \'%s\'%s", (smtp_user, " and a password" if len(smtp_pass) > 0 else ""))

View File

@@ -1,36 +0,0 @@
from .main import Notifo
def start():
    """Plugin entry point: instantiate the Notifo notifier."""
    return Notifo()


# Settings-UI description of the Notifo notifier (tab, group, options).
config = [{
    'name': 'notifo',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'notifo',
            'description': 'Keep in mind that Notifo service will end soon.',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'username',
                },
                {
                    'name': 'api_key',
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]

View File

@@ -1,39 +0,0 @@
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import base64
import json
import traceback
log = CPLog(__name__)
class Notifo(Notification):
    """Push notifications through the (discontinued) Notifo service.

    Sends `message` to the configured account using HTTP basic auth built
    from the `username` and `api_key` settings.
    """

    url = 'https://api.notifo.com/v1/send_notification'

    def notify(self, message = '', data = None, listener = None):
        """Send `message` to Notifo; returns True on success, False otherwise."""
        if not data: data = {}

        try:
            params = {
                'label': self.default_title,
                'msg': toUnicode(message),
            }

            # Basic auth header; encodestring appends a newline, strip it.
            headers = {
                'Authorization': "Basic %s" % base64.encodestring('%s:%s' % (self.conf('username'), self.conf('api_key')))[:-1]
            }

            handle = self.urlopen(self.url, params = params, headers = headers)
            result = json.loads(handle)

            if result['status'] != 'success' or result['response_message'] != 'OK':
                # Include the API response so the logged traceback explains
                # why the call failed (was a bare, message-less Exception).
                raise Exception('Notifo api returned unsuccessful result: %s' % result)
        except:
            log.error('Notification failed: %s', traceback.format_exc())
            return False

        log.info('Notifo notification successful.')
        return True

11
couchpotato/core/notifications/plex/__init__.py Normal file → Executable file
View File

@@ -17,10 +17,15 @@ config = [{
'type': 'enabler',
},
{
'name': 'host',
'name': 'media_server',
'label': 'Media Server',
'default': 'localhost',
'description': 'Default should be on localhost',
'advanced': True,
'description': 'Hostname/IP, default localhost'
},
{
'name': 'clients',
'default': '',
'description': 'Comma separated list of client names\'s (computer names). Top right when you start Plex'
},
{
'name': 'on_snatch',

View File

@@ -0,0 +1,85 @@
import json
from couchpotato import CPLog
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
import requests
log = CPLog(__name__)
class PlexClientProtocol(object):
    """Base class for the protocols used to notify a Plex client.

    Subclasses register on the 'notify.plex.notifyClient' event and
    implement notify(client, message).
    """

    def __init__(self, plex):
        # Parent Plex notification plugin (provides urlopen, config, ...).
        self.plex = plex

        addEvent('notify.plex.notifyClient', self.notify)

    def notify(self, client, message):
        raise NotImplementedError()
class PlexClientHTTP(PlexClientProtocol):
    """Notify clients speaking the legacy XBMC HTTP API ('xbmchttp')."""

    def request(self, command, client):
        """GET the client's xbmcCmds endpoint with `command` url-encoded.

        Returns True on success, False when the request raised.
        """
        url = 'http://%s:%s/xbmcCmds/xbmcHttp/?%s' % (
            client['address'],
            client['port'],
            tryUrlencode(command)
        )

        headers = {}

        try:
            self.plex.urlopen(url, headers = headers, timeout = 3, show_error = False)
        except Exception, err:
            log.error("Couldn't sent command to Plex: %s", err)
            return False

        return True

    def notify(self, client, message):
        # Only handle clients using the legacy HTTP protocol; returning
        # None lets other protocol handlers pick the client up.
        if client.get('protocol') != 'xbmchttp':
            return None

        data = {
            'command': 'ExecBuiltIn',
            'parameter': 'Notification(CouchPotato, %s)' % message
        }

        return self.request(data, client)
class PlexClientJSON(PlexClientProtocol):
    """Notify clients speaking JSON-RPC ('xbmcjson' or 'plex')."""

    def request(self, method, params, client):
        """POST a JSON-RPC 2.0 request to the client's /jsonrpc endpoint.

        Returns True on success, False when the request raised.
        """
        log.debug('sendJSON("%s", %s, %s)', (method, params, client))
        url = 'http://%s:%s/jsonrpc' % (
            client['address'],
            client['port']
        )

        headers = {
            'Content-Type': 'application/json'
        }

        request = {
            'id': 1,
            'jsonrpc': '2.0',
            'method': method,
            'params': params
        }

        try:
            requests.post(url, headers = headers, timeout = 3, data = json.dumps(request))
        except Exception, err:
            log.error("Couldn't sent command to Plex: %s", err)
            return False

        return True

    def notify(self, client, message):
        # Only handle JSON-RPC capable clients; None lets other handlers try.
        if client.get('protocol') not in ['xbmcjson', 'plex']:
            return None

        params = {
            'title': 'CouchPotato',
            'message': message
        }

        return self.request('GUI.ShowNotification', params, client)

114
couchpotato/core/notifications/plex/main.py Normal file → Executable file
View File

@@ -1,78 +1,64 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import cleanHost, splitString
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from urllib2 import URLError
from urlparse import urlparse
from xml.dom import minidom
import traceback
from .client import PlexClientHTTP, PlexClientJSON
from .server import PlexServer
log = CPLog(__name__)
class Plex(Notification):
http_time_between_calls = 0
def __init__(self):
super(Plex, self).__init__()
self.server = PlexServer(self)
self.client_protocols = {
'http': PlexClientHTTP(self),
'json': PlexClientJSON(self)
}
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = None):
def addToLibrary(self, message = None, group = {}):
if self.isDisabled(): return
if not group: group = {}
log.info('Sending notification to Plex')
hosts = self.getHosts(port = 32400)
return self.server.refresh()
for host in hosts:
def getClientNames(self):
return [
x.strip().lower()
for x in self.conf('clients').split(',')
]
source_type = ['movie']
base_url = '%s/library/sections' % host
refresh_url = '%s/%%s/refresh' % base_url
def notifyClients(self, message, client_names):
success = True
try:
sections_xml = self.urlopen(base_url)
xml_sections = minidom.parseString(sections_xml)
sections = xml_sections.getElementsByTagName('Directory')
for client_name in client_names:
for s in sections:
if s.getAttribute('type') in source_type:
url = refresh_url % s.getAttribute('key')
self.urlopen(url)
client_success = False
client = self.server.clients.get(client_name)
except:
log.error('Plex library update failed for %s, Media Server not running: %s', (host, traceback.format_exc(1)))
return False
if client and client['found']:
client_success = fireEvent('notify.plex.notifyClient', client, message, single = True)
return True
if not client_success:
if self.server.staleClients() or not client:
log.info('Failed to send notification to client "%s". '
'Client list is stale, updating the client list and retrying.', client_name)
self.server.updateClients(self.getClientNames())
else:
log.warning('Failed to send notification to client %s, skipping this time', client_name)
success = False
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
return success
hosts = self.getHosts(port = 3000)
successful = 0
for host in hosts:
if self.send({'command': 'ExecBuiltIn', 'parameter': 'Notification(CouchPotato, %s)' % message}, host):
successful += 1
return successful == len(hosts)
def send(self, command, host):
url = '%s/xbmcCmds/xbmcHttp/?%s' % (host, tryUrlencode(command))
headers = {}
try:
self.urlopen(url, headers = headers, show_error = False)
except URLError:
log.error("Couldn't sent command to Plex, probably just running Media Server")
return False
except:
log.error("Couldn't sent command to Plex: %s", traceback.format_exc())
return False
log.info('Plex notification to %s successful.', host)
return True
def notify(self, message = '', data = {}, listener = None):
return self.notifyClients(message, self.getClientNames())
def test(self, **kwargs):
@@ -80,28 +66,12 @@ class Plex(Notification):
log.info('Sending test to %s', test_type)
success = self.notify(
notify_success = self.notify(
message = self.test_message,
data = {},
listener = 'test'
)
success2 = self.addToLibrary()
return {
'success': success or success2
}
refresh_success = self.addToLibrary()
def getHosts(self, port = None):
raw_hosts = splitString(self.conf('host'))
hosts = []
for h in raw_hosts:
h = cleanHost(h)
p = urlparse(h)
h = h.rstrip('/')
if port and not p.port:
h += ':%s' % port
hosts.append(h)
return hosts
return {'success': notify_success or refresh_success}

View File

@@ -0,0 +1,114 @@
from datetime import timedelta, datetime
from couchpotato.core.helpers.variable import cleanHost
from couchpotato import CPLog
from urlparse import urlparse
import traceback
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
log = CPLog(__name__)
class PlexServer(object):
    """Talks to the Plex Media Server: discovers clients and refreshes
    library sections on the server configured as 'media_server'."""

    def __init__(self, plex):
        # Parent Plex notification plugin (provides urlopen and config).
        self.plex = plex

        # Known clients keyed by lowercase client name; filled by
        # updateClients(). Entries may have 'found': False placeholders.
        self.clients = {}
        self.last_clients_update = None

    def staleClients(self):
        """Return True when the client cache is older than 15 minutes
        or has never been filled."""
        if not self.last_clients_update:
            return True

        return self.last_clients_update + timedelta(minutes=15) < datetime.now()

    def request(self, path, data_type='xml'):
        """GET `path` on the media server (port 32400).

        Returns a parsed ElementTree root when data_type is 'xml',
        otherwise the raw response body; None if no server is configured.
        """
        if not self.plex.conf('media_server'):
            log.warning("Plex media server hostname is required")
            return None

        if path.startswith('/'):
            path = path[1:]

        data = self.plex.urlopen('%s/%s' % (
            self.createHost(self.plex.conf('media_server'), port = 32400),
            path
        ))

        if data_type == 'xml':
            return etree.fromstring(data)
        else:
            return data

    def updateClients(self, client_names):
        """Rebuild the client cache for the given lowercase client names.

        Names the server doesn't report get a {'found': False} placeholder
        so later lookups don't KeyError. NOTE: mutates the `client_names`
        list passed in — found names are removed from it.
        """
        log.info('Searching for clients on Plex Media Server')

        self.clients = {}

        result = self.request('clients')
        if not result:
            return

        found_clients = [
            c for c in result.findall('Server')
            if c.get('name') and c.get('name').lower() in client_names
        ]

        # Store client details in cache
        for client in found_clients:
            name = client.get('name').lower()

            self.clients[name] = {
                'name': client.get('name'),
                'found': True,
                'address': client.get('address'),
                'port': client.get('port'),
                'protocol': client.get('protocol', 'xbmchttp')
            }

            client_names.remove(name)

        # Store dummy info for missing clients
        for client_name in client_names:
            self.clients[client_name] = {
                'found': False
            }

        if len(client_names) > 0:
            log.debug('Unable to find clients: %s', ', '.join(client_names))

        self.last_clients_update = datetime.now()

    def refresh(self, section_types=None):
        """Trigger a refresh of all library sections whose type is in
        `section_types` (defaults to ['movie']). Returns True on success,
        False when the server couldn't be reached."""
        if not section_types:
            section_types = ['movie']

        sections = self.request('library/sections')

        try:
            for section in sections.findall('Directory'):
                if section.get('type') not in section_types:
                    continue

                # 'text': the refresh endpoint doesn't return XML.
                self.request('library/sections/%s/refresh' % section.get('key'), 'text')
        except:
            log.error('Plex library update failed for %s, Media Server not running: %s',
                (self.plex.conf('media_server'), traceback.format_exc(1)))
            return False

        return True

    def createHost(self, host, port = None):
        """Normalize `host` into a URL, appending `port` only when the
        host doesn't already specify one."""
        h = cleanHost(host)
        p = urlparse(h)
        h = h.rstrip('/')

        if port and not p.port:
            h += ':%s' % port

        return h

View File

@@ -0,0 +1,52 @@
from .main import Xmpp
def start():
    """Plugin entry point: instantiate the Xmpp notifier."""
    return Xmpp()
# Settings-UI description of the XMPP notifier (tab, group, options).
config = [{
    'name': 'xmpp',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'xmpp',
            'label': 'XMPP',
            # Fixed: the key previously contained a stray backtick
            # ('description`'), so the settings UI never saw this text.
            'description': 'for Jabber, Hangouts (Google Talk), AIM...',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'username',
                    'description': 'User sending the message. For Hangouts, e-mail of a single-step authentication Google account.',
                },
                {
                    'name': 'password',
                    # NOTE(review): other notifiers use lowercase type names
                    # ('int', 'bool', 'enabler'); confirm whether 'Password'
                    # should be 'password'. Left unchanged on purpose.
                    'type': 'Password',
                },
                {
                    'name': 'hostname',
                    'default': 'talk.google.com',
                },
                {
                    'name': 'to',
                    'description': 'Username (or e-mail for Hangouts) of the person to send the messages to.',
                },
                {
                    'name': 'port',
                    'type': 'int',
                    'default': 5222,
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]

View File

@@ -0,0 +1,43 @@
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from time import sleep
import traceback
import xmpp
log = CPLog(__name__)
class Xmpp(Notification):
    """Send notifications over XMPP (Jabber, Google Talk/Hangouts, AIM...)
    using the xmpppy library."""

    def notify(self, message = '', data = None, listener = None):
        """Connect, authenticate, send `message` as a chat message to the
        configured 'to' address, then disconnect. Returns True on success."""
        if not data: data = {}

        try:
            jid = xmpp.protocol.JID(self.conf('username'))
            client = xmpp.Client(jid.getDomain(), debug = [])

            # Connect
            if not client.connect(server = (self.conf('hostname'), self.conf('port'))):
                log.error('XMPP failed: Connection to server failed.')
                return False

            # Authenticate
            if not client.auth(jid.getNode(), self.conf('password'), resource = jid.getResource()):
                log.error('XMPP failed: Failed to authenticate.')
                return False

            # Send message
            client.send(xmpp.protocol.Message(to = self.conf('to'), body = message, typ = 'chat'))

            # Disconnect
            # some older servers will not send the message if you disconnect immediately after sending
            sleep(1)
            client.disconnect()

            log.info('XMPP notifications sent.')
            return True

        except:
            log.error('XMPP failed: %s', traceback.format_exc())
            return False

View File

@@ -1,7 +1,7 @@
from StringIO import StringIO
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString, \
toUnicode
toUnicode, sp
from couchpotato.core.helpers.variable import getExt, md5, isLocalIP
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
@@ -121,7 +121,7 @@ class Plugin(object):
# http request
def urlopen(self, url, timeout = 30, params = None, headers = None, opener = None, multipart = False, show_error = True):
url = ss(url)
url = urllib2.quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
if not headers: headers = {}
if not params: params = {}
@@ -291,10 +291,10 @@ class Plugin(object):
def createNzbName(self, data, movie):
tag = self.cpTag(movie)
return '%s%s' % (toSafeString(data.get('name')[:127 - len(tag)]), tag)
return '%s%s' % (toSafeString(toUnicode(data.get('name'))[:127 - len(tag)]), tag)
def createFileName(self, data, filedata, movie):
name = os.path.join(self.createNzbName(data, movie))
name = sp(os.path.join(self.createNzbName(data, movie)))
if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
return '%s.%s' % (name, 'rar')
return '%s.%s' % (name, data.get('protocol'))

View File

@@ -4,7 +4,7 @@ from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie, Category
from couchpotato.core.settings.model import Media, Category
log = CPLog(__name__)
@@ -113,7 +113,7 @@ class CategoryPlugin(Plugin):
def removeFromMovie(self, category_id):
db = get_session()
movies = db.query(Movie).filter(Movie.category_id == category_id).all()
movies = db.query(Media).filter(Media.category_id == category_id).all()
if len(movies) > 0:
for movie in movies:

View File

@@ -0,0 +1,6 @@
from .main import Custom
def start():
    """Plugin entry point: instantiate the Custom plugin loader."""
    return Custom()


# No user-facing settings for this plugin.
config = []

View File

@@ -0,0 +1,21 @@
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
import os
log = CPLog(__name__)
class Custom(Plugin):
    """Creates a 'custom_plugins' package inside the data directory on
    startup, so users can drop their own plugins there."""

    def __init__(self):
        addEvent('app.load', self.createStructure)

    def createStructure(self):
        """Ensure data_dir/custom_plugins exists and is an importable
        Python package (has an __init__.py)."""
        custom_dir = os.path.join(Env.get('data_dir'), 'custom_plugins')

        if not os.path.isdir(custom_dir):
            self.makeDir(custom_dir)
            self.createFile(os.path.join(custom_dir, '__init__.py'), '# Don\'t remove this file')

View File

@@ -4,9 +4,10 @@ from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie, Library, LibraryTitle
from couchpotato.core.settings.model import Media, Library, LibraryTitle, \
Release
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import asc, or_
import random as rndm
import time
@@ -48,12 +49,14 @@ class Dashboard(Plugin):
limit = tryInt(splt[0])
# Get all active movies
active_status = fireEvent('status.get', ['active'], single = True)
q = db.query(Movie) \
active_status, ignored_status = fireEvent('status.get', ['active', 'ignored'], single = True)
q = db.query(Media) \
.join(Library) \
.filter(Movie.status_id == active_status.get('id')) \
.with_entities(Movie.id, Movie.profile_id, Library.info, Library.year) \
.group_by(Movie.id)
.outerjoin(Media.releases) \
.filter(Media.status_id == active_status.get('id')) \
.with_entities(Media.id, Media.profile_id, Library.info, Library.year) \
.group_by(Media.id) \
.filter(or_(Release.id == None, Release.status_id == ignored_status.get('id')))
if not random:
q = q.join(LibraryTitle) \
@@ -98,11 +101,11 @@ class Dashboard(Plugin):
if len(movie_ids) > 0:
# Get all movie information
movies_raw = db.query(Movie) \
movies_raw = db.query(Media) \
.options(joinedload_all('library.titles')) \
.options(joinedload_all('library.files')) \
.options(joinedload_all('files')) \
.filter(Movie.id.in_(movie_ids)) \
.filter(Media.id.in_(movie_ids)) \
.all()
# Create dict by movie id

View File

@@ -222,9 +222,10 @@ class Manage(Plugin):
groups = fireEvent('scanner.scan', folder = folder, files = files, single = True)
for group in groups.itervalues():
if group['library'] and group['library'].get('identifier'):
fireEvent('release.add', group = group)
if groups:
for group in groups.itervalues():
if group['library'] and group['library'].get('identifier'):
fireEvent('release.add', group = group)
def getDiskSpace(self):

View File

@@ -4,7 +4,7 @@ from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Profile, ProfileType, Movie
from couchpotato.core.settings.model import Profile, ProfileType, Media
from sqlalchemy.orm import joinedload_all
log = CPLog(__name__)
@@ -38,7 +38,7 @@ class ProfilePlugin(Plugin):
active_status = fireEvent('status.get', 'active', single = True)
db = get_session()
movies = db.query(Movie).filter(Movie.status_id == active_status.get('id'), Movie.profile == None).all()
movies = db.query(Media).filter(Media.status_id == active_status.get('id'), Media.profile == None).all()
if len(movies) > 0:
default_profile = self.default()

View File

@@ -1,7 +1,7 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.encoding import toUnicode, ss
from couchpotato.core.helpers.variable import mergeDicts, md5, getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@@ -17,12 +17,12 @@ class QualityPlugin(Plugin):
qualities = [
{'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]},
{'identifier': '1080p', 'hd': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts']},
{'identifier': '720p', 'hd': True, 'size': (3000, 10000), 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts']},
{'identifier': '1080p', 'hd': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts', 'x264', 'h264']},
{'identifier': '720p', 'hd': True, 'size': (3000, 10000), 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':['avi'], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': [], 'allow': [], 'ext':['iso', 'img'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts']},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': ['br2dvd'], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r')]},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvd'], 'ext':['avi', 'mpg', 'mpeg'], 'tags': ['webrip', ('web', 'rip')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':['avi', 'mpg', 'mpeg'], 'tags': ['webrip', ('web', 'rip')]},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
@@ -30,6 +30,9 @@ class QualityPlugin(Plugin):
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
cached_qualities = None
cached_order = None
def __init__(self):
addEvent('quality.all', self.all)
addEvent('quality.single', self.single)
@@ -59,6 +62,9 @@ class QualityPlugin(Plugin):
def all(self):
if self.cached_qualities:
return self.cached_qualities
db = get_session()
qualities = db.query(Quality).all()
@@ -68,6 +74,7 @@ class QualityPlugin(Plugin):
q = mergeDicts(self.getQuality(quality.identifier), quality.to_dict())
temp.append(q)
self.cached_qualities = temp
return temp
def single(self, identifier = ''):
@@ -96,6 +103,8 @@ class QualityPlugin(Plugin):
setattr(quality, kwargs.get('value_type'), kwargs.get('value'))
db.commit()
self.cached_qualities = None
return {
'success': True
}
@@ -161,68 +170,118 @@ class QualityPlugin(Plugin):
if cached and len(extra) == 0: return cached
qualities = self.all()
# Start with 0
score = {}
for quality in qualities:
score[quality.get('identifier')] = 0
for cur_file in files:
words = re.split('\W+', cur_file.lower())
found = {}
for quality in qualities:
contains = self.containsTag(quality, words, cur_file)
if contains:
found[quality['identifier']] = True
for quality in qualities:
# Check identifier
if quality['identifier'] in words:
if len(found) == 0 or len(found) == 1 and found.get(quality['identifier']):
log.debug('Found via identifier "%s" in %s', (quality['identifier'], cur_file))
return self.setCache(cache_key, quality)
# Check alt and tags
contains = self.containsTag(quality, words, cur_file)
if contains:
return self.setCache(cache_key, quality)
contains_score = self.containsTagScore(quality, words, cur_file)
self.calcScore(score, quality, contains_score)
# Try again with loose testing
quality = self.guessLoose(cache_key, files = files, extra = extra)
if quality:
return self.setCache(cache_key, quality)
for quality in qualities:
loose_score = self.guessLooseScore(quality, files = files, extra = extra)
self.calcScore(score, quality, loose_score)
# Return nothing if all scores are 0
has_non_zero = 0
for s in score:
if score[s] > 0:
has_non_zero += 1
if not has_non_zero:
return None
heighest_quality = max(score, key = score.get)
if heighest_quality:
for quality in qualities:
if quality.get('identifier') == heighest_quality:
return self.setCache(cache_key, quality)
log.debug('Could not identify quality for: %s', files)
return None
def containsTag(self, quality, words, cur_file = ''):
def containsTagScore(self, quality, words, cur_file = ''):
cur_file = ss(cur_file)
score = 0
points = {
'identifier': 10,
'label': 10,
'alternative': 9,
'tags': 9,
'ext': 3,
}
# Check alt and tags
for tag_type in ['alternative', 'tags']:
for alt in quality.get(tag_type, []):
if isinstance(alt, tuple) and '.'.join(alt) in '.'.join(words):
for tag_type in ['identifier', 'alternative', 'tags', 'label']:
qualities = quality.get(tag_type, [])
qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities
for alt in qualities:
if (isinstance(alt, tuple)):
if len(set(words) & set(alt)) == len(alt):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type)
elif len(set(words) & set(alt)) > 0:
partial = list(set(words) & set(alt))[0]
if len(partial) > 2:
log.debug('Found %s via partial %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type) / 3
if (isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower()):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
return True
score += points.get(tag_type) / 2
if list(set(quality.get(tag_type, [])) & set(words)):
if list(set(qualities) & set(words)):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
return True
score += points.get(tag_type)
return
# Check extention
for ext in quality.get('ext', []):
if ext == words[-1]:
log.debug('Found %s extension in %s', (ext, cur_file))
score += points['ext']
def guessLoose(self, cache_key, files = None, extra = None):
return score
def guessLooseScore(self, quality, files = None, extra = None):
score = 0
if extra:
for quality in self.all():
# Check width resolution, range 20
if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0)))
return self.setCache(cache_key, quality)
# Check width resolution, range 20
if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0)))
score += 5
# Check height resolution, range 20
if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20):
log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0)))
return self.setCache(cache_key, quality)
# Check height resolution, range 20
if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20):
log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0)))
score += 5
if 480 <= extra.get('resolution_width', 0) <= 720:
log.debug('Found as dvdrip')
return self.setCache(cache_key, self.single('dvdrip'))
if quality.get('identifier') == 'dvdrip' and 480 <= extra.get('resolution_width', 0) <= 720:
log.debug('Add point for correct dvdrip resolutions')
score += 1
return None
return score
def calcScore(self, score, quality, add_score):
score[quality['identifier']] += add_score
# Set order for allow calculation (and cache)
if not self.cached_order:
self.cached_order = {}
for q in self.qualities:
self.cached_order[q.get('identifier')] = self.qualities.index(q)
if add_score != 0:
for allow in quality.get('allow', []):
score[allow] -= 40 if self.cached_order[allow] < self.cached_order[quality['identifier']] else 5

View File

@@ -1,14 +1,20 @@
from couchpotato import get_session
from couchpotato import get_session, md5
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.encoding import ss, toUnicode
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.scanner.main import Scanner
from couchpotato.core.settings.model import File, Release as Relea, Movie
from couchpotato.core.settings.model import File, Release as Relea, Media, \
ReleaseInfo
from couchpotato.environment import Env
from inspect import ismethod, isfunction
from sqlalchemy.exc import InterfaceError
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import and_, or_
import os
import time
import traceback
log = CPLog(__name__)
@@ -19,7 +25,7 @@ class Release(Plugin):
def __init__(self):
addEvent('release.add', self.add)
addApiView('release.download', self.download, docs = {
addApiView('release.manual_download', self.manualDownload, docs = {
'desc': 'Send a release manually to the downloaders',
'params': {
'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
@@ -44,9 +50,45 @@ class Release(Plugin):
}
})
addEvent('release.download', self.download)
addEvent('release.try_download_result', self.tryDownloadResult)
addEvent('release.create_from_search', self.createFromSearch)
addEvent('release.for_movie', self.forMovie)
addEvent('release.delete', self.delete)
addEvent('release.clean', self.clean)
addEvent('release.update_status', self.updateStatus)
# Clean releases that didn't have activity in the last week
addEvent('app.load', self.cleanDone)
fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 4)
def cleanDone(self):
log.debug('Removing releases from dashboard')
now = time.time()
week = 262080
done_status, available_status, snatched_status, downloaded_status, ignored_status = \
fireEvent('status.get', ['done', 'available', 'snatched', 'downloaded', 'ignored'], single = True)
db = get_session()
# get movies last_edit more than a week ago
media = db.query(Media) \
.filter(Media.status_id == done_status.get('id'), Media.last_edit < (now - week)) \
.all()
for item in media:
for rel in item.releases:
# Remove all available releases
if rel.status_id in [available_status.get('id')]:
fireEvent('release.delete', id = rel.id, single = True)
# Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the move
elif rel.status_id in [snatched_status.get('id'), downloaded_status.get('id')]:
self.updateStatus(id = rel.id, status = ignored_status)
db.expire_all()
def add(self, group):
@@ -58,9 +100,9 @@ class Release(Plugin):
done_status, snatched_status = fireEvent('status.get', ['done', 'snatched'], single = True)
# Add movie
movie = db.query(Movie).filter_by(library_id = group['library'].get('id')).first()
movie = db.query(Media).filter_by(library_id = group['library'].get('id')).first()
if not movie:
movie = Movie(
movie = Media(
library_id = group['library'].get('id'),
profile_id = 0,
status_id = done_status.get('id')
@@ -104,7 +146,6 @@ class Release(Plugin):
return True
def saveFile(self, filepath, type = 'unknown', include_media_info = False):
properties = {}
@@ -159,26 +200,23 @@ class Release(Plugin):
rel = db.query(Relea).filter_by(id = id).first()
if rel:
ignored_status, failed_status, available_status = fireEvent('status.get', ['ignored', 'failed', 'available'], single = True)
rel.status_id = available_status.get('id') if rel.status_id in [ignored_status.get('id'), failed_status.get('id')] else ignored_status.get('id')
db.commit()
self.updateStatus(id, available_status if rel.status_id in [ignored_status.get('id'), failed_status.get('id')] else ignored_status)
return {
'success': True
}
def download(self, id = None, **kwargs):
def manualDownload(self, id = None, **kwargs):
db = get_session()
snatched_status, done_status = fireEvent('status.get', ['snatched', 'done'], single = True)
rel = db.query(Relea).filter_by(id = id).first()
if rel:
item = {}
for info in rel.info:
item[info.identifier] = info.value
fireEvent('notify.frontend', type = 'release.download', data = True, message = 'Snatching "%s"' % item['name'])
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
# Get matching provider
provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
@@ -190,23 +228,18 @@ class Release(Plugin):
if item.get('protocol') != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
success = fireEvent('searcher.download', data = item, movie = rel.movie.to_dict({
success = self.download(data = item, media = rel.movie.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
}), manual = True, single = True)
}), manual = True)
if success:
db.expunge_all()
rel = db.query(Relea).filter_by(id = id).first() # Get release again
if rel.status_id != done_status.get('id'):
rel.status_id = snatched_status.get('id')
db.commit()
fireEvent('notify.frontend', type = 'release.download', data = True, message = 'Successfully snatched "%s"' % item['name'])
rel = db.query(Relea).filter_by(id = id).first() # Get release again @RuudBurger why do we need to get it again??
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
return {
'success': success
}
@@ -217,6 +250,152 @@ class Release(Plugin):
'success': False
}
def download(self, data, media, manual = False):
if not data.get('protocol'):
data['protocol'] = data['type']
data['type'] = 'movie'
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if downloader_enabled:
snatched_status, done_status, active_status = fireEvent('status.get', ['snatched', 'done', 'active'], single = True)
# Download release to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
download_result = fireEvent('download', data = data, movie = media, manual = manual, filedata = filedata, single = True)
log.debug('Downloader result: %s', download_result)
if download_result:
try:
# Mark release as snatched
db = get_session()
rls = db.query(Relea).filter_by(identifier = md5(data['url'])).first()
if rls:
renamer_enabled = Env.setting('enabled', 'renamer')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(media['library']), media['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls.to_dict())
# If renamer isn't used, mark media done
if not renamer_enabled:
try:
if media['status_id'] == active_status.get('id'):
for profile_type in media['profile']['types']:
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking media as finished: %s', log_movie)
# Mark release done
self.updateStatus(rls.id, status = done_status)
# Mark media done
mdia = db.query(Media).filter_by(id = media['id']).first()
mdia.status_id = done_status.get('id')
mdia.last_edit = int(time.time())
db.commit()
except:
log.error('Failed marking media finished, renamer disabled: %s', traceback.format_exc())
else:
self.updateStatus(rls.id, status = snatched_status)
except:
log.error('Failed marking media finished: %s', traceback.format_exc())
return True
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('protocol')))
return False
def tryDownloadResult(self, results, media, quality_type, manual = False):
ignored_status, failed_status = fireEvent('status.get', ['ignored', 'failed'], single = True)
for rel in results:
if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and rel.get('age') <= quality_type.get('wait_for', 0):
log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), rel['name']))
continue
if rel['status_id'] in [ignored_status.get('id'), failed_status.get('id')]:
log.info('Ignored: %s', rel['name'])
continue
if rel['score'] <= 0:
log.info('Ignored, score to low: %s', rel['name'])
continue
downloaded = fireEvent('release.download', data = rel, media = media, manual = manual, single = True)
if downloaded is True:
return True
elif downloaded != 'try_next':
break
return False
def createFromSearch(self, search_results, media, quality_type):
available_status = fireEvent('status.get', ['available'], single = True)
db = get_session()
found_releases = []
for rel in search_results:
rel_identifier = md5(rel['url'])
found_releases.append(rel_identifier)
rls = db.query(Relea).filter_by(identifier = rel_identifier).first()
if not rls:
rls = Relea(
identifier = rel_identifier,
movie_id = media.get('id'),
#media_id = media.get('id'),
quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id')
)
db.add(rls)
else:
[db.delete(old_info) for old_info in rls.info]
rls.last_edit = int(time.time())
db.commit()
for info in rel:
try:
if not isinstance(rel[info], (str, unicode, int, long, float)):
continue
rls_info = ReleaseInfo(
identifier = info,
value = toUnicode(rel[info])
)
rls.info.append(rls_info)
except InterfaceError:
log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc()))
db.commit()
rel['status_id'] = rls.status_id
return found_releases
def forMovie(self, id = None):
db = get_session()
@@ -241,3 +420,32 @@ class Release(Plugin):
'success': True
}
def updateStatus(self, id, status = None):
if not status: return False
db = get_session()
rel = db.query(Relea).filter_by(id = id).first()
if rel and status and rel.status_id != status.get('id'):
item = {}
for info in rel.info:
item[info.identifier] = info.value
if rel.files:
for file_item in rel.files:
if file_item.type.identifier == 'movie':
release_name = os.path.basename(file_item.path)
break
else:
release_name = item['name']
#update status in Db
log.debug('Marking release %s as %s', (release_name, status.get("label")))
rel.status_id = status.get('id')
rel.last_edit = int(time.time())
db.commit()
#Update all movie info as there is no release update function
fireEvent('notify.frontend', type = 'release.update_status.%s' % rel.id, data = status.get('id'))
return True

View File

@@ -28,6 +28,7 @@ rename_options = {
'cd': 'CD number (cd1)',
'cd_nr': 'Just the cd nr. (1)',
'mpaa': 'MPAA Rating',
'category': 'Category label',
},
}

View File

@@ -1,9 +1,9 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode, ss
from couchpotato.core.helpers.encoding import toUnicode, ss, sp
from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
getImdb, link, symlink, tryInt
getImdb, link, symlink, tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Library, File, Profile, Release, \
@@ -31,8 +31,10 @@ class Renamer(Plugin):
'params': {
'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'},
'movie_folder': {'desc': 'Optional: The folder of the movie to scan. Keep empty for default renamer folder.'},
'downloader' : {'desc': 'Optional: The downloader this movie has been downloaded with'},
'download_id': {'desc': 'Optional: The downloader\'s nzb/torrent ID'},
'files': {'desc': 'Optional: Provide the release files if more releases are in the same movie_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'downloader' : {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'},
'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in movie_folder. \'downloader\' is required with this option.'},
'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''},
},
})
@@ -62,23 +64,25 @@ class Renamer(Plugin):
def scanView(self, **kwargs):
async = tryInt(kwargs.get('async', 0))
movie_folder = kwargs.get('movie_folder')
movie_folder = sp(kwargs.get('movie_folder'))
downloader = kwargs.get('downloader')
download_id = kwargs.get('download_id')
files = '|'.join([sp(filename) for filename in splitString(kwargs.get('files'), '|')])
status = kwargs.get('status', 'completed')
download_info = {'folder': movie_folder} if movie_folder else None
if download_info:
download_info.update({'id': download_id, 'downloader': downloader} if download_id else {})
release_download = {'folder': movie_folder} if movie_folder else None
if release_download:
release_download.update({'id': download_id, 'downloader': downloader, 'status': status, 'files': files} if download_id else {})
fire_handle = fireEvent if not async else fireEventAsync
fire_handle('renamer.scan', download_info)
fire_handle('renamer.scan', release_download)
return {
'success': True
}
def scan(self, download_info = None):
def scan(self, release_download = None):
if self.isDisabled():
return
@@ -87,22 +91,66 @@ class Renamer(Plugin):
log.info('Renamer is already running, if you see this often, check the logs above for errors.')
return
movie_folder = download_info and download_info.get('folder')
from_folder = sp(self.conf('from'))
to_folder = sp(self.conf('to'))
# Check to see if the "to" folder is inside the "from" folder.
if movie_folder and not os.path.isdir(movie_folder) or not os.path.isdir(self.conf('from')) or not os.path.isdir(self.conf('to')):
l = log.debug if movie_folder else log.error
l('Both the "To" and "From" have to exist.')
return
elif self.conf('from') in self.conf('to'):
log.error('The "to" can\'t be inside of the "from" folder. You\'ll get an infinite loop.')
return
elif movie_folder and movie_folder in [self.conf('to'), self.conf('from')]:
log.error('The "to" and "from" folders can\'t be inside of or the same as the provided movie folder.')
# Get movie folder to process
movie_folder = release_download and release_download.get('folder')
# Get all folders that should not be processed
no_process = [to_folder]
cat_list = fireEvent('category.all', single = True) or []
no_process.extend([item['destination'] for item in cat_list])
try:
if Env.setting('library', section = 'manage').strip():
no_process.extend([sp(manage_folder) for manage_folder in splitString(Env.setting('library', section = 'manage'), '::')])
except:
pass
# Check to see if the no_process folders are inside the "from" folder.
if not os.path.isdir(from_folder) or not os.path.isdir(to_folder):
log.error('Both the "To" and "From" have to exist.')
return
else:
for item in no_process:
if from_folder in item:
log.error('To protect your data, the movie libraries can\'t be inside of or the same as the "from" folder.')
return
# Check to see if the no_process folders are inside the provided movie_folder
if movie_folder and not os.path.isdir(movie_folder):
log.debug('The provided movie folder %s does not exist. Trying to find it in the \'from\' folder.', movie_folder)
# Update to the from folder
if len(release_download.get('files')) == 1:
new_movie_folder = from_folder
else:
new_movie_folder = os.path.join(from_folder, os.path.basename(movie_folder))
if not os.path.isdir(new_movie_folder):
log.error('The provided movie folder %s does not exist and could also not be found in the \'from\' folder.', movie_folder)
return
# Update the files
new_files = [os.path.join(new_movie_folder, os.path.relpath(filename, movie_folder)) for filename in splitString(release_download.get('files'), '|')]
if new_files and not os.path.isfile(new_files[0]):
log.error('The provided movie folder %s does not exist and its files could also not be found in the \'from\' folder.', movie_folder)
return
# Update release_download info to the from folder
log.debug('Release %s found in the \'from\' folder.', movie_folder)
release_download['folder'] = new_movie_folder
release_download['files'] = '|'.join(new_files)
movie_folder = new_movie_folder
if movie_folder:
for item in no_process:
if movie_folder in item:
log.error('To protect your data, the movie libraries can\'t be inside of or the same as the provided movie folder.')
return
# Make sure a checkSnatched marked all downloads/seeds as such
if not download_info and self.conf('run_every') > 0:
if not release_download and self.conf('run_every') > 0:
fireEvent('renamer.check_snatched')
self.renaming_started = True
@@ -112,29 +160,35 @@ class Renamer(Plugin):
files = []
if movie_folder:
log.info('Scanning movie folder %s...', movie_folder)
movie_folder = movie_folder.rstrip(os.path.sep)
folder = os.path.dirname(movie_folder)
# Get all files from the specified folder
try:
for root, folders, names in os.walk(movie_folder):
files.extend([os.path.join(root, name) for name in names])
except:
log.error('Failed getting files from %s: %s', (movie_folder, traceback.format_exc()))
if release_download.get('files', ''):
files = splitString(release_download['files'], '|')
# If there is only one file in the torrent, the downloader did not create a subfolder
if len(files) == 1:
folder = movie_folder
else:
# Get all files from the specified folder
try:
for root, folders, names in os.walk(movie_folder):
files.extend([sp(os.path.join(root, name)) for name in names])
except:
log.error('Failed getting files from %s: %s', (movie_folder, traceback.format_exc()))
db = get_session()
# Extend the download info with info stored in the downloaded release
download_info = self.extendDownloadInfo(download_info)
release_download = self.extendReleaseDownload(release_download)
# Unpack any archives
extr_files = None
if self.conf('unrar'):
folder, movie_folder, files, extr_files = self.extractFiles(folder = folder, movie_folder = movie_folder, files = files,
cleanup = self.conf('cleanup') and not self.downloadIsTorrent(download_info))
cleanup = self.conf('cleanup') and not self.downloadIsTorrent(release_download))
groups = fireEvent('scanner.scan', folder = folder if folder else self.conf('from'),
files = files, download_info = download_info, return_ignored = False, single = True)
groups = fireEvent('scanner.scan', folder = folder if folder else from_folder,
files = files, release_download = release_download, return_ignored = False, single = True) or []
folder_name = self.conf('folder_name')
file_name = self.conf('file_name')
@@ -142,9 +196,9 @@ class Renamer(Plugin):
nfo_name = self.conf('nfo_name')
separator = self.conf('separator')
# Statusses
done_status, active_status, downloaded_status, snatched_status = \
fireEvent('status.get', ['done', 'active', 'downloaded', 'snatched'], single = True)
# Statuses
done_status, active_status, downloaded_status, snatched_status, seeding_status = \
fireEvent('status.get', ['done', 'active', 'downloaded', 'snatched', 'seeding'], single = True)
for group_identifier in groups:
@@ -157,7 +211,7 @@ class Renamer(Plugin):
# Add _UNKNOWN_ if no library item is connected
if not group['library'] or not movie_title:
self.tagDir(group, 'unknown')
self.tagRelease(group = group, tag = 'unknown')
continue
# Rename the files using the library data
else:
@@ -172,8 +226,13 @@ class Renamer(Plugin):
movie_title = getTitle(library)
# Overwrite destination when set in category
destination = self.conf('to')
destination = to_folder
category_label = ''
for movie in library_ent.movies:
if movie.category and movie.category.label:
category_label = movie.category.label
if movie.category and movie.category.destination and len(movie.category.destination) > 0 and movie.category.destination != 'None':
destination = movie.category.destination
log.debug('Setting category destination for "%s": %s' % (movie_title, destination))
@@ -190,7 +249,7 @@ class Renamer(Plugin):
if extr_files:
group['before_rename'].extend(extr_files)
# Remove weird chars from moviename
# Remove weird chars from movie name
movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', movie_title)
# Put 'The' at the end
@@ -217,6 +276,7 @@ class Renamer(Plugin):
'cd': '',
'cd_nr': '',
'mpaa': library['info'].get('mpaa', ''),
'category': category_label,
}
for file_type in group['files']:
@@ -225,7 +285,7 @@ class Renamer(Plugin):
if file_type is 'nfo' and not self.conf('rename_nfo'):
log.debug('Skipping, renaming of %s disabled', file_type)
for current_file in group['files'][file_type]:
if self.conf('cleanup') and (not self.downloadIsTorrent(download_info) or self.fileIsAdded(current_file, group)):
if self.conf('cleanup') and (not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
continue
@@ -385,7 +445,7 @@ class Renamer(Plugin):
log.info('Better quality release already exists for %s, with quality %s', (movie.library.titles[0].title, release.quality.label))
# Add exists tag to the .ignore file
self.tagDir(group, 'exists')
self.tagRelease(group = group, tag = 'exists')
# Notify on rename fail
download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (movie.library.titles[0].title, group['meta_data']['quality']['label'], release.quality.label)
@@ -393,16 +453,20 @@ class Renamer(Plugin):
remove_leftovers = False
break
elif release.status_id is snatched_status.get('id'):
if release.quality.id is group['meta_data']['quality']['id']:
log.debug('Marking release as downloaded')
try:
release.status_id = downloaded_status.get('id')
release.last_edit = int(time.time())
except Exception, e:
log.error('Failed marking release as finished: %s %s', (e, traceback.format_exc()))
db.commit()
elif release.status_id in [snatched_status.get('id'), seeding_status.get('id')]:
if release_download and release_download.get('rls_id'):
if release_download['rls_id'] == release.id:
if release_download['status'] == 'completed':
# Set the release to downloaded
fireEvent('release.update_status', release.id, status = downloaded_status, single = True)
elif release_download['status'] == 'seeding':
# Set the release to seeding
fireEvent('release.update_status', release.id, status = seeding_status, single = True)
elif release.quality.id is group['meta_data']['quality']['id']:
# Set the release to downloaded
fireEvent('release.update_status', release.id, status = downloaded_status, single = True)
# Remove leftover files
if not remove_leftovers: # Don't remove anything
@@ -411,7 +475,7 @@ class Renamer(Plugin):
log.debug('Removing leftover files')
for current_file in group['files']['leftover']:
if self.conf('cleanup') and not self.conf('move_leftover') and \
(not self.downloadIsTorrent(download_info) or self.fileIsAdded(current_file, group)):
(not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
# Remove files
@@ -427,17 +491,17 @@ class Renamer(Plugin):
log.info('Removing "%s"', src)
try:
src = ss(src)
src = sp(src)
if os.path.isfile(src):
os.remove(src)
parent_dir = os.path.normpath(os.path.dirname(src))
if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and not parent_dir in [destination, movie_folder] and not self.conf('from') in parent_dir:
parent_dir = os.path.dirname(src)
if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and not parent_dir in [destination, movie_folder] and not from_folder in parent_dir:
delete_folders.append(parent_dir)
except:
log.error('Failed removing %s: %s', (src, traceback.format_exc()))
self.tagDir(group, 'failed_remove')
self.tagRelease(group = group, tag = 'failed_remove')
# Delete leftover folder from older releases
for delete_folder in delete_folders:
@@ -457,15 +521,15 @@ class Renamer(Plugin):
self.makeDir(os.path.dirname(dst))
try:
self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(download_info) or self.fileIsAdded(src, group))
self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(release_download) or self.fileIsAdded(src, group))
group['renamed_files'].append(dst)
except:
log.error('Failed moving the file "%s" : %s', (os.path.basename(src), traceback.format_exc()))
self.tagDir(group, 'failed_rename')
self.tagRelease(group = group, tag = 'failed_rename')
# Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent
if self.movieInFromFolder(movie_folder) and self.downloadIsTorrent(download_info):
self.tagDir(group, 'renamed_already')
if self.movieInFromFolder(movie_folder) and self.downloadIsTorrent(release_download):
self.tagRelease(group = group, tag = 'renamed_already')
# Remove matching releases
for release in remove_releases:
@@ -475,12 +539,19 @@ class Renamer(Plugin):
except:
log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc()))
if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(download_info):
if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(release_download):
if movie_folder:
# Delete the movie folder
group_folder = movie_folder
else:
# Delete the first empty subfolder in the tree relative to the 'from' folder
group_folder = sp(os.path.join(from_folder, os.path.relpath(group['parentdir'], from_folder).split(os.path.sep)[0]))
try:
log.info('Deleting folder: %s', group['parentdir'])
self.deleteEmptyFolder(group['parentdir'])
log.info('Deleting folder: %s', group_folder)
self.deleteEmptyFolder(group_folder)
except:
log.error('Failed removing %s: %s', (group['parentdir'], traceback.format_exc()))
log.error('Failed removing %s: %s', (group_folder, traceback.format_exc()))
# Notify on download, search for trailers etc
download_message = 'Downloaded %s (%s)' % (movie_title, replacements['quality'])
@@ -515,18 +586,9 @@ class Renamer(Plugin):
return rename_files
# This adds a file to ignore / tag a release so it is ignored later
def tagDir(self, group, tag):
ignore_file = None
if isinstance(group, dict):
for movie_file in sorted(list(group['files']['movie'])):
ignore_file = '%s.%s.ignore' % (os.path.splitext(movie_file)[0], tag)
break
else:
if not os.path.isdir(group) or not tag:
return
ignore_file = os.path.join(group, '%s.ignore' % tag)
def tagRelease(self, tag, group = None, release_download = None):
if not tag:
return
text = """This file is from CouchPotato
It has marked this release as "%s"
@@ -534,25 +596,88 @@ This file hides the release from the renamer
Remove it if you want it to be renamed (again, or at least let it try again)
""" % tag
if ignore_file:
self.createFile(ignore_file, text)
tag_files = []
def untagDir(self, folder, tag = ''):
if not os.path.isdir(folder):
# Tag movie files if they are known
if isinstance(group, dict):
tag_files = [sorted(list(group['files']['movie']))[0]]
elif isinstance(release_download, dict):
# Tag download_files if they are known
if release_download['files']:
tag_files = splitString(release_download['files'], '|')
# Tag all files in release folder
else:
for root, folders, names in os.walk(release_download['folder']):
tag_files.extend([os.path.join(root, name) for name in names])
for filename in tag_files:
tag_filename = '%s.%s.ignore' % (os.path.splitext(filename)[0], tag)
if not os.path.isfile(tag_filename):
self.createFile(tag_filename, text)
def untagRelease(self, release_download, tag = ''):
if not release_download:
return
# Remove any .ignore files
for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, '*%s.ignore' % tag):
os.remove((os.path.join(root, filename)))
tag_files = []
def hastagDir(self, folder, tag = ''):
folder = release_download['folder']
if not os.path.isdir(folder):
return False
# Find any .ignore files
# Untag download_files if they are known
if release_download['files']:
tag_files = splitString(release_download['files'], '|')
# Untag all files in release folder
else:
for root, folders, names in os.walk(release_download['folder']):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
# Find all .ignore files in folder
ignore_files = []
for root, dirnames, filenames in os.walk(folder):
if fnmatch.filter(filenames, '*%s.ignore' % tag):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and delete if found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, '%s.%s.ignore' % (re.escape(os.path.splitext(tag_file)[0]), tag if tag else '*'))
for filename in ignore_file:
try:
os.remove(filename)
except:
log.debug('Unable to remove ignore file: %s. Error: %s.' % (filename, traceback.format_exc()))
def hastagRelease(self, release_download, tag = ''):
if not release_download:
return False
folder = release_download['folder']
if not os.path.isdir(folder):
return False
tag_files = []
ignore_files = []
# Find tag on download_files if they are known
if release_download['files']:
tag_files = splitString(release_download['files'], '|')
# Find tag on all files in release folder
else:
for root, folders, names in os.walk(release_download['folder']):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
# Find all .ignore files in folder
for root, dirnames, filenames in os.walk(folder):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and return True found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, '%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))
if ignore_file:
return True
return False
@@ -571,7 +696,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
link(old, dest)
except:
# Try to simlink next
log.debug('Couldn\'t hardlink file "%s" to "%s". Simlinking instead. Error: %s. ', (old, dest, traceback.format_exc()))
log.debug('Couldn\'t hardlink file "%s" to "%s". Simlinking instead. Error: %s.', (old, dest, traceback.format_exc()))
shutil.copy(old, dest)
try:
symlink(dest, old + '.link')
@@ -615,22 +740,38 @@ Remove it if you want it to be renamed (again, or at least let it try again)
replaced = toUnicode(string)
for x, r in replacements.iteritems():
if x in ['thename', 'namethe']:
continue
if r is not None:
replaced = replaced.replace(u'<%s>' % toUnicode(x), toUnicode(r))
else:
#If information is not available, we don't want the tag in the filename
replaced = replaced.replace('<' + x + '>', '')
replaced = self.replaceDoubles(replaced.lstrip('. '))
for x, r in replacements.iteritems():
if x in ['thename', 'namethe']:
replaced = replaced.replace(u'<%s>' % toUnicode(x), toUnicode(r))
replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced)
sep = self.conf('foldersep') if folder else self.conf('separator')
return self.replaceDoubles(replaced.lstrip('. ')).replace(' ', ' ' if not sep else sep)
return replaced.replace(' ', ' ' if not sep else sep)
def replaceDoubles(self, string):
return string.replace(' ', ' ').replace(' .', '.')
replaces = [
('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '),
('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-'),
]
for r in replaces:
reg, replace_with = r
string = re.sub(reg, replace_with, string)
return string
def deleteEmptyFolder(self, folder, show_error = True):
folder = ss(folder)
folder = sp(folder)
loge = log.error if show_error else log.debug
for root, dirs, files in os.walk(folder):
@@ -656,117 +797,115 @@ Remove it if you want it to be renamed (again, or at least let it try again)
self.checking_snatched = True
snatched_status, ignored_status, failed_status, done_status, seeding_status, downloaded_status = \
fireEvent('status.get', ['snatched', 'ignored', 'failed', 'done', 'seeding', 'downloaded'], single = True)
snatched_status, ignored_status, failed_status, seeding_status, downloaded_status, missing_status = \
fireEvent('status.get', ['snatched', 'ignored', 'failed', 'seeding', 'downloaded', 'missing'], single = True)
db = get_session()
rels = db.query(Release).filter_by(status_id = snatched_status.get('id')).all()
rels.extend(db.query(Release).filter_by(status_id = seeding_status.get('id')).all())
rels = db.query(Release).filter(
Release.status_id.in_([snatched_status.get('id'), seeding_status.get('id'), missing_status.get('id')])
).all()
scan_items = []
scan_releases = []
scan_required = False
if rels:
log.debug('Checking status snatched releases...')
statuses = fireEvent('download.status', merge = True)
if not statuses:
release_downloads = fireEvent('download.status', merge = True)
if not release_downloads:
log.debug('Download status functionality is not implemented for active downloaders.')
scan_required = True
else:
try:
for rel in rels:
rel_dict = rel.to_dict({'info': {}})
movie_dict = fireEvent('movie.get', rel.movie_id, single = True)
if not isinstance(rel_dict['info'], (dict)):
log.error('Faulty release found without any info, ignoring.')
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
continue
# check status
nzbname = self.createNzbName(rel_dict['info'], movie_dict)
found = False
for item in statuses:
for release_download in release_downloads:
found_release = False
if rel_dict['info'].get('download_id'):
if item['id'] == rel_dict['info']['download_id'] and item['downloader'] == rel_dict['info']['download_downloader']:
log.debug('Found release by id: %s', item['id'])
if release_download['id'] == rel_dict['info']['download_id'] and release_download['downloader'] == rel_dict['info']['download_downloader']:
log.debug('Found release by id: %s', release_download['id'])
found_release = True
else:
if item['name'] == nzbname or rel_dict['info']['name'] in item['name'] or getImdb(item['name']) == movie_dict['library']['identifier']:
if release_download['name'] == nzbname or rel_dict['info']['name'] in release_download['name'] or getImdb(release_download['name']) == movie_dict['library']['identifier']:
found_release = True
if found_release:
timeleft = 'N/A' if item['timeleft'] == -1 else item['timeleft']
log.debug('Found %s: %s, time to go: %s', (item['name'], item['status'].upper(), timeleft))
timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))
if release_download['status'] == 'busy':
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
if item['status'] == 'busy':
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if item['folder'] and self.conf('from') in item['folder']:
self.tagDir(item['folder'], 'downloading')
elif item['status'] == 'seeding':
if self.movieInFromFolder(release_download['folder']):
self.tagRelease(release_download = release_download, tag = 'downloading')
elif release_download['status'] == 'seeding':
#If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.movie.status_id == done_status.get('id') and self.statusInfoComplete(item):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (item['name'], item['seed_ratio']))
if self.conf('file_action') != 'move' and not rel.status_id == seeding_status.get('id') and self.statusInfoComplete(release_download):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))
# Remove the downloading tag
self.untagDir(item['folder'], 'downloading')
rel.status_id = seeding_status.get('id')
rel.last_edit = int(time.time())
db.commit()
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and set the torrent to paused if required
item.update({'pause': True, 'scan': True, 'process_complete': False})
scan_items.append(item)
release_download.update({'pause': True, 'scan': True, 'process_complete': False})
scan_releases.append(release_download)
else:
if rel.status_id != seeding_status.get('id'):
rel.status_id = seeding_status.get('id')
rel.last_edit = int(time.time())
db.commit()
#let it seed
log.debug('%s is seeding with ratio: %s', (item['name'], item['seed_ratio']))
elif item['status'] == 'failed':
fireEvent('download.remove_failed', item, single = True)
rel.status_id = failed_status.get('id')
rel.last_edit = int(time.time())
db.commit()
log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))
# Set the release to seeding
fireEvent('release.update_status', rel.id, status = seeding_status, single = True)
elif release_download['status'] == 'failed':
# Set the release to failed
fireEvent('release.update_status', rel.id, status = failed_status, single = True)
fireEvent('download.remove_failed', release_download, single = True)
if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', movie_id = rel.movie_id)
elif item['status'] == 'completed':
log.info('Download of %s completed!', item['name'])
if self.statusInfoComplete(item):
elif release_download['status'] == 'completed':
log.info('Download of %s completed!', release_download['name'])
if self.statusInfoComplete(release_download):
# If the release has been seeding, process now the seeding is done
if rel.status_id == seeding_status.get('id'):
if rel.movie.status_id == done_status.get('id'):
if self.conf('file_action') != 'move':
# Set the release to done as the movie has already been renamed
rel.status_id = downloaded_status.get('id')
rel.last_edit = int(time.time())
db.commit()
fireEvent('release.update_status', rel.id, status = downloaded_status, single = True)
# Allow the downloader to clean-up
item.update({'pause': False, 'scan': False, 'process_complete': True})
scan_items.append(item)
release_download.update({'pause': False, 'scan': False, 'process_complete': True})
scan_releases.append(release_download)
else:
# Set the release to snatched so that the renamer can process the release as if it was never seeding
rel.status_id = snatched_status.get('id')
rel.last_edit = int(time.time())
db.commit()
# Scan and Allow the downloader to clean-up
item.update({'pause': False, 'scan': True, 'process_complete': True})
scan_items.append(item)
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Remove the downloading tag
self.untagDir(item['folder'], 'downloading')
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and Allow the downloader to clean-up
item.update({'pause': False, 'scan': True, 'process_complete': True})
scan_items.append(item)
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
scan_required = True
@@ -776,25 +915,33 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if not found:
log.info('%s not found in downloaders', nzbname)
#Check status if already missing and for how long, if > 1 week, set to ignored else to missing
if rel.status_id == missing_status.get('id'):
if rel.last_edit < int(time.time()) - 7 * 24 * 60 * 60:
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
else:
# Set the release to missing
fireEvent('release.update_status', rel.id, status = missing_status, single = True)
except:
log.error('Failed checking for release in downloader: %s', traceback.format_exc())
# The following can either be done here, or inside the scanner if we pass it scan_items in one go
for item in scan_items:
for release_download in scan_releases:
# Ask the renamer to scan the item
if item['scan']:
if item['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', item = item, pause = True, single = True)
fireEvent('renamer.scan', download_info = item)
if item['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', item = item, pause = False, single = True)
if item['process_complete']:
if release_download['scan']:
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = True, single = True)
fireEvent('renamer.scan', release_download = release_download)
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = False, single = True)
if release_download['process_complete']:
#First make sure the files were succesfully processed
if not self.hastagDir(item['folder'], 'failed_rename'):
if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'):
# Remove the seeding tag if it exists
self.untagDir(item['folder'], 'renamed_already')
self.untagRelease(release_download = release_download, tag = 'renamed_already')
# Ask the downloader to process the item
fireEvent('download.process_complete', item = item, single = True)
fireEvent('download.process_complete', release_download = release_download, single = True)
if scan_required:
fireEvent('renamer.scan')
@@ -803,16 +950,16 @@ Remove it if you want it to be renamed (again, or at least let it try again)
return True
def extendDownloadInfo(self, download_info):
def extendReleaseDownload(self, release_download):
rls = None
if download_info and download_info.get('id') and download_info.get('downloader'):
if release_download and release_download.get('id') and release_download.get('downloader'):
db = get_session()
rlsnfo_dwnlds = db.query(ReleaseInfo).filter_by(identifier = 'download_downloader', value = download_info.get('downloader')).all()
rlsnfo_ids = db.query(ReleaseInfo).filter_by(identifier = 'download_id', value = download_info.get('id')).all()
rlsnfo_dwnlds = db.query(ReleaseInfo).filter_by(identifier = 'download_downloader', value = release_download.get('downloader')).all()
rlsnfo_ids = db.query(ReleaseInfo).filter_by(identifier = 'download_id', value = release_download.get('id')).all()
for rlsnfo_dwnld in rlsnfo_dwnlds:
for rlsnfo_id in rlsnfo_ids:
@@ -822,32 +969,33 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if rls: break
if not rls:
log.error('Download ID %s from downloader %s not found in releases', (download_info.get('id'), download_info.get('downloader')))
log.error('Download ID %s from downloader %s not found in releases', (release_download.get('id'), release_download.get('downloader')))
if rls:
rls_dict = rls.to_dict({'info':{}})
download_info.update({
release_download.update({
'imdb_id': rls.movie.library.identifier,
'quality': rls.quality.identifier,
'protocol': rls_dict.get('info', {}).get('protocol') or rls_dict.get('info', {}).get('type'),
'rls_id': rls.id,
})
return download_info
return release_download
def downloadIsTorrent(self, download_info):
return download_info and download_info.get('protocol') in ['torrent', 'torrent_magnet']
def downloadIsTorrent(self, release_download):
return release_download and release_download.get('protocol') in ['torrent', 'torrent_magnet']
def fileIsAdded(self, src, group):
if not group or not group.get('before_rename'):
return False
return src in group['before_rename']
def statusInfoComplete(self, item):
return item['id'] and item['downloader'] and item['folder']
def statusInfoComplete(self, release_download):
return release_download['id'] and release_download['downloader'] and release_download['folder']
def movieInFromFolder(self, movie_folder):
return movie_folder and self.conf('from') in movie_folder or not movie_folder
return movie_folder and sp(self.conf('from')) in sp(movie_folder) or not movie_folder
def extractFiles(self, folder = None, movie_folder = None, files = None, cleanup = False):
if not files: files = []
@@ -857,9 +1005,11 @@ Remove it if you want it to be renamed (again, or at least let it try again)
restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))'
extr_files = []
from_folder = sp(self.conf('from'))
# Check input variables
if not folder:
folder = self.conf('from')
folder = from_folder
check_file_date = True
if movie_folder:
@@ -867,7 +1017,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if not files:
for root, folders, names in os.walk(folder):
files.extend([os.path.join(root, name) for name in names])
files.extend([sp(os.path.join(root, name)) for name in names])
# Find all archive files
archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)]
@@ -875,7 +1025,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
#Extract all found archives
for archive in archives:
# Check if it has already been processed by CPS
if self.hastagDir(os.path.dirname(archive['file'])):
if self.hastagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': archive['file']}):
continue
# Find all related archive files
@@ -913,13 +1063,13 @@ Remove it if you want it to be renamed (again, or at least let it try again)
log.info('Archive %s found. Extracting...', os.path.basename(archive['file']))
try:
rar_handle = RarFile(archive['file'])
extr_path = os.path.join(self.conf('from'), os.path.relpath(os.path.dirname(archive['file']), folder))
extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder))
self.makeDir(extr_path)
for packedinfo in rar_handle.infolist():
if not packedinfo.isdir and not os.path.isfile(os.path.join(extr_path, os.path.basename(packedinfo.filename))):
if not packedinfo.isdir and not os.path.isfile(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))):
log.debug('Extracting %s...', packedinfo.filename)
rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False)
extr_files.append(os.path.join(extr_path, os.path.basename(packedinfo.filename)))
extr_files.append(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename))))
del rar_handle
except Exception, e:
log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc()))
@@ -936,9 +1086,9 @@ Remove it if you want it to be renamed (again, or at least let it try again)
files.remove(filename)
# Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided)
if extr_files and os.path.normpath(os.path.normcase(folder)) != os.path.normpath(os.path.normcase(self.conf('from'))):
if extr_files and folder != from_folder:
for leftoverfile in list(files):
move_to = os.path.join(self.conf('from'), os.path.relpath(leftoverfile, folder))
move_to = os.path.join(from_folder, os.path.relpath(leftoverfile, folder))
try:
self.makeDir(os.path.dirname(move_to))
@@ -961,8 +1111,8 @@ Remove it if you want it to be renamed (again, or at least let it try again)
log.debug('Removing old movie folder %s...', movie_folder)
self.deleteEmptyFolder(movie_folder)
movie_folder = os.path.join(self.conf('from'), os.path.relpath(movie_folder, folder))
folder = self.conf('from')
movie_folder = os.path.join(from_folder, os.path.relpath(movie_folder, folder))
folder = from_folder
if extr_files:
files.extend(extr_files)

View File

@@ -1,10 +1,11 @@
from couchpotato import get_session
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString, ss
from couchpotato.core.helpers.variable import getExt, getImdb, tryInt
from couchpotato.core.helpers.encoding import toUnicode, simplifyString, ss, sp
from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \
splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import File, Movie
from couchpotato.core.settings.model import File, Media
from enzyme.exceptions import NoParserError, ParseError
from guessit import guess_movie_info
from subliminal.videos import Video
@@ -20,11 +21,9 @@ log = CPLog(__name__)
class Scanner(Plugin):
minimal_filesize = {
'media': 314572800, # 300MB
'trailer': 1048576, # 1MB
}
ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_', '_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo'] #unpacking, smb-crap, hidden files
ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_',
'_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo',
'thumbs.db', 'ehthumbs.db', 'desktop.ini'] #unpacking, smb-crap, hidden files
ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate']
extensions = {
'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v'],
@@ -49,6 +48,12 @@ class Scanner(Plugin):
'leftover': ('leftover', 'leftover'),
}
file_sizes = { # in MB
'movie': {'min': 300},
'trailer': {'min': 2, 'max': 250},
'backdrop': {'min': 0, 'max': 5},
}
codecs = {
'audio': ['dts', 'ac3', 'ac3d', 'mp3'],
'video': ['x264', 'h264', 'divx', 'xvid']
@@ -101,9 +106,9 @@ class Scanner(Plugin):
addEvent('scanner.name_year', self.getReleaseNameYear)
addEvent('scanner.partnumber', self.getPartNumber)
def scan(self, folder = None, files = None, download_info = None, simple = False, newer_than = 0, return_ignored = True, on_found = None):
def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, on_found = None):
folder = ss(os.path.normpath(folder))
folder = sp(folder)
if not folder or not os.path.isdir(folder):
log.error('Folder doesn\'t exists: %s', folder)
@@ -119,7 +124,7 @@ class Scanner(Plugin):
try:
files = []
for root, dirs, walk_files in os.walk(folder):
files.extend(os.path.join(root, filename) for filename in walk_files)
files.extend([sp(os.path.join(root, filename)) for filename in walk_files])
# Break if CP wants to shut down
if self.shuttingDown():
@@ -129,7 +134,7 @@ class Scanner(Plugin):
log.error('Failed getting files from %s: %s', (folder, traceback.format_exc()))
else:
check_file_date = False
files = [ss(x) for x in files]
files = [sp(x) for x in files]
for file_path in files:
@@ -145,7 +150,7 @@ class Scanner(Plugin):
continue
is_dvd_file = self.isDVDFile(file_path)
if os.path.getsize(file_path) > self.minimal_filesize['media'] or is_dvd_file: # Minimal 300MB files or is DVD file
if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file: # Minimal 300MB files or is DVD file
# Normal identifier
identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file)
@@ -179,7 +184,6 @@ class Scanner(Plugin):
# files will be grouped first.
leftovers = set(sorted(leftovers, reverse = True))
# Group files minus extension
ignored_identifiers = []
for identifier, group in movie_files.iteritems():
@@ -188,7 +192,7 @@ class Scanner(Plugin):
log.debug('Grouping files: %s', identifier)
has_ignored = 0
for file_path in group['unsorted_files']:
for file_path in list(group['unsorted_files']):
ext = getExt(file_path)
wo_ext = file_path[:-(len(ext) + 1)]
found_files = set([i for i in leftovers if wo_ext in i])
@@ -197,6 +201,11 @@ class Scanner(Plugin):
has_ignored += 1 if ext == 'ignore' else 0
if has_ignored == 0:
for file_path in list(group['unsorted_files']):
ext = getExt(file_path)
has_ignored += 1 if ext == 'ignore' else 0
if has_ignored > 0:
ignored_identifiers.append(identifier)
@@ -229,10 +238,6 @@ class Scanner(Plugin):
# Remove the found files from the leftover stack
leftovers = leftovers - set(found_files)
exts = [getExt(ff) for ff in found_files]
if 'ignore' in exts:
ignored_identifiers.append(identifier)
# Break if CP wants to shut down
if self.shuttingDown():
break
@@ -259,14 +264,14 @@ class Scanner(Plugin):
# Remove the found files from the leftover stack
leftovers = leftovers - set([ff])
ext = getExt(ff)
if ext == 'ignore':
ignored_identifiers.append(new_identifier)
# Break if CP wants to shut down
if self.shuttingDown():
break
# leftovers should be empty
if leftovers:
log.debug('Some files are still left over: %s', leftovers)
# Cleaning up used
for identifier in delete_identifiers:
if path_identifiers.get(identifier):
@@ -336,11 +341,11 @@ class Scanner(Plugin):
total_found = len(valid_files)
# Make sure only one movie was found if a download ID is provided
if download_info and total_found == 0:
log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', download_info.get('imdb_id'))
elif download_info and total_found > 1:
log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (download_info.get('imdb_id'), len(valid_files)))
download_info = None
if release_download and total_found == 0:
log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id'))
elif release_download and total_found > 1:
log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files)))
release_download = None
# Determine file types
db = get_session()
@@ -376,7 +381,7 @@ class Scanner(Plugin):
continue
log.debug('Getting metadata for %s', identifier)
group['meta_data'] = self.getMetaData(group, folder = folder, download_info = download_info)
group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download)
# Subtitle meta
group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {}
@@ -408,11 +413,11 @@ class Scanner(Plugin):
del group['unsorted_files']
# Determine movie
group['library'] = self.determineMovie(group, download_info = download_info)
group['library'] = self.determineMovie(group, release_download = release_download)
if not group['library']:
log.error('Unable to determine movie: %s', group['identifiers'])
else:
movie = db.query(Movie).filter_by(library_id = group['library']['id']).first()
movie = db.query(Media).filter_by(library_id = group['library']['id']).first()
group['movie_id'] = None if not movie else movie.id
processed_movies[identifier] = group
@@ -433,13 +438,13 @@ class Scanner(Plugin):
return processed_movies
def getMetaData(self, group, folder = '', download_info = None):
def getMetaData(self, group, folder = '', release_download = None):
data = {}
files = list(group['files']['movie'])
for cur_file in files:
if os.path.getsize(cur_file) < self.minimal_filesize['media']: continue # Ignore smaller files
if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files
meta = self.getMeta(cur_file)
@@ -458,8 +463,8 @@ class Scanner(Plugin):
# Use the quality guess first, if that failes use the quality we wanted to download
data['quality'] = None
if download_info and download_info.get('quality'):
data['quality'] = fireEvent('quality.single', download_info.get('quality'), single = True)
if release_download and release_download.get('quality'):
data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True)
if not data['quality']:
data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True)
@@ -543,12 +548,12 @@ class Scanner(Plugin):
return detected_languages
def determineMovie(self, group, download_info = None):
def determineMovie(self, group, release_download = None):
# Get imdb id from downloader
imdb_id = download_info and download_info.get('imdb_id')
imdb_id = release_download and release_download.get('imdb_id')
if imdb_id:
log.debug('Found movie via imdb id from it\'s download id: %s', download_info.get('imdb_id'))
log.debug('Found movie via imdb id from it\'s download id: %s', release_download.get('imdb_id'))
files = group['files']
@@ -649,7 +654,7 @@ class Scanner(Plugin):
def getMediaFiles(self, files):
def test(s):
return self.filesizeBetween(s, 300, 100000) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s)
return self.filesizeBetween(s, self.file_sizes['movie']) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s)
return set(filter(test, files))
@@ -674,7 +679,7 @@ class Scanner(Plugin):
def getTrailers(self, files):
def test(s):
return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, 2, 250)
return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['trailer'])
return set(filter(test, files))
@@ -685,7 +690,7 @@ class Scanner(Plugin):
files = set(filter(test, files))
images = {
'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, 0, 5), files))
'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['backdrop']), files))
}
# Rest
@@ -713,16 +718,6 @@ class Scanner(Plugin):
log.debug('Ignored "%s" contains "%s".', (filename, i))
return False
# Sample file
if self.isSampleFile(filename):
log.debug('Is sample file "%s".', filename)
return False
# Minimal size
if self.filesizeBetween(filename, self.minimal_filesize['media']):
log.debug('File to small: %s', filename)
return False
# All is OK
return True
@@ -731,9 +726,9 @@ class Scanner(Plugin):
if is_sample: log.debug('Is sample file: %s', filename)
return is_sample
def filesizeBetween(self, file, min = 0, max = 100000):
def filesizeBetween(self, file, file_size = []):
try:
return (min * 1048576) < os.path.getsize(file) < (max * 1048576)
return (file_size.get('min', 0) * 1048576) < os.path.getsize(file) < (file_size.get('max', 100000) * 1048576)
except:
log.error('Couldn\'t get filesize of %s.', file)
@@ -741,9 +736,16 @@ class Scanner(Plugin):
def createStringIdentifier(self, file_path, folder = '', exclude_filename = False):
identifier = file_path.replace(folder, '') # root folder
year = self.findYear(file_path)
identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder
identifier = os.path.splitext(identifier)[0] # ext
try:
path_split = splitString(identifier, os.path.sep)
identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename
except: pass
if exclude_filename:
identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])]
@@ -757,7 +759,6 @@ class Scanner(Plugin):
identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':')
# Year
year = self.findYear(identifier)
if year and identifier[:4] != year:
identifier = '%s %s' % (identifier.split(year)[0].strip(), year)
else:
@@ -821,19 +822,21 @@ class Scanner(Plugin):
def findYear(self, text):
# Search year inside () or [] first
matches = re.search('(\(|\[)(?P<year>19[0-9]{2}|20[0-9]{2})(\]|\))', text)
matches = re.findall('(\(|\[)(?P<year>19[0-9]{2}|20[0-9]{2})(\]|\))', text)
if matches:
return matches.group('year')
return matches[-1][1]
# Search normal
matches = re.search('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
matches = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
if matches:
return matches.group('year')
return matches[-1]
return ''
def getReleaseNameYear(self, release_name, file_name = None):
release_name = release_name.strip(' .-_')
# Use guessit first
guess = {}
if file_name:
@@ -851,7 +854,7 @@ class Scanner(Plugin):
cleaned = ' '.join(re.split('\W+', simplifyString(release_name)))
cleaned = re.sub(self.clean, ' ', cleaned)
for year_str in [file_name, cleaned]:
for year_str in [file_name, release_name, cleaned]:
if not year_str: continue
year = self.findYear(year_str)
if year:
@@ -861,19 +864,21 @@ class Scanner(Plugin):
if year: # Split name on year
try:
movie_name = cleaned.split(year).pop(0).strip()
cp_guess = {
'name': movie_name,
'year': int(year),
}
movie_name = cleaned.rsplit(year, 1).pop(0).strip()
if movie_name:
cp_guess = {
'name': movie_name,
'year': int(year),
}
except:
pass
else: # Split name on multiple spaces
if not cp_guess: # Split name on multiple spaces
try:
movie_name = cleaned.split(' ').pop(0).strip()
cp_guess = {
'name': movie_name,
'year': int(year),
'year': int(year) if movie_name[:4] != year else 0,
}
except:
pass

View File

@@ -1,11 +1,11 @@
from couchpotato.core.event import addEvent
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.score.scores import nameScore, nameRatioScore, \
sizeScore, providerScore, duplicateScore, partialIgnoredScore, namePositionScore, \
halfMultipartScore
halfMultipartScore, sceneScore
from couchpotato.environment import Env
log = CPLog(__name__)
@@ -62,4 +62,7 @@ class Score(Plugin):
if extra_score:
score += extra_score(nzb)
# Scene / Nuke scoring
score += sceneScore(nzb['name'])
return score

View File

@@ -1,8 +1,13 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
import re
import traceback
log = CPLog(__name__)
name_scores = [
# Tags
@@ -160,3 +165,38 @@ def halfMultipartScore(nzb_name):
return -30
return 0
def sceneScore(nzb_name):
check_names = [nzb_name]
# Match names between "
try: check_names.append(re.search(r'([\'"])[^\1]*\1', nzb_name).group(0))
except: pass
# Match longest name between []
try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', nzb_name), key = len).strip())
except: pass
for name in check_names:
# Strip twice, remove possible file extensions
name = name.lower().strip(' "\'\.-_\[\]')
name = re.sub('\.([a-z0-9]{0,4})$', '', name)
name = name.strip(' "\'\.-_\[\]')
# Make sure year and groupname is in there
year = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', name)
group = re.findall('\-([a-z0-9]+)$', name)
if len(year) > 0 and len(group) > 0:
try:
validate = fireEvent('release.validate', name, single = True)
if validate and tryInt(validate.get('score')) != 0:
log.debug('Release "%s" scored %s, reason: %s', (nzb_name, validate['score'], validate['reasons']))
return tryInt(validate.get('score'))
except:
log.error('Failed scoring scene: %s', traceback.format_exc())
return 0

View File

@@ -24,6 +24,7 @@ class StatusPlugin(Plugin):
'available': 'Available',
'suggest': 'Suggest',
'seeding': 'Seeding',
'missing': 'Missing',
}
status_cached = {}

View File

@@ -1,6 +1,6 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.encoding import toUnicode, sp
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@@ -58,9 +58,9 @@ class Subtitle(Plugin):
for d_sub in downloaded:
log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files))
group['files']['subtitle'].append(d_sub.path)
group['before_rename'].append(d_sub.path)
group['subtitle_language'][d_sub.path] = [d_sub.language.alpha2]
group['files']['subtitle'].append(sp(d_sub.path))
group['before_rename'].append(sp(d_sub.path))
group['subtitle_language'][sp(d_sub.path)] = [d_sub.language.alpha2]
return True

View File

@@ -1,121 +0,0 @@
/* Styles for the "You might like these" movie suggestion panel. */
.suggestions {
}

.suggestions > h2 {
	height: 40px;
}

/* Responsive 3 / 2 / 1 column grid of suggested movies */
.suggestions .movie_result {
	display: inline-block;
	width: 33.333%;
	height: 150px;
}

@media all and (max-width: 960px) {
	.suggestions .movie_result {
		width: 50%;
	}
}

@media all and (max-width: 600px) {
	.suggestions .movie_result {
		width: 100%;
	}
}

/* Info panel to the right of the 100px-wide poster thumbnail */
.suggestions .movie_result .data {
	left: 100px;
	background: #4e5969;
	border: none;
}

.suggestions .movie_result .data .info {
	top: 15px;
	left: 15px;
	right: 15px;
	bottom: 15px;
	overflow: hidden;
}

.suggestions .movie_result .data .info h2 {
	white-space: normal;
	max-height: 120px;
	font-size: 18px;
	line-height: 18px;
}

.suggestions .movie_result .data .info .rating,
.suggestions .movie_result .data .info .genres,
.suggestions .movie_result .data .info .year {
	position: static;
	display: block;
	padding: 0;
	opacity: .6;
}

.suggestions .movie_result .data .info .year {
	margin: 10px 0 0;
}

.suggestions .movie_result .data .info .rating {
	font-size: 20px;
	float: right;
	margin-top: -20px;
}

/* Star glyph from the icon font, placed before the rating number */
.suggestions .movie_result .data .info .rating:before {
	content: "\e031";
	font-family: 'Elusive-Icons';
	font-size: 14px;
	margin: 0 5px 0 0;
	vertical-align: bottom;
}

.suggestions .movie_result .data .info .genres {
	font-size: 11px;
	font-style: italic;
	text-align: right;
}

.suggestions .movie_result .data {
	cursor: default;
}

.suggestions .movie_result .options {
	left: 100px;
}

.suggestions .movie_result .options select[name=title] { width: 100%; }
.suggestions .movie_result .options select[name=profile] { width: 100%; }
.suggestions .movie_result .options select[name=category] { width: 100%; }

.suggestions .movie_result .button {
	position: absolute;
	margin: 2px 0 0 0;
	right: 15px;
	bottom: 15px;
}

.suggestions .movie_result .thumbnail {
	width: 100px;
}

/* Hover-only action strip (add / imdb / trailer / ignore / seen) */
.suggestions .movie_result .actions {
	position: absolute;
	bottom: 10px;
	right: 10px;
	display: none;
	width: 140px;
}

.suggestions .movie_result:hover .actions {
	display: block;
}

.suggestions .movie_result .data.open .actions {
	display: none;
}

.suggestions .movie_result .actions a {
	margin-left: 10px;
	vertical-align: middle;
}
View File

@@ -1,136 +0,0 @@
/*
 * SuggestList: renders the "You might like these" suggestion panel.
 * Loads suggestions through the API and lets the user add a movie,
 * ignore it, or mark it as already seen.
 */
var SuggestList = new Class({

	Implements: [Options, Events],

	initialize: function(options){
		var self = this;
		self.setOptions(options);

		self.create();
	},

	create: function(){
		var self = this;

		// Delegated clicks on the per-movie action icons; each action
		// removes the card and asks the server for a replacement batch.
		self.el = new Element('div.suggestions', {
			'events': {
				'click:relay(a.delete)': function(e, el){
					(e).stop();
					$(el).getParent('.movie_result').destroy();
					Api.request('suggestion.ignore', {
						'data': {
							'imdb': el.get('data-ignore')
						},
						'onComplete': self.fill.bind(self)
					});
				},
				'click:relay(a.eye-open)': function(e, el){
					(e).stop();
					$(el).getParent('.movie_result').destroy();
					// mark_seen also records the movie as watched server-side
					Api.request('suggestion.ignore', {
						'data': {
							'imdb': el.get('data-seen'),
							'mark_seen': 1
						},
						'onComplete': self.fill.bind(self)
					});
				}
			}
		}).grab(
			new Element('h2', {
				'text': 'You might like these'
			})
		);

		// Initial load of the suggestion list
		self.api_request = Api.request('suggestion.view', {
			'onComplete': self.fill.bind(self)
		});
	},

	// Render each suggested movie as a search-result card with extra actions
	fill: function(json){

		var self = this;

		if(!json) return;

		Object.each(json.suggestions, function(movie){

			var m = new Block.Search.Item(movie, {
				'onAdded': function(){
					self.afterAdded(m, movie)
				}
			});
			m.data_container.grab(
				new Element('div.actions').adopt(
					new Element('a.add.icon2', {
						'title': 'Add movie with your default quality',
						'data-add': movie.imdb,
						'events': {
							'click': m.showOptions.bind(m)
						}
					}),
					$(new MA.IMDB(m)),
					$(new MA.Trailer(m, {
						'height': 150
					})),
					new Element('a.delete.icon2', {
						'title': 'Don\'t suggest this movie again',
						'data-ignore': movie.imdb
					}),
					new Element('a.eye-open.icon2', {
						'title': 'Seen it, like it, don\'t add',
						'data-seen': movie.imdb
					})
				)
			);
			// Card itself isn't clickable; only the explicit actions are
			m.data_container.removeEvents('click');

			// Add rating (only when IMDB rating data is present and positive)
			m.info_container.adopt(
				m.rating = m.info.rating && m.info.rating.imdb.length == 2 && parseFloat(m.info.rating.imdb[0]) > 0 ? new Element('span.rating', {
					'text': parseFloat(m.info.rating.imdb[0]),
					'title': parseInt(m.info.rating.imdb[1]) + ' votes'
				}) : null,
				m.genre = m.info.genres && m.info.genres.length > 0 ? new Element('span.genres', {
					'text': m.info.genres.slice(0, 3).join(', ')
				}) : null
			)

			$(m).inject(self.el);

		});

		self.fireEvent('loaded');

	},

	// After a movie is added, fade the card out and fetch a replacement
	afterAdded: function(m, movie){
		var self = this;

		setTimeout(function(){
			$(m).destroy();
			Api.request('suggestion.ignore', {
				'data': {
					'imdb': movie.imdb,
					'remove_only': true
				},
				'onComplete': self.fill.bind(self)
			});
		}, 3000);
	},

	toElement: function(){
		return this.el;
	}

})

View File

@@ -14,25 +14,25 @@
padding: 20px;
}
.page.userscript .movie_result {
.page.userscript .media_result {
height: 140px;
}
.page.userscript .movie_result .thumbnail {
.page.userscript .media_result .thumbnail {
width: 90px;
}
.page.userscript .movie_result .options {
.page.userscript .media_result .options {
left: 90px;
padding: 54px 15px;
}
.page.userscript .movie_result .year {
.page.userscript .media_result .year {
display: none;
}
.page.userscript .movie_result .options select[name="title"] {
.page.userscript .media_result .options select[name="title"] {
width: 190px;
}
.page.userscript .movie_result .options select[name="profile"] {
.page.userscript .media_result .options select[name="profile"] {
width: 70px;
}

View File

@@ -34,7 +34,7 @@ Page.Userscript = new Class({
if(json.error)
self.frame.set('html', json.error);
else {
var item = new Block.Search.Item(json.movie);
var item = new Block.Search.MovieItem(json.movie);
self.frame.adopt(item);
item.showOptions();
}
@@ -96,7 +96,7 @@ var UserscriptSettingTab = new Class({
})
)
).setStyles({
'background-image': "url('"+Api.createUrl('static/userscript/userscript.png')+"')"
'background-image': "url('"+App.createUrl('static/plugin/userscript/userscript.png')+"')"
});
});

View File

@@ -24,9 +24,10 @@ Page.Wizard = new Class({
'title': 'What download apps are you using?',
'description': 'CP needs an external download app to work with. Choose one below. For more downloaders check settings after you have filled in the wizard. If your download app isn\'t in the list, use the default Blackhole.'
},
'providers': {
'searcher': {
'label': 'Providers',
'title': 'Are you registered at any of these sites?',
'description': 'CP uses these sites to search for movies. A few free are enabled by default, but it\'s always better to have a few more. Check settings for the full list of available providers.'
'description': 'CP uses these sites to search for movies. A few free are enabled by default, but it\'s always better to have more.'
},
'renamer': {
'title': 'Move & rename the movies after downloading?',
@@ -38,7 +39,7 @@ Page.Wizard = new Class({
'<br />Once installed, just click the bookmarklet on a movie page and watch the magic happen ;)',
'content': function(){
return App.createUserscriptButtons().setStyles({
'background-image': "url('"+Api.createUrl('static/userscript/userscript.png')+"')"
'background-image': "url('"+App.createUrl('static/plugin/userscript/userscript.png')+"')"
})
}
},
@@ -76,7 +77,7 @@ Page.Wizard = new Class({
)
}
},
groups: ['welcome', 'general', 'downloaders', 'searcher', 'providers', 'renamer', 'automation', 'finish'],
groups: ['welcome', 'general', 'downloaders', 'searcher', 'renamer', 'automation', 'finish'],
open: function(action, params){
var self = this;
@@ -195,8 +196,7 @@ Page.Wizard = new Class({
self.el.getElement('.advanced_toggle').destroy();
// Hide retention
self.el.getElement('.tab_searcher').hide();
self.el.getElement('.t_searcher').hide();
self.el.getElement('.section_nzb').hide();
// Add pointer
new Element('.tab_wrapper').wraps(tabs);

View File

@@ -1,4 +1,4 @@
config = {
config = [{
'name': 'automation_providers',
'groups': [
{
@@ -18,4 +18,4 @@ config = {
'options': [],
},
],
}
}]

View File

@@ -18,6 +18,13 @@ config = [{
'default': False,
'type': 'enabler',
},
{
'name': 'backlog',
'advanced': True,
'description': 'Parses the history until the minimum movie year is reached. (Will be disabled once it has completed)',
'default': False,
'type': 'bool',
},
],
},
],

View File

@@ -1,3 +1,4 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
@@ -10,11 +11,49 @@ class Bluray(Automation, RSS):
interval = 1800
rss_url = 'http://www.blu-ray.com/rss/newreleasesfeed.xml'
backlog_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases&page=%s'
def getIMDBids(self):
movies = []
if self.conf('backlog'):
page = 0
while True:
page = page + 1
url = self.backlog_url % page
data = self.getHTMLData(url)
soup = BeautifulSoup(data)
try:
# Stop if the release year is before the minimal year
page_year = soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].h3.get_text().split(', ')[1]
if tryInt(page_year) < self.getMinimal('year'):
break
for table in soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].find_all('table')[1:20]:
name = table.h3.get_text().lower().split('blu-ray')[0].strip()
year = table.small.get_text().split('|')[1].strip()
if not name.find('/') == -1: # make sure it is not a double movie release
continue
if tryInt(year) < self.getMinimal('year'):
continue
imdb = self.search(name, year)
if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
except:
log.debug('Error loading page: %s', page)
break
self.conf('backlog', value = False)
rss_movies = self.getRSSData(self.rss_url)
for movie in rss_movies:

View File

@@ -0,0 +1,34 @@
from .main import Flixster
def start():
    """Plugin loader hook: build and return the Flixster automation provider."""
    return Flixster()

# Settings UI definition: one group on the Automation tab, listed under
# the watchlist providers, with an enabler plus a combined control that
# pairs each Flixster user id with its own on/off toggle.
config = [{
    'name': 'flixster',
    'groups': [
        {
            'tab': 'automation',
            'list': 'watchlist_providers',
            'name': 'flixster_automation',
            'label': 'Flixster',
            'description': 'Import movies from any public <a href="http://www.flixster.com/">Flixster</a> watchlist',
            'options': [
                {
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
                {
                    'name': 'automation_ids_use',
                    'label': 'Use',
                },
                {
                    'name': 'automation_ids',
                    'label': 'User ID',
                    'type': 'combined',
                    'combine': ['automation_ids_use', 'automation_ids'],
                },
            ],
        },
    ],
}]

View File

@@ -0,0 +1,47 @@
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
log = CPLog(__name__)
class Flixster(Automation):
    """Automation provider that imports movies from public Flixster
    "want to see" watchlists of the configured user ids."""

    # Per-user endpoint returning want-to-see (wts) ratings as JSON
    url = 'http://www.flixster.com/api/users/%s/movies/ratings?scoreTypes=wts'

    interval = 60

    def getIMDBids(self):
        """Return IMDB ids for every watchlist movie that could be matched.

        :return: list of imdb id strings; empty when no user ids are
                 configured or nothing could be resolved.
        """

        ids = splitString(self.conf('automation_ids'))

        if len(ids) == 0:
            return []

        movies = []

        for movie in self.getWatchlist():
            imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True)
            # search() may fail to find a match; don't pollute the result
            # list with empty entries (the original appended None here).
            if imdb_id:
                movies.append(imdb_id)

        return movies

    def getWatchlist(self):
        """Fetch {'title', 'year'} dicts for all enabled configured users."""

        enablers = [tryInt(x) for x in splitString(self.conf('automation_ids_use'))]
        ids = splitString(self.conf('automation_ids'))

        movies = []

        # zip keeps the id list aligned with its enable flags and avoids
        # the IndexError the original manual index raised whenever the two
        # comma-separated settings differ in length.
        for user_id, enabled in zip(ids, enablers):

            if not enabled:
                continue

            data = self.getJsonData(self.url % user_id, decode_from = 'iso-8859-1')

            for movie in data:
                movies.append({
                    'title': movie['movie']['title'],
                    'year': movie['movie']['year'],
                })

        return movies

View File

@@ -55,7 +55,14 @@ config = [{
'label': 'TOP 250',
'description': 'IMDB <a href="http://www.imdb.com/chart/top/">TOP 250</a> chart',
'default': True,
},
},
{
'name': 'automation_charts_boxoffice',
'type': 'bool',
'label': 'Box offce TOP 10',
'description': 'IMDB Box office <a href="http://www.imdb.com/chart/">TOP 10</a> chart',
'default': True,
},
],
},
],

View File

@@ -70,8 +70,11 @@ class IMDBAutomation(IMDBBase):
chart_urls = {
'theater': 'http://www.imdb.com/movies-in-theaters/',
'top250': 'http://www.imdb.com/chart/top',
'boxoffice': 'http://www.imdb.com/chart/',
}
first_table = ['boxoffice']
def getIMDBids(self):
movies = []
@@ -84,6 +87,14 @@ class IMDBAutomation(IMDBBase):
try:
result_div = html.find('div', attrs = {'id': 'main'})
try:
if url in self.first_table:
table = result_div.find('table')
result_div = table if table else result_div
except:
pass
imdb_ids = getImdb(str(result_div), multiple = True)
for imdb_id in imdb_ids:

View File

@@ -16,9 +16,6 @@ class ITunes(Automation, RSS):
def getIMDBids(self):
if self.isDisabled():
return
movies = []
enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]

View File

@@ -15,7 +15,6 @@ import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
class MultiProvider(Plugin):
def __init__(self):
@@ -63,13 +62,17 @@ class Provider(Plugin):
return self.is_available.get(host, False)
def getJsonData(self, url, **kwargs):
def getJsonData(self, url, decode_from = None, **kwargs):
cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {})))
data = self.getCache(cache_key, url, **kwargs)
if data:
try:
data = data.strip()
if decode_from:
data = data.decode(decode_from)
return json.loads(data)
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
@@ -251,7 +254,10 @@ class YarrProvider(Provider):
if identifier in qualities:
return ids
return [self.cat_backup_id]
if self.cat_backup_id:
return [self.cat_backup_id]
return []
class ResultList(list):
@@ -279,13 +285,23 @@ class ResultList(list):
new_result = self.fillResult(result)
is_correct_movie = fireEvent('movie.searcher.correct_movie',
nzb = new_result, movie = self.movie, quality = self.quality,
is_correct = fireEvent('searcher.correct_release', new_result, self.movie, self.quality,
imdb_results = self.kwargs.get('imdb_results', False), single = True)
if is_correct_movie and new_result['id'] not in self.result_ids:
if is_correct and new_result['id'] not in self.result_ids:
is_correct_weight = float(is_correct)
new_result['score'] += fireEvent('score.calculate', new_result, self.movie, single = True)
old_score = new_result['score']
new_result['score'] = int(old_score * is_correct_weight)
log.info('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
is_correct_weight,
old_score,
new_result['score']
))
self.found(new_result)
self.result_ids.append(result['id'])

View File

@@ -32,9 +32,26 @@ class MovieResultModifier(Plugin):
}
def __init__(self):
addEvent('result.modify.info.search', self.returnByType)
addEvent('result.modify.movie.search', self.combineOnIMDB)
addEvent('result.modify.movie.info', self.checkLibrary)
def returnByType(self, results):
new_results = {}
for r in results:
type_name = r.get('type', 'movie') + 's'
if not new_results.has_key(type_name):
new_results[type_name] = []
new_results[type_name].append(r)
# Combine movies, needs a cleaner way..
if new_results.has_key('movies'):
new_results['movies'] = self.combineOnIMDB(new_results['movies'])
return new_results
def combineOnIMDB(self, results):
temp = {}

View File

@@ -3,6 +3,7 @@ from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.info.base import MovieProvider
from couchpotato.environment import Env
import base64
import time
log = CPLog(__name__)
@@ -11,6 +12,7 @@ log = CPLog(__name__)
class CouchPotatoApi(MovieProvider):
urls = {
'validate': 'https://api.couchpota.to/validate/%s/',
'search': 'https://api.couchpota.to/search/%s/',
'info': 'https://api.couchpota.to/info/%s/',
'is_movie': 'https://api.couchpota.to/ismovie/%s/',
@@ -24,11 +26,14 @@ class CouchPotatoApi(MovieProvider):
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 1)
addEvent('info.search', self.search, priority = 1)
addEvent('movie.search', self.search, priority = 1)
addEvent('movie.release_date', self.getReleaseDate)
addEvent('movie.suggest', self.getSuggestions)
addEvent('movie.is_movie', self.isMovie)
addEvent('release.validate', self.validate)
addEvent('cp.source_url', self.getSourceUrl)
addEvent('cp.messages', self.getMessages)
@@ -50,6 +55,14 @@ class CouchPotatoApi(MovieProvider):
def search(self, q, limit = 5):
return self.getJsonData(self.urls['search'] % tryUrlencode(q) + ('?limit=%s' % limit), headers = self.getRequestHeaders())
def validate(self, name = None):
if not name:
return
name_enc = base64.b64encode(name)
return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders())
def isMovie(self, identifier = None):
if not identifier:

View File

@@ -20,6 +20,7 @@ class OMDBAPI(MovieProvider):
http_time_between_calls = 0
def __init__(self):
addEvent('info.search', self.search)
addEvent('movie.search', self.search)
addEvent('movie.info', self.getInfo)
@@ -84,6 +85,7 @@ class OMDBAPI(MovieProvider):
year = tryInt(movie.get('Year', ''))
movie_data = {
'type': 'movie',
'via_imdb': True,
'titles': [movie.get('Title')] if movie.get('Title') else [],
'original_title': movie.get('Title'),

View File

@@ -11,6 +11,7 @@ log = CPLog(__name__)
class TheMovieDb(MovieProvider):
def __init__(self):
addEvent('info.search', self.search, priority = 2)
addEvent('movie.search', self.search, priority = 2)
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info_by_tmdb', self.getInfo)
@@ -103,6 +104,7 @@ class TheMovieDb(MovieProvider):
year = None
movie_data = {
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.id,
'titles': [toUnicode(movie.title)],
@@ -119,6 +121,7 @@ class TheMovieDb(MovieProvider):
'year': year,
'plot': movie.overview,
'genres': genres,
'collection': getattr(movie.collection, 'name', None),
}
movie_data = dict((k, v) for k, v in movie_data.iteritems() if v)

View File

@@ -104,6 +104,13 @@ class XBMC(MetaDataBase):
writers = SubElement(nfoxml, 'credits')
writers.text = toUnicode(writer)
# Sets or collections
collection_name = movie_info.get('collection')
if collection_name:
collection = SubElement(nfoxml, 'set')
collection.text = toUnicode(collection_name)
sorttitle = SubElement(nfoxml, 'sorttitle')
sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year'))
# Clean up the xml and return it
nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))

View File

@@ -1,4 +1,4 @@
config = {
config = [{
'name': 'nzb_providers',
'groups': [
{
@@ -11,4 +11,4 @@ config = {
'options': [],
},
],
}
}]

View File

@@ -56,12 +56,16 @@ class BinSearch(NZBProvider):
info = row.find('span', attrs = {'class':'d'})
size_match = re.search('size:.(?P<size>[0-9\.]+.[GMB]+)', info.text)
age = 0
try: age = re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]
except: pass
def extra_check(item):
parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
total = tryInt(parts.group('total'))
parts = tryInt(parts.group('parts'))
if (total / parts) < 0.95 or ((total / parts) >= 0.95 and not 'par2' in info.text.lower()):
if (total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower())):
log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
return False
@@ -74,7 +78,7 @@ class BinSearch(NZBProvider):
results.append({
'id': nzb_id,
'name': title.text,
'age': tryInt(re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]),
'age': tryInt(age),
'size': self.parseSize(size_match.group('size')),
'url': self.urls['download'] % nzb_id,
'detail_url': self.urls['detail'] % info.find('a')['href'],

View File

@@ -1,40 +0,0 @@
from .main import FTDWorld
def start():
    """Plugin loader hook: build and return the FTDWorld NZB provider."""
    return FTDWorld()

# Settings UI definition: credentials plus an extra-score knob, listed
# under the NZB providers on the Searcher tab.
config = [{
    'name': 'ftdworld',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'nzb_providers',
            'name': 'FTDWorld',
            'description': 'Free provider, less accurate. See <a href="http://ftdworld.net">FTDWorld</a>',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]

View File

@@ -1,83 +0,0 @@
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
import json
import traceback
log = CPLog(__name__)
class FTDWorld(NZBProvider):
    """NZB provider querying the ftdworld.net JSON API (login required)."""

    # Endpoint templates; login and login_check share the same URL.
    urls = {
        'search': 'http://ftdworld.net/api/index.php?%s',
        'detail': 'http://ftdworld.net/spotinfo.php?id=%s',
        'download': 'http://ftdworld.net/cgi-bin/nzbdown.pl?fileID=%s',
        'login': 'http://ftdworld.net/api/login.php',
        'login_check': 'http://ftdworld.net/api/login.php',
    }

    http_time_between_calls = 3 #seconds

    # Site category ids mapped to CouchPotato quality identifiers
    cat_ids = [
        ([4, 11], ['dvdr']),
        ([1], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
        ([7, 10, 13, 14], ['bd50', '720p', '1080p']),
    ]
    # Fallback category when the quality has no mapping above
    cat_backup_id = 1

    def _searchOnTitle(self, title, movie, quality, results):
        """Query the API for *title* and append result dicts to *results*."""

        # Exact-phrase title search, scoped to the movie's year
        q = '"%s" %s' % (title, movie['library']['year'])

        params = tryUrlencode({
            'ctitle': q,
            'customQuery': 'usr',
            'cage': Env.setting('retention', 'nzb'),
            'csizemin': quality.get('size_min'),
            'csizemax': quality.get('size_max'),
            'ccategory': 14,
            'ctype': ','.join([str(x) for x in self.getCatId(quality['identifier'])]),
        })

        data = self.getJsonData(self.urls['search'] % params, opener = self.login_opener)

        if data:
            try:

                if data.get('numRes') == 0:
                    return

                for item in data.get('data'):

                    nzb_id = tryInt(item.get('id'))
                    results.append({
                        'id': nzb_id,
                        'name': toUnicode(item.get('Title')),
                        'age': self.calculateAge(tryInt(item.get('Created'))),
                        'size': item.get('Size', 0),
                        'url': self.urls['download'] % nzb_id,
                        'detail_url': self.urls['detail'] % nzb_id,
                        # Community voting: up-votes minus down-votes, weighted x3
                        'score': (tryInt(item.get('webPlus', 0)) - tryInt(item.get('webMin', 0))) * 3,
                    })

            except:
                log.error('Failed to parse HTML response from FTDWorld: %s', traceback.format_exc())

    def getLoginParams(self):
        """URL-encoded POST body for the login form."""
        return tryUrlencode({
            'userlogin': self.conf('username'),
            'passlogin': self.conf('password'),
            'submit': 'Log In',
        })

    def loginSuccess(self, output):
        """Login succeeded when the API response reports goodToGo."""
        try:
            return json.loads(output).get('goodToGo', False)
        except:
            # Non-JSON response means the login failed
            return False

    loginCheckSuccess = loginSuccess

View File

@@ -20,6 +20,7 @@ config = [{
{
'name': 'enabled',
'type': 'enabler',
'default': True,
},
{
'name': 'use',

View File

@@ -1,4 +1,4 @@
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt
from couchpotato.core.logger import CPLog
@@ -83,7 +83,7 @@ class Newznab(NZBProvider, RSS):
results.append({
'id': nzb_id,
'provider_extra': urlparse(host['host']).hostname or host['host'],
'name': name,
'name': toUnicode(name),
'name_extra': name_extra,
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,

View File

@@ -14,7 +14,8 @@ log = CPLog(__name__)
class OMGWTFNZBs(NZBProvider, RSS):
urls = {
'search': 'http://rss.omgwtfnzbs.org/rss-search.php?%s',
'search': 'https://rss.omgwtfnzbs.org/rss-search.php?%s',
'detail_url': 'https://omgwtfnzbs.org/details.php?id=%s',
}
http_time_between_calls = 1 #seconds
@@ -49,13 +50,14 @@ class OMGWTFNZBs(NZBProvider, RSS):
for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0]
results.append({
'id': parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0],
'id': nzb_id,
'name': toUnicode(self.getTextElement(nzb, 'title')),
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'],
'detail_url': self.getTextElement(nzb, 'link'),
'detail_url': self.urls['detail_url'] % nzb_id,
'description': self.getTextElement(nzb, 'description')
})

View File

@@ -1,4 +1,4 @@
config = {
config = [{
'name': 'torrent_providers',
'groups': [
{
@@ -11,4 +11,4 @@ config = {
'options': [],
},
],
}
}]

View File

@@ -1,6 +1,8 @@
from couchpotato.core.helpers.variable import getImdb, md5
from couchpotato.core.helpers.variable import getImdb, md5, cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import YarrProvider
from couchpotato.environment import Env
import time
log = CPLog(__name__)
@@ -9,6 +11,9 @@ class TorrentProvider(YarrProvider):
protocol = 'torrent'
proxy_domain = None
proxy_list = []
def imdbMatch(self, url, imdbId):
if getImdb(url) == imdbId:
return True
@@ -25,6 +30,42 @@ class TorrentProvider(YarrProvider):
return False
def getDomain(self, url = ''):
forced_domain = self.conf('domain')
if forced_domain:
return cleanHost(forced_domain).rstrip('/') + url
if not self.proxy_domain:
for proxy in self.proxy_list:
prop_name = 'proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
if last_check > time.time() - 1209600:
continue
data = ''
try:
data = self.urlopen(proxy, timeout = 3, show_error = False)
except:
log.debug('Failed %s proxy %s', (self.getName(), proxy))
if self.correctProxy(data):
log.debug('Using proxy for %s: %s', (self.getName(), proxy))
self.proxy_domain = proxy
break
Env.prop(prop_name, time.time())
if not self.proxy_domain:
log.error('No %s proxies left, please add one in settings, or let us know which one to add on the forum.', self.getName())
return None
return cleanHost(self.proxy_domain).rstrip('/') + url
def correctProxy(self):
return True
class TorrentMagnetProvider(TorrentProvider):
protocol = 'torrent_magnet'

View File

@@ -1,16 +1,16 @@
from .main import SceneHD
from .main import BiTHDTV
def start():
return SceneHD()
return BiTHDTV()
config = [{
'name': 'scenehd',
'name': 'bithdtv',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'SceneHD',
'description': 'See <a href="https://scenehd.org">SceneHD</a>',
'name': 'BiT-HDTV',
'description': 'See <a href="http://bit-hdtv.com">BiT-HDTV</a>',
'wizard': True,
'options': [
{
@@ -46,7 +46,7 @@ config = [{
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'default': 20,
'description': 'Starting score for each release found via this provider.',
}
],

View File

@@ -0,0 +1,88 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
log = CPLog(__name__)
class BiTHDTV(TorrentProvider):
    """Torrent provider scraping bit-hdtv.com search result pages."""

    urls = {
        'test' : 'http://www.bit-hdtv.com/',
        'login' : 'http://www.bit-hdtv.com/takelogin.php',
        'login_check': 'http://www.bit-hdtv.com/messages.php',
        'detail' : 'http://www.bit-hdtv.com/details.php?id=%s',
        'search' : 'http://www.bit-hdtv.com/torrents.php?',
    }

    # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
    cat_id_movies = 7

    http_time_between_calls = 1 #seconds

    def _searchOnTitle(self, title, movie, quality, results):
        """Scrape the search page for *title* and append release dicts to *results*."""

        arguments = tryUrlencode({
            'search': '%s %s' % (title.replace(':', ''), movie['library']['year']),
            'cat': self.cat_id_movies
        })
        url = "%s&%s" % (self.urls['search'], arguments)

        data = self.getHTMLData(url, opener = self.login_opener)

        if data:
            # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
            split_data = data.partition('-->')
            if '## SELECT COUNT(' in split_data[0]:
                data = split_data[2]

            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'width' : '750', 'class' : ''})
                if result_table is None:
                    return

                # First row is the header; every following row is a release
                entries = result_table.find_all('tr')
                for result in entries[1:]:

                    cells = result.find_all('td')
                    link = cells[2].find('a')
                    torrent_id = link['href'].replace('/details.php?id=', '')

                    results.append({
                        'id': torrent_id,
                        'name': link.contents[0].get_text(),
                        'url': cells[0].find('a')['href'],
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(cells[6].get_text()),
                        'seeders': tryInt(cells[8].string),
                        'leechers': tryInt(cells[9].string),
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """URL-encoded credentials for the takelogin.php POST."""
        return tryUrlencode({
            'username': self.conf('username'),
            'password': self.conf('password'),
        })

    def getMoreInfo(self, item):
        """Attach the (long-cached) detail-page description to *item*."""
        full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        html = BeautifulSoup(full_description)
        nfo_pre = html.find('table', attrs = {'class':'detail'})
        description = toUnicode(nfo_pre.text) if nfo_pre else ''

        item['description'] = description
        return item

    def loginSuccess(self, output):
        """Logged in when the returned page offers a logout link."""
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess

View File

@@ -0,0 +1,60 @@
from main import ILoveTorrents
def start():
    """Plugin loader hook: build and return the ILoveTorrents provider."""
    return ILoveTorrents()

# Settings UI definition: credentials, seed requirements and extra-score
# knob, listed under the torrent providers on the Searcher tab.
config = [{
    'name': 'ilovetorrents',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'ILoveTorrents',
            'description': 'Where the Love of Torrents is Born',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False
                },
                {
                    'name': 'username',
                    'label': 'Username',
                    'type': 'string',
                    'default': '',
                    'description': 'The user name for your ILT account',
                },
                {
                    'name': 'password',
                    'label': 'Password',
                    'type': 'password',
                    'default': '',
                    'description': 'The password for your ILT account.',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        }
    ]
}]

View File

@@ -0,0 +1,128 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import re
import traceback
log = CPLog(__name__)
class ILoveTorrents(TorrentProvider):
    """Torrent provider scraping ilovetorrents.me browse pages.

    Logs in with the configured account, walks every result page for a
    '"<title>" <year>' search and appends release dicts to the shared
    results list.
    """

    urls = {
        'download': 'http://www.ilovetorrents.me/%s',
        'detail': 'http://www.ilovetorrents.me/%s',
        'search': 'http://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s',
        'test' : 'http://www.ilovetorrents.me/',
        'login' : 'http://www.ilovetorrents.me/takelogin.php',
        'login_check' : 'http://www.ilovetorrents.me'
    }

    # Site category ids mapped to CouchPotato quality identifiers
    cat_ids = [
        (['41'], ['720p', '1080p', 'brrip']),
        (['19'], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
        (['20'], ['dvdr'])
    ]
    cat_backup_id = 200

    disable_provider = False
    http_time_between_calls = 1

    def _searchOnTitle(self, title, movie, quality, results):
        """Scrape all result pages for *title* and collect releases."""

        page = 0
        total_pages = 1
        # NOTE(review): only the first matching category id is used below
        cats = self.getCatId(quality['identifier'])

        while page < total_pages:

            search_query = tryUrlencode('"%s" %s' % (title, movie['library']['year']))
            search_url = self.urls['search'] % (search_query, page, cats[0])
            page += 1

            data = self.getHTMLData(search_url, opener = self.login_opener)
            if not data:
                continue

            try:
                soup = BeautifulSoup(data)
                results_table = soup.find('table', attrs = {'class': 'koptekst'})
                if not results_table:
                    return

                # Read the real page count from the pager links. \d+ (the
                # original used a greedy .+) keeps int() from raising when
                # 'page=' is not the last query parameter of the href.
                try:
                    page_links = soup.findAll(href = re.compile('page'))
                    page_numbers = [int(re.search(r'page=(?P<pageNumber>\d+)', page_link['href']).group('pageNumber')) for page_link in page_links]
                    total_pages = max(page_numbers)
                except:
                    pass

                # First row is the table header; the rest are releases
                entries = results_table.find_all('tr')
                for result in entries[1:]:

                    prelink = result.find(href = re.compile('details.php'))
                    link = prelink['href']
                    download = result.find('a', href = re.compile('download.php'))['href']

                    if link and download:

                        # Bind 'result' as a default argument: without it every
                        # stored closure would late-bind to the final loop row
                        # and all releases would get the last row's badges.
                        def extra_score(item, result = result):
                            """Bonus points for uploader badges on this row."""
                            trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
                            vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
                            confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
                            moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]

                            return confirmed + trusted + vip + moderated

                        # 'torrent_id' instead of shadowing the builtin 'id'
                        torrent_id = re.search(r'id=(?P<id>\d+)&', link).group('id')
                        download_url = self.urls['download'] % (download)
                        file_size = self.parseSize(result.select('td.rowhead')[5].text)

                        results.append({
                            'id': torrent_id,
                            'name': toUnicode(prelink.find('b').text),
                            'url': download_url,
                            'detail_url': self.urls['detail'] % link,
                            'size': file_size,
                            'seeders': tryInt(result.find_all('td')[2].string),
                            'leechers': tryInt(result.find_all('td')[3].string),
                            'extra_score': extra_score,
                            'get_more_info': self.getMoreInfo
                        })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """URL-encoded POST body for the takelogin.php form."""
        return tryUrlencode({
            'username': self.conf('username'),
            'password': self.conf('password'),
            'submit': 'Welcome to ILT',
        })

    def getMoreInfo(self, item):
        """Attach the (cached) detail-page description to *item*."""
        cache_key = 'ilt.%s' % item['id']
        description = self.getCache(cache_key)

        if not description:
            try:
                full_description = self.getHTMLData(item['detail_url'], opener = self.login_opener)
                html = BeautifulSoup(full_description)
                nfo_pre = html.find('td', attrs = {'class':'main'}).findAll('table')[1]
                description = toUnicode(nfo_pre.text) if nfo_pre else ''
            except:
                log.error('Failed getting more info for %s', item['name'])
                description = ''

            self.setCache(cache_key, description, timeout = 25920000)

        item['description'] = description
        return item

    def loginSuccess(self, output):
        """Logged in when the returned page offers a logout link."""
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess

Some files were not shown because too many files have changed in this diff Show More