Compare commits

..

204 Commits

Author SHA1 Message Date
Ruud Burger 9a01b274ba Merge pull request #4715 from BillNasty/develop
Update torrentshack URL
2015-03-16 20:08:00 +01:00
Ruud f7ed835ba5 Change minimum movie size
fix #4730
2015-03-09 16:36:09 +01:00
Ruud 8af49b9508 Yify not return data
fix #4725
2015-03-09 16:31:22 +01:00
Ruud 998049a39d Python 2.6 parseerror
fix #4701
2015-03-09 16:07:05 +01:00
BillNasty 111bd1b07c Fix Description from old URL
Fix provider description from old url to new url
2015-03-04 14:09:03 -05:00
BillNasty a09c10aab6 Update torrentshack URL
Update from temporary url to new main url
2015-03-01 22:32:16 -05:00
Ruud adb744a526 Don't show double updater type 2015-02-22 17:42:29 +01:00
Ruud 0f82cda811 Remove podnapisi from subtile list 2015-02-22 16:09:22 +01:00
Ruud 0d6c3c8ecb Yify, only use data when available 2015-02-22 16:06:07 +01:00
Ruud 6598f53fd4 Quality check improve 2015-02-22 15:55:54 +01:00
Ruud 6b8458d87f Hadouken apikey check not using correct settingskey
fix #4674
2015-02-22 14:49:37 +01:00
Ruud 99a0621238 Use keep-alive connection 2015-02-22 14:30:50 +01:00
Ruud Burger c52666309a Merge pull request #4676 from peerster/develop
Update torrentshack with new URL
2015-02-22 13:39:21 +01:00
Ruud 84a458d40b Add user-agent and type to omdbapi 2015-02-22 13:06:29 +01:00
Ruud f8631c6d53 Add extra category for TorrentLeech
fix #4683
2015-02-21 21:29:37 +01:00
Ruud b19b0775c7 Force update to new poster on refresh
fix #4671
2015-02-20 22:16:12 +01:00
peerster 2dc1c1dd38 Update torrentshack with new URL 2015-02-19 20:07:22 +01:00
Ruud 7db8b233c8 Don't decode string if confidence isn't high enough 2015-02-18 17:21:24 +01:00
Ruud 427c77a9ef Remove podnapisi 2015-02-15 19:23:45 +01:00
Ruud 94c3969f10 Use https for yify proxy 2015-02-10 20:52:15 +01:00
Ruud debd1855dd Move Yify to v2 2015-02-10 20:47:19 +01:00
Ruud 9f77597c11 Torrentz search on title
fix #4510
2015-02-10 17:15:53 +01:00
Ruud afc9039625 Also search lower qualities on OMGWTF
fix #4527
2015-02-10 16:50:53 +01:00
Ruud 920d3cb44e Don't verify SYNO downloader thingymajig
fix #4641
2015-02-10 16:27:13 +01:00
Ruud b1fc8ad862 Letterboxed new html markup
fix #4640
2015-02-10 16:21:32 +01:00
Ruud 11b9bc39ab Show tried to often error for TD 2015-02-10 15:40:55 +01:00
Ruud 6dcb3f3bf2 Change bitsoup category id
fixes #4629
2015-02-10 14:55:22 +01:00
Ruud ce768f45c5 Make RottenTomato logging more clear
close #4618
2015-02-10 14:36:54 +01:00
Ruud 9b91d1d6c0 Remove favor, link to api key page 2015-02-10 14:10:55 +01:00
Ruud d9c7a97604 Merge branch 'develop' of git://github.com/jonnyboy/CouchPotatoServer into jonnyboy-develop 2015-02-10 14:03:06 +01:00
Ruud 0fd01aa697 Cleanup 2015-02-10 14:01:51 +01:00
Ruud 58615e6f9b Merge branch 'develop' of git://github.com/grasshide/CouchPotatoServer into grasshide-develop 2015-02-10 13:54:13 +01:00
Ruud 2277322e57 Traceback import missing 2015-02-10 13:47:22 +01:00
Ruud Burger 18020e609e Merge pull request #4479 from sjlu/develop
Adding the ability to receive notifications through Webhooks
2015-02-10 13:19:59 +01:00
Ruud 6a31b920ac Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2015-02-10 13:15:43 +01:00
Ruud c1266a36e4 Re-use resursion code 2015-02-10 13:15:08 +01:00
Ruud 578effc538 Merge branch 'develop' of git://github.com/dumaresq/CouchPotatoServer into dumaresq-develop 2015-02-10 13:09:12 +01:00
Ruud Burger d881120013 Merge pull request #4513 from starkers/remotes/origin/develop
added touch and chown to the $PID_FILE
2015-02-10 13:07:25 +01:00
Ruud Burger da5318033a Merge pull request #4380 from mannkind/develop
Initial support for Plex Media Server w/Plex Home
2015-02-10 13:04:27 +01:00
Ruud Burger 31df5bce01 Merge pull request #4612 from maikhorma/maikhorma-#2782
Simple workaround for #2782
2015-02-10 13:02:24 +01:00
Ruud d5622b7cba Remove www from torrentday domain 2015-02-10 13:01:19 +01:00
Ruud Burger 26ad1b354f Merge pull request #4552 from coolius/patch-1
Update torrentday url
2015-02-10 12:53:52 +01:00
Ruud 7a616a81f7 Remove www from iptorrents 2015-02-10 12:52:05 +01:00
Ruud Burger 275aefc3cc Merge pull request #4553 from coolius/patch-2
Update iptorrents url
2015-02-10 12:51:02 +01:00
Ruud Burger 2b32490f72 Merge pull request #4649 from sammy2142/patch-1
Update kickass url from kickass.so to kickass.to
2015-02-10 12:49:16 +01:00
sammy2142 7b9043c16b Update kickass url from kickass.so to kickass.to
Kickass has reverted back to the .to domain as the .so domain was seized:
http://torrentfreak.com/kickasstorrents-taken-domain-name-seizure-150209/
2015-02-10 11:11:30 +00:00
maikhorma cf83f99be0 Updated UI
Tried to make it a bit cleaner.
2015-02-01 15:28:05 -05:00
maikhorma fb8a66d207 Shortcut to address #2782
Until there is a more elegant solution to avoid unwanted white space
trimming, this will let users disable that feature if it is not
something they need.
2015-02-01 14:43:16 -05:00
Ruud e8a3645bc6 Log failed folder getting 2015-02-01 12:18:31 +01:00
Ruud 592e40993c Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2015-01-31 10:32:24 +01:00
Ruud b00e69e222 TorrentBytes cut of longer titles
fix #4590
2015-01-31 10:32:15 +01:00
Ruud c9b4c8167f Actual include host in log 2015-01-28 11:35:26 +01:00
coolius cdb9cfe756 Update iptorrents.py
Updated iptorrents url to blockade-free iptorrents.eu
2015-01-19 17:18:56 +00:00
coolius e52f50b204 Update torrentday.py
Updated torrentday url to blockade-free torrentday.eu
2015-01-19 17:17:31 +00:00
Ruud 770c2be14c Create detail url if permalink is false 2015-01-17 13:04:47 +01:00
Ruud ab61961a64 Use detail url 2015-01-14 16:59:29 +01:00
Ruud 6aca799bbb Newznab: use guid for detail url 2015-01-14 16:55:30 +01:00
David Stark 89836be1d1 added touch and chown to the $PID_FILE 2015-01-12 17:37:26 +01:00
Andrew Dumaresq 20e1283627 better way to find the folder 2015-01-11 11:57:14 -05:00
Andrew Dumaresq ee8406e026 Minor text change 2015-01-11 11:45:29 -05:00
Andrew Dumaresq 514941b785 Merge branch 'develop' of https://github.com/dumaresq/CouchPotatoServer into develop 2015-01-11 11:42:52 -05:00
Ruud 1510e37652 Update Tornado 2015-01-11 16:18:22 +01:00
Ruud e1e39cd3f4 Update requests 2015-01-11 16:17:33 +01:00
Ruud e1bb8c5419 Update Chardet 2015-01-11 16:15:52 +01:00
Ruud 17fa33a496 Update user agent 2015-01-11 00:25:58 +01:00
Ruud 601f0b54cf Send CP header when downloading from newznab 2015-01-11 00:25:51 +01:00
dumaresq 51d44bfc3e Merge pull request #1 from RuudBurger/develop
Develop
2015-01-10 17:01:43 -05:00
Ruud 12148217a2 Log failed notification 2015-01-10 13:41:17 +01:00
Ruud 132fa12ef4 Late list not loaded on home 2015-01-10 12:17:47 +01:00
Ruud 1827c2e4cd Don't parse omgwtfnzb if no results are returned 2015-01-10 12:17:30 +01:00
Ruud f423bca06b Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2015-01-09 20:13:42 +01:00
Ruud e7b089edf5 Give better XML issues 2015-01-09 20:13:17 +01:00
Ruud Burger b8b7d94a6a Merge pull request #4456 from dumaresq/develop
Bug fixes and new features for putio
2015-01-09 20:08:30 +01:00
Ruud 2c080fec3d TorrentBytes nbsp issue
fix #4026
2015-01-08 16:56:38 +01:00
Ruud 4c68566c77 Use new OMGWTFNZB api
fix #4471
2015-01-08 14:59:53 +01:00
Steven Lu a3af784c18 Adding the ability to receive notifications through Webhooks 2015-01-06 18:47:19 -05:00
grasshide ac6f295c93 New algogithm to use some kind of crowd logic on newznab powered
providers.
2015-01-05 15:00:40 +01:00
Andrew Dumaresq 2c72cd7d9f Added new folder option and fixed but in callback url 2015-01-04 17:10:40 -05:00
Andrew Dumaresq d012dc5c85 Added new folder option 2015-01-04 17:10:16 -05:00
Andrew Dumaresq 038b4c63ee Updated to follow putio API changes 2015-01-04 17:09:36 -05:00
Ruud Burger 17e37996c4 Add remux category for TorrentShack
close #4427
2015-01-02 18:18:08 +01:00
jonnyboy 9318e19347 New torrent search provider hdaccess.net 2014-12-31 08:21:58 -05:00
Ruud 8f4e03d04b Use detected encoding
#4388
2014-12-27 13:46:25 +01:00
Ruud 229d67c086 Don't toUnicode loop 2014-12-22 22:01:47 +01:00
Dustin Brewer d84897ff33 Initial support for Plex Media Server w/Plex Home 2014-12-21 16:18:11 -08:00
Ruud 387a711538 TorrentBytes not encoding name
fix #4377
2014-12-21 21:14:38 +01:00
Ruud 7a1b914824 Return nonblock results in main thread 2014-12-21 20:19:53 +01:00
Ruud 5e62801666 Send data through finish not write 2014-12-21 20:19:30 +01:00
Ruud 00d887153f Return data in main thread 2014-12-21 19:39:16 +01:00
Ruud 6d5882001a Notificaton.list not returning anything
fix #4348
2014-12-20 22:32:18 +01:00
Ruud 4a6b45c65c SCC not finding seeders 2014-12-20 22:24:00 +01:00
Ruud b0d1fe5c33 Return false if no media is found on try_next
fix #4345
2014-12-20 22:17:43 +01:00
Ruud a6e49098c8 Add robots.txt 2014-12-20 22:15:27 +01:00
Ruud ffcd36cbf4 IOLoop callback hanging 2014-12-20 21:45:15 +01:00
Ruud 3bf2d844a0 Release api lock on connection close or finish
fix #4372
2014-12-20 20:13:49 +01:00
Ruud dd24eb8893 Revert "Give response back to the main thread on api calls"
This reverts commit 576bcb9f4b.

Conflicts:
	couchpotato/api.py
2014-12-20 18:49:35 +01:00
Ruud 538f51dd5b Log ipv6 failed bind 2014-12-19 14:16:20 +01:00
Ruud eea9f40501 Use current 2014-12-19 09:01:52 +01:00
Ruud 576bcb9f4b Give response back to the main thread on api calls
fix #4337
2014-12-19 08:57:24 +01:00
Ruud Burger 62c5365329 Merge pull request #4356 from rtaibah/ReaddTypoFix
Change Readd in tooltip to Re-add. Former is confusing and not an Englis...
2014-12-17 11:36:53 +01:00
Rami Taibah ddf575a86e Change Readd in tooltip to Re-add. Former is confusing and not an English word 2014-12-17 13:00:54 +03:00
Ruud Burger 6b9383ce92 Merge pull request #4342 from mano3m/develop_fixsize
Fix TorrentShack size
2014-12-16 07:52:08 +01:00
mano3m cb8d24ef1f Fix TorrentShack size 2014-12-15 22:26:29 +01:00
Ruud 814ddfb79f Don't return password fields
fix #4300
2014-12-14 12:33:28 +01:00
Ruud 766f819c0b Userscript for RT not parsing URL correctly 2014-12-14 12:06:03 +01:00
Ruud ff43df9ef1 Comments comments comments 2014-12-02 15:38:55 +01:00
Ruud 2e907e93e7 Whiteline 2014-12-02 12:02:49 +01:00
Ruud 4d329d6a36 Revert "Remove torrentleech"
This reverts commit dacc3d8f47.
2014-12-02 11:45:17 +01:00
Ruud 752191bc23 Comments 2014-12-02 11:43:10 +01:00
Ruud 1d73fd9d7e Import optimize 2014-12-02 11:15:29 +01:00
Ruud 79688c412a Merge branch 'develop' of git://github.com/hadouken/CouchPotatoServer into hadouken-develop 2014-12-02 11:07:54 +01:00
Ruud fc1c95fefb Description 2014-12-01 23:00:59 +01:00
Ruud 6a174716af underscored variables 2014-12-01 22:52:10 +01:00
Ruud defe256f1b Correct url 2014-12-01 16:52:43 +01:00
Ruud 8a5f154d9e Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-12-01 16:52:04 +01:00
Ruud fe56a69e8f Put.IO cleanup 2014-12-01 16:49:27 +01:00
Ruud c6d326f973 Move put.io API 2014-12-01 15:50:03 +01:00
Ruud 9e5f670feb Merge branch 'putio' of git://github.com/dumaresq/CouchPotatoServer into develop 2014-12-01 15:42:43 +01:00
Ruud Burger 9ebacf8816 Merge pull request #4258 from glibix/develop
Transmission status 16 is for "Stopped". So we need to detect a download...
2014-12-01 15:41:41 +01:00
Ruud df2d7ec9c2 Remove debug code 2014-12-01 15:39:33 +01:00
Ruud ddab74582b Merge branch 'develop' of git://github.com/psaab/CouchPotatoServer into psaab-develop 2014-12-01 15:28:11 +01:00
Ruud 2801079bc8 Merge branch 'develop_3845' of git://github.com/voidstarstar/CouchPotatoServer into voidstarstar-develop_3845 2014-12-01 15:14:26 +01:00
Ruud Burger 1deb49b524 Merge pull request #4261 from voidstarstar/develop_4211
Added renamer.progress API function. Fixes #4211.
2014-12-01 15:12:21 +01:00
Ruud Burger 49d550f652 Merge pull request #4270 from sammy2142/patch-1
Update Kickass url to https://kickass.so
2014-11-30 22:05:36 +01:00
sammy2142 1a43ce6ecc Update Kickass url to https://kickass.so
Kickass has recently changed its web address from https://kickass.to 
to https://kickass.so
2014-11-30 20:48:31 +00:00
voidstarstar 15a0131587 Added renamer.progress API function. Fixes #4211.
This function reports the status of the renamer.
Progress value True means the renamer is currently running.
Progress value False means the renamer is not currently running.
2014-11-27 21:51:30 -05:00
voidstarstar 0dca34958c Added a parameter to the renamer API. Fixes #3845.
The renamer now has a new 'to_folder' parameter.
This parameter specifies where movies are moved to.
2014-11-27 21:43:19 -05:00
Mathew Paret 4b231e36ea Merge branch 'feature/3967_add_imdb_link_to_tweet' into develop 2014-11-27 18:16:41 +05:30
Mathew Paret 52478a00db Revert "Feature #3967 - Added IMDB link to download complete tweet"
This reverts commit 87338760ad.
2014-11-27 18:13:41 +05:30
Mathew Paret e177766270 Merge branch 'feature/3967_add_imdb_link_to_tweet' into develop 2014-11-27 18:06:38 +05:30
Ruud Burger ff8da7c8f8 Merge pull request #4068 from ofir123/subscenter_support
Added support for subscenter.
2014-11-26 21:51:14 +01:00
Ruud Burger 89c8c5a0c7 Merge pull request #4203 from rkokkelk/develop
Fix startup script Debian/Ubuntu
2014-11-26 21:48:40 +01:00
Ruud 38c6266f9c Use single quotes 2014-11-26 21:47:39 +01:00
Ruud Burger 16f8e7e123 Merge pull request #4205 from kamillus/develop
adding a fix to handle missing directories in the file browser in webkit browsers
2014-11-26 21:46:10 +01:00
Ruud Burger 7110c7a11f Merge pull request #4249 from clinton-hall/patch-1
NZBGet 13 includes more status information
2014-11-25 07:55:39 +01:00
Clinton Hall 6d79f316a6 NZBGet 13 includes more status information
nzb['Status'] returns total (SUCESS/ALL) status and also failed status in V13+
This is particularly important when using fake detector scripts or stopping download due to health checks etc.
http://nzbget.net/RPC_API_reference#Method_.22history.22
https://couchpota.to/forum/viewtopic.php?f=5&t=4644
2014-11-25 11:02:47 +10:30
Paul Saab c1b6811b8a Tornado requires two sockets to support IPv6
Tornado sets setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
to force IPv6 sockets to only be used for IPv6 connections.  create a
separate socket to allow for CouchPotato to be used over IPv6.
2014-11-17 22:54:56 -08:00
Kamil 7d7b76b2e9 adding a fix to handle missing directories in the file browser in webkit browsers 2014-11-10 20:18:38 -05:00
Roy Kokkelkoren 657aa52fa7 Merge branch 'develop' of https://github.com/rkokkelk/CouchPotatoServer into develop 2014-11-10 15:40:41 +01:00
Roy Kokkelkoren 8e9ef8db39 Merge remote-tracking branch 'upstream/develop' into develop 2014-11-10 15:39:45 +01:00
test 92a0096b54 Merge remote-tracking branch 'upstream/develop' into develop 2014-11-10 08:29:27 -06:00
Mathew Paret 87338760ad Feature #3967 - Added IMDB link to download complete tweet 2014-11-10 18:47:37 +05:30
Mathew Paret 28019b0a09 Transmission status 16 is for "Stopped". So we need to detect a download as completed even if it is stopped but percent done is 100 2014-11-10 18:39:58 +05:30
Ruud Burger 248b007f4a Merge pull request #4094 from georgewhewell/hdbits-add-internal-only
add option for internal-only for hdbits provider
2014-11-09 14:39:50 +01:00
Ruud Burger 9e31c59de8 Merge pull request #4188 from DjSlash/patch-2
Update SSL protocol for Deluge connections
2014-11-09 14:37:23 +01:00
Ruud 269e785888 Yify, don't include quality in search
fix #4190
2014-11-09 14:30:22 +01:00
Ruud 3669aef42d is_movie param 2014-11-09 14:14:06 +01:00
Ruud 1087eb3a06 Add adding parameter to is_movie 2014-11-09 14:10:23 +01:00
Rutger van Sleen 43af80a137 Update SSL protocol for Deluge connections
Since Deluge 1.3.10 the SSL protocol is updated to TLSv1 instead of SSLv3. This resulted in CP not being able to add new torrents. Link to change in Deluge: http://git.deluge-torrent.org/deluge/commit/?h=1.3-stable&id=26f5be17609a8312c4ba06aa120ed208cd7876f2
2014-11-06 14:33:38 +01:00
Roy Kokkelkoren 0766a27a71 Fixed bug in init.d script which prevented the writing of the PID file.
Altered default value of DATA_DIR to /var/opt/couchpotato in order to comply to linux file structure
2014-11-06 04:40:39 -06:00
Ruud a12f049d14 Bit-HDTV http -> https
fix #3570
2014-11-01 17:30:11 +01:00
Ruud 6afe2fd9cf IPTorrents webdl category
fix #4150
2014-11-01 15:36:54 +01:00
Viktor Elofsson 61f634a21e Refactored Hadouken downloader. 2014-10-21 16:52:28 +02:00
Ruud 02b6659235 Don't show ignored in ETA message 2014-10-20 20:46:07 +02:00
Ruud dacc3d8f47 Remove torrentleech 2014-10-19 22:47:29 +02:00
Ruud 4f140bb1ac Remove print 2014-10-19 22:41:50 +02:00
Ruud 3dffaa7075 Update Tornado 2014-10-19 16:13:03 +02:00
georgewhewell d626fda710 add option for internal only for hdbits provider 2014-10-17 15:14:44 +01:00
Ruud 51c8de0fc3 Force filename renamer setting 2014-10-16 22:39:39 +02:00
Ruud 4f23ccc284 Also rename on old version database
fix #4083
2014-10-15 21:28:52 +02:00
Ruud a6ff34a47f Only check exists on file 2014-10-14 22:49:19 +02:00
Viktor Elofsson b40d1f3463 Merge pull request #1 from RuudBurger/develop
Sync upstream
2014-10-14 13:18:48 +02:00
Ruud f1a2d960bc Make tab font smaller 2014-10-13 13:24:57 +02:00
Ruud 4e7069e0c6 Deluge, allow "no port"
fix #4055
2014-10-13 13:20:21 +02:00
Ruud 477a47e45e Don't show fanart error on future movies 2014-10-13 13:12:44 +02:00
Ruud a3264240ab Don't error out on future movies 2014-10-13 13:10:38 +02:00
Ofir Brukner 1030d0d748 Added support for subscenter.
Updated both plugin and lib.
2014-10-13 00:50:29 +03:00
Ruud f9d9fffedb Don't ss int or float 2014-10-12 16:41:48 +02:00
Ruud 6b4e9a3fac Remove chardet from requests library 2014-10-11 22:57:57 +02:00
Ruud Burger 6787289846 Merge pull request #4054 from joshka/patch-1
Fix default permissions on files to remove execute bits on files #4053
2014-10-11 18:04:08 +02:00
Joshua McKinney d31a2e2768 Fix default permissions on files to remove execute bits on files #4053 2014-10-12 03:01:20 +11:00
Ruud c992680209 Meta and Middle click not triggering new tab 2014-10-11 15:35:56 +02:00
Ruud 65f0dc25d2 Allow 1080p in shitty quality releases 2014-10-11 14:32:12 +02:00
Ruud b616af3a83 Make minimum scoring editable
fix #4042
2014-10-10 23:16:58 +02:00
Ruud ca13107330 Ignore exceptions on removing db_backup stuff 2014-10-10 22:46:35 +02:00
Ruud c7ce18f8c2 Better error message for missing cd number 2014-10-10 15:13:58 +02:00
Ruud b6f288a522 Close request connection 2014-10-09 23:10:28 +02:00
Ruud cb48ca03df Remove profile when marking movies done 2014-10-08 23:05:04 +02:00
Ruud 7b6641d709 Never restatus "down" when adding release 2014-10-08 23:00:11 +02:00
Ruud 3c12a2c4bf Don't restatus movies to active when scanning manage section 2014-10-08 22:35:56 +02:00
Ruud 259e2bc61c Don't skip unpacking on manage scan 2014-10-08 21:58:34 +02:00
Ruud 9f6e4cc2fa Remove NotifyMyWP 2014-10-08 20:46:11 +02:00
Ruud a763957334 Log minimum 1 second wait 2014-10-07 22:53:57 +02:00
Ruud 06293dc0a2 Simplify tmdb provider 2014-10-07 22:50:22 +02:00
Ruud 38a5d967dd Remove tmdb3 lib 2014-10-07 21:40:41 +02:00
Ruud 4cdb9bc81d Remove tmdb3 dependency 2014-10-07 21:40:32 +02:00
Ruud 2104cb2839 Always try to return version string 2014-10-07 20:30:51 +02:00
Ruud d4a4bd40a8 Always return version info 2014-10-07 20:14:17 +02:00
Ruud ba47d7eea7 Use torrent-duplicate if returned from Transmission
fix #4014
2014-10-07 11:46:42 +02:00
Viktor Elofsson 2e52c8124a Implemented a downloader for Hadouken. 2014-10-02 20:34:43 +02:00
Andrew Dumaresq 8de5fcdac6 fixed button name 2014-09-20 19:39:35 -04:00
Andrew Dumaresq 4aa9801be4 general code cleanup 2014-09-20 19:39:12 -04:00
dumaresq 3e58378490 figured out how to make the check work better 2014-09-19 21:41:58 -04:00
Andrew Dumaresq 2c40db3074 removed un-needed variable 2014-09-19 20:28:03 -04:00
dumaresq fba228fd9d fixing check function 2014-09-19 20:26:54 -04:00
Andrew Dumaresq ef2b8e88b4 better download checking 2014-09-19 07:07:23 -04:00
Andrew Dumaresq c77b270fa8 Cleaned up OAUTH and made the download asyc 2014-09-18 06:00:09 -04:00
dumaresq 872a4f4650 Worked on geting Oauth and adding download status 2014-09-07 17:59:16 -04:00
Ruud d0f1e7c6a3 Update put.io code 2014-08-29 12:30:31 +02:00
Ruud 53e7e383a3 put.io rename 2014-08-29 11:38:28 +02:00
Ruud c06e1f3135 Merge branch 'develop' of git://github.com/dumaresq/CouchPotatoServer into dumaresq-develop 2014-08-29 11:37:49 +02:00
dumaresq bb73cb8eec Fixed missing library 2014-08-24 18:19:01 -04:00
dumaresq 5acab98025 fixed hardcoded directory 2014-08-19 19:32:00 -04:00
dumaresq ed6a46e9c0 Added putioDownloader 2014-08-17 16:28:47 -04:00
196 changed files with 3617 additions and 12041 deletions
+1 -1
View File
@@ -61,7 +61,7 @@ class Loader(object):
self.log = CPLog(__name__) self.log = CPLog(__name__)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S') formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
hdlr = handlers.RotatingFileHandler(os.path.join(self.log_dir, 'error.log'), 'a', 500000, 10, encoding = 'utf-8') hdlr = handlers.RotatingFileHandler(os.path.join(self.log_dir, 'error.log'), 'a', 500000, 10)
hdlr.setLevel(logging.CRITICAL) hdlr.setLevel(logging.CRITICAL)
hdlr.setFormatter(formatter) hdlr.setFormatter(formatter)
self.log.logger.addHandler(hdlr) self.log.logger.addHandler(hdlr)
+9
View File
@@ -40,6 +40,8 @@ class WebHandler(BaseHandler):
return return
try: try:
if route == 'robots.txt':
self.set_header('Content-Type', 'text/plain')
self.write(views[route]()) self.write(views[route]())
except: except:
log.error("Failed doing web request '%s': %s", (route, traceback.format_exc())) log.error("Failed doing web request '%s': %s", (route, traceback.format_exc()))
@@ -60,6 +62,13 @@ def index():
addView('', index) addView('', index)
# Web view
def robots():
return 'User-agent: * \n' \
'Disallow: /'
addView('robots.txt', robots)
# API docs # API docs
def apiDocs(): def apiDocs():
routes = list(api.keys()) routes = list(api.keys())
+20 -16
View File
@@ -7,6 +7,7 @@ import urllib
from couchpotato.core.helpers.request import getParams from couchpotato.core.helpers.request import getParams
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, asynchronous from tornado.web import RequestHandler, asynchronous
@@ -50,13 +51,10 @@ class NonBlockHandler(RequestHandler):
start, stop = api_nonblock[route] start, stop = api_nonblock[route]
self.stopper = stop self.stopper = stop
start(self.onNewMessage, last_id = self.get_argument('last_id', None)) start(self.sendData, last_id = self.get_argument('last_id', None))
def onNewMessage(self, response):
if self.request.connection.stream.closed():
self.on_connection_close()
return
def sendData(self, response):
if not self.request.connection.stream.closed():
try: try:
self.finish(response) self.finish(response)
except: except:
@@ -64,10 +62,11 @@ class NonBlockHandler(RequestHandler):
try: self.finish({'success': False, 'error': 'Failed returning results'}) try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass except: pass
def on_connection_close(self): self.removeStopper()
def removeStopper(self):
if self.stopper: if self.stopper:
self.stopper(self.onNewMessage) self.stopper(self.sendData)
self.stopper = None self.stopper = None
@@ -83,10 +82,11 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
# Blocking API handler # Blocking API handler
class ApiHandler(RequestHandler): class ApiHandler(RequestHandler):
route = None
@asynchronous @asynchronous
def get(self, route, *args, **kwargs): def get(self, route, *args, **kwargs):
route = route.strip('/') self.route = route = route.strip('/')
if not api.get(route): if not api.get(route):
self.write('API call doesn\'t seem to exist') self.write('API call doesn\'t seem to exist')
self.finish() self.finish()
@@ -123,11 +123,15 @@ class ApiHandler(RequestHandler):
except: except:
log.error('Failed write error "%s": %s', (route, traceback.format_exc())) log.error('Failed write error "%s": %s', (route, traceback.format_exc()))
api_locks[route].release() self.unlock()
post = get post = get
def taskFinished(self, result, route): def taskFinished(self, result, route):
IOLoop.current().add_callback(self.sendData, result, route)
self.unlock()
def sendData(self, result, route):
if not self.request.connection.stream.closed(): if not self.request.connection.stream.closed():
try: try:
@@ -135,14 +139,12 @@ class ApiHandler(RequestHandler):
jsonp_callback = self.get_argument('callback_func', default = None) jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback: if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')') self.set_header('Content-Type', 'text/javascript')
self.set_header("Content-Type", "text/javascript") self.finish(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.finish()
elif isinstance(result, tuple) and result[0] == 'redirect': elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1]) self.redirect(result[1])
else: else:
self.write(result) self.finish(result)
self.finish()
except UnicodeDecodeError: except UnicodeDecodeError:
log.error('Failed proper encode: %s', traceback.format_exc()) log.error('Failed proper encode: %s', traceback.format_exc())
except: except:
@@ -150,7 +152,9 @@ class ApiHandler(RequestHandler):
try: self.finish({'success': False, 'error': 'Failed returning results'}) try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass except: pass
api_locks[route].release() def unlock(self):
try: api_locks[self.route].release()
except: pass
def addApiView(route, func, static = False, docs = None, **kwargs): def addApiView(route, func, static = False, docs = None, **kwargs):
+3 -3
View File
@@ -181,13 +181,13 @@ class Core(Plugin):
return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key')) return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key'))
def version(self): def version(self):
ver = fireEvent('updater.info', single = True) ver = fireEvent('updater.info', single = True) or {'version': {}}
if os.name == 'nt': platf = 'windows' if os.name == 'nt': platf = 'windows'
elif 'Darwin' in platform.platform(): platf = 'osx' elif 'Darwin' in platform.platform(): platf = 'osx'
else: platf = 'linux' else: platf = 'linux'
return '%s - %s-%s - v2' % (platf, ver.get('version')['type'], ver.get('version')['hash']) return '%s - %s-%s - v2' % (platf, ver.get('version').get('type') or 'unknown', ver.get('version').get('hash') or 'unknown')
def versionView(self, **kwargs): def versionView(self, **kwargs):
return { return {
@@ -290,7 +290,7 @@ config = [{
}, },
{ {
'name': 'permission_file', 'name': 'permission_file',
'default': '0755', 'default': '0644',
'label': 'File CHMOD', 'label': 'File CHMOD',
'description': 'See Folder CHMOD description, but for files', 'description': 'See Folder CHMOD description, but for files',
}, },
+17 -8
View File
@@ -205,19 +205,28 @@ class GitUpdater(BaseUpdater):
def getVersion(self): def getVersion(self):
if not self.version: if not self.version:
hash = None
date = None
branch = self.branch
try: try:
output = self.repo.getHead() # Yes, please output = self.repo.getHead() # Yes, please
log.debug('Git version output: %s', output.hash) log.debug('Git version output: %s', output.hash)
self.version = {
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, self.repo.getCurrentBranch().name or self.branch, output.hash[:8], datetime.fromtimestamp(output.getDate())), hash = output.hash[:8]
'hash': output.hash[:8], date = output.getDate()
'date': output.getDate(), branch = self.repo.getCurrentBranch().name
'type': 'git',
'branch': self.repo.getCurrentBranch().name
}
except Exception as e: except Exception as e:
log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e) log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e)
return 'No GIT'
self.version = {
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, branch, hash or 'unknown_hash', datetime.fromtimestamp(date) if date else 'unknown_date'),
'hash': hash,
'date': date,
'type': 'git',
'branch': branch
}
return self.version return self.version
+2
View File
@@ -621,6 +621,8 @@ class Database(object):
except OperationalError: except OperationalError:
log.error('Migrating from faulty database, probably a (too) old version: %s', traceback.format_exc()) log.error('Migrating from faulty database, probably a (too) old version: %s', traceback.format_exc())
rename_old = True
except: except:
log.error('Migration failed: %s', traceback.format_exc()) log.error('Migration failed: %s', traceback.format_exc())
+36
View File
@@ -20,14 +20,31 @@ class Blackhole(DownloaderBase):
status_support = False status_support = False
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
directory = self.conf('directory') directory = self.conf('directory')
# The folder needs to exist
if not directory or not os.path.isdir(directory): if not directory or not os.path.isdir(directory):
log.error('No directory set for blackhole %s download.', data.get('protocol')) log.error('No directory set for blackhole %s download.', data.get('protocol'))
else: else:
try: try:
# Filedata can be empty, which probably means it a magnet link
if not filedata or len(filedata) < 50: if not filedata or len(filedata) < 50:
try: try:
if data.get('protocol') == 'torrent_magnet': if data.get('protocol') == 'torrent_magnet':
@@ -36,13 +53,16 @@ class Blackhole(DownloaderBase):
except: except:
log.error('Failed download torrent via magnet url: %s', traceback.format_exc()) log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
# If it's still empty, don't know what to do!
if not filedata or len(filedata) < 50: if not filedata or len(filedata) < 50:
log.error('No nzb/torrent available: %s', data.get('url')) log.error('No nzb/torrent available: %s', data.get('url'))
return False return False
# Create filename with imdb id and other nice stuff
file_name = self.createFileName(data, filedata, media) file_name = self.createFileName(data, filedata, media)
full_path = os.path.join(directory, file_name) full_path = os.path.join(directory, file_name)
# People want thinks nice and tidy, create a subdir
if self.conf('create_subdir'): if self.conf('create_subdir'):
try: try:
new_path = os.path.splitext(full_path)[0] new_path = os.path.splitext(full_path)[0]
@@ -53,6 +73,8 @@ class Blackhole(DownloaderBase):
log.error('Couldnt create sub dir, reverting to old one: %s', full_path) log.error('Couldnt create sub dir, reverting to old one: %s', full_path)
try: try:
# Make sure the file doesn't exist yet, no need in overwriting it
if not os.path.isfile(full_path): if not os.path.isfile(full_path):
log.info('Downloading %s to %s.', (data.get('protocol'), full_path)) log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
with open(full_path, 'wb') as f: with open(full_path, 'wb') as f:
@@ -74,6 +96,10 @@ class Blackhole(DownloaderBase):
return False return False
def test(self): def test(self):
""" Test and see if the directory is writable
:return: boolean
"""
directory = self.conf('directory') directory = self.conf('directory')
if directory and os.path.isdir(directory): if directory and os.path.isdir(directory):
@@ -88,6 +114,10 @@ class Blackhole(DownloaderBase):
return False return False
def getEnabledProtocol(self): def getEnabledProtocol(self):
""" What protocols is this downloaded used for
:return: list with protocols
"""
if self.conf('use_for') == 'both': if self.conf('use_for') == 'both':
return super(Blackhole, self).getEnabledProtocol() return super(Blackhole, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent': elif self.conf('use_for') == 'torrent':
@@ -96,6 +126,12 @@ class Blackhole(DownloaderBase):
return ['nzb'] return ['nzb']
def isEnabled(self, manual = False, data = None): def isEnabled(self, manual = False, data = None):
""" Check if protocol is used (and enabled)
:param manual: The user has clicked to download a link through the webUI
:param data: dict returned from provider
Contains the release information
:return: boolean
"""
if not data: data = {} if not data: data = {}
for_protocol = ['both'] for_protocol = ['both']
if data and 'torrent' in data.get('protocol'): if data and 'torrent' in data.get('protocol'):
+34
View File
@@ -25,8 +25,18 @@ class Deluge(DownloaderBase):
drpc = None drpc = None
def connect(self, reconnect = False): def connect(self, reconnect = False):
""" Connect to the delugeRPC, re-use connection when already available
:param reconnect: force reconnect
:return: DelugeRPC instance
"""
# Load host from config and split out port. # Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':') host = cleanHost(self.conf('host'), protocol = False).split(':')
# Force host assignment
if len(host) == 1:
host.append(80)
if not isInt(host[1]): if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.') log.error('Config properties are not filled in correctly, port is missing.')
return False return False
@@ -37,6 +47,20 @@ class Deluge(DownloaderBase):
return self.drpc return self.drpc
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -91,11 +115,21 @@ class Deluge(DownloaderBase):
return self.downloadReturnId(remote_torrent) return self.downloadReturnId(remote_torrent)
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True) and self.drpc.test(): if self.connect(True) and self.drpc.test():
return True return True
return False return False
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Deluge download status.') log.debug('Checking Deluge download status.')
+427
View File
@@ -0,0 +1,427 @@
from base64 import b16encode, b32decode, b64encode
from distutils.version import LooseVersion
from hashlib import sha1
import httplib
import json
import os
import re
import urllib2
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from bencode import bencode as benc, bdecode
log = CPLog(__name__)
autoload = 'Hadouken'
class Hadouken(DownloaderBase):
    """ Downloader for the Hadouken torrent client (http://www.hdkn.net). """

    protocol = ['torrent', 'torrent_magnet']
    hadouken_api = None

    def connect(self):
        """ Create the HadoukenAPI client from the configured host and API key.

        :return: boolean, False when the config is incomplete
        """

        # Load host from config and split out port.
        host = cleanHost(self.conf('host'), protocol = False).split(':')

        # Fall back to the default Hadouken port when none was entered,
        # instead of crashing on host[1] below (same guard as the Deluge downloader).
        if len(host) == 1:
            host.append(7890)

        if not isInt(host[1]):
            log.error('Config properties are not filled in correctly, port is missing.')
            return False

        if not self.conf('api_key'):
            log.error('Config properties are not filled in correctly, API key is missing.')
            return False

        self.hadouken_api = HadoukenAPI(host[0], port = host[1], api_key = self.conf('api_key'))

        return True

    def download(self, data = None, media = None, filedata = None):
        """ Send a torrent/nzb file to the downloader

        :param data: dict returned from provider
            Contains the release information
        :param media: media dict with information
            Used for creating the filename when possible
        :param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and send to this function
            This is done to have failed checking before using the downloader, so the downloader
            doesn't need to worry about that
        :return: boolean
            One faile returns false, but the downloaded should log his own errors
        """

        if not media: media = {}
        if not data: data = {}

        log.debug("Sending '%s' (%s) to Hadouken.", (data.get('name'), data.get('protocol')))

        if not self.connect():
            return False

        torrent_params = {}

        if self.conf('label'):
            torrent_params['label'] = self.conf('label')

        torrent_filename = self.createFileName(data, filedata, media)

        if data.get('protocol') == 'torrent_magnet':
            # Pull the info hash out of the magnet link; guard against
            # malformed urls instead of raising IndexError on [0].
            magnet_hashes = re.findall('urn:btih:([\w]{32,40})', data.get('url'))
            if not magnet_hashes:
                log.error('Could not extract hash from magnet url: %s', data.get('url'))
                return False

            torrent_hash = magnet_hashes[0].upper()
            torrent_params['trackers'] = self.torrent_trackers
            torrent_params['name'] = torrent_filename
        else:
            # Hash the bencoded info dict to get the torrent's info hash
            info = bdecode(filedata)['info']
            torrent_hash = sha1(benc(info)).hexdigest().upper()

        # Convert base 32 to hex
        if len(torrent_hash) == 32:
            torrent_hash = b16encode(b32decode(torrent_hash))

        # Send request to Hadouken
        if data.get('protocol') == 'torrent_magnet':
            self.hadouken_api.add_magnet_link(data.get('url'), torrent_params)
        else:
            self.hadouken_api.add_file(filedata, torrent_params)

        return self.downloadReturnId(torrent_hash)

    def test(self):
        """ Tests the given host:port and API key """

        if not self.connect():
            return False

        version = self.hadouken_api.get_version()

        if not version:
            log.error('Could not get Hadouken version.')
            return False

        # The minimum required version of Hadouken is 4.5.6.
        if LooseVersion(version) >= LooseVersion('4.5.6'):
            return True

        log.error('Hadouken v4.5.6 (or newer) required. Found v%s', version)
        return False

    def getAllDownloadStatus(self, ids):
        """ Get status of all active downloads

        :param ids: list of (mixed) downloader ids
            Used to match the releases for this downloader as there could be
            other downloaders active that it should ignore
        :return: list of releases
        """

        log.debug('Checking Hadouken download status.')

        if not self.connect():
            return []

        release_downloads = ReleaseDownloadList(self)

        queue = self.hadouken_api.get_by_hash_list(ids)

        if not queue:
            return []

        for torrent in queue:
            if torrent is None:
                continue

            torrent_filelist = self.hadouken_api.get_files_by_hash(torrent['InfoHash'])

            torrent_files = []
            save_path = torrent['SavePath']

            # The 'Path' key for each file_item contains
            # the full path to the single file relative to the
            # torrents save path.

            # For a single file torrent the result would be,
            # - Save path: "C:\Downloads"
            # - file_item['Path'] = "file1.iso"
            # Resulting path: "C:\Downloads\file1.iso"

            # For a multi file torrent the result would be,
            # - Save path: "C:\Downloads"
            # - file_item['Path'] = "dirname/file1.iso"
            # Resulting path: "C:\Downloads\dirname/file1.iso"

            for file_item in torrent_filelist:
                torrent_files.append(sp(os.path.join(save_path, file_item['Path'])))

            release_downloads.append({
                'id': torrent['InfoHash'].upper(),
                'name': torrent['Name'],
                'status': self.get_torrent_status(torrent),
                'seed_ratio': self.get_seed_ratio(torrent),
                'original_status': torrent['State'],
                'timeleft': -1,
                # Fixed: original had len(torrent_files == 1), which compares the
                # list to an int and then raises TypeError on len(bool).
                'folder': sp(save_path if len(torrent_files) == 1 else os.path.join(save_path, torrent['Name'])),
                'files': torrent_files
            })

        return release_downloads

    def get_seed_ratio(self, torrent):
        """ Returns the seed ratio for a given torrent.

        Keyword arguments:
        torrent -- The torrent to calculate seed ratio for.
        """

        up = torrent['TotalUploadedBytes']
        down = torrent['TotalDownloadedBytes']

        if up > 0 and down > 0:
            # Force float division: the byte counters are ints, so plain
            # "/" would truncate the ratio to 0 or 1 under Python 2.
            return float(up) / down

        return 0

    def get_torrent_status(self, torrent):
        """ Returns the CouchPotato status for a given torrent.

        Keyword arguments:
        torrent -- The torrent to translate status for.
        """

        if torrent['IsSeeding'] and torrent['IsFinished'] and torrent['Paused']:
            return 'completed'

        if torrent['IsSeeding']:
            return 'seeding'

        return 'busy'

    def pause(self, release_download, pause = True):
        """ Pauses or resumes the torrent specified by the ID field
        in release_download.

        Keyword arguments:
        release_download -- The CouchPotato release_download to pause/resume.
        pause -- Boolean indicating whether to pause or resume.
        """

        if not self.connect():
            return False

        return self.hadouken_api.pause(release_download['id'], pause)

    def removeFailed(self, release_download):
        """ Removes a failed torrent and also remove the data associated with it.

        Keyword arguments:
        release_download -- The CouchPotato release_download to remove.
        """

        log.info('%s failed downloading, deleting...', release_download['name'])

        if not self.connect():
            return False

        return self.hadouken_api.remove(release_download['id'], remove_data = True)

    def processComplete(self, release_download, delete_files = False):
        """ Removes the completed torrent from Hadouken and optionally removes the data
        associated with it.

        Keyword arguments:
        release_download -- The CouchPotato release_download to remove.
        delete_files: Boolean indicating whether to remove the associated data.
        """

        log.debug('Requesting Hadouken to remove the torrent %s%s.',
                  (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))

        if not self.connect():
            return False

        return self.hadouken_api.remove(release_download['id'], remove_data = delete_files)
class HadoukenAPI(object):
    """ Minimal JSON-RPC 2.0 client for the Hadouken web API. """

    def __init__(self, host = 'localhost', port = 7890, api_key = None):
        # Base url of the Hadouken JSON-RPC endpoint
        self.url = 'http://' + str(host) + ':' + str(port)
        self.api_key = api_key
        # Incrementing JSON-RPC request id, bumped for every call
        self.requestId = 0

        self.opener = urllib2.build_opener()
        self.opener.addheaders = [('User-agent', 'couchpotato-hadouken-client/1.0'), ('Accept', 'application/json')]

        if not api_key:
            log.error('API key missing.')

    def add_file(self, filedata, torrent_params):
        """ Add a file to Hadouken with the specified parameters.

        Keyword arguments:
        filedata -- The binary torrent data.
        torrent_params -- Additional parameters for the file.
        """
        data = {
            'method': 'torrents.addFile',
            'params': [b64encode(filedata), torrent_params]
        }

        return self._request(data)

    def add_magnet_link(self, magnetLink, torrent_params):
        """ Add a magnet link to Hadouken with the specified parameters.

        Keyword arguments:
        magnetLink -- The magnet link to send.
        torrent_params -- Additional parameters for the magnet link.
        """
        data = {
            'method': 'torrents.addUrl',
            'params': [magnetLink, torrent_params]
        }

        return self._request(data)

    def get_by_hash_list(self, infoHashList):
        """ Gets a list of torrents filtered by the given info hash list.

        Keyword arguments:
        infoHashList -- A list of info hashes.
        """
        data = {
            'method': 'torrents.getByInfoHashList',
            'params': [infoHashList]
        }

        return self._request(data)

    def get_files_by_hash(self, infoHash):
        """ Gets a list of files for the torrent identified by the
        given info hash.

        Keyword arguments:
        infoHash -- The info hash of the torrent to return files for.
        """
        data = {
            'method': 'torrents.getFiles',
            'params': [infoHash]
        }

        return self._request(data)

    def get_version(self):
        """ Gets the version, commitish and build date of Hadouken. """
        data = {
            'method': 'core.getVersion',
            'params': None
        }

        result = self._request(data)

        if not result:
            return False

        return result['Version']

    def pause(self, infoHash, pause):
        """ Pauses/unpauses the torrent identified by the given info hash.

        Keyword arguments:
        infoHash -- The info hash of the torrent to operate on.
        pause -- If true, pauses the torrent. Otherwise resumes.
        """
        data = {
            'method': 'torrents.pause',
            'params': [infoHash]
        }

        if not pause:
            data['method'] = 'torrents.resume'

        return self._request(data)

    def remove(self, infoHash, remove_data = False):
        """ Removes the torrent identified by the given info hash and
        optionally removes the data as well.

        Keyword arguments:
        infoHash -- The info hash of the torrent to remove.
        remove_data -- If true, removes the data associated with the torrent.
        """
        data = {
            'method': 'torrents.remove',
            'params': [infoHash, remove_data]
        }

        return self._request(data)

    def _request(self, data):
        """ Send a JSON-RPC request and return its 'result' member,
        or False on any transport or RPC error (errors are logged). """
        self.requestId += 1

        data['jsonrpc'] = '2.0'
        data['id'] = self.requestId

        request = urllib2.Request(self.url + '/jsonrpc', data = json.dumps(data))
        request.add_header('Authorization', 'Token ' + self.api_key)
        request.add_header('Content-Type', 'application/json')

        try:
            f = self.opener.open(request)
            response = f.read()
            f.close()

            obj = json.loads(response)

            if 'error' not in obj:
                return obj['result']

            log.error('JSONRPC error, %s: %s', obj['error']['code'], obj['error']['message'])
        except httplib.InvalidURL as err:
            log.error('Invalid Hadouken host, check your config %s', err)
        except urllib2.HTTPError as err:
            # 401 means the API key was rejected, give a targeted hint
            if err.code == 401:
                log.error('Invalid Hadouken API key, check your config')
            else:
                log.error('Hadouken HTTPError: %s', err)
        except urllib2.URLError as err:
            log.error('Unable to connect to Hadouken %s', err)

        return False
# Settings definition for the Hadouken downloader, rendered on the
# Downloaders settings tab (download_providers list).
config = [{
    'name': 'hadouken',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'hadouken',
            'label': 'Hadouken',
            'description': 'Use <a href="http://www.hdkn.net">Hadouken</a> (>= v4.5.6) to download torrents.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'torrent'
                },
                {
                    'name': 'host',
                    'default': 'localhost:7890'
                },
                {
                    'name': 'api_key',
                    'label': 'API key',
                    'type': 'password'
                },
                {
                    'name': 'label',
                    'description': 'Label to add torrent as.'
                }
            ]
        }
    ]
}]
+28 -3
View File
@@ -23,6 +23,20 @@ class NZBGet(DownloaderBase):
rpc = 'xmlrpc' rpc = 'xmlrpc'
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -71,6 +85,10 @@ class NZBGet(DownloaderBase):
return False return False
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
rpc = self.getRPC() rpc = self.getRPC()
try: try:
@@ -91,6 +109,13 @@ class NZBGet(DownloaderBase):
return True return True
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking NZBGet download status.') log.debug('Checking NZBGet download status.')
@@ -163,12 +188,12 @@ class NZBGet(DownloaderBase):
nzb_id = nzb['NZBID'] nzb_id = nzb['NZBID']
if nzb_id in ids: if nzb_id in ids:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log'])) log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
release_downloads.append({ release_downloads.append({
'id': nzb_id, 'id': nzb_id,
'name': nzb['NZBFilename'], 'name': nzb['NZBFilename'],
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed', 'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed',
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'], 'original_status': nzb['Status'],
'timeleft': str(timedelta(seconds = 0)), 'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir']) 'folder': sp(nzb['DestDir'])
}) })
+25
View File
@@ -24,6 +24,20 @@ class NZBVortex(DownloaderBase):
session_id = None session_id = None
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -45,6 +59,10 @@ class NZBVortex(DownloaderBase):
return False return False
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
try: try:
login_result = self.login() login_result = self.login()
except: except:
@@ -53,6 +71,13 @@ class NZBVortex(DownloaderBase):
return login_result return login_result
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
raw_statuses = self.call('nzb') raw_statuses = self.call('nzb')
+18
View File
@@ -19,6 +19,20 @@ class Pneumatic(DownloaderBase):
status_support = False status_support = False
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -63,6 +77,10 @@ class Pneumatic(DownloaderBase):
return False return False
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
directory = self.conf('directory') directory = self.conf('directory')
if directory and os.path.isdir(directory): if directory and os.path.isdir(directory):
@@ -0,0 +1,68 @@
from .main import PutIO
def autoload():
    """ Plugin entry point: return the PutIO downloader instance. """
    return PutIO()
# Settings definition for the put.io downloader, rendered on the
# Downloaders settings tab (download_providers list).
config = [{
    'name': 'putio',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'putio',
            'label': 'put.io',
            'description': 'This will start a torrent download on <a href="http://put.io">Put.io</a>.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'torrent',
                },
                {
                    'name': 'oauth_token',
                    'label': 'oauth_token',
                    'description': 'This is the OAUTH_TOKEN from your putio API',
                    'advanced': True,
                },
                {
                    'name': 'folder',
                    # Fixed duplicated word in the user-visible description ("first first")
                    'description': ('The folder on putio where you want the upload to go', 'Will find the first folder that matches this name'),
                    'default': 0,
                },
                {
                    'name': 'callback_host',
                    # Fixed "it's" -> "its" in the user-visible description
                    'description': 'External reachable url to CP so put.io can do its thing',
                },
                {
                    'name': 'download',
                    'description': 'Set this to have CouchPotato download the file from Put.io',
                    'type': 'bool',
                    'default': 0,
                },
                {
                    'name': 'delete_file',
                    # Fixed misspelling "sucessful" in the user-visible description
                    'description': ('Set this to remove the file from putio after successful download', 'Does nothing if you don\'t select download'),
                    'type': 'bool',
                    'default': 0,
                },
                {
                    'name': 'download_dir',
                    'type': 'directory',
                    'label': 'Download Directory',
                    'description': 'The Directory to download files to, does nothing if you don\'t select download',
                },
                {
                    'name': 'manual',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
                },
            ],
        }
    ],
}]
+181
View File
@@ -0,0 +1,181 @@
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEventAsync
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from pio import api as pio
import datetime
log = CPLog(__name__)
autoload = 'Putiodownload'
class PutIO(DownloaderBase):
    """ Downloader that starts torrent transfers on the put.io service and
    optionally fetches the finished files back to a local directory. """

    protocol = ['torrent', 'torrent_magnet']
    # file_ids currently being fetched back from put.io, shared across calls
    downloading_list = []
    oauth_authenticate = 'https://api.couchpota.to/authorize/putio/'

    def __init__(self):
        addApiView('downloader.putio.getfrom', self.getFromPutio, docs = {
            'desc': 'Allows you to download file from Put.io',
        })
        addApiView('downloader.putio.auth_url', self.getAuthorizationUrl)
        addApiView('downloader.putio.credentials', self.getCredentials)
        addEvent('putio.download', self.putioDownloader)

        return super(PutIO, self).__init__()

    # This is a recursive function to check for the folders
    def recursionFolder(self, client, folder = 0, tfolder = ''):
        """ Walk the put.io folder tree below `folder` looking for a directory
        named `tfolder`; return its id, or 0 when not found. """
        files = client.File.list(folder)
        for f in files:
            if f.content_type == 'application/x-directory':
                if f.name == tfolder:
                    return f.id
                else:
                    result = self.recursionFolder(client, f.id, tfolder)
                    if result != 0:
                        return result
        return 0

    # This will check the root for the folder, and kick of recursively checking sub folder
    def convertFolder(self, client, folder):
        """ Map the configured folder name to a put.io folder id (0 = root). """
        if folder == 0:
            return 0
        else:
            return self.recursionFolder(client, 0, folder)

    def download(self, data = None, media = None, filedata = None):
        """ Send a torrent to put.io

        :param data: dict returned from provider
            Contains the release information
        :param media: media dict with information
            Used for creating the filename when possible
        :param filedata: downloaded torrent/nzb filedata
        :return: boolean
        """
        if not media: media = {}
        if not data: data = {}

        log.info('Sending "%s" to put.io', data.get('name'))
        url = data.get('url')

        client = pio.Client(self.conf('oauth_token'))

        putioFolder = self.convertFolder(client, self.conf('folder'))
        log.debug('putioFolder ID is %s', putioFolder)

        # It might be possible to call getFromPutio from the renamer if we can then we don't need to do this.
        # Note callback_host is NOT our address, it's the internet host that putio can call too
        callbackurl = None
        if self.conf('download'):
            # Fixed: original called .strip('/') on the literal string
            # 'api_base' (a no-op) instead of on the api base value;
            # the formatted result is unchanged, just written clearly now.
            callbackurl = 'http://' + self.conf('callback_host') + '%sdownloader.putio.getfrom/' % Env.get('api_base')
        resp = client.Transfer.add_url(url, callback_url = callbackurl, parent_id = putioFolder)
        log.debug('resp is %s', resp.id)
        return self.downloadReturnId(resp.id)

    def test(self):
        """ Check if the configured OAuth token can list files
        :return: bool
        """
        try:
            client = pio.Client(self.conf('oauth_token'))
            if client.File.list():
                return True
        except:
            log.info('Failed to get file listing, check OAUTH_TOKEN')

        return False

    def getAuthorizationUrl(self, host = None, **kwargs):
        """ Build the couchpota.to OAuth bounce url that will redirect
        the user back to our getCredentials endpoint. """
        callback_url = cleanHost(host) + '%sdownloader.putio.credentials/' % (Env.get('api_base').lstrip('/'))
        log.debug('callback_url is %s', callback_url)

        target_url = self.oauth_authenticate + "?target=" + callback_url
        log.debug('target_url is %s', target_url)

        return {
            'success': True,
            'url': target_url,
        }

    def getCredentials(self, **kwargs):
        """ OAuth callback endpoint: store the handed-back token, then
        redirect to the downloader settings page. """
        oauth_token = kwargs.get('oauth')

        # Only store the token when one was actually handed back. The
        # original try/except could never trigger (dict.get doesn't raise)
        # and would happily store a None token.
        if oauth_token:
            log.debug('oauth_token is: %s', oauth_token)
            self.conf('oauth_token', value = oauth_token)

        return 'redirect', Env.get('web_base') + 'settings/downloaders/'

    def getAllDownloadStatus(self, ids):
        """ Get status of all active transfers on put.io

        :param ids: list of (mixed) downloader ids
            Used to match the releases for this downloader as there could be
            other downloaders active that it should ignore
        :return: list of releases
        """
        log.debug('Checking putio download status.')
        client = pio.Client(self.conf('oauth_token'))

        transfers = client.Transfer.list()

        log.debug(transfers)
        release_downloads = ReleaseDownloadList(self)
        for t in transfers:
            if t.id in ids:

                log.debug('downloading list is %s', self.downloading_list)
                if t.status == "COMPLETED" and not self.conf('download'):
                    status = 'completed'

                # So check if we are trying to download something
                elif t.status == "COMPLETED" and self.conf('download'):
                    # Assume we are done
                    status = 'completed'
                    if not self.downloading_list:
                        now = datetime.datetime.utcnow()
                        date_time = datetime.datetime.strptime(t.finished_at, "%Y-%m-%dT%H:%M:%S")
                        # We need to make sure a race condition didn't happen
                        if (now - date_time) < datetime.timedelta(minutes = 5):
                            # 5 minutes haven't passed so we wait
                            status = 'busy'
                    else:
                        # If we have the file_id in the downloading_list mark it as busy
                        if str(t.file_id) in self.downloading_list:
                            status = 'busy'
                else:
                    status = 'busy'

                release_downloads.append({
                    'id': t.id,
                    'name': t.name,
                    'status': status,
                    'timeleft': t.estimated_time,
                })

        return release_downloads

    def putioDownloader(self, fid):
        """ Event handler that actually fetches a finished file from put.io
        to the configured local download directory. """
        log.info('Put.io Real downloader called with file_id: %s', fid)
        client = pio.Client(self.conf('oauth_token'))

        log.debug('About to get file List')
        putioFolder = self.convertFolder(client, self.conf('folder'))
        log.debug('PutioFolderID is %s', putioFolder)

        files = client.File.list(parent_id = putioFolder)
        downloaddir = self.conf('download_dir')

        for f in files:
            if str(f.id) == str(fid):
                client.File.download(f, dest = downloaddir, delete_after_download = self.conf('delete_file'))
                # Once the download is complete we need to remove it from the running list.
                self.downloading_list.remove(fid)

        return True

    def getFromPutio(self, **kwargs):
        """ API endpoint (also the put.io callback target) that queues a
        file_id for local download via the putio.download event. """
        try:
            file_id = str(kwargs.get('file_id'))
        except:
            return {
                'success': False,
            }

        log.info('Put.io Download has been called file_id is %s', file_id)
        if file_id not in self.downloading_list:
            self.downloading_list.append(file_id)
            fireEventAsync('putio.download', fid = file_id)
            return {
                'success': True,
            }

        return {
            'success': False,
        }
@@ -0,0 +1,68 @@
// Adds register/unregister buttons to the put.io downloader fieldset on the
// Settings page, wiring them to the downloader.putio.* API endpoints.
var PutIODownloader = new Class({

	initialize: function(){
		var self = this;

		// Wait until the settings page exists before touching its DOM
		App.addEvent('loadSettings', self.addRegisterButton.bind(self));
	},

	addRegisterButton: function(){
		var self = this;

		var setting_page = App.getPage('Settings');
		setting_page.addEvent('create', function(){

			var fieldset = setting_page.tabs.downloaders.groups.putio,
				l = window.location;

			// Count filled-in text inputs; a non-zero count means an
			// oauth token is already registered
			var putio_set = 0;
			fieldset.getElements('input[type=text]').each(function(el){
				putio_set += +(el.get('value') != '');
			});

			new Element('.ctrlHolder').adopt(

				// Unregister button
				(putio_set > 0) ?
					[
						self.unregister = new Element('a.button.red', {
							'text': 'Unregister "'+fieldset.getElement('input[name*=oauth_token]').get('value')+'"',
							'events': {
								'click': function(){
									// Clear the stored token and remove both buttons
									fieldset.getElements('input[name*=oauth_token]').set('value', '').fireEvent('change');

									self.unregister.destroy();
									self.unregister_or.destroy();
								}
							}
						}),
						self.unregister_or = new Element('span[text=or]')
					]
				: null,

				// Register button
				new Element('a.button', {
					'text': putio_set > 0 ? 'Register a different account' : 'Register your put.io account',
					'events': {
						'click': function(){
							// Ask the backend for the OAuth bounce url, then navigate to it
							Api.request('downloader.putio.auth_url', {
								'data': {
									'host': l.protocol + '//' + l.hostname + (l.port ? ':' + l.port : '')
								},
								'onComplete': function(json){
									window.location = json.url;
								}
							});
						}
					}
				})

			).inject(fieldset.getElement('.test_button'), 'before');
		})
	}

});

window.addEvent('domready', function(){
	new PutIODownloader();
});
@@ -41,12 +41,30 @@ class qBittorrent(DownloaderBase):
return self.qb return self.qb
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(): if self.connect():
return True return True
return False return False
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -95,6 +113,14 @@ class qBittorrent(DownloaderBase):
return 'busy' return 'busy'
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking qBittorrent download status.') log.debug('Checking qBittorrent download status.')
if not self.connect(): if not self.connect():
+26
View File
@@ -84,6 +84,10 @@ class rTorrent(DownloaderBase):
return self.rt return self.rt
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True): if self.connect(True):
return True return True
@@ -94,6 +98,20 @@ class rTorrent(DownloaderBase):
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -161,6 +179,14 @@ class rTorrent(DownloaderBase):
return 'completed' return 'completed'
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking rTorrent download status.') log.debug('Checking rTorrent download status.')
if not self.connect(): if not self.connect():
+27
View File
@@ -21,6 +21,21 @@ class Sabnzbd(DownloaderBase):
protocol = ['nzb'] protocol = ['nzb']
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -69,6 +84,11 @@ class Sabnzbd(DownloaderBase):
return False return False
def test(self): def test(self):
""" Check if connection works
Return message if an old version of SAB is used
:return: bool
"""
try: try:
sab_data = self.call({ sab_data = self.call({
'mode': 'version', 'mode': 'version',
@@ -89,6 +109,13 @@ class Sabnzbd(DownloaderBase):
return True return True
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking SABnzbd download status.') log.debug('Checking SABnzbd download status.')
+20 -1
View File
@@ -19,6 +19,21 @@ class Synology(DownloaderBase):
status_support = False status_support = False
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -50,6 +65,10 @@ class Synology(DownloaderBase):
return self.downloadReturnId('') if response else False return self.downloadReturnId('') if response else False
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
host = cleanHost(self.conf('host'), protocol = False).split(':') host = cleanHost(self.conf('host'), protocol = False).split(':')
try: try:
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password')) srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
@@ -118,7 +137,7 @@ class SynologyRPC(object):
def _req(self, url, args, files = None): def _req(self, url, args, files = None):
response = {'success': False} response = {'success': False}
try: try:
req = requests.post(url, data = args, files = files) req = requests.post(url, data = args, files = files, verify = False)
req.raise_for_status() req.raise_for_status()
response = json.loads(req.text) response = json.loads(req.text)
if response['success']: if response['success']:
+32 -2
View File
@@ -34,6 +34,21 @@ class Transmission(DownloaderBase):
return self.trpc return self.trpc
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -78,19 +93,32 @@ class Transmission(DownloaderBase):
log.error('Failed sending torrent to Transmission') log.error('Failed sending torrent to Transmission')
return False return False
data = remote_torrent.get('torrent-added') or remote_torrent.get('torrent-duplicate')
# Change settings of added torrents # Change settings of added torrents
if torrent_params: if torrent_params:
self.trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params) self.trpc.set_torrent(data['hashString'], torrent_params)
log.info('Torrent sent to Transmission successfully.') log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString']) return self.downloadReturnId(data['hashString'])
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
if self.connect() and self.trpc.get_session(): if self.connect() and self.trpc.get_session():
return True return True
return False return False
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Transmission download status.') log.debug('Checking Transmission download status.')
@@ -119,6 +147,8 @@ class Transmission(DownloaderBase):
status = 'failed' status = 'failed'
elif torrent['status'] == 0 and torrent['percentDone'] == 1: elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed' status = 'completed'
elif torrent['status'] == 16 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] in [5, 6]: elif torrent['status'] in [5, 6]:
status = 'seeding' status = 'seeding'
+26
View File
@@ -51,6 +51,21 @@ class uTorrent(DownloaderBase):
return self.utorrent_api return self.utorrent_api
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -120,6 +135,10 @@ class uTorrent(DownloaderBase):
return self.downloadReturnId(torrent_hash) return self.downloadReturnId(torrent_hash)
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(): if self.connect():
build_version = self.utorrent_api.get_build() build_version = self.utorrent_api.get_build()
if not build_version: if not build_version:
@@ -131,6 +150,13 @@ class uTorrent(DownloaderBase):
return False return False
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking uTorrent download status.') log.debug('Checking uTorrent download status.')
+8 -16
View File
@@ -37,27 +37,19 @@ def toUnicode(original, *args):
except: except:
try: try:
detected = detect(original) detected = detect(original)
if detected.get('encoding') == 'utf-8': try:
return original.decode('utf-8') if detected.get('confidence') > 0.8:
return original.decode(detected.get('encoding'))
except:
pass
return ek(original, *args) return ek(original, *args)
except: except:
raise raise
except: except:
log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc())) log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc()))
ascii_text = str(original).encode('string_escape') return 'ERROR DECODING STRING'
return toUnicode(ascii_text)
def toUTF8(original):
try:
if isinstance(original, str) and len(original) > 0:
# Try to detect
detected = detect(original)
return original.decode(detected.get('encoding')).encode('utf-8')
else:
return original
except:
#log.error('Failed encoding to UTF8: %s', traceback.format_exc())
raise
def ss(original, *args): def ss(original, *args):
@@ -103,7 +95,7 @@ def ek(original, *args):
if isinstance(original, (str, unicode)): if isinstance(original, (str, unicode)):
try: try:
from couchpotato.environment import Env from couchpotato.environment import Env
return original.decode(Env.get('encoding')) return original.decode(Env.get('encoding'), 'ignore')
except UnicodeDecodeError: except UnicodeDecodeError:
raise raise
-51
View File
@@ -1,51 +0,0 @@
import os
from chardet import detect
from couchpotato import Env
fs_enc = Env.get('fs_encoding')
def list_dir(path, full_path = True):
"""
List directory don't error when it doesn't exist
"""
path = unicode_path(path)
if os.path.isdir(path):
for f in os.listdir(path):
if full_path:
yield join(path, f)
else:
yield f
def join(*args):
"""
Join path, encode properly before joining
"""
return os.path.join(*[safe(x) for x in args])
def unicode_path(path):
"""
Convert back to unicode
:param path: path string
"""
if isinstance(path, str):
detected = detect(path)
print detected
path = path.decode(detected.get('encoding'))
path = path.decode('unicode_escape')
return path
def safe(path):
if isinstance(path, unicode):
return path.encode('unicode_escape')
return path
+8 -9
View File
@@ -1,6 +1,5 @@
import logging import logging
import re import re
import traceback
class CPLog(object): class CPLog(object):
@@ -55,19 +54,19 @@ class CPLog(object):
def safeMessage(self, msg, replace_tuple = ()): def safeMessage(self, msg, replace_tuple = ()):
from couchpotato.core.helpers.encoding import ss, toUTF8 from couchpotato.core.helpers.encoding import ss, toUnicode
msg = toUTF8(msg) msg = ss(msg)
try: try:
if isinstance(replace_tuple, tuple): if isinstance(replace_tuple, tuple):
msg = msg % tuple([toUTF8(x) for x in list(replace_tuple)]) msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)])
elif isinstance(replace_tuple, dict): elif isinstance(replace_tuple, dict):
msg = msg % dict((k, toUTF8(v)) for k, v in replace_tuple.iteritems()) msg = msg % dict((k, ss(v) if not isinstance(v, (int, float)) else v) for k, v in replace_tuple.iteritems())
else: else:
msg = msg % toUTF8(replace_tuple) msg = msg % ss(replace_tuple)
except: except Exception as e:
self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, traceback.format_exc())) self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))
self.setup() self.setup()
if not self.is_develop: if not self.is_develop:
@@ -84,4 +83,4 @@ class CPLog(object):
except: except:
pass pass
return toUTF8(msg) return toUnicode(msg)
+11 -2
View File
@@ -1,9 +1,10 @@
import os import os
import traceback import traceback
from couchpotato import CPLog from couchpotato import CPLog, md5
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getExt
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
import six import six
@@ -92,7 +93,15 @@ class MediaBase(Plugin):
if not isinstance(image, (str, unicode)): if not isinstance(image, (str, unicode)):
continue continue
if file_type not in existing_files or len(existing_files.get(file_type, [])) == 0: # Check if it has top image
filename = '%s.%s' % (md5(image), getExt(image))
existing = existing_files.get(file_type, [])
has_latest = False
for x in existing:
if filename in x:
has_latest = True
if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
file_path = fireEvent('file.download', url = image, single = True) file_path = fireEvent('file.download', url = image, single = True)
if file_path: if file_path:
existing_files[file_type] = [toUnicode(file_path)] existing_files[file_type] = [toUnicode(file_path)]
+7 -2
View File
@@ -456,6 +456,11 @@ class MediaPlugin(MediaBase):
deleted = True deleted = True
elif new_media_status: elif new_media_status:
media['status'] = new_media_status media['status'] = new_media_status
# Remove profile (no use for in manage)
if new_media_status == 'done':
media['profile_id'] = None
db.update(media) db.update(media)
fireEvent('media.untag', media['_id'], 'recent', single = True) fireEvent('media.untag', media['_id'], 'recent', single = True)
@@ -491,7 +496,7 @@ class MediaPlugin(MediaBase):
} }
}) })
def restatus(self, media_id, tag_recent = True): def restatus(self, media_id, tag_recent = True, allowed_restatus = None):
try: try:
db = get_db() db = get_db()
@@ -526,7 +531,7 @@ class MediaPlugin(MediaBase):
m['status'] = previous_status m['status'] = previous_status
# Only update when status has changed # Only update when status has changed
if previous_status != m['status']: if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus):
db.update(m) db.update(m)
# Tag media as recent # Tag media as recent
@@ -5,6 +5,11 @@ import time
import traceback import traceback
import xml.etree.ElementTree as XMLTree import xml.etree.ElementTree as XMLTree
try:
from xml.etree.ElementTree import ParseError as XmlParseError
except ImportError:
from xml.parsers.expat import ExpatError as XmlParseError
from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import ss from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \ from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
@@ -94,6 +99,8 @@ class Provider(Plugin):
try: try:
data = XMLTree.fromstring(ss(data)) data = XMLTree.fromstring(ss(data))
return self.getElements(data, item_path) return self.getElements(data, item_path)
except XmlParseError:
log.error('Invalid XML returned, check "%s" manually for issues', url)
except: except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
@@ -68,8 +68,12 @@ class Base(NZBProvider, RSS):
if not date: if not date:
date = self.getTextElement(nzb, 'pubDate') date = self.getTextElement(nzb, 'pubDate')
nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
name = self.getTextElement(nzb, 'title') name = self.getTextElement(nzb, 'title')
detail_url = self.getTextElement(nzb, 'guid')
nzb_id = detail_url.split('/')[-1:].pop()
if '://' not in detail_url:
detail_url = (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id)
if not name: if not name:
continue continue
@@ -103,7 +107,7 @@ class Base(NZBProvider, RSS):
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024, 'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host), 'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
'detail_url': (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id), 'detail_url': detail_url,
'content': self.getTextElement(nzb, 'description'), 'content': self.getTextElement(nzb, 'description'),
'description': description, 'description': description,
'score': host['extra_score'], 'score': host['extra_score'],
@@ -183,7 +187,7 @@ class Base(NZBProvider, RSS):
return 'try_next' return 'try_next'
try: try:
data = self.urlopen(url, show_error = False) data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
self.limits_reached[host] = False self.limits_reached[host] = False
return data return data
except HTTPError as e: except HTTPError as e:
@@ -1,13 +1,9 @@
from urlparse import urlparse, parse_qs
import time
from couchpotato.core.event import fireEvent from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.nzb.base import NZBProvider from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from dateutil.parser import parse
log = CPLog(__name__) log = CPLog(__name__)
@@ -16,27 +12,19 @@ log = CPLog(__name__)
class Base(NZBProvider, RSS): class Base(NZBProvider, RSS):
urls = { urls = {
'search': 'https://rss.omgwtfnzbs.org/rss-search.php?%s', 'search': 'https://api.omgwtfnzbs.org/json/?%s',
'detail_url': 'https://omgwtfnzbs.org/details.php?id=%s',
} }
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
cat_ids = [ cat_ids = [
([15], ['dvdrip']), ([15], ['dvdrip', 'scr', 'r5', 'tc', 'ts', 'cam']),
([15, 16], ['brrip']), ([15, 16], ['brrip']),
([16], ['720p', '1080p', 'bd50']), ([16], ['720p', '1080p', 'bd50']),
([17], ['dvdr']), ([17], ['dvdr']),
] ]
cat_backup_id = 'movie' cat_backup_id = 'movie'
def search(self, movie, quality):
if quality['identifier'] in fireEvent('quality.pre_releases', single = True):
return []
return super(Base, self).search(movie, quality)
def _searchOnTitle(self, title, movie, quality, results): def _searchOnTitle(self, title, movie, quality, results):
q = '%s %s' % (title, movie['info']['year']) q = '%s %s' % (title, movie['info']['year'])
@@ -47,21 +35,19 @@ class Base(NZBProvider, RSS):
'api': self.conf('api_key', default = ''), 'api': self.conf('api_key', default = ''),
}) })
nzbs = self.getRSSData(self.urls['search'] % params) nzbs = self.getJsonData(self.urls['search'] % params)
if isinstance(nzbs, list):
for nzb in nzbs: for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0]
results.append({ results.append({
'id': nzb_id, 'id': nzb.get('nzbid'),
'name': toUnicode(self.getTextElement(nzb, 'title')), 'name': toUnicode(nzb.get('release')),
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))), 'age': self.calculateAge(tryInt(nzb.get('usenetage'))),
'size': tryInt(enclosure['length']) / 1024 / 1024, 'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024,
'url': enclosure['url'], 'url': nzb.get('getnzb'),
'detail_url': self.urls['detail_url'] % nzb_id, 'detail_url': nzb.get('details'),
'description': self.getTextElement(nzb, 'description') 'description': nzb.get('weblink')
}) })
@@ -13,11 +13,11 @@ log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentProvider):
urls = { urls = {
'test': 'http://www.bit-hdtv.com/', 'test': 'https://www.bit-hdtv.com/',
'login': 'http://www.bit-hdtv.com/takelogin.php', 'login': 'https://www.bit-hdtv.com/takelogin.php',
'login_check': 'http://www.bit-hdtv.com/messages.php', 'login_check': 'https://www.bit-hdtv.com/messages.php',
'detail': 'http://www.bit-hdtv.com/details.php?id=%s', 'detail': 'https://www.bit-hdtv.com/details.php?id=%s',
'search': 'http://www.bit-hdtv.com/torrents.php?', 'search': 'https://www.bit-hdtv.com/torrents.php?',
} }
# Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
@@ -93,7 +93,7 @@ config = [{
'tab': 'searcher', 'tab': 'searcher',
'list': 'torrent_providers', 'list': 'torrent_providers',
'name': 'BiT-HDTV', 'name': 'BiT-HDTV',
'description': '<a href="http://bit-hdtv.com">BiT-HDTV</a>', 'description': '<a href="https://bit-hdtv.com">BiT-HDTV</a>',
'wizard': True, 'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC', 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC',
'options': [ 'options': [
@@ -0,0 +1,130 @@
import re
import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://hdaccess.net/',
'detail': 'https://hdaccess.net/details.php?id=%s',
'search': 'https://hdaccess.net/searchapi.php?apikey=%s&username=%s&imdbid=%s&internal=%s',
'download': 'https://hdaccess.net/grab.php?torrent=%s&apikey=%s',
}
http_time_between_calls = 1 # Seconds
def _search(self, movie, quality, results):
data = self.getJsonData(self.urls['search'] % (self.conf('apikey'), self.conf('username'), getIdentifier(movie), self.conf('internal_only')))
if data:
try:
#for result in data[]:
for key, result in data.iteritems():
if tryInt(result['total_results']) == 0:
return
torrentscore = self.conf('extra_score')
releasegroup = result['releasegroup']
resolution = result['resolution']
encoding = result['encoding']
freeleech = tryInt(result['freeleech'])
seeders = tryInt(result['seeders'])
torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)
if freeleech > 0 and self.conf('prefer_internal'):
torrent_desc += '/ Internal'
torrentscore += 200
if seeders == 0:
torrentscore = 0
name = result['release_name']
year = tryInt(result['year'])
results.append({
'id': tryInt(result['torrentid']),
'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
'url': self.urls['download'] % (result['torrentid'], self.conf('apikey')),
'detail_url': self.urls['detail'] % result['torrentid'],
'size': tryInt(result['size']),
'seeders': tryInt(result['seeders']),
'leechers': tryInt(result['leechers']),
'age': tryInt(result['age']),
'score': torrentscore
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
config = [{
'name': 'hdaccess',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'HDAccess',
'wizard': True,
'description': '<a href="https://hdaccess.net">HDAccess</a>',
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAADuUlEQVQ4yz3T209bdQAH8O/vnNNzWno5FIpAKZdSLi23gWMDtumWuSXOyzJj9M1kyIOPS1xiYuKe9GUPezZZnGIiMTqTxS1bdIuYkG2MWKBAKYVszOgKFkrbA+259HfO+fli/PwPHzI+Pg5CCEAI2VcUlEsl1tHdU7P5bGOkWChEaaUCwvHpmkD93POn6bwgCMQGAMYYYwyCruuQnE7SPzjIstvb8l+bm5fXkokJSmlQEkUQAIpSRH5vd0tyum7I/sA1Z5VH2ctmiGWZjHw4McE1NAZtQ9fD25kXt1VN7es7dNjuGRjiJFeVpWo6slsZPhF/Ys/PPeIs2056ff7zIOS5rpU5/viJEwwEnu3Mi18dojjw0aWP6amz57h9RSE/35zinq2nuGjvIQwOj7K2SKeZWkk0auXSSZ+/ZopSy+CbW1pQKpWu6Jr2/qVPPqWRjm6HWi6Tm999g3RyGbndLCqGgVBrO3F7fHykK0YX47NNtGLYlBq/c+H2iD+3k704dHQUDcFmQVXLyP6zhfTqCl45fQYjx17FemoJunoAk1bQFGoVhkdPwNC0ix2dMT+3llodM02rKdo7gN3dHAEhuH/vNgDg3Pl3cPaNt2GZJpYX5lBbFwClBukfGobL5WrayW6NccVCISY4HIQxYts2Q3J5CXOPHuLlo6NoCoXQ2hbG0JFRpJYWcVDIQ5ZlyL5qW5b9hNlWjKsYBgzDgKppMCoGHty7A0orOHbyNNweL+obGnDm9TdhWSYS8Vn4a2shOZ0QJRGSKIHjeGGtWNhjqqpyG+k04k8eozPai9ZwByavf4kfpyZxZGwMfYOHsbwQx34hB5dL4syKweRq/xpXHwzNapqWSSYWMDszzYqFPEaOn4KiKJiZfoCZ6d8Am+GtC++iXCpjaf4P9vefT8HzfKarp3eWRKMxCILwuWXSz977YIK2RTodDoGH1+OG1+tDlbsKkuiAJEngeWBjNUUnv7rucIiOLyzTvMKJTgnVtbVXLctK3L31g+NAUajL5bEptaDpOnTdgGkzVHl9drms0ju3fnJIkphoaQtfbQiFwAcCAY5wnCE5Xff3i8XX4o9nGksH+8zl9hAGZlWMCivkc9z0L3fZ999+LTCGZKi55YJTFHfye3sc6e/vB88LpK6+iWlqSS4WcpcNXZtwOp3B6mo/REmCSSkEgd+qq3vpRkt75Fp9Y1BZWZwnhq4zEovF/u/MATAti4U7umvyu9kR27aikihC9vvTnV2xufVUMu/2uIksy/9tZvgX49fLmAMx3bsAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
'description': 'Enter your site username.',
},
{
'name': 'apikey',
'default': '',
'label': 'API Key',
'description': 'Enter your site api key. This can be find on <a href="https://hdaccess.net/usercp.php?action=security">Profile Security</a>',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 0,
'description': 'Will not be (re)moved until this seed ratio is met. HDAccess minimum is 1:1.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 0,
'description': 'Will not be (re)moved until this seed time (in hours) is met. HDAccess minimum is 48 hours.',
},
{
'name': 'prefer_internal',
'advanced': True,
'type': 'bool',
'default': 1,
'description': 'Favors internal releases over non-internal releases.',
},
{
'name': 'internal_only',
'advanced': True,
'label': 'Internal Only',
'type': 'bool',
'default': False,
'description': 'Only download releases marked as HDAccess internal',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
@@ -29,6 +29,9 @@ class Base(TorrentProvider):
} }
post_data.update(params) post_data.update(params)
if self.conf('internal_only'):
post_data.update({'origin': [1]})
try: try:
result = self.getJsonData(self.urls['api'], data = json.dumps(post_data)) result = self.getJsonData(self.urls['api'], data = json.dumps(post_data))
@@ -110,6 +113,14 @@ config = [{
'default': 0, 'default': 0,
'description': 'Starting score for each release found via this provider.', 'description': 'Starting score for each release found via this provider.',
}, },
{
'name': 'internal_only',
'advanced': True,
'label': 'Internal Only',
'type': 'bool',
'default': False,
'description': 'Only download releases marked as HDBits internal'
}
], ],
}, },
], ],
@@ -14,11 +14,11 @@ log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentProvider):
urls = { urls = {
'test': 'https://www.iptorrents.com/', 'test': 'https://iptorrents.eu/',
'base_url': 'https://www.iptorrents.com', 'base_url': 'https://iptorrents.eu',
'login': 'https://www.iptorrents.com/torrents/', 'login': 'https://iptorrents.eu/torrents/',
'login_check': 'https://www.iptorrents.com/inbox.php', 'login_check': 'https://iptorrents.eu/inbox.php',
'search': 'https://www.iptorrents.com/torrents/?%s%%s&q=%s&qf=ti&p=%%d', 'search': 'https://iptorrents.eu/torrents/?%s%%s&q=%s&qf=ti&p=%%d',
} }
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
@@ -120,7 +120,7 @@ config = [{
'tab': 'searcher', 'tab': 'searcher',
'list': 'torrent_providers', 'list': 'torrent_providers',
'name': 'IPTorrents', 'name': 'IPTorrents',
'description': '<a href="http://www.iptorrents.com">IPTorrents</a>', 'description': '<a href="https://iptorrents.eu">IPTorrents</a>',
'wizard': True, 'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=', 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=',
'options': [ 'options': [
@@ -42,6 +42,7 @@ class Base(TorrentProvider):
link = result.find('td', attrs = {'class': 'ttr_name'}).find('a') link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
url = result.find('td', attrs = {'class': 'td_dl'}).find('a') url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a') leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
torrent_id = link['href'].replace('details?id=', '') torrent_id = link['href'].replace('details?id=', '')
@@ -51,7 +52,7 @@ class Base(TorrentProvider):
'url': self.urls['download'] % url['href'], 'url': self.urls['download'] % url['href'],
'detail_url': self.urls['detail'] % torrent_id, 'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]), 'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
'seeders': tryInt(result.find('td', attrs = {'class': 'ttr_seeders'}).find('a').string), 'seeders': tryInt(seeders.string) if seeders else 0,
'leechers': tryInt(leechers.string) if leechers else 0, 'leechers': tryInt(leechers.string) if leechers else 0,
'get_more_info': self.getMoreInfo, 'get_more_info': self.getMoreInfo,
}) })
@@ -1,7 +1,7 @@
import traceback import traceback
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -56,11 +56,12 @@ class Base(TorrentProvider):
full_id = link['href'].replace('details.php?id=', '') full_id = link['href'].replace('details.php?id=', '')
torrent_id = full_id[:6] torrent_id = full_id[:6]
name = toUnicode(link.get('title', link.contents[0]).encode('ISO-8859-1')).strip()
results.append({ results.append({
'id': torrent_id, 'id': torrent_id,
'name': link.contents[0], 'name': name,
'url': self.urls['download'] % (torrent_id, link.contents[0]), 'url': self.urls['download'] % (torrent_id, name),
'detail_url': self.urls['detail'] % torrent_id, 'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]), 'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
'seeders': tryInt(cells[8].find('span').contents[0]), 'seeders': tryInt(cells[8].find('span').contents[0]),
@@ -1,3 +1,4 @@
import re
from couchpotato.core.helpers.variable import tryInt from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -8,12 +9,12 @@ log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentProvider):
urls = { urls = {
'test': 'http://www.td.af/', 'test': 'https://torrentday.eu/',
'login': 'http://www.td.af/torrents/', 'login': 'https://torrentday.eu/torrents/',
'login_check': 'http://www.torrentday.com/userdetails.php', 'login_check': 'https://torrentday.eu/userdetails.php',
'detail': 'http://www.td.af/details.php?id=%s', 'detail': 'https://torrentday.eu/details.php?id=%s',
'search': 'http://www.td.af/V3/API/API.php', 'search': 'https://torrentday.eu/V3/API/API.php',
'download': 'http://www.td.af/download.php/%s/%s', 'download': 'https://torrentday.eu/download.php/%s/%s',
} }
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
@@ -55,6 +56,10 @@ class Base(TorrentProvider):
} }
def loginSuccess(self, output): def loginSuccess(self, output):
often = re.search('You tried too often, please wait .*</div>', output)
if often:
raise Exception(often.group(0)[:-6].strip())
return 'Password not correct' not in output return 'Password not correct' not in output
def loginCheckSuccess(self, output): def loginCheckSuccess(self, output):
@@ -68,7 +73,7 @@ config = [{
'tab': 'searcher', 'tab': 'searcher',
'list': 'torrent_providers', 'list': 'torrent_providers',
'name': 'TorrentDay', 'name': 'TorrentDay',
'description': '<a href="http://www.td.af/">TorrentDay</a>', 'description': '<a href="https://torrentday.eu/">TorrentDay</a>',
'wizard': True, 'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=', 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=',
'options': [ 'options': [
@@ -17,7 +17,7 @@ class Base(TorrentProvider):
'login': 'https://www.torrentleech.org/user/account/login/', 'login': 'https://www.torrentleech.org/user/account/login/',
'login_check': 'https://torrentleech.org/user/messages', 'login_check': 'https://torrentleech.org/user/messages',
'detail': 'https://www.torrentleech.org/torrent/%s', 'detail': 'https://www.torrentleech.org/torrent/%s',
'search': 'https://www.torrentleech.org/torrents/browse/index/query/%s/categories/%d', 'search': 'https://www.torrentleech.org/torrents/browse/index/query/%s/categories/%s',
'download': 'https://www.torrentleech.org%s', 'download': 'https://www.torrentleech.org%s',
} }
@@ -13,12 +13,12 @@ log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentProvider):
urls = { urls = {
'test': 'http://torrentshack.eu/', 'test': 'https://torrentshack.me/',
'login': 'http://torrentshack.eu/login.php', 'login': 'https://torrentshack.me/login.php',
'login_check': 'http://torrentshack.eu/inbox.php', 'login_check': 'https://torrentshack.me/inbox.php',
'detail': 'http://torrentshack.eu/torrent/%s', 'detail': 'https://torrentshack.me/torrent/%s',
'search': 'http://torrentshack.eu/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1', 'search': 'https://torrentshack.me/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
'download': 'http://torrentshack.eu/%s', 'download': 'https://torrentshack.me/%s',
} }
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
@@ -42,6 +42,7 @@ class Base(TorrentProvider):
link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent
url = result.find('td', attrs = {'class': 'torrent_td'}).find('a') url = result.find('td', attrs = {'class': 'torrent_td'}).find('a')
size = result.find('td', attrs = {'class': 'size'}).contents[0].strip('\n ')
tds = result.find_all('td') tds = result.find_all('td')
results.append({ results.append({
@@ -49,7 +50,7 @@ class Base(TorrentProvider):
'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}), 'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
'url': self.urls['download'] % url['href'], 'url': self.urls['download'] % url['href'],
'detail_url': self.urls['download'] % link['href'], 'detail_url': self.urls['download'] % link['href'],
'size': self.parseSize(result.find_all('td')[5].string), 'size': self.parseSize(size),
'seeders': tryInt(tds[len(tds)-2].string), 'seeders': tryInt(tds[len(tds)-2].string),
'leechers': tryInt(tds[len(tds)-1].string), 'leechers': tryInt(tds[len(tds)-1].string),
}) })
@@ -81,7 +82,7 @@ config = [{
'tab': 'searcher', 'tab': 'searcher',
'list': 'torrent_providers', 'list': 'torrent_providers',
'name': 'TorrentShack', 'name': 'TorrentShack',
'description': '<a href="http://torrentshack.eu/">TorrentShack</a>', 'description': '<a href="https://torrentshack.me/">TorrentShack</a>',
'wizard': True, 'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC', 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC',
'options': [ 'options': [
@@ -22,12 +22,12 @@ class Base(TorrentMagnetProvider, RSS):
http_time_between_calls = 0 http_time_between_calls = 0
def _search(self, media, quality, results): def _searchOnTitle(self, title, media, quality, results):
search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search'] search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search']
# Create search parameters # Create search parameters
search_params = self.buildUrl(media) search_params = self.buildUrl(title, media, quality)
smin = quality.get('size_min') smin = quality.get('size_min')
smax = quality.get('size_max') smax = quality.get('size_max')
@@ -2,28 +2,25 @@ import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__) log = CPLog(__name__)
class Base(TorrentMagnetProvider): class Base(TorrentProvider):
urls = { urls = {
'test': '%s/api', 'test': '%s/api/v2',
'search': '%s/api/list.json?keywords=%s&quality=%s', 'search': '%s/api/v2/list_movies.json?limit=50&query_term=%s'
'detail': '%s/api/movie.json?id=%s'
} }
http_time_between_calls = 1 # seconds http_time_between_calls = 1 # seconds
proxy_list = [ proxy_list = [
'http://yify.unlocktorrent.com', 'https://yts.re',
'http://yify-torrents.com.come.in', 'https://yts.wf',
'http://yts.re', 'https://yts.im',
'http://yts.im'
'http://yify-torrents.im',
] ]
def search(self, movie, quality): def search(self, movie, quality):
@@ -39,27 +36,30 @@ class Base(TorrentMagnetProvider):
if not domain: if not domain:
return return
search_url = self.urls['search'] % (domain, getIdentifier(movie), quality['identifier']) search_url = self.urls['search'] % (domain, getIdentifier(movie))
data = self.getJsonData(search_url) data = self.getJsonData(search_url) or {}
data = data.get('data')
if data and data.get('MovieList'): if isinstance(data, dict) and data.get('movies'):
try: try:
for result in data.get('MovieList'): for result in data.get('movies'):
if result['Quality'] and result['Quality'] not in result['MovieTitle']: for release in result.get('torrents', []):
title = result['MovieTitle'] + ' BrRip ' + result['Quality']
if release['quality'] and release['quality'] not in result['title_long']:
title = result['title_long'] + ' BRRip ' + release['quality']
else: else:
title = result['MovieTitle'] + ' BrRip' title = result['title_long'] + ' BRRip'
results.append({ results.append({
'id': result['MovieID'], 'id': release['hash'],
'name': title, 'name': title,
'url': result['TorrentMagnetUrl'], 'url': release['url'],
'detail_url': self.urls['detail'] % (domain, result['MovieID']), 'detail_url': result['url'],
'size': self.parseSize(result['Size']), 'size': self.parseSize(release['size']),
'seeders': tryInt(result['TorrentSeeds']), 'seeders': tryInt(release['seeds']),
'leechers': tryInt(result['TorrentPeers']), 'leechers': tryInt(release['peers']),
}) })
except: except:
+1 -1
View File
@@ -65,7 +65,7 @@ class MovieBase(MovieTypeBase):
return False return False
elif not params.get('info'): elif not params.get('info'):
try: try:
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), single = True) is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True)
if not is_movie: if not is_movie:
msg = 'Can\'t add movie, seems to be a TV show.' msg = 'Can\'t add movie, seems to be a TV show.'
log.error(msg) log.error(msg)
@@ -696,7 +696,7 @@ MA.Readd = new Class({
if(movie_done || snatched && snatched > 0) if(movie_done || snatched && snatched > 0)
self.el = new Element('a.readd', { self.el = new Element('a.readd', {
'title': 'Readd the movie and mark all previous snatched/downloaded as ignored', 'title': 'Re-add the movie and mark all previous snatched/downloaded as ignored',
'events': { 'events': {
'click': self.doReadd.bind(self) 'click': self.doReadd.bind(self)
} }
@@ -264,3 +264,11 @@
height: 40px; height: 40px;
} }
@media all and (max-width: 480px) {
.toggle_menu h2 {
font-size: 16px;
text-align: center;
height: 30px;
}
}
@@ -44,11 +44,12 @@ var Charts = new Class({
if( Cookie.read('suggestions_charts_menu_selected') === 'charts'){ if( Cookie.read('suggestions_charts_menu_selected') === 'charts'){
self.show(); self.show();
self.fireEvent.delay(0, self, 'created');
} }
else else
self.el.hide(); self.el.hide();
self.fireEvent.delay(0, self, 'created');
}, },
fill: function(json){ fill: function(json){
@@ -0,0 +1,89 @@
import re
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'CrowdAI'
class CrowdAI(Automation, RSS):
interval = 1800
def getIMDBids(self):
movies = []
urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))
for url in urls:
if not urls[url]:
continue
rss_movies = self.getRSSData(url)
for movie in rss_movies:
description = self.getTextElement(movie, 'description')
grabs = 0
for item in movie:
if item.attrib.get('name') == 'grabs':
grabs = item.attrib.get('value')
break
if int(grabs) > tryInt(self.conf('number_grabs')):
title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
year = re.match(r'.*Year: (\d{4}).*', description).group(1)
imdb = self.search(title, year)
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies
config = [{
'name': 'crowdai',
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'crowdai_automation',
'label': 'CrowdAI',
'description': 'Imports from any newznab powered NZB providers RSS feed depending on the number of grabs per movie. Go to your newznab site and find the RSS section. Then copy the copy paste the link under "Movies > x264 feed" here.',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_urls_use',
'label': 'Use',
'default': '1',
},
{
'name': 'automation_urls',
'label': 'url',
'type': 'combined',
'combine': ['automation_urls_use', 'automation_urls'],
'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100',
},
{
'name': 'number_grabs',
'default': '500',
'label': 'Grab threshold',
'description': 'Number of grabs required',
},
],
},
],
}]
@@ -48,11 +48,12 @@ class Letterboxd(Automation):
soup = BeautifulSoup(self.getHTMLData(self.url % username)) soup = BeautifulSoup(self.getHTMLData(self.url % username))
for movie in soup.find_all('a', attrs = {'class': 'frame'}): for movie in soup.find_all('li', attrs = {'class': 'poster-container'}):
match = removeEmpty(self.pattern.split(movie['title'])) img = movie.find('img', movie)
title = img.get('alt')
movies.append({ movies.append({
'title': match[0], 'title': title
'year': match[1]
}) })
return movies return movies
@@ -39,15 +39,14 @@ class Rottentomatoes(Automation, RSS):
if result: if result:
log.info2('Something smells...')
rating = tryInt(self.getTextElement(movie, rating_tag)) rating = tryInt(self.getTextElement(movie, rating_tag))
name = result.group(0) name = result.group(0)
print rating, tryInt(self.conf('tomatometer_percent'))
if rating < tryInt(self.conf('tomatometer_percent')): if rating < tryInt(self.conf('tomatometer_percent')):
log.info2('%s seems to be rotten...', name) log.info2('%s seems to be rotten...', name)
else: else:
log.info2('Found %s with fresh rating %s', (name, rating))
log.info2('Found %s fresh enough movies, enqueuing: %s', (rating, name))
year = datetime.datetime.now().strftime("%Y") year = datetime.datetime.now().strftime("%Y")
imdb = self.search(name, year) imdb = self.search(name, year)
@@ -69,12 +69,15 @@ class CouchPotatoApi(MovieProvider):
name_enc = base64.b64encode(ss(name)) name_enc = base64.b64encode(ss(name))
return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders()) return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders())
def isMovie(self, identifier = None): def isMovie(self, identifier = None, adding = False):
if not identifier: if not identifier:
return return
data = self.getJsonData(self.urls['is_movie'] % identifier, headers = self.getRequestHeaders()) url = self.urls['is_movie'] % identifier
url += '?adding=1' if adding else ''
data = self.getJsonData(url, headers = self.getRequestHeaders())
if data: if data:
return data.get('is_movie', True) return data.get('is_movie', True)
@@ -4,6 +4,7 @@ from couchpotato import tryInt
from couchpotato.core.event import addEvent from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.core.media.movie.providers.base import MovieProvider
from requests import HTTPError
log = CPLog(__name__) log = CPLog(__name__)
@@ -32,12 +33,14 @@ class FanartTV(MovieProvider):
try: try:
url = self.urls['api'] % identifier url = self.urls['api'] % identifier
fanart_data = self.getJsonData(url) fanart_data = self.getJsonData(url, show_error = False)
if fanart_data: if fanart_data:
log.debug('Found images for %s', fanart_data.get('name')) log.debug('Found images for %s', fanart_data.get('name'))
images = self._parseMovie(fanart_data) images = self._parseMovie(fanart_data)
except HTTPError as e:
log.debug('Failed getting extra art for %s: %s',
(identifier, e))
except: except:
log.error('Failed getting extra art for %s: %s', log.error('Failed getting extra art for %s: %s',
(identifier, traceback.format_exc())) (identifier, traceback.format_exc()))
@@ -2,6 +2,7 @@ import json
import re import re
import traceback import traceback
from couchpotato import Env
from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString
@@ -17,8 +18,8 @@ autoload = 'OMDBAPI'
class OMDBAPI(MovieProvider): class OMDBAPI(MovieProvider):
urls = { urls = {
'search': 'http://www.omdbapi.com/?%s', 'search': 'http://www.omdbapi.com/?type=movie&%s',
'info': 'http://www.omdbapi.com/?i=%s', 'info': 'http://www.omdbapi.com/?type=movie&i=%s',
} }
http_time_between_calls = 0 http_time_between_calls = 0
@@ -38,7 +39,8 @@ class OMDBAPI(MovieProvider):
} }
cache_key = 'omdbapi.cache.%s' % q cache_key = 'omdbapi.cache.%s' % q
cached = self.getCache(cache_key, self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}), timeout = 3) url = self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')})
cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
if cached: if cached:
result = self.parseMovie(cached) result = self.parseMovie(cached)
@@ -56,7 +58,7 @@ class OMDBAPI(MovieProvider):
return {} return {}
cache_key = 'omdbapi.cache.%s' % identifier cache_key = 'omdbapi.cache.%s' % identifier
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3) cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
if cached: if cached:
result = self.parseMovie(cached) result = self.parseMovie(cached)
@@ -1,11 +1,10 @@
import traceback import traceback
from couchpotato.core.event import addEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss from couchpotato.core.helpers.encoding import toUnicode, ss, tryUrlencode
from couchpotato.core.helpers.variable import tryInt from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.core.media.movie.providers.base import MovieProvider
import tmdb3
log = CPLog(__name__) log = CPLog(__name__)
@@ -13,34 +12,45 @@ autoload = 'TheMovieDb'
class TheMovieDb(MovieProvider): class TheMovieDb(MovieProvider):
MAX_EXTRATHUMBS = 4
http_time_between_calls = .35
configuration = {
'images': {
'secure_base_url': 'https://image.tmdb.org/t/p/',
},
}
def __init__(self): def __init__(self):
addEvent('info.search', self.search, priority = 3)
addEvent('movie.search', self.search, priority = 3)
addEvent('movie.info', self.getInfo, priority = 3) addEvent('movie.info', self.getInfo, priority = 3)
addEvent('movie.info_by_tmdb', self.getInfo) addEvent('movie.info_by_tmdb', self.getInfo)
addEvent('app.load', self.config)
# Configure TMDB settings def config(self):
tmdb3.set_key(self.conf('api_key')) configuration = self.request('configuration')
tmdb3.set_cache('null') if configuration:
self.configuration = configuration
def search(self, q, limit = 12): def search(self, q, limit = 3):
""" Find movie by name """ """ Find movie by name """
if self.isDisabled(): if self.isDisabled():
return False return False
search_string = simplifyString(q)
cache_key = 'tmdb.cache.%s.%s' % (search_string, limit)
results = self.getCache(cache_key)
if not results:
log.debug('Searching for movie: %s', q) log.debug('Searching for movie: %s', q)
raw = None raw = None
try: try:
raw = tmdb3.searchMovie(search_string) name_year = fireEvent('scanner.name_year', q, single = True)
raw = self.request('search/movie', {
'query': name_year.get('name', q),
'year': name_year.get('year'),
'search_type': 'ngram' if limit > 1 else 'phrase'
}, return_key = 'results')
except: except:
log.error('Failed searching TMDB for "%s": %s', (search_string, traceback.format_exc())) log.error('Failed searching TMDB for "%s": %s', (q, traceback.format_exc()))
results = [] results = []
if raw: if raw:
@@ -48,7 +58,9 @@ class TheMovieDb(MovieProvider):
nr = 0 nr = 0
for movie in raw: for movie in raw:
results.append(self.parseMovie(movie, extended = False)) parsed_movie = self.parseMovie(movie, extended = False)
if parsed_movie:
results.append(parsed_movie)
nr += 1 nr += 1
if nr == limit: if nr == limit:
@@ -56,7 +68,6 @@ class TheMovieDb(MovieProvider):
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results]) log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
self.setCache(cache_key, results)
return results return results
except SyntaxError as e: except SyntaxError as e:
log.error('Failed to parse XML response: %s', e) log.error('Failed to parse XML response: %s', e)
@@ -69,39 +80,26 @@ class TheMovieDb(MovieProvider):
if not identifier: if not identifier:
return {} return {}
cache_key = 'tmdb.cache.%s%s' % (identifier, '.ex' if extended else '') result = self.parseMovie({
result = self.getCache(cache_key) 'id': identifier
}, extended = extended)
if not result: return result or {}
try:
log.debug('Getting info: %s', cache_key)
# noinspection PyArgumentList
movie = tmdb3.Movie(identifier)
try: exists = movie.title is not None
except: exists = False
if exists:
result = self.parseMovie(movie, extended = extended)
self.setCache(cache_key, result)
else:
result = {}
except:
log.error('Failed getting info for %s: %s', (identifier, traceback.format_exc()))
return result
def parseMovie(self, movie, extended = True): def parseMovie(self, movie, extended = True):
cache_key = 'tmdb.cache.%s%s' % (movie.id, '.ex' if extended else '') # Do request, append other items
movie_data = self.getCache(cache_key) movie = self.request('movie/%s' % movie.get('id'), {
'append_to_response': 'alternative_titles' + (',images,casts' if extended else '')
if not movie_data: })
if not movie:
return
# Images # Images
poster = self.getImage(movie, type = 'poster', size = 'w154') poster = self.getImage(movie, type = 'poster', size = 'w154')
poster_original = self.getImage(movie, type = 'poster', size = 'original') poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original') backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original', n = self.MAX_EXTRATHUMBS, skipfirst = True) extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original') if extended else []
images = { images = {
'poster': [poster] if poster else [], 'poster': [poster] if poster else [],
@@ -114,39 +112,43 @@ class TheMovieDb(MovieProvider):
# Genres # Genres
try: try:
genres = [genre.name for genre in movie.genres] genres = [genre.get('name') for genre in movie.get('genres', [])]
except: except:
genres = [] genres = []
# 1900 is the same as None # 1900 is the same as None
year = str(movie.releasedate or '')[:4] year = str(movie.get('release_date') or '')[:4]
if not movie.releasedate or year == '1900' or year.lower() == 'none': if not movie.get('release_date') or year == '1900' or year.lower() == 'none':
year = None year = None
# Gather actors data # Gather actors data
actors = {} actors = {}
if extended: if extended:
for cast_item in movie.cast:
# Full data
cast = movie.get('casts', {}).get('cast', [])
for cast_item in cast:
try: try:
actors[toUnicode(cast_item.name)] = toUnicode(cast_item.character) actors[toUnicode(cast_item.get('name'))] = toUnicode(cast_item.get('character'))
images['actors'][toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original') images['actors'][toUnicode(cast_item.get('name'))] = self.getImage(cast_item, type = 'profile', size = 'original')
except: except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc())) log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
movie_data = { movie_data = {
'type': 'movie', 'type': 'movie',
'via_tmdb': True, 'via_tmdb': True,
'tmdb_id': movie.id, 'tmdb_id': movie.get('id'),
'titles': [toUnicode(movie.title)], 'titles': [toUnicode(movie.get('title'))],
'original_title': movie.originaltitle, 'original_title': movie.get('original_title'),
'images': images, 'images': images,
'imdb': movie.imdb, 'imdb': movie.get('imdb_id'),
'runtime': movie.runtime, 'runtime': movie.get('runtime'),
'released': str(movie.releasedate), 'released': str(movie.get('release_date')),
'year': tryInt(year, None), 'year': tryInt(year, None),
'plot': movie.overview, 'plot': movie.get('overview'),
'genres': genres, 'genres': genres,
'collection': getattr(movie.collection, 'name', None), 'collection': getattr(movie.get('belongs_to_collection'), 'name', None),
'actor_roles': actors 'actor_roles': actors
} }
@@ -156,51 +158,55 @@ class TheMovieDb(MovieProvider):
if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']: if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']:
movie_data['titles'].append(movie_data['original_title']) movie_data['titles'].append(movie_data['original_title'])
if extended: # Add alternative titles
for alt in movie.alternate_titles: alternate_titles = movie.get('alternative_titles', {}).get('titles', [])
alt_name = alt.title
for alt in alternate_titles:
alt_name = alt.get('title')
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None: if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
movie_data['titles'].append(alt_name) movie_data['titles'].append(alt_name)
# Cache movie parsed
self.setCache(cache_key, movie_data)
return movie_data return movie_data
def getImage(self, movie, type = 'poster', size = 'poster'): def getImage(self, movie, type = 'poster', size = 'poster'):
image_url = '' image_url = ''
try: try:
image_url = getattr(movie, type).geturl(size = size) path = movie.get('%s_path' % type)
image_url = '%s%s%s' % (self.configuration['images']['secure_base_url'], size, path)
except: except:
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie)))) log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_url return image_url
def getMultImages(self, movie, type = 'backdrops', size = 'original', n = -1, skipfirst = False): def getMultImages(self, movie, type = 'backdrops', size = 'original'):
"""
If n < 0, return all images. Otherwise return n images.
If n > len(getattr(movie, type)), then return all images.
If skipfirst is True, then it will skip getattr(movie, type)[0]. This
is because backdrops[0] is typically backdrop.
"""
image_urls = [] image_urls = []
try: try:
images = getattr(movie, type) for image in movie.get('images', {}).get(type, [])[1:5]:
if n < 0 or n > len(images): image_urls.append(self.getImage(image, 'file', size))
num_images = len(images)
else:
num_images = n
for i in range(int(skipfirst), num_images + int(skipfirst)):
image_urls.append(images[i].geturl(size = size))
except: except:
log.debug('Failed getting %i %s.%s for "%s"', (n, type, size, ss(str(movie)))) log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_urls return image_urls
def request(self, call = '', params = {}, return_key = None):
params = dict((k, v) for k, v in params.items() if v)
params = tryUrlencode(params)
try:
url = 'http://api.themoviedb.org/3/%s?api_key=%s%s' % (call, self.conf('api_key'), '&%s' % params if params else '')
data = self.getJsonData(url, show_error = False)
except:
log.debug('Movie not found: %s, %s', (call, params))
data = None
if data and return_key and return_key in data:
data = data.get(return_key)
return data
def isDisabled(self): def isDisabled(self):
if self.conf('api_key') == '': if self.conf('api_key') == '':
log.error('No API key provided.') log.error('No API key provided.')
@@ -11,7 +11,7 @@ autoload = 'Bitsoup'
class Bitsoup(MovieProvider, Base): class Bitsoup(MovieProvider, Base):
cat_ids = [ cat_ids = [
([17], ['3d']), ([17], ['3d']),
([41], ['720p', '1080p']), ([80], ['720p', '1080p']),
([20], ['dvdr']), ([20], ['dvdr']),
([19], ['brrip', 'dvdrip']), ([19], ['brrip', 'dvdrip']),
] ]
@@ -0,0 +1,11 @@
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.hdaccess import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
log = CPLog(__name__)
autoload = 'HDAccess'
class HDAccess(MovieProvider, Base):
pass
@@ -13,7 +13,7 @@ class IPTorrents(MovieProvider, Base):
([87], ['3d']), ([87], ['3d']),
([48], ['720p', '1080p', 'bd50']), ([48], ['720p', '1080p', 'bd50']),
([72], ['cam', 'ts', 'tc', 'r5', 'scr']), ([72], ['cam', 'ts', 'tc', 'r5', 'scr']),
([7,48], ['dvdrip', 'brrip']), ([7, 48, 20], ['dvdrip', 'brrip']),
([6], ['dvdr']), ([6], ['dvdr']),
] ]
@@ -16,12 +16,12 @@ class TorrentLeech(MovieProvider, Base):
([9], ['ts', 'tc']), ([9], ['ts', 'tc']),
([10], ['r5', 'scr']), ([10], ['r5', 'scr']),
([11], ['dvdrip']), ([11], ['dvdrip']),
([14], ['brrip']), ([13, 14], ['brrip']),
([12], ['dvdr']), ([12], ['dvdr']),
] ]
def buildUrl(self, title, media, quality): def buildUrl(self, title, media, quality):
return ( return (
tryUrlencode(title.replace(':', '')), tryUrlencode(title.replace(':', '')),
self.getCatId(quality)[0] ','.join([str(x) for x in self.getCatId(quality)])
) )
@@ -22,8 +22,8 @@ class TorrentShack(MovieProvider, Base):
# Movies-SD Pack - 983 (not included) # Movies-SD Pack - 983 (not included)
cat_ids = [ cat_ids = [
([970], ['bd50']), ([970, 320], ['bd50']),
([300], ['720p', '1080p']), ([300, 320], ['720p', '1080p']),
([350], ['dvdr']), ([350], ['dvdr']),
([400], ['brrip', 'dvdrip']), ([400], ['brrip', 'dvdrip']),
] ]
@@ -1,6 +1,5 @@
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.torrent.torrentz import Base from couchpotato.core.media._base.providers.torrent.torrentz import Base
from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.core.media.movie.providers.base import MovieProvider
@@ -11,5 +10,5 @@ autoload = 'Torrentz'
class Torrentz(MovieProvider, Base): class Torrentz(MovieProvider, Base):
def buildUrl(self, media): def buildUrl(self, title, media, quality):
return tryUrlencode('"%s"' % fireEvent('library.query', media, single = True)) return tryUrlencode('"%s %s"' % (title, media['info']['year']))
@@ -12,7 +12,7 @@ autoload = 'RottenTomatoes'
class RottenTomatoes(UserscriptBase): class RottenTomatoes(UserscriptBase):
includes = ['*://www.rottentomatoes.com/m/*/'] includes = ['*://www.rottentomatoes.com/m/*']
excludes = ['*://www.rottentomatoes.com/m/*/*/'] excludes = ['*://www.rottentomatoes.com/m/*/*/']
version = 2 version = 2
+11 -10
View File
@@ -166,7 +166,8 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
'quality': q_identifier, 'quality': q_identifier,
'finish': profile['finish'][index], 'finish': profile['finish'][index],
'wait_for': tryInt(profile['wait_for'][index]), 'wait_for': tryInt(profile['wait_for'][index]),
'3d': profile['3d'][index] if profile.get('3d') else False '3d': profile['3d'][index] if profile.get('3d') else False,
'minimum_score': profile.get('minimum_score', 1),
} }
could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year']) could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year'])
@@ -202,13 +203,6 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
quality['custom'] = quality_custom quality['custom'] = quality_custom
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or [] results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
results_count = len(results)
total_result_count += results_count
if results_count == 0:
log.debug('Nothing found for %s in %s', (default_title, quality['label']))
# Keep track of releases found outside ETA window
outside_eta_results += results_count if could_not_be_released else 0
# Check if movie isn't deleted while searching # Check if movie isn't deleted while searching
if not fireEvent('media.get', movie.get('_id'), single = True): if not fireEvent('media.get', movie.get('_id'), single = True):
@@ -216,10 +210,16 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
# Add them to this movie releases list # Add them to this movie releases list
found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True) found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
results_count = len(found_releases)
total_result_count += results_count
if results_count == 0:
log.debug('Nothing found for %s in %s', (default_title, quality['label']))
# Keep track of releases found outside ETA window
outside_eta_results += results_count if could_not_be_released else 0
# Don't trigger download, but notify user of available releases # Don't trigger download, but notify user of available releases
if could_not_be_released: if could_not_be_released and results_count > 0:
if results_count > 0:
log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title)) log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title))
# Try find a valid result and download it # Try find a valid result and download it
@@ -396,6 +396,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
return True return True
return False
except: except:
log.error('Failed searching for next release: %s', traceback.format_exc()) log.error('Failed searching for next release: %s', traceback.format_exc())
return False return False
@@ -52,7 +52,7 @@ var SuggestList = new Class({
else else
self.hide(); self.hide();
self.fireEvent('created'); self.fireEvent.delay(0, self, 'created');
}, },
+12 -12
View File
@@ -14,6 +14,7 @@ from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification from couchpotato.core.notifications.base import Notification
from .index import NotificationIndex, NotificationUnreadIndex from .index import NotificationIndex, NotificationUnreadIndex
from couchpotato.environment import Env from couchpotato.environment import Env
from tornado.ioloop import IOLoop
log = CPLog(__name__) log = CPLog(__name__)
@@ -110,11 +111,11 @@ class CoreNotifier(Notification):
if limit_offset: if limit_offset:
splt = splitString(limit_offset) splt = splitString(limit_offset)
limit = splt[0] limit = tryInt(splt[0])
offset = 0 if len(splt) is 1 else splt[1] offset = tryInt(0 if len(splt) is 1 else splt[1])
results = db.get_many('notification', limit = limit, offset = offset, with_doc = True) results = db.all('notification', limit = limit, offset = offset, with_doc = True)
else: else:
results = db.get_many('notification', limit = 200, with_doc = True) results = db.all('notification', limit = 200, with_doc = True)
notifications = [] notifications = []
for n in results: for n in results:
@@ -148,17 +149,16 @@ class CoreNotifier(Notification):
def notify(self, message = '', data = None, listener = None): def notify(self, message = '', data = None, listener = None):
if not data: data = {} if not data: data = {}
try:
db = get_db()
data['notification_type'] = listener if listener else 'unknown'
n = { n = {
'_t': 'notification', '_t': 'notification',
'time': int(time.time()), 'time': int(time.time()),
'message': toUnicode(message)
} }
try:
db = get_db()
n['message'] = toUnicode(message)
if data.get('sticky'): if data.get('sticky'):
n['sticky'] = True n['sticky'] = True
if data.get('important'): if data.get('important'):
@@ -170,7 +170,7 @@ class CoreNotifier(Notification):
return True return True
except: except:
log.error('Failed notify: %s', traceback.format_exc()) log.error('Failed notify "%s": %s', (n, traceback.format_exc()))
def frontend(self, type = 'notification', data = None, message = None): def frontend(self, type = 'notification', data = None, message = None):
if not data: data = {} if not data: data = {}
@@ -190,7 +190,7 @@ class CoreNotifier(Notification):
while len(self.listeners) > 0 and not self.shuttingDown(): while len(self.listeners) > 0 and not self.shuttingDown():
try: try:
listener, last_id = self.listeners.pop() listener, last_id = self.listeners.pop()
listener({ IOLoop.current().add_callback(listener, {
'success': True, 'success': True,
'result': [notification], 'result': [notification],
}) })
@@ -1,68 +0,0 @@
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from pynmwp import PyNMWP
import six
log = CPLog(__name__)
autoload = 'NotifyMyWP'
class NotifyMyWP(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
keys = splitString(self.conf('api_key'))
p = PyNMWP(keys, self.conf('dev_key'))
response = p.push(application = self.default_title, event = message, description = message, priority = self.conf('priority'), batch_mode = len(keys) > 1)
for key in keys:
if not response[key]['Code'] == six.u('200'):
log.error('Could not send notification to NotifyMyWindowsPhone (%s). %s', (key, response[key]['message']))
return False
return response
config = [{
'name': 'notifymywp',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'notifymywp',
'label': 'Windows Phone',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'api_key',
'description': 'Multiple keys seperated by a comma. Maximum of 5.'
},
{
'name': 'dev_key',
'advanced': True,
},
{
'name': 'priority',
'default': 0,
'type': 'dropdown',
'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)],
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
@@ -23,6 +23,26 @@ config = [{
'default': 'localhost', 'default': 'localhost',
'description': 'Hostname/IP, default localhost' 'description': 'Hostname/IP, default localhost'
}, },
{
'name': 'username',
'label': 'Username',
'default': '',
'description': 'Required for myPlex'
},
{
'name': 'password',
'label': 'Password',
'default': '',
'type': 'password',
'description': 'Required for myPlex'
},
{
'name': 'auth_token',
'label': 'Auth Token',
'default': '',
'advanced': True,
'description': 'Required for myPlex'
},
{ {
'name': 'clients', 'name': 'clients',
'default': '', 'default': '',
@@ -35,10 +35,45 @@ class PlexServer(object):
if path.startswith('/'): if path.startswith('/'):
path = path[1:] path = path[1:]
#Maintain support for older Plex installations without myPlex
if not self.plex.conf('auth_token') and not self.plex.conf('username') and not self.plex.conf('password'):
data = self.plex.urlopen('%s/%s' % ( data = self.plex.urlopen('%s/%s' % (
self.createHost(self.plex.conf('media_server'), port = 32400), self.createHost(self.plex.conf('media_server'), port = 32400),
path path
)) ))
else:
#Fetch X-Plex-Token if it doesn't exist but a username/password do
if not self.plex.conf('auth_token') and (self.plex.conf('username') and self.plex.conf('password')):
import urllib2, base64
log.info("Fetching a new X-Plex-Token from plex.tv")
username = self.plex.conf('username')
password = self.plex.conf('password')
req = urllib2.Request("https://plex.tv/users/sign_in.xml", data="")
authheader = "Basic %s" % base64.encodestring('%s:%s' % (username, password))[:-1]
req.add_header("Authorization", authheader)
req.add_header("X-Plex-Product", "Couchpotato Notifier")
req.add_header("X-Plex-Client-Identifier", "b3a6b24dcab2224bdb101fc6aa08ea5e2f3147d6")
req.add_header("X-Plex-Version", "1.0")
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log.info("Error fetching token from plex.tv")
try:
auth_tree = etree.parse(response)
token = auth_tree.findall(".//authentication-token")[0].text
self.plex.conf('auth_token', token)
except (ValueError, IndexError) as e:
log.info("Error parsing plex.tv response: " + ex(e))
#Add X-Plex-Token header for myPlex support workaround
data = self.plex.urlopen('%s/%s?X-Plex-Token=%s' % (
self.createHost(self.plex.conf('media_server'), port = 32400),
path,
self.plex.conf('auth_token')
))
if data_type == 'xml': if data_type == 'xml':
return etree.fromstring(data) return etree.fromstring(data)
+68
View File
@@ -0,0 +1,68 @@
import traceback
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Webhook'
class Webhook(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
post_data = {
'message': toUnicode(message)
}
if getIdentifier(data):
post_data.update({
'imdb_id': getIdentifier(data)
})
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
try:
self.urlopen(self.conf('url'), headers = headers, data = post_data, show_error = False)
return True
except:
log.error('Webhook notification failed: %s', traceback.format_exc())
return False
config = [{
'name': 'webhook',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'webhook',
'label': 'Webhook',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'url',
'description': 'The URL to send notification data to when '
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
}
]
}
]
}]
+3 -3
View File
@@ -39,7 +39,7 @@ class Plugin(object):
_locks = {} _locks = {}
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20130519 Firefox/24.0' user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'
http_last_use = {} http_last_use = {}
http_time_between_calls = 0 http_time_between_calls = 0
http_failed_request = {} http_failed_request = {}
@@ -206,7 +206,7 @@ class Plugin(object):
if self.http_failed_disabled[host] > (time.time() - 900): if self.http_failed_disabled[host] > (time.time() - 900):
log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host) log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
if not show_error: if not show_error:
raise Exception('Disabled calls to %s for 15 minutes because so many failed requests') raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host)
else: else:
return '' return ''
else: else:
@@ -279,7 +279,7 @@ class Plugin(object):
wait = (last_use - now) + self.http_time_between_calls wait = (last_use - now) + self.http_time_between_calls
if wait > 0: if wait > 0:
log.debug('Waiting for %s, %d seconds', (self.getName(), wait)) log.debug('Waiting for %s, %d seconds', (self.getName(), max(1, wait)))
time.sleep(min(wait, 30)) time.sleep(min(wait, 30))
def beforeCall(self, handler): def beforeCall(self, handler):
+1
View File
@@ -87,6 +87,7 @@ class FileBrowser(Plugin):
try: try:
dirs = self.getDirectories(path = path, show_hidden = show_hidden) dirs = self.getDirectories(path = path, show_hidden = show_hidden)
except: except:
log.error('Failed getting directory "%s" : %s', (path, traceback.format_exc()))
dirs = [] dirs = []
parent = os.path.dirname(path.rstrip(os.path.sep)) parent = os.path.dirname(path.rstrip(os.path.sep))
+5 -4
View File
@@ -1,9 +1,9 @@
import codecs
import os import os
import re import re
import traceback import traceback
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
@@ -103,8 +103,9 @@ class Logging(Plugin):
if not os.path.isfile(path): if not os.path.isfile(path):
break break
f = codecs.open(path, 'r', 'utf-8') f = open(path, 'r')
raw_lines = self.toList(f.read()) log_content = toUnicode(f.read())
raw_lines = self.toList(log_content)
raw_lines.reverse() raw_lines.reverse()
brk = False brk = False
@@ -130,7 +131,7 @@ class Logging(Plugin):
def toList(self, log_content = ''): def toList(self, log_content = ''):
logs_raw = log_content.split('[0m\n') logs_raw = toUnicode(log_content).split('[0m\n')
logs = [] logs = []
for log_line in logs_raw: for log_line in logs_raw:
+1 -1
View File
@@ -123,7 +123,7 @@ class Manage(Plugin):
fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder) fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder)
onFound = self.createAddToLibrary(folder, added_identifiers) onFound = self.createAddToLibrary(folder, added_identifiers)
fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, on_found = onFound, single = True) fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, check_file_date = False, on_found = onFound, single = True)
# Break if CP wants to shut down # Break if CP wants to shut down
if self.shuttingDown(): if self.shuttingDown():
+2
View File
@@ -86,6 +86,7 @@ class ProfilePlugin(Plugin):
'label': toUnicode(kwargs.get('label')), 'label': toUnicode(kwargs.get('label')),
'order': tryInt(kwargs.get('order', 999)), 'order': tryInt(kwargs.get('order', 999)),
'core': kwargs.get('core', False), 'core': kwargs.get('core', False),
'minimum_score': tryInt(kwargs.get('minimum_score', 1)),
'qualities': [], 'qualities': [],
'wait_for': [], 'wait_for': [],
'stop_after': [], 'stop_after': [],
@@ -217,6 +218,7 @@ class ProfilePlugin(Plugin):
'label': toUnicode(profile.get('label')), 'label': toUnicode(profile.get('label')),
'order': order, 'order': order,
'qualities': profile.get('qualities'), 'qualities': profile.get('qualities'),
'minimum_score': 1,
'finish': [], 'finish': [],
'wait_for': [], 'wait_for': [],
'stop_after': [], 'stop_after': [],
@@ -51,6 +51,11 @@
margin: 0 5px !important; margin: 0 5px !important;
} }
.profile .wait_for .minimum_score_input {
width: 40px !important;
text-align: left;
}
.profile .types { .profile .types {
padding: 0; padding: 0;
margin: 0 20px 0 -4px; margin: 0 20px 0 -4px;
@@ -53,12 +53,21 @@ var Profile = new Class({
}), }),
new Element('span', {'text':'day(s) for a better quality '}), new Element('span', {'text':'day(s) for a better quality '}),
new Element('span.advanced', {'text':'and keep searching'}), new Element('span.advanced', {'text':'and keep searching'}),
// "After a checked quality is found and downloaded, continue searching for even better quality releases for the entered number of days." // "After a checked quality is found and downloaded, continue searching for even better quality releases for the entered number of days."
new Element('input.inlay.xsmall.stop_after_input.advanced', { new Element('input.inlay.xsmall.stop_after_input.advanced', {
'type':'text', 'type':'text',
'value': data.stop_after && data.stop_after.length > 0 ? data.stop_after[0] : 0 'value': data.stop_after && data.stop_after.length > 0 ? data.stop_after[0] : 0
}), }),
new Element('span.advanced', {'text':'day(s) for a better (checked) quality.'}) new Element('span.advanced', {'text':'day(s) for a better (checked) quality.'}),
// Minimum score of
new Element('span.advanced', {'html':'<br/>Releases need a minimum score of'}),
new Element('input.advanced.inlay.xsmall.minimum_score_input', {
'size': 4,
'type':'text',
'value': data.minimum_score || 1
})
) )
); );
@@ -126,6 +135,7 @@ var Profile = new Class({
'label' : self.el.getElement('.quality_label input').get('value'), 'label' : self.el.getElement('.quality_label input').get('value'),
'wait_for' : self.el.getElement('.wait_for_input').get('value'), 'wait_for' : self.el.getElement('.wait_for_input').get('value'),
'stop_after' : self.el.getElement('.stop_after_input').get('value'), 'stop_after' : self.el.getElement('.stop_after_input').get('value'),
'minimum_score' : self.el.getElement('.minimum_score_input').get('value'),
'types': [] 'types': []
}; };
+13 -6
View File
@@ -30,10 +30,10 @@ class QualityPlugin(Plugin):
{'identifier': 'dvdr', 'size': (3000, 10000), 'median_size': 4500, 'label': 'DVD-R', 'alternative': ['br2dvd', ('dvd', 'r')], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']}, {'identifier': 'dvdr', 'size': (3000, 10000), 'median_size': 4500, 'label': 'DVD-R', 'alternative': ['br2dvd', ('dvd', 'r')], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'median_size': 1500, 'label': 'DVD-Rip', 'width': 720, 'alternative': [('dvd', 'rip')], 'allow': [], 'ext':['avi'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]}, {'identifier': 'dvdrip', 'size': (600, 2400), 'median_size': 1500, 'label': 'DVD-Rip', 'width': 720, 'alternative': [('dvd', 'rip')], 'allow': [], 'ext':['avi'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'median_size': 700, 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr', 'webrip', ('web', 'rip')], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': []}, {'identifier': 'scr', 'size': (600, 1600), 'median_size': 700, 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr', 'webrip', ('web', 'rip')], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': []},
{'identifier': 'r5', 'size': (600, 1000), 'median_size': 700, 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr', '720p'], 'ext':[]}, {'identifier': 'r5', 'size': (600, 1000), 'median_size': 700, 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr', '720p', '1080p'], 'ext':[]},
{'identifier': 'tc', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': ['720p'], 'ext':[]}, {'identifier': 'tc', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': ['720p', '1080p'], 'ext':[]},
{'identifier': 'ts', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': ['720p'], 'ext':[]}, {'identifier': 'ts', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': ['720p', '1080p'], 'ext':[]},
{'identifier': 'cam', 'size': (600, 1000), 'median_size': 700, 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': ['720p'], 'ext':[]} {'identifier': 'cam', 'size': (600, 1000), 'median_size': 700, 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': ['720p', '1080p'], 'ext':[]}
] ]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr'] pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
threed_tags = { threed_tags = {
@@ -240,7 +240,7 @@ class QualityPlugin(Plugin):
# Add additional size score if only 1 size validated # Add additional size score if only 1 size validated
if len(size_scores) == 1: if len(size_scores) == 1:
self.calcScore(score, size_scores[0], 8) self.calcScore(score, size_scores[0], 7)
del size_scores del size_scores
# Return nothing if all scores are <= 0 # Return nothing if all scores are <= 0
@@ -278,6 +278,8 @@ class QualityPlugin(Plugin):
'ext': 5, 'ext': 5,
} }
scored_on = []
# Check alt and tags # Check alt and tags
for tag_type in ['identifier', 'alternative', 'tags', 'label']: for tag_type in ['identifier', 'alternative', 'tags', 'label']:
qualities = quality.get(tag_type, []) qualities = quality.get(tag_type, [])
@@ -289,10 +291,13 @@ class QualityPlugin(Plugin):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file)) log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type) score += points.get(tag_type)
if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words: if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words and ss(alt.lower()) not in scored_on:
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file)) log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type) score += points.get(tag_type)
# Don't score twice on same tag
scored_on.append(ss(alt).lower())
# Check extention # Check extention
for ext in quality.get('ext', []): for ext in quality.get('ext', []):
if ext == extension: if ext == extension:
@@ -485,6 +490,8 @@ class QualityPlugin(Plugin):
'Movie Name (2015).mp4': {'size': 6500, 'quality': 'brrip'}, 'Movie Name (2015).mp4': {'size': 6500, 'quality': 'brrip'},
'Movie Name.2014.720p Web-Dl Aac2.0 h264-ReleaseGroup': {'size': 3800, 'quality': 'brrip'}, 'Movie Name.2014.720p Web-Dl Aac2.0 h264-ReleaseGroup': {'size': 3800, 'quality': 'brrip'},
'Movie Name.2014.720p.WEBRip.x264.AC3-ReleaseGroup': {'size': 3000, 'quality': 'scr'}, 'Movie Name.2014.720p.WEBRip.x264.AC3-ReleaseGroup': {'size': 3000, 'quality': 'scr'},
'Movie.Name.2014.1080p.HDCAM.-.ReleaseGroup': {'size': 5300, 'quality': 'cam'},
'Movie.Name.2014.720p.HDSCR.4PARTS.MP4.AAC.ReleaseGroup': {'size': 2401, 'quality': 'scr'},
} }
correct = 0 correct = 0
+6 -4
View File
@@ -187,7 +187,7 @@ class Release(Plugin):
release['files'] = dict((k, [toUnicode(x) for x in v]) for k, v in group['files'].items() if v) release['files'] = dict((k, [toUnicode(x) for x in v]) for k, v in group['files'].items() if v)
db.update(release) db.update(release)
fireEvent('media.restatus', media['_id'], single = True) fireEvent('media.restatus', media['_id'], allowed_restatus = ['done'], single = True)
return True return True
except: except:
@@ -389,8 +389,8 @@ class Release(Plugin):
log.info('Ignored: %s', rel['name']) log.info('Ignored: %s', rel['name'])
continue continue
if rel['score'] <= 0: if rel['score'] < quality_custom.get('minimum_score'):
log.info('Ignored, score "%s" to low: %s', (rel['score'], rel['name'])) log.info('Ignored, score "%s" to low, need at least "%s": %s', (rel['score'], quality_custom.get('minimum_score'), rel['name']))
continue continue
if rel['size'] <= 50: if rel['size'] <= 50:
@@ -441,7 +441,6 @@ class Release(Plugin):
for rel in search_results: for rel in search_results:
rel_identifier = md5(rel['url']) rel_identifier = md5(rel['url'])
found_releases.append(rel_identifier)
release = { release = {
'_t': 'release', '_t': 'release',
@@ -482,6 +481,9 @@ class Release(Plugin):
# Update release in search_results # Update release in search_results
rel['status'] = rls.get('status') rel['status'] = rls.get('status')
if rel['status'] == 'available':
found_releases.append(rel_identifier)
return found_releases return found_releases
except: except:
log.error('Failed: %s', traceback.format_exc()) log.error('Failed: %s', traceback.format_exc())
+35 -5
View File
@@ -35,6 +35,7 @@ class Renamer(Plugin):
'desc': 'For the renamer to check for new files to rename in a folder', 'desc': 'For the renamer to check for new files to rename in a folder',
'params': { 'params': {
'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'}, 'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'},
'to_folder': {'desc': 'Optional: The folder to move releases to. Leave empty for default folder.'},
'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'}, 'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'},
'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'}, 'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'}, 'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'},
@@ -44,6 +45,13 @@ class Renamer(Plugin):
}, },
}) })
addApiView('renamer.progress', self.getProgress, docs = {
'desc': 'Get the progress of current renamer scan',
'return': {'type': 'object', 'example': """{
'progress': False || True,
}"""},
})
addEvent('renamer.scan', self.scan) addEvent('renamer.scan', self.scan)
addEvent('renamer.check_snatched', self.checkSnatched) addEvent('renamer.check_snatched', self.checkSnatched)
@@ -67,11 +75,17 @@ class Renamer(Plugin):
return True return True
def getProgress(self, **kwargs):
return {
'progress': self.renaming_started
}
def scanView(self, **kwargs): def scanView(self, **kwargs):
async = tryInt(kwargs.get('async', 0)) async = tryInt(kwargs.get('async', 0))
base_folder = kwargs.get('base_folder') base_folder = kwargs.get('base_folder')
media_folder = sp(kwargs.get('media_folder')) media_folder = sp(kwargs.get('media_folder'))
to_folder = kwargs.get('to_folder')
# Backwards compatibility, to be removed after a few versions :) # Backwards compatibility, to be removed after a few versions :)
if not media_folder: if not media_folder:
@@ -95,13 +109,13 @@ class Renamer(Plugin):
}) })
fire_handle = fireEvent if not async else fireEventAsync fire_handle = fireEvent if not async else fireEventAsync
fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download) fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download, to_folder = to_folder)
return { return {
'success': True 'success': True
} }
def scan(self, base_folder = None, release_download = None): def scan(self, base_folder = None, release_download = None, to_folder = None):
if not release_download: release_download = {} if not release_download: release_download = {}
if self.isDisabled(): if self.isDisabled():
@@ -115,6 +129,8 @@ class Renamer(Plugin):
base_folder = sp(self.conf('from')) base_folder = sp(self.conf('from'))
from_folder = sp(self.conf('from')) from_folder = sp(self.conf('from'))
if not to_folder:
to_folder = sp(self.conf('to')) to_folder = sp(self.conf('to'))
# Get media folder to process # Get media folder to process
@@ -220,10 +236,14 @@ class Renamer(Plugin):
nfo_name = self.conf('nfo_name') nfo_name = self.conf('nfo_name')
separator = self.conf('separator') separator = self.conf('separator')
if len(file_name) == 0:
log.error('Please fill in the filename option under renamer settings. Forcing it on <original>.<ext> to keep the same name as source file.')
file_name = '<original>.<ext>'
cd_keys = ['<cd>','<cd_nr>', '<original>'] cd_keys = ['<cd>','<cd_nr>', '<original>']
if not any(x in folder_name for x in cd_keys) and not any(x in file_name for x in cd_keys): if not any(x in folder_name for x in cd_keys) and not any(x in file_name for x in cd_keys):
log.error('Missing `cd` or `cd_nr` in the renamer. This will cause multi-file releases of being renamed to the same file.' log.error('Missing `cd` or `cd_nr` in the renamer. This will cause multi-file releases of being renamed to the same file. '
'Force adding it') 'Please add it in the renamer settings. Force adding it for now.')
file_name = '%s %s' % ('<cd>', file_name) file_name = '%s %s' % ('<cd>', file_name)
# Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader. # Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader.
@@ -791,7 +811,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
dest = sp(dest) dest = sp(dest)
try: try:
if os.path.exists(dest): if os.path.exists(dest) and os.path.isfile(dest):
raise Exception('Destination "%s" already exists' % dest) raise Exception('Destination "%s" already exists' % dest)
move_type = self.conf('file_action') move_type = self.conf('file_action')
@@ -865,7 +885,9 @@ Remove it if you want it to be renamed (again, or at least let it try again)
#If information is not available, we don't want the tag in the filename #If information is not available, we don't want the tag in the filename
replaced = replaced.replace('<' + x + '>', '') replaced = replaced.replace('<' + x + '>', '')
if self.conf('replace_doubles'):
replaced = self.replaceDoubles(replaced.lstrip('. ')) replaced = self.replaceDoubles(replaced.lstrip('. '))
for x, r in replacements.items(): for x, r in replacements.items():
if x in ['thename', 'namethe']: if x in ['thename', 'namethe']:
replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r)) replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r))
@@ -1322,6 +1344,14 @@ config = [{
'type': 'choice', 'type': 'choice',
'options': rename_options 'options': rename_options
}, },
{
'advanced': True,
'name': 'replace_doubles',
'type': 'bool',
'label': 'Clean Name',
'description': ('Attempt to clean up double separaters due to missing data for fields.','Sometimes this eliminates wanted white space (see <a href="https://github.com/RuudBurger/CouchPotatoServer/issues/2782">#2782</a>).'),
'default': True
},
{ {
'name': 'unrar', 'name': 'unrar',
'type': 'bool', 'type': 'bool',
+3 -4
View File
@@ -63,8 +63,8 @@ class Scanner(Plugin):
} }
file_sizes = { # in MB file_sizes = { # in MB
'movie': {'min': 300}, 'movie': {'min': 200},
'trailer': {'min': 2, 'max': 250}, 'trailer': {'min': 2, 'max': 199},
'backdrop': {'min': 0, 'max': 5}, 'backdrop': {'min': 0, 'max': 5},
} }
@@ -131,7 +131,7 @@ class Scanner(Plugin):
addEvent('scanner.name_year', self.getReleaseNameYear) addEvent('scanner.name_year', self.getReleaseNameYear)
addEvent('scanner.partnumber', self.getPartNumber) addEvent('scanner.partnumber', self.getPartNumber)
def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, on_found = None): def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, check_file_date = True, on_found = None):
folder = sp(folder) folder = sp(folder)
@@ -145,7 +145,6 @@ class Scanner(Plugin):
# Scan all files of the folder if no files are set # Scan all files of the folder if no files are set
if not files: if not files:
check_file_date = True
try: try:
files = [] files = []
for root, dirs, walk_files in os.walk(folder, followlinks=True): for root, dirs, walk_files in os.walk(folder, followlinks=True):
+1 -1
View File
@@ -16,7 +16,7 @@ autoload = 'Subtitle'
class Subtitle(Plugin): class Subtitle(Plugin):
services = ['opensubtitles', 'thesubdb', 'subswiki', 'podnapisi'] services = ['opensubtitles', 'thesubdb', 'subswiki', 'subscenter']
def __init__(self): def __init__(self):
addEvent('renamer.before', self.searchSingle) addEvent('renamer.before', self.searchSingle)
+8
View File
@@ -157,7 +157,15 @@ class Settings(object):
values[section] = {} values[section] = {}
for option in self.p.items(section): for option in self.p.items(section):
(option_name, option_value) = option (option_name, option_value) = option
is_password = False
try: is_password = self.types[section][option_name] == 'password'
except: pass
values[section][option_name] = self.get(option_name, section) values[section][option_name] = self.get(option_name, section)
if is_password and values[section][option_name]:
values[section][option_name] = len(values[section][option_name]) * '*'
return values return values
def save(self): def save(self):
-1
View File
@@ -14,7 +14,6 @@ class Env(object):
''' Environment variables ''' ''' Environment variables '''
_app = None _app = None
_encoding = 'UTF-8' _encoding = 'UTF-8'
_fs_encoding = 'UTF-8'
_debug = False _debug = False
_dev = False _dev = False
_settings = Settings() _settings = Settings()
+10 -4
View File
@@ -86,7 +86,6 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
encoding = 'UTF-8' encoding = 'UTF-8'
Env.set('encoding', encoding) Env.set('encoding', encoding)
Env.set('fs_encoding', sys.getfilesystemencoding())
# Do db stuff # Do db stuff
db_path = sp(os.path.join(data_dir, 'database')) db_path = sp(os.path.join(data_dir, 'database'))
@@ -117,7 +116,8 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
# Delete non zip files # Delete non zip files
if len(ints) != 1: if len(ints) != 1:
os.remove(os.path.join(root, backup_file)) try: os.remove(os.path.join(root, backup_file))
except: pass
else: else:
existing_backups.append((int(ints[0]), backup_file)) existing_backups.append((int(ints[0]), backup_file))
else: else:
@@ -205,7 +205,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
logger.addHandler(hdlr) logger.addHandler(hdlr)
# To file # To file
hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = 'utf-8') hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding'))
hdlr2.setFormatter(formatter) hdlr2.setFormatter(formatter)
logger.addHandler(hdlr2) logger.addHandler(hdlr2)
@@ -244,11 +244,13 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
# Basic config # Basic config
host = Env.setting('host', default = '0.0.0.0') host = Env.setting('host', default = '0.0.0.0')
# app.debug = development host6 = Env.setting('host6', default = '::')
config = { config = {
'use_reloader': reloader, 'use_reloader': reloader,
'port': tryInt(Env.setting('port', default = 5050)), 'port': tryInt(Env.setting('port', default = 5050)),
'host': host if host and len(host) > 0 else '0.0.0.0', 'host': host if host and len(host) > 0 else '0.0.0.0',
'host6': host6 if host6 and len(host6) > 0 else '::',
'ssl_cert': Env.setting('ssl_cert', default = None), 'ssl_cert': Env.setting('ssl_cert', default = None),
'ssl_key': Env.setting('ssl_key', default = None), 'ssl_key': Env.setting('ssl_key', default = None),
} }
@@ -331,6 +333,10 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
while try_restart: while try_restart:
try: try:
server.listen(config['port'], config['host']) server.listen(config['port'], config['host'])
try: server.listen(config['port'], config['host6'])
except: log.info2('Tried to bind to IPV6 but failed')
loop.start() loop.start()
server.close_all_connections() server.close_all_connections()
server.stop() server.stop()
+11 -4
View File
@@ -54,16 +54,22 @@
}, },
pushState: function(e){ pushState: function(e){
if((!e.meta && Browser.platform.mac) || (!e.control && !Browser.platform.mac)){ var self = this;
if((!e.meta && self.isMac()) || (!e.control && !self.isMac())){
(e).preventDefault(); (e).preventDefault();
var url = e.target.get('href'); var url = e.target.get('href');
if(History.getPath() != url)
// Middle click
if(e.event && e.event.button == 1)
window.open(url);
else if(History.getPath() != url)
History.push(url); History.push(url);
} }
}, },
isMac: function(){ isMac: function(){
return Browser.platform.mac return Browser.platform == 'mac'
}, },
createLayout: function(){ createLayout: function(){
@@ -325,11 +331,12 @@
}, },
openDerefered: function(e, el){ openDerefered: function(e, el){
var self = this;
(e).stop(); (e).stop();
var url = 'http://www.dereferer.org/?' + el.get('href'); var url = 'http://www.dereferer.org/?' + el.get('href');
if(el.get('target') == '_blank' || (e.meta && Browser.platform.mac) || (e.control && !Browser.platform.mac)) if(el.get('target') == '_blank' || (e.meta && self.isMac()) || (e.control && !self.isMac()))
window.open(url); window.open(url);
else else
window.location = url; window.location = url;
+1 -1
View File
@@ -117,7 +117,7 @@ var AboutSettingTab = new Class({
var self = this; var self = this;
var date = new Date(json.version.date * 1000); var date = new Date(json.version.date * 1000);
self.version_text.set('text', json.version.hash + (json.version.date ? ' ('+date.toLocaleString()+')' : '')); self.version_text.set('text', json.version.hash + (json.version.date ? ' ('+date.toLocaleString()+')' : ''));
self.updater_type.set('text', json.version.type + ', ' + json.branch); self.updater_type.set('text', (json.version.type != json.branch) ? (json.version.type + ', ' + json.branch) : json.branch);
} }
}); });
@@ -886,6 +886,9 @@ Option.Directory = new Class({
'text': 'Selected folder is empty' 'text': 'Selected folder is empty'
}).inject(self.dir_list) }).inject(self.dir_list)
//fix for webkit type browsers to refresh the dom for the file browser
//http://stackoverflow.com/questions/3485365/how-can-i-force-webkit-to-redraw-repaint-to-propagate-style-changes
self.dir_list.setStyle('webkitTransform', 'scale(1)');
self.caretAtEnd(); self.caretAtEnd();
}, },
+1
View File
@@ -6,6 +6,7 @@
<meta name="apple-mobile-web-app-capable" content="yes"> <meta name="apple-mobile-web-app-capable" content="yes">
<meta name="mobile-web-app-capable" content="yes"> <meta name="mobile-web-app-capable" content="yes">
<meta http-equiv="X-UA-Compatible" content="IE=edge" /> <meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="robots" content="noindex, nofollow" />
{% for url in fireEvent('clientscript.get_styles', as_html = True, location = 'front', single = True) %} {% for url in fireEvent('clientscript.get_styles', as_html = True, location = 'front', single = True) %}
<link rel="stylesheet" href="{{ Env.get('web_base') }}{{ url }}" type="text/css">{% end %} <link rel="stylesheet" href="{{ Env.get('web_base') }}{{ url }}" type="text/css">{% end %}
+6 -4
View File
@@ -33,8 +33,8 @@ DESC=CouchPotato
## ##
## CP_USER= #$RUN_AS, username to run couchpotato under, the default is couchpotato ## CP_USER= #$RUN_AS, username to run couchpotato under, the default is couchpotato
## CP_HOME= #$APP_PATH, the location of couchpotato.py, the default is /opt/couchpotato ## CP_HOME= #$APP_PATH, the location of couchpotato.py, the default is /opt/couchpotato
## CP_DATA= #$DATA_DIR, the location of couchpotato.db, cache, logs, the default is /var/couchpotato ## CP_DATA= #$DATA_DIR, the location of couchpotato.db, cache, logs, the default is /var/opt/couchpotato
## CP_PIDFILE= #$PID_FILE, the location of couchpotato.pid, the default is /var/run/couchpotato.pid ## CP_PIDFILE= #$PID_FILE, the location of couchpotato.pid, the default is /var/run/couchpotato/couchpotato.pid
## PYTHON_BIN= #$DAEMON, the location of the python binary, the default is /usr/bin/python ## PYTHON_BIN= #$DAEMON, the location of the python binary, the default is /usr/bin/python
## CP_OPTS= #$EXTRA_DAEMON_OPTS, extra cli option for couchpotato, i.e. " --config_file=/home/couchpotato/couchpotato.ini" ## CP_OPTS= #$EXTRA_DAEMON_OPTS, extra cli option for couchpotato, i.e. " --config_file=/home/couchpotato/couchpotato.ini"
## SSD_OPTS= #$EXTRA_SSD_OPTS, extra start-stop-daemon option like " --group=users" ## SSD_OPTS= #$EXTRA_SSD_OPTS, extra start-stop-daemon option like " --group=users"
@@ -51,10 +51,10 @@ RUN_AS=${CP_USER-couchpotato}
APP_PATH=${CP_HOME-/opt/couchpotato/} APP_PATH=${CP_HOME-/opt/couchpotato/}
# Data directory where couchpotato.db, cache and logs are stored # Data directory where couchpotato.db, cache and logs are stored
DATA_DIR=${CP_DATA-/var/couchpotato} DATA_DIR=${CP_DATA-/var/opt/couchpotato}
# Path to store PID file # Path to store PID file
PID_FILE=${CP_PIDFILE-/var/run/couchpotato.pid} PID_FILE=${CP_PIDFILE-/var/run/couchpotato/couchpotato.pid}
# path to python bin # path to python bin
DAEMON=${PYTHON_BIN-/usr/bin/python} DAEMON=${PYTHON_BIN-/usr/bin/python}
@@ -95,6 +95,8 @@ fi
case "$1" in case "$1" in
start) start)
touch $PID_FILE
chown $RUN_AS $PID_FILE
echo "Starting $DESC" echo "Starting $DESC"
start-stop-daemon -d $APP_PATH -c $RUN_AS $EXTRA_SSD_OPTS --start --pidfile $PID_FILE --exec $DAEMON -- $DAEMON_OPTS start-stop-daemon -d $APP_PATH -c $RUN_AS $EXTRA_SSD_OPTS --start --pidfile $PID_FILE --exec $DAEMON -- $DAEMON_OPTS
;; ;;
+1 -1
View File
@@ -15,7 +15,7 @@
# 02110-1301 USA # 02110-1301 USA
######################### END LICENSE BLOCK ######################### ######################### END LICENSE BLOCK #########################
__version__ = "2.2.1" __version__ = "2.3.0"
from sys import version_info from sys import version_info
+49 -15
View File
@@ -12,34 +12,68 @@ Example::
If no paths are provided, it takes its input from stdin. If no paths are provided, it takes its input from stdin.
""" """
from io import open
from sys import argv, stdin
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector from chardet.universaldetector import UniversalDetector
def description_of(file, name='stdin'): def description_of(lines, name='stdin'):
"""Return a string describing the probable encoding of a file.""" """
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector() u = UniversalDetector()
for line in file: for line in lines:
u.feed(line) u.feed(line)
u.close() u.close()
result = u.result result = u.result
if result['encoding']: if result['encoding']:
return '%s: %s with confidence %s' % (name, return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['encoding'],
result['confidence']) result['confidence'])
else: else:
return '%s: no result' % name return '{0}: no result'.format(name)
def main(): def main(argv=None):
if len(argv) <= 1: '''
print(description_of(stdin)) Handles command line arguments and gets things started.
else:
for path in argv[1:]: :param argv: List of arguments, as if specified on the command-line.
with open(path, 'rb') as f: If None, ``sys.argv[1:]`` is used instead.
print(description_of(f, path)) :type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__': if __name__ == '__main__':
+8
View File
@@ -177,6 +177,12 @@ class JapaneseContextAnalysis:
return -1, 1 return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis): class SJISContextAnalysis(JapaneseContextAnalysis):
def __init__(self):
self.charset_name = "SHIFT_JIS"
def get_charset_name(self):
return self.charset_name
def get_order(self, aBuf): def get_order(self, aBuf):
if not aBuf: if not aBuf:
return -1, 1 return -1, 1
@@ -184,6 +190,8 @@ class SJISContextAnalysis(JapaneseContextAnalysis):
first_char = wrap_ord(aBuf[0]) first_char = wrap_ord(aBuf[0])
if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)): if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
charLen = 2 charLen = 2
if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
self.charset_name = "CP932"
else: else:
charLen = 1 charLen = 1
+3 -3
View File
@@ -129,11 +129,11 @@ class Latin1Prober(CharSetProber):
if total < 0.01: if total < 0.01:
confidence = 0.0 confidence = 0.0
else: else:
confidence = ((self._mFreqCounter[3] / total) confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
- (self._mFreqCounter[1] * 20.0 / total)) / total)
if confidence < 0.0: if confidence < 0.0:
confidence = 0.0 confidence = 0.0
# lower the confidence of latin1 so that other more accurate # lower the confidence of latin1 so that other more accurate
# detector can take priority. # detector can take priority.
confidence = confidence * 0.5 confidence = confidence * 0.73
return confidence return confidence
+3 -6
View File
@@ -353,7 +353,7 @@ SJIS_cls = (
2,2,2,2,2,2,2,2, # 68 - 6f 2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77 2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f 2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87 3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f 3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97 3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f 3,3,3,3,3,3,3,3, # 98 - 9f
@@ -369,9 +369,8 @@ SJIS_cls = (
2,2,2,2,2,2,2,2, # d8 - df 2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7 3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef 3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7 3,3,3,3,3,3,3,3, # f0 - f7
4,4,4,4,4,0,0,0 # f8 - ff 3,3,3,3,3,0,0,0) # f8 - ff
)
SJIS_st = ( SJIS_st = (
@@ -571,5 +570,3 @@ UTF8SMModel = {'classTable': UTF8_cls,
'stateTable': UTF8_st, 'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable, 'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'} 'name': 'UTF-8'}
# flake8: noqa
+1 -1
View File
@@ -47,7 +47,7 @@ class SJISProber(MultiByteCharSetProber):
self._mContextAnalyzer.reset() self._mContextAnalyzer.reset()
def get_charset_name(self): def get_charset_name(self):
return "SHIFT_JIS" return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf): def feed(self, aBuf):
aLen = len(aBuf) aLen = len(aBuf)
+2 -2
View File
@@ -71,9 +71,9 @@ class UniversalDetector:
if not self._mGotData: if not self._mGotData:
# If the data starts with BOM, we know it is UTF # If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM: if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM # EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0} self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE: elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM # FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0} self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
View File
+272
View File
@@ -0,0 +1,272 @@
# -*- coding: utf-8 -*-
# Changed
# Removed iso8601 library requirement
# Added CP logging
import os
import re
import json
import webbrowser
from urllib import urlencode
from couchpotato import CPLog
from dateutil.parser import parse
import requests
BASE_URL = 'https://api.put.io/v2'
ACCESS_TOKEN_URL = 'https://api.put.io/v2/oauth2/access_token'
AUTHENTICATION_URL = 'https://api.put.io/v2/oauth2/authenticate'
log = CPLog(__name__)
class AuthHelper(object):
def __init__(self, client_id, client_secret, redirect_uri, type='code'):
self.client_id = client_id
self.client_secret = client_secret
self.callback_url = redirect_uri
self.type = type
@property
def authentication_url(self):
"""Redirect your users to here to authenticate them."""
params = {
'client_id': self.client_id,
'response_type': self.type,
'redirect_uri': self.callback_url
}
return AUTHENTICATION_URL + "?" + urlencode(params)
def open_authentication_url(self):
webbrowser.open(self.authentication_url)
def get_access_token(self, code):
params = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'redirect_uri': self.callback_url,
'code': code
}
response = requests.get(ACCESS_TOKEN_URL, params=params)
log.debug(response)
assert response.status_code == 200
return response.json()['access_token']
class Client(object):
def __init__(self, access_token):
self.access_token = access_token
self.session = requests.session()
# Keep resource classes as attributes of client.
# Pass client to resource classes so resource object
# can use the client.
attributes = {'client': self}
self.File = type('File', (_File,), attributes)
self.Transfer = type('Transfer', (_Transfer,), attributes)
self.Account = type('Account', (_Account,), attributes)
def request(self, path, method='GET', params=None, data=None, files=None,
headers=None, raw=False, stream=False):
"""
Wrapper around requests.request()
Prepends BASE_URL to path.
Inserts oauth_token to query params.
Parses response as JSON and returns it.
"""
if not params:
params = {}
if not headers:
headers = {}
# All requests must include oauth_token
params['oauth_token'] = self.access_token
headers['Accept'] = 'application/json'
url = BASE_URL + path
log.debug('url: %s', url)
response = self.session.request(
method, url, params=params, data=data, files=files,
headers=headers, allow_redirects=True, stream=stream)
log.debug('response: %s', response)
if raw:
return response
log.debug('content: %s', response.content)
try:
response = json.loads(response.content)
except ValueError:
raise Exception('Server didn\'t send valid JSON:\n%s\n%s' % (
response, response.content))
if response['status'] == 'ERROR':
raise Exception(response['error_type'])
return response
class _BaseResource(object):
client = None
def __init__(self, resource_dict):
"""Constructs the object from a dict."""
# All resources must have id and name attributes
self.id = None
self.name = None
self.__dict__.update(resource_dict)
try:
self.created_at = parse(self.created_at)
except AttributeError:
self.created_at = None
def __str__(self):
return self.name.encode('utf-8')
def __repr__(self):
# shorten name for display
name = self.name[:17] + '...' if len(self.name) > 20 else self.name
return '<%s id=%r, name="%r">' % (
self.__class__.__name__, self.id, name)
class _File(_BaseResource):
@classmethod
def get(cls, id):
d = cls.client.request('/files/%i' % id, method='GET')
t = d['file']
return cls(t)
@classmethod
def list(cls, parent_id=0):
d = cls.client.request('/files/list', params={'parent_id': parent_id})
files = d['files']
return [cls(f) for f in files]
@classmethod
def upload(cls, path, name=None, parent_id=0):
with open(path) as f:
if name:
files = {'file': (name, f)}
else:
files = {'file': f}
d = cls.client.request('/files/upload', method='POST',
data={'parent_id': parent_id}, files=files)
f = d['file']
return cls(f)
def dir(self):
"""List the files under directory."""
return self.list(parent_id=self.id)
def download(self, dest='.', delete_after_download=False):
if self.content_type == 'application/x-directory':
self._download_directory(dest, delete_after_download)
else:
self._download_file(dest, delete_after_download)
def _download_directory(self, dest='.', delete_after_download=False):
name = self.name
if isinstance(name, unicode):
name = name.encode('utf-8', 'replace')
dest = os.path.join(dest, name)
if not os.path.exists(dest):
os.mkdir(dest)
for sub_file in self.dir():
sub_file.download(dest, delete_after_download)
if delete_after_download:
self.delete()
def _download_file(self, dest='.', delete_after_download=False):
response = self.client.request(
'/files/%s/download' % self.id, raw=True, stream=True)
filename = re.match(
'attachment; filename=(.*)',
response.headers['content-disposition']).groups()[0]
# If file name has spaces, it must have quotes around.
filename = filename.strip('"')
with open(os.path.join(dest, filename), 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
if delete_after_download:
self.delete()
def delete(self):
return self.client.request('/files/delete', method='POST',
data={'file_ids': str(self.id)})
def move(self, parent_id):
return self.client.request('/files/move', method='POST',
data={'file_ids': str(self.id), 'parent_id': str(parent_id)})
def rename(self, name):
return self.client.request('/files/rename', method='POST',
data={'file_id': str(self.id), 'name': str(name)})
class _Transfer(_BaseResource):
@classmethod
def list(cls):
d = cls.client.request('/transfers/list')
transfers = d['transfers']
return [cls(t) for t in transfers]
@classmethod
def get(cls, id):
d = cls.client.request('/transfers/%i' % id, method='GET')
t = d['transfer']
return cls(t)
@classmethod
def add_url(cls, url, parent_id=0, extract=False, callback_url=None):
d = cls.client.request('/transfers/add', method='POST', data=dict(
url=url, save_parent_id=parent_id, extract=extract,
callback_url=callback_url))
t = d['transfer']
return cls(t)
@classmethod
def add_torrent(cls, path, parent_id=0, extract=False, callback_url=None):
with open(path) as f:
files = {'file': f}
d = cls.client.request('/files/upload', method='POST', files=files,
data=dict(save_parent_id=parent_id,
extract=extract,
callback_url=callback_url))
t = d['transfer']
return cls(t)
@classmethod
def clean(cls):
return cls.client.request('/transfers/clean', method='POST')
class _Account(_BaseResource):
@classmethod
def info(cls):
return cls.client.request('/account/info', method='GET')
@classmethod
def settings(cls):
return cls.client.request('/account/settings', method='GET')
-134
View File
@@ -1,134 +0,0 @@
from xml.dom.minidom import parseString
from httplib import HTTPSConnection
from urllib import urlencode
__version__ = "0.1"
API_SERVER = 'notifymywindowsphone.com'
ADD_PATH = '/publicapi/notify'
USER_AGENT = "PyNMWP/v%s" % __version__
def uniq_preserve(seq): # Dave Kirby
# Order preserving
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def uniq(seq):
# Not order preserving
return {}.fromkeys(seq).keys()
class PyNMWP(object):
"""PyNMWP(apikey=[], developerkey=None)
takes 2 optional arguments:
- (opt) apykey: might me a string containing 1 key or an array of keys
- (opt) developerkey: where you can store your developer key
"""
def __init__(self, apikey = [], developerkey = None):
self._developerkey = None
self.developerkey(developerkey)
if apikey:
if type(apikey) == str:
apikey = [apikey]
self._apikey = uniq(apikey)
def addkey(self, key):
"Add a key (register ?)"
if type(key) == str:
if not key in self._apikey:
self._apikey.append(key)
elif type(key) == list:
for k in key:
if not k in self._apikey:
self._apikey.append(k)
def delkey(self, key):
"Removes a key (unregister ?)"
if type(key) == str:
if key in self._apikey:
self._apikey.remove(key)
elif type(key) == list:
for k in key:
if key in self._apikey:
self._apikey.remove(k)
def developerkey(self, developerkey):
"Sets the developer key (and check it has the good length)"
if type(developerkey) == str and len(developerkey) == 48:
self._developerkey = developerkey
def push(self, application = "", event = "", description = "", url = "", priority = 0, batch_mode = False):
"""Pushes a message on the registered API keys.
takes 5 arguments:
- (req) application: application name [256]
- (req) event: event name [1000]
- (req) description: description [10000]
- (opt) url: url [512]
- (opt) priority: from -2 (lowest) to 2 (highest) (def:0)
- (opt) batch_mode: call API 5 by 5 (def:False)
Warning: using batch_mode will return error only if all API keys are bad
cf: http://nma.usk.bz/api.php
"""
datas = {
'application': application[:256].encode('utf8'),
'event': event[:1024].encode('utf8'),
'description': description[:10000].encode('utf8'),
'priority': priority
}
if url:
datas['url'] = url[:512]
if self._developerkey:
datas['developerkey'] = self._developerkey
results = {}
if not batch_mode:
for key in self._apikey:
datas['apikey'] = key
res = self.callapi('POST', ADD_PATH, datas)
results[key] = res
else:
for i in range(0, len(self._apikey), 5):
datas['apikey'] = ",".join(self._apikey[i:i + 5])
res = self.callapi('POST', ADD_PATH, datas)
results[datas['apikey']] = res
return results
def callapi(self, method, path, args):
headers = { 'User-Agent': USER_AGENT }
if method == "POST":
headers['Content-type'] = "application/x-www-form-urlencoded"
http_handler = HTTPSConnection(API_SERVER)
http_handler.request(method, path, urlencode(args), headers)
resp = http_handler.getresponse()
try:
res = self._parse_reponse(resp.read())
except Exception, e:
res = {'type': "pynmwperror",
'code': 600,
'message': str(e)
}
pass
return res
def _parse_reponse(self, response):
root = parseString(response).firstChild
for elem in root.childNodes:
if elem.nodeType == elem.TEXT_NODE: continue
if elem.tagName == 'success':
res = dict(elem.attributes.items())
res['message'] = ""
res['type'] = elem.tagName
return res
if elem.tagName == 'error':
res = dict(elem.attributes.items())
res['message'] = elem.firstChild.nodeValue
res['type'] = elem.tagName
return res
+4 -4
View File
@@ -13,7 +13,7 @@ Requests is an HTTP library, written in Python, for human beings. Basic GET
usage: usage:
>>> import requests >>> import requests
>>> r = requests.get('http://python.org') >>> r = requests.get('https://www.python.org')
>>> r.status_code >>> r.status_code
200 200
>>> 'Python is a programming language' in r.content >>> 'Python is a programming language' in r.content
@@ -22,7 +22,7 @@ usage:
... or POST: ... or POST:
>>> payload = dict(key1='value1', key2='value2') >>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload) >>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text) >>> print(r.text)
{ {
... ...
@@ -42,8 +42,8 @@ is at <http://python-requests.org>.
""" """
__title__ = 'requests' __title__ = 'requests'
__version__ = '2.4.0' __version__ = '2.5.1'
__build__ = 0x020400 __build__ = 0x020501
__author__ = 'Kenneth Reitz' __author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0' __license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2014 Kenneth Reitz' __copyright__ = 'Copyright 2014 Kenneth Reitz'
+27 -13
View File
@@ -15,19 +15,21 @@ from .packages.urllib3 import Retry
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers, from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url) prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError) ProxyError, RetryError)
from .auth import _basic_auth_str from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False DEFAULT_POOLBLOCK = False
@@ -59,8 +61,12 @@ class HTTPAdapter(BaseAdapter):
:param pool_connections: The number of urllib3 connection pools to cache. :param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool. :param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection :param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed connections and should attempt. Note, this applies only to failed DNS lookups, socket
timeouts, never to requests where the server returns a response. connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections. :param pool_block: Whether the connection pool should block for connections.
Usage:: Usage::
@@ -76,7 +82,10 @@ class HTTPAdapter(BaseAdapter):
def __init__(self, pool_connections=DEFAULT_POOLSIZE, def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK): pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {} self.config = {}
self.proxy_manager = {} self.proxy_manager = {}
@@ -122,7 +131,7 @@ class HTTPAdapter(BaseAdapter):
self._pool_block = block self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, **pool_kwargs) block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs): def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy. """Return urllib3 ProxyManager for the given proxy.
@@ -269,7 +278,7 @@ class HTTPAdapter(BaseAdapter):
proxy = proxies.get(scheme) proxy = proxies.get(scheme)
if proxy and scheme != 'https': if proxy and scheme != 'https':
url, _ = urldefrag(request.url) url = urldefragauth(request.url)
else: else:
url = request.path_url url = request.path_url
@@ -316,8 +325,10 @@ class HTTPAdapter(BaseAdapter):
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content. :param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request. :param timeout: (optional) How long to wait for the server to send
:type timeout: float or tuple (connect timeout, read timeout), eg (3.1, 20) data before giving up, as a float, or a (`connect timeout, read
timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates. :param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted. :param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request. :param proxies: (optional) The proxies dictionary to apply to the request.
@@ -355,7 +366,7 @@ class HTTPAdapter(BaseAdapter):
assert_same_host=False, assert_same_host=False,
preload_content=False, preload_content=False,
decode_content=False, decode_content=False,
retries=Retry(self.max_retries, read=False), retries=self.max_retries,
timeout=timeout timeout=timeout
) )
@@ -400,13 +411,16 @@ class HTTPAdapter(BaseAdapter):
# All is well, return the connection to the pool. # All is well, return the connection to the pool.
conn._put_conn(low_conn) conn._put_conn(low_conn)
except socket.error as sockerr: except (ProtocolError, socket.error) as err:
raise ConnectionError(sockerr, request=request) raise ConnectionError(err, request=request)
except MaxRetryError as e: except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError): if isinstance(e.reason, ConnectTimeoutError):
raise ConnectTimeout(e, request=request) raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
raise ConnectionError(e, request=request) raise ConnectionError(e, request=request)
except _ProxyError as e: except _ProxyError as e:
+16 -5
View File
@@ -22,12 +22,17 @@ def request(method, url, **kwargs):
:param url: URL for the new :class:`Request` object. :param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the request in seconds. :param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a (`connect timeout, read timeout
<user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded. :param stream: (optional) if ``False``, the response content will be immediately downloaded.
@@ -41,7 +46,12 @@ def request(method, url, **kwargs):
""" """
session = sessions.Session() session = sessions.Session()
return session.request(method=method, url=url, **kwargs) response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
return response
def get(url, **kwargs): def get(url, **kwargs):
@@ -77,15 +87,16 @@ def head(url, **kwargs):
return request('head', url, **kwargs) return request('head', url, **kwargs)
def post(url, data=None, **kwargs): def post(url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object. """Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object. :param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes. :param \*\*kwargs: Optional arguments that ``request`` takes.
""" """
return request('post', url, data=data, **kwargs) return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs): def put(url, data=None, **kwargs):
+15 -3
View File
@@ -17,6 +17,7 @@ from base64 import b64encode
from .compat import urlparse, str from .compat import urlparse, str
from .cookies import extract_cookies_to_jar from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data' CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
@@ -66,6 +67,7 @@ class HTTPDigestAuth(AuthBase):
self.nonce_count = 0 self.nonce_count = 0
self.chal = {} self.chal = {}
self.pos = None self.pos = None
self.num_401_calls = 1
def build_digest_header(self, method, url): def build_digest_header(self, method, url):
@@ -150,6 +152,11 @@ class HTTPDigestAuth(AuthBase):
return 'Digest %s' % (base) return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self.num_401_calls = 1
def handle_401(self, r, **kwargs): def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed.""" """Takes the given response and tries digest-auth, if needed."""
@@ -162,7 +169,7 @@ class HTTPDigestAuth(AuthBase):
if 'digest' in s_auth.lower() and num_401_calls < 2: if 'digest' in s_auth.lower() and num_401_calls < 2:
setattr(self, 'num_401_calls', num_401_calls + 1) self.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE) pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1)) self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
@@ -182,7 +189,7 @@ class HTTPDigestAuth(AuthBase):
return _r return _r
setattr(self, 'num_401_calls', 1) self.num_401_calls = 1
return r return r
def __call__(self, r): def __call__(self, r):
@@ -192,6 +199,11 @@ class HTTPDigestAuth(AuthBase):
try: try:
self.pos = r.body.tell() self.pos = r.body.tell()
except AttributeError: except AttributeError:
pass # In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self.pos = None
r.register_hook('response', self.handle_401) r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
return r return r
+2 -2
View File
@@ -4,7 +4,7 @@
pythoncompat pythoncompat
""" """
from .packages import chardet import chardet
import sys import sys
@@ -76,7 +76,7 @@ is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
try: try:
import simplejson as json import simplejson as json
except (ImportError, SyntaxError): except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it thows a SyntaxError # simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals. # because of u'...' Unicode literals.
import json import json

Some files were not shown because too many files have changed in this diff Show More