Compare commits

..

440 Commits

Author SHA1 Message Date
Ruud
965b8089f1 Don't error out on empty date 2015-02-22 18:07:43 +01:00
Ruud
0bd953409e One up! 2015-02-22 17:43:58 +01:00
Ruud
32cf40c121 Merge branch 'master' into desktop 2015-02-22 17:42:59 +01:00
Ruud
f46cda99ce Merge branch 'develop' 2015-02-22 17:42:37 +01:00
Ruud
adb744a526 Don't show double updater type 2015-02-22 17:42:29 +01:00
Ruud
be1065641c Merge branch 'master' into desktop
# Conflicts:
#	installer.iss
#	setup.py
#	version.py
2015-02-22 17:37:20 +01:00
Ruud
a96e924916 Merge branch 'develop' 2015-02-22 16:24:17 +01:00
Ruud
0f82cda811 Remove podnapisi from subtitle list 2015-02-22 16:09:22 +01:00
Ruud
0d6c3c8ecb Yify, only use data when available 2015-02-22 16:06:07 +01:00
Ruud
6598f53fd4 Quality check improve 2015-02-22 15:55:54 +01:00
Ruud
6b8458d87f Hadouken apikey check not using correct settingskey
fix #4674
2015-02-22 14:49:37 +01:00
Ruud
99a0621238 Use keep-alive connection 2015-02-22 14:30:50 +01:00
Ruud Burger
c52666309a Merge pull request #4676 from peerster/develop
Update torrentshack with new URL
2015-02-22 13:39:21 +01:00
Ruud
84a458d40b Add user-agent and type to omdbapi 2015-02-22 13:06:29 +01:00
Ruud
f8631c6d53 Add extra category for TorrentLeech
fix #4683
2015-02-21 21:29:37 +01:00
Ruud
b19b0775c7 Force update to new poster on refresh
fix #4671
2015-02-20 22:16:12 +01:00
peerster
2dc1c1dd38 Update torrentshack with new URL 2015-02-19 20:07:22 +01:00
Ruud
55288ad648 Merge branch 'develop' 2015-02-18 17:21:39 +01:00
Ruud
7db8b233c8 Don't decode string if confidence isn't high enough 2015-02-18 17:21:24 +01:00
Ruud Burger
d5627c47f9 Dummy commit
For all source people who got "desktop" version by accident
2015-02-17 20:09:57 +01:00
Ruud
a9e62c4461 Merge branch 'develop' 2015-02-16 16:24:21 +01:00
Ruud
0088584a50 Revert "Merge branch 'desktop' of github.com:RuudBurger/CouchPotatoServer"
This reverts commit 40cd1b4e08, reversing
changes made to 5b1dd68675.
2015-02-16 16:23:02 +01:00
Ruud
b20a590aab One up! 2015-02-15 20:37:00 +01:00
Ruud
f08f2b0339 Merge branch 'develop' 2015-02-15 19:25:45 +01:00
Ruud
427c77a9ef Remove podnapisi 2015-02-15 19:23:45 +01:00
Ruud
a4dff7a331 Merge branch 'develop' 2015-02-15 12:04:35 +01:00
Ruud
40cd1b4e08 Merge branch 'desktop' of github.com:RuudBurger/CouchPotatoServer 2015-02-15 12:04:30 +01:00
Ruud
94c3969f10 Use https for yify proxy 2015-02-10 20:52:15 +01:00
Ruud
debd1855dd Move Yify to v2 2015-02-10 20:47:19 +01:00
Ruud
9f77597c11 Torrentz search on title
fix #4510
2015-02-10 17:15:53 +01:00
Ruud
afc9039625 Also search lower qualities on OMGWTF
fix #4527
2015-02-10 16:50:53 +01:00
Ruud
920d3cb44e Don't verify SYNO downloader thingymajig
fix #4641
2015-02-10 16:27:13 +01:00
Ruud
b1fc8ad862 Letterboxed new html markup
fix #4640
2015-02-10 16:21:32 +01:00
Ruud
11b9bc39ab Show tried too often error for TD 2015-02-10 15:40:55 +01:00
Ruud
6dcb3f3bf2 Change bitsoup category id
fixes #4629
2015-02-10 14:55:22 +01:00
Ruud
ce768f45c5 Make RottenTomato logging more clear
close #4618
2015-02-10 14:36:54 +01:00
Ruud
9b91d1d6c0 Remove favor, link to api key page 2015-02-10 14:10:55 +01:00
Ruud
d9c7a97604 Merge branch 'develop' of git://github.com/jonnyboy/CouchPotatoServer into jonnyboy-develop 2015-02-10 14:03:06 +01:00
Ruud
0fd01aa697 Cleanup 2015-02-10 14:01:51 +01:00
Ruud
58615e6f9b Merge branch 'develop' of git://github.com/grasshide/CouchPotatoServer into grasshide-develop 2015-02-10 13:54:13 +01:00
Ruud
2277322e57 Traceback import missing 2015-02-10 13:47:22 +01:00
Ruud Burger
18020e609e Merge pull request #4479 from sjlu/develop
Adding the ability to receive notifications through Webhooks
2015-02-10 13:19:59 +01:00
Ruud
6a31b920ac Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2015-02-10 13:15:43 +01:00
Ruud
c1266a36e4 Re-use recursion code 2015-02-10 13:15:08 +01:00
Ruud
578effc538 Merge branch 'develop' of git://github.com/dumaresq/CouchPotatoServer into dumaresq-develop 2015-02-10 13:09:12 +01:00
Ruud Burger
d881120013 Merge pull request #4513 from starkers/remotes/origin/develop
added touch and chown to the $PID_FILE
2015-02-10 13:07:25 +01:00
Ruud Burger
da5318033a Merge pull request #4380 from mannkind/develop
Initial support for Plex Media Server w/Plex Home
2015-02-10 13:04:27 +01:00
Ruud Burger
31df5bce01 Merge pull request #4612 from maikhorma/maikhorma-#2782
Simple workaround for #2782
2015-02-10 13:02:24 +01:00
Ruud
d5622b7cba Remove www from torrentday domain 2015-02-10 13:01:19 +01:00
Ruud Burger
26ad1b354f Merge pull request #4552 from coolius/patch-1
Update torrentday url
2015-02-10 12:53:52 +01:00
Ruud
7a616a81f7 Remove www from iptorrents 2015-02-10 12:52:05 +01:00
Ruud Burger
275aefc3cc Merge pull request #4553 from coolius/patch-2
Update iptorrents url
2015-02-10 12:51:02 +01:00
Ruud Burger
2b32490f72 Merge pull request #4649 from sammy2142/patch-1
Update kickass url from kickass.so to kickass.to
2015-02-10 12:49:16 +01:00
sammy2142
7b9043c16b Update kickass url from kickass.so to kickass.to
Kickass has reverted back to the .to domain as the .so domain was seized:
http://torrentfreak.com/kickasstorrents-taken-domain-name-seizure-150209/
2015-02-10 11:11:30 +00:00
maikhorma
cf83f99be0 Updated UI
Tried to make it a bit cleaner.
2015-02-01 15:28:05 -05:00
maikhorma
fb8a66d207 Shortcut to address #2782
Until there is a more elegant solution to avoid unwanted white space
trimming, this will let users disable that feature if it is not
something they need.
2015-02-01 14:43:16 -05:00
Ruud
e8a3645bc6 Log failed folder getting 2015-02-01 12:18:31 +01:00
Ruud
592e40993c Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2015-01-31 10:32:24 +01:00
Ruud
b00e69e222 TorrentBytes cut off longer titles
fix #4590
2015-01-31 10:32:15 +01:00
Ruud
c9b4c8167f Actually include host in log 2015-01-28 11:35:26 +01:00
coolius
cdb9cfe756 Update iptorrents.py
Updated iptorrents url to blockade-free iptorrents.eu
2015-01-19 17:18:56 +00:00
coolius
e52f50b204 Update torrentday.py
Updated torrentday url to blockade-free torrentday.eu
2015-01-19 17:17:31 +00:00
Ruud
5b1dd68675 Merge branch 'develop' 2015-01-17 13:04:58 +01:00
Ruud
770c2be14c Create detail url if permalink is false 2015-01-17 13:04:47 +01:00
Ruud
4603b0c3b9 Merge branch 'develop' 2015-01-14 17:01:25 +01:00
Ruud
ab61961a64 Use detail url 2015-01-14 16:59:29 +01:00
Ruud
6aca799bbb Newznab: use guid for detail url 2015-01-14 16:55:30 +01:00
David Stark
89836be1d1 added touch and chown to the $PID_FILE 2015-01-12 17:37:26 +01:00
Andrew Dumaresq
20e1283627 better way to find the folder 2015-01-11 11:57:14 -05:00
Andrew Dumaresq
ee8406e026 Minor text change 2015-01-11 11:45:29 -05:00
Andrew Dumaresq
514941b785 Merge branch 'develop' of https://github.com/dumaresq/CouchPotatoServer into develop 2015-01-11 11:42:52 -05:00
Ruud
1510e37652 Update Tornado 2015-01-11 16:18:22 +01:00
Ruud
e1e39cd3f4 Update requests 2015-01-11 16:17:33 +01:00
Ruud
e1bb8c5419 Update Chardet 2015-01-11 16:15:52 +01:00
Ruud
568e42e2f3 Merge branch 'develop' 2015-01-11 00:28:44 +01:00
Ruud
17fa33a496 Update user agent 2015-01-11 00:25:58 +01:00
Ruud
601f0b54cf Send CP header when downloading from newznab 2015-01-11 00:25:51 +01:00
dumaresq
51d44bfc3e Merge pull request #1 from RuudBurger/develop
Develop
2015-01-10 17:01:43 -05:00
Ruud
12148217a2 Log failed notification 2015-01-10 13:41:17 +01:00
Ruud
4929e7bbcc Merge branch 'master' of github.com:RuudBurger/CouchPotatoServer 2015-01-10 12:31:29 +01:00
Ruud
38ee0ebe7b Merge branch 'develop' 2015-01-10 12:26:47 +01:00
Ruud
132fa12ef4 Late list not loaded on home 2015-01-10 12:17:47 +01:00
Ruud
1827c2e4cd Don't parse omgwtfnzb if no results are returned 2015-01-10 12:17:30 +01:00
Ruud
f423bca06b Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2015-01-09 20:13:42 +01:00
Ruud
e7b089edf5 Give better XML issues 2015-01-09 20:13:17 +01:00
Ruud Burger
b8b7d94a6a Merge pull request #4456 from dumaresq/develop
Bug fixes and new features for putio
2015-01-09 20:08:30 +01:00
Ruud
9964d9591b Merge branch 'develop' 2015-01-08 16:57:03 +01:00
Ruud
2c080fec3d TorrentBytes nbsp issue
fix #4026
2015-01-08 16:56:38 +01:00
Ruud
4c68566c77 Use new OMGWTFNZB api
fix #4471
2015-01-08 14:59:53 +01:00
Steven Lu
a3af784c18 Adding the ability to receive notifications through Webhooks 2015-01-06 18:47:19 -05:00
grasshide
ac6f295c93 New algorithm to use some kind of crowd logic on newznab powered
providers.
2015-01-05 15:00:40 +01:00
Andrew Dumaresq
2c72cd7d9f Added new folder option and fixed bug in callback url 2015-01-04 17:10:40 -05:00
Andrew Dumaresq
d012dc5c85 Added new folder option 2015-01-04 17:10:16 -05:00
Andrew Dumaresq
038b4c63ee Updated to follow putio API changes 2015-01-04 17:09:36 -05:00
Ruud
2f5526d57e Merge branch 'develop' 2015-01-02 18:54:36 +01:00
Ruud Burger
17e37996c4 Add remux category for TorrentShack
close #4427
2015-01-02 18:18:08 +01:00
jonnyboy
9318e19347 New torrent search provider hdaccess.net 2014-12-31 08:21:58 -05:00
Ruud
8f4e03d04b Use detected encoding
#4388
2014-12-27 13:46:25 +01:00
Ruud
0288cc8848 Merge branch 'develop' 2014-12-22 22:02:05 +01:00
Ruud
229d67c086 Don't toUnicode loop 2014-12-22 22:01:47 +01:00
Dustin Brewer
d84897ff33 Initial support for Plex Media Server w/Plex Home 2014-12-21 16:18:11 -08:00
Ruud
387a711538 TorrentBytes not encoding name
fix #4377
2014-12-21 21:14:38 +01:00
Ruud
7a1b914824 Return nonblock results in main thread 2014-12-21 20:19:53 +01:00
Ruud
5e62801666 Send data through finish not write 2014-12-21 20:19:30 +01:00
Ruud
00d887153f Return data in main thread 2014-12-21 19:39:16 +01:00
Ruud
739d668261 Merge branch 'develop' 2014-12-21 14:48:58 +01:00
Ruud
6d5882001a Notification.list not returning anything
fix #4348
2014-12-20 22:32:18 +01:00
Ruud
4a6b45c65c SCC not finding seeders 2014-12-20 22:24:00 +01:00
Ruud
b0d1fe5c33 Return false if no media is found on try_next
fix #4345
2014-12-20 22:17:43 +01:00
Ruud
a6e49098c8 Add robots.txt 2014-12-20 22:15:27 +01:00
Ruud
f450f2d1e3 Merge branch 'develop' 2014-12-20 21:45:25 +01:00
Ruud
ffcd36cbf4 IOLoop callback hanging 2014-12-20 21:45:15 +01:00
Ruud
a2be29c3b2 Merge branch 'develop' 2014-12-20 20:14:11 +01:00
Ruud
3bf2d844a0 Release api lock on connection close or finish
fix #4372
2014-12-20 20:13:49 +01:00
Ruud
8770ab6696 Merge branch 'develop' 2014-12-20 19:10:04 +01:00
Ruud
dd24eb8893 Revert "Give response back to the main thread on api calls"
This reverts commit 576bcb9f4b.

Conflicts:
	couchpotato/api.py
2014-12-20 18:49:35 +01:00
Ruud
aea673ddcd Merge branch 'develop' 2014-12-19 14:16:28 +01:00
Ruud
538f51dd5b Log ipv6 failed bind 2014-12-19 14:16:20 +01:00
Ruud
f9fa87ce1d Merge branch 'develop' 2014-12-19 09:12:19 +01:00
Ruud
eea9f40501 Use current 2014-12-19 09:01:52 +01:00
Ruud
576bcb9f4b Give response back to the main thread on api calls
fix #4337
2014-12-19 08:57:24 +01:00
Ruud Burger
62c5365329 Merge pull request #4356 from rtaibah/ReaddTypoFix
Change Readd in tooltip to Re-add. Former is confusing and not an Englis...
2014-12-17 11:36:53 +01:00
Rami Taibah
ddf575a86e Change Readd in tooltip to Re-add. Former is confusing and not an English word 2014-12-17 13:00:54 +03:00
Ruud Burger
6b9383ce92 Merge pull request #4342 from mano3m/develop_fixsize
Fix TorrentShack size
2014-12-16 07:52:08 +01:00
mano3m
cb8d24ef1f Fix TorrentShack size 2014-12-15 22:26:29 +01:00
Ruud
814ddfb79f Don't return password fields
fix #4300
2014-12-14 12:33:28 +01:00
Ruud
766f819c0b Userscript for RT not parsing URL correctly 2014-12-14 12:06:03 +01:00
Ruud
ff43df9ef1 Comments comments comments 2014-12-02 15:38:55 +01:00
Ruud
2e907e93e7 Whiteline 2014-12-02 12:02:49 +01:00
Ruud
4d329d6a36 Revert "Remove torrentleech"
This reverts commit dacc3d8f47.
2014-12-02 11:45:17 +01:00
Ruud
752191bc23 Comments 2014-12-02 11:43:10 +01:00
Ruud
1d73fd9d7e Import optimize 2014-12-02 11:15:29 +01:00
Ruud
79688c412a Merge branch 'develop' of git://github.com/hadouken/CouchPotatoServer into hadouken-develop 2014-12-02 11:07:54 +01:00
Ruud
fc1c95fefb Description 2014-12-01 23:00:59 +01:00
Ruud
6a174716af underscored variables 2014-12-01 22:52:10 +01:00
Ruud
defe256f1b Correct url 2014-12-01 16:52:43 +01:00
Ruud
8a5f154d9e Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-12-01 16:52:04 +01:00
Ruud
fe56a69e8f Put.IO cleanup 2014-12-01 16:49:27 +01:00
Ruud
c6d326f973 Move put.io API 2014-12-01 15:50:03 +01:00
Ruud
9e5f670feb Merge branch 'putio' of git://github.com/dumaresq/CouchPotatoServer into develop 2014-12-01 15:42:43 +01:00
Ruud Burger
9ebacf8816 Merge pull request #4258 from glibix/develop
Transmission status 16 is for "Stopped". So we need to detect a download...
2014-12-01 15:41:41 +01:00
Ruud
df2d7ec9c2 Remove debug code 2014-12-01 15:39:33 +01:00
Ruud
ddab74582b Merge branch 'develop' of git://github.com/psaab/CouchPotatoServer into psaab-develop 2014-12-01 15:28:11 +01:00
Ruud
2801079bc8 Merge branch 'develop_3845' of git://github.com/voidstarstar/CouchPotatoServer into voidstarstar-develop_3845 2014-12-01 15:14:26 +01:00
Ruud Burger
1deb49b524 Merge pull request #4261 from voidstarstar/develop_4211
Added renamer.progress API function. Fixes #4211.
2014-12-01 15:12:21 +01:00
Ruud
ac65775743 Merge branch 'develop' 2014-12-01 12:58:18 +01:00
Ruud Burger
49d550f652 Merge pull request #4270 from sammy2142/patch-1
Update Kickass url to https://kickass.so
2014-11-30 22:05:36 +01:00
sammy2142
1a43ce6ecc Update Kickass url to https://kickass.so
Kickass has recently changed its web address from https://kickass.to 
to https://kickass.so
2014-11-30 20:48:31 +00:00
voidstarstar
15a0131587 Added renamer.progress API function. Fixes #4211.
This function reports the status of the renamer.
Progress value True means the renamer is currently running.
Progress value False means the renamer is not currently running.
2014-11-27 21:51:30 -05:00
voidstarstar
0dca34958c Added a parameter to the renamer API. Fixes #3845.
The renamer now has a new 'to_folder' parameter.
This parameter specifies where movies are moved to.
2014-11-27 21:43:19 -05:00
Mathew Paret
4b231e36ea Merge branch 'feature/3967_add_imdb_link_to_tweet' into develop 2014-11-27 18:16:41 +05:30
Mathew Paret
52478a00db Revert "Feature #3967 - Added IMDB link to download complete tweet"
This reverts commit 87338760ad.
2014-11-27 18:13:41 +05:30
Mathew Paret
e177766270 Merge branch 'feature/3967_add_imdb_link_to_tweet' into develop 2014-11-27 18:06:38 +05:30
Ruud Burger
ff8da7c8f8 Merge pull request #4068 from ofir123/subscenter_support
Added support for subscenter.
2014-11-26 21:51:14 +01:00
Ruud Burger
89c8c5a0c7 Merge pull request #4203 from rkokkelk/develop
Fix startup script Debian/Ubuntu
2014-11-26 21:48:40 +01:00
Ruud
38c6266f9c Use single quotes 2014-11-26 21:47:39 +01:00
Ruud Burger
16f8e7e123 Merge pull request #4205 from kamillus/develop
adding a fix to handle missing directories in the file browser in webkit browsers
2014-11-26 21:46:10 +01:00
Ruud Burger
7110c7a11f Merge pull request #4249 from clinton-hall/patch-1
NZBGet 13 includes more status information
2014-11-25 07:55:39 +01:00
Clinton Hall
6d79f316a6 NZBGet 13 includes more status information
nzb['Status'] returns total (SUCCESS/ALL) status and also failed status in V13+
This is particularly important when using fake detector scripts or stopping download due to health checks etc.
http://nzbget.net/RPC_API_reference#Method_.22history.22
https://couchpota.to/forum/viewtopic.php?f=5&t=4644
2014-11-25 11:02:47 +10:30
Paul Saab
c1b6811b8a Tornado requires two sockets to support IPv6
Tornado sets setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
to force IPv6 sockets to only be used for IPv6 connections.  create a
separate socket to allow for CouchPotato to be used over IPv6.
2014-11-17 22:54:56 -08:00
Kamil
7d7b76b2e9 adding a fix to handle missing directories in the file browser in webkit browsers 2014-11-10 20:18:38 -05:00
Roy Kokkelkoren
657aa52fa7 Merge branch 'develop' of https://github.com/rkokkelk/CouchPotatoServer into develop 2014-11-10 15:40:41 +01:00
Roy Kokkelkoren
8e9ef8db39 Merge remote-tracking branch 'upstream/develop' into develop 2014-11-10 15:39:45 +01:00
test
92a0096b54 Merge remote-tracking branch 'upstream/develop' into develop 2014-11-10 08:29:27 -06:00
Mathew Paret
87338760ad Feature #3967 - Added IMDB link to download complete tweet 2014-11-10 18:47:37 +05:30
Mathew Paret
28019b0a09 Transmission status 16 is for "Stopped". So we need to detect a download as completed even if it is stopped but percent done is 100 2014-11-10 18:39:58 +05:30
Ruud Burger
248b007f4a Merge pull request #4094 from georgewhewell/hdbits-add-internal-only
add option for internal-only for hdbits provider
2014-11-09 14:39:50 +01:00
Ruud Burger
9e31c59de8 Merge pull request #4188 from DjSlash/patch-2
Update SSL protocol for Deluge connections
2014-11-09 14:37:23 +01:00
Ruud
269e785888 Yify, don't include quality in search
fix #4190
2014-11-09 14:30:22 +01:00
Ruud
3669aef42d is_movie param 2014-11-09 14:14:06 +01:00
Ruud
1087eb3a06 Add adding parameter to is_movie 2014-11-09 14:10:23 +01:00
Ruud
cd55966575 One up! 2014-11-09 13:50:52 +01:00
Ruud
5873a5c8e2 Force include lxml 2014-11-09 13:50:43 +01:00
Ruud
32163b3951 Merge branch 'master' into desktop 2014-11-09 10:27:35 +01:00
Ruud
e0cc86b51c Merge branch 'develop' 2014-11-09 10:27:12 +01:00
Rutger van Sleen
43af80a137 Update SSL protocol for Deluge connections
Since Deluge 1.3.10 the SSL protocol is updated to TLSv1 instead of SSLv3. This resulted in CP not being able to add new torrents. Link to change in Deluge: http://git.deluge-torrent.org/deluge/commit/?h=1.3-stable&id=26f5be17609a8312c4ba06aa120ed208cd7876f2
2014-11-06 14:33:38 +01:00
Roy Kokkelkoren
0766a27a71 Fixed bug in init.d script which prevented the writing of the PID file.
Altered default value of DATA_DIR to /var/opt/couchpotato in order to comply to linux file structure
2014-11-06 04:40:39 -06:00
Ruud
a12f049d14 Bit-HDTV http -> https
fix #3570
2014-11-01 17:30:11 +01:00
Ruud
6afe2fd9cf IPTorrents webdl category
fix #4150
2014-11-01 15:36:54 +01:00
Viktor Elofsson
61f634a21e Refactored Hadouken downloader. 2014-10-21 16:52:28 +02:00
Ruud
7ec64e202b Merge branch 'develop' 2014-10-20 21:05:14 +02:00
Ruud
02b6659235 Don't show ignored in ETA message 2014-10-20 20:46:07 +02:00
Ruud
dacc3d8f47 Remove torrentleech 2014-10-19 22:47:29 +02:00
Ruud
da97b62c44 Merge branch 'develop' 2014-10-19 22:42:00 +02:00
Ruud
4f140bb1ac Remove print 2014-10-19 22:41:50 +02:00
Ruud
3dffaa7075 Update Tornado 2014-10-19 16:13:03 +02:00
georgewhewell
d626fda710 add option for internal only for hdbits provider 2014-10-17 15:14:44 +01:00
Ruud
51c8de0fc3 Force filename renamer setting 2014-10-16 22:39:39 +02:00
Ruud
4f23ccc284 Also rename on old version database
fix #4083
2014-10-15 21:28:52 +02:00
Ruud
a6ff34a47f Only check exists on file 2014-10-14 22:49:19 +02:00
Viktor Elofsson
b40d1f3463 Merge pull request #1 from RuudBurger/develop
Sync upstream
2014-10-14 13:18:48 +02:00
Ruud
f1a2d960bc Make tab font smaller 2014-10-13 13:24:57 +02:00
Ruud
4e7069e0c6 Deluge, allow "no port"
fix #4055
2014-10-13 13:20:21 +02:00
Ruud
477a47e45e Don't show fanart error on future movies 2014-10-13 13:12:44 +02:00
Ruud
a3264240ab Don't error out on future movies 2014-10-13 13:10:38 +02:00
Ofir Brukner
1030d0d748 Added support for subscenter.
Updated both plugin and lib.
2014-10-13 00:50:29 +03:00
Ruud
1fb031ff40 Merge branch 'develop' 2014-10-12 16:41:56 +02:00
Ruud
f9d9fffedb Don't ss int or float 2014-10-12 16:41:48 +02:00
Ruud
86edf5eb04 Exclude libs 2014-10-12 16:19:27 +02:00
Ruud
92f9743d3c Merge branch 'master' into desktop 2014-10-11 23:01:02 +02:00
Ruud
1b151fbd97 Merge branch 'develop' 2014-10-11 22:59:53 +02:00
Ruud
6b4e9a3fac Remove chardet from requests library 2014-10-11 22:57:57 +02:00
Ruud
0567504394 One up! 2014-10-11 22:46:45 +02:00
Ruud
c8a3b64624 Set black icon for mac 2014-10-11 22:45:31 +02:00
Ruud
c657d6d70b Merge branch 'master' into desktop
Conflicts:
	couchpotato/core/database.py
2014-10-11 19:34:46 +02:00
Ruud
d307d343e5 Merge branch 'develop' 2014-10-11 19:33:06 +02:00
Ruud Burger
6787289846 Merge pull request #4054 from joshka/patch-1
Fix default permissions on files to remove execute bits on files #4053
2014-10-11 18:04:08 +02:00
Joshua McKinney
d31a2e2768 Fix default permissions on files to remove execute bits on files #4053 2014-10-12 03:01:20 +11:00
Ruud
c992680209 Meta and Middle click not triggering new tab 2014-10-11 15:35:56 +02:00
Ruud
f2ab59e384 Merge branch 'develop' 2014-10-11 14:38:21 +02:00
Ruud
65f0dc25d2 Allow 1080p in shitty quality releases 2014-10-11 14:32:12 +02:00
Ruud
b616af3a83 Make minimum scoring editable
fix #4042
2014-10-10 23:16:58 +02:00
Ruud
ca13107330 Ignore exceptions on removing db_backup stuff 2014-10-10 22:46:35 +02:00
Ruud
c7ce18f8c2 Better error message for missing cd number 2014-10-10 15:13:58 +02:00
Ruud
55f201040b Merge branch 'develop' 2014-10-09 23:10:52 +02:00
Ruud
b6f288a522 Close request connection 2014-10-09 23:10:28 +02:00
Ruud
476a5cc3dd Merge branch 'develop' 2014-10-08 23:08:33 +02:00
Ruud
cb48ca03df Remove profile when marking movies done 2014-10-08 23:05:04 +02:00
Ruud
342a4ad885 Merge branch 'develop' 2014-10-08 23:00:37 +02:00
Ruud
7b6641d709 Never restatus "down" when adding release 2014-10-08 23:00:11 +02:00
Ruud
12159a1b7b Merge branch 'develop' 2014-10-08 22:54:41 +02:00
Ruud
3c12a2c4bf Don't restatus movies to active when scanning manage section 2014-10-08 22:35:56 +02:00
Ruud
259e2bc61c Don't skip unpacking on manage scan 2014-10-08 21:58:34 +02:00
Ruud
9f6e4cc2fa Remove NotifyMyWP 2014-10-08 20:46:11 +02:00
Ruud
b773f7b71c Merge branch 'develop' 2014-10-07 23:09:44 +02:00
Ruud
a763957334 Log minimum 1 second wait 2014-10-07 22:53:57 +02:00
Ruud
06293dc0a2 Simplify tmdb provider 2014-10-07 22:50:22 +02:00
Ruud
38a5d967dd Remove tmdb3 lib 2014-10-07 21:40:41 +02:00
Ruud
4cdb9bc81d Remove tmdb3 dependency 2014-10-07 21:40:32 +02:00
Ruud
2104cb2839 Always try to return version string 2014-10-07 20:30:51 +02:00
Ruud
d4a4bd40a8 Always return version info 2014-10-07 20:14:17 +02:00
Ruud
ba47d7eea7 Use torrent-duplicate if returned from Transmission
fix #4014
2014-10-07 11:46:42 +02:00
Ruud
41aba6b19c Merge branch 'develop' 2014-10-07 09:30:07 +02:00
Ruud
96def8563b Merge branch 'develop' 2014-10-05 11:16:06 +02:00
Ruud
bf46a937c0 Merge branch 'develop' 2014-10-04 22:57:51 +02:00
Ruud
2edb6caa97 Merge branch 'develop' 2014-10-04 22:19:37 +02:00
Ruud
9e125a361a Merge branch 'develop' 2014-10-04 15:43:01 +02:00
Viktor Elofsson
2e52c8124a Implemented a downloader for Hadouken. 2014-10-02 20:34:43 +02:00
Ruud
2252ed710c Merge branch 'develop' 2014-09-29 16:25:19 +02:00
Ruud
07a790e9b2 Merge branch 'develop' 2014-09-25 22:32:38 +02:00
Ruud
bb6fefd010 Merge branch 'develop' 2014-09-23 14:54:21 +02:00
Andrew Dumaresq
8de5fcdac6 fixed button name 2014-09-20 19:39:35 -04:00
Andrew Dumaresq
4aa9801be4 general code cleanup 2014-09-20 19:39:12 -04:00
dumaresq
3e58378490 figured out how to make the check work better 2014-09-19 21:41:58 -04:00
Andrew Dumaresq
2c40db3074 removed un-needed variable 2014-09-19 20:28:03 -04:00
dumaresq
fba228fd9d fixing check function 2014-09-19 20:26:54 -04:00
Andrew Dumaresq
ef2b8e88b4 better download checking 2014-09-19 07:07:23 -04:00
Ruud
55e489cc51 Mark faulty movies done 2014-09-19 00:14:52 +02:00
Ruud
7fe5a271dc Make sure original_folder isn't empty
fix #3747
2014-09-18 22:00:59 +02:00
Ruud
ea92c503bb Merge branch 'develop' 2014-09-18 21:44:35 +02:00
Ruud
6942126b7f Merge branch 'develop' 2014-09-18 17:58:48 +02:00
Ruud
a6d37bf9c2 Merge branch 'develop' 2014-09-18 17:51:25 +02:00
Ruud
37c6bc7612 Merge branch 'develop' 2014-09-18 17:48:04 +02:00
Andrew Dumaresq
c77b270fa8 Cleaned up OAUTH and made the download async 2014-09-18 06:00:09 -04:00
dumaresq
872a4f4650 Worked on getting Oauth and adding download status 2014-09-07 17:59:16 -04:00
Ruud
d6a264aaed Merge branch 'develop' 2014-09-03 16:47:48 +02:00
Ruud
108f3292c3 Merge branch 'develop' 2014-09-01 23:07:12 +02:00
Ruud
fc60727e82 Merge branch 'develop' 2014-09-01 13:40:19 +02:00
Ruud
49cd8fbc2c Merge branch 'develop' 2014-09-01 13:32:39 +02:00
Ruud
d0f1e7c6a3 Update put.io code 2014-08-29 12:30:31 +02:00
Ruud
53e7e383a3 put.io rename 2014-08-29 11:38:28 +02:00
Ruud
c06e1f3135 Merge branch 'develop' of git://github.com/dumaresq/CouchPotatoServer into dumaresq-develop 2014-08-29 11:37:49 +02:00
Ruud
1991792291 Remove ending separator 2014-08-27 22:04:33 +02:00
Ruud
29290022e6 Merge branch 'develop' 2014-08-27 19:56:17 +02:00
dumaresq
bb73cb8eec Fixed missing library 2014-08-24 18:19:01 -04:00
dumaresq
5acab98025 fixed hardcoded directory 2014-08-19 19:32:00 -04:00
dumaresq
ed6a46e9c0 Added putioDownloader 2014-08-17 16:28:47 -04:00
Ruud
04aa2e5fa4 Merge branch 'develop'
Conflicts:
	couchpotato/core/database.py
2014-08-16 13:30:37 +02:00
Ruud
6772b9d965 Don't migrate when db is closed 2014-07-17 23:09:26 +02:00
Ruud
5df14d67e1 One up 2014-07-17 22:28:32 +02:00
Ruud
73abd1f022 Merge branch 'refs/heads/master' into desktop 2014-07-17 22:27:23 +02:00
Ruud
e75a8529c9 Try fix migration failure from 2.5.1 2014-07-17 22:26:23 +02:00
Ruud
07a7f8cbcf Change fanart api url 2014-07-16 10:32:02 +02:00
Ruud
9b35a0fb20 Only trigger onClose when it's set 2014-07-08 21:21:22 +02:00
Ruud
0622e6e5ab One up 2014-06-29 23:16:09 +02:00
Ruud
f16931906f Don't remove pyc files when using desktop updater 2014-06-29 23:15:36 +02:00
Ruud
68dcba8853 One up 2/2 2014-06-29 21:56:51 +02:00
Ruud
ae8f66df1a Exit main loop on crash 2014-06-29 21:56:39 +02:00
Ruud
5237ead5cb Merge branch 'refs/heads/develop' into desktop 2014-06-29 17:01:47 +02:00
Ruud
45b2dff6d2 Merge branch 'refs/heads/develop' 2014-06-29 11:01:09 +02:00
Ruud
30d56b5d2c Merge branch 'refs/heads/develop' 2014-06-29 00:02:55 +02:00
Ruud
5ff6824ae9 Merge branch 'refs/heads/develop' 2014-06-25 18:26:10 +02:00
Ruud
0210859155 Merge branch 'refs/heads/develop' 2014-06-25 09:17:12 +02:00
Ruud
665478db13 Merge branch 'refs/heads/develop' 2014-06-23 23:45:03 +02:00
Ruud
84c366ab54 Merge branch 'master' of github.com:RuudBurger/CouchPotatoServer 2014-06-23 20:47:30 +02:00
Ruud
908e5eae77 Merge branch 'refs/heads/develop' 2014-06-23 20:47:06 +02:00
Ruud
c4aaa10308 One up 2014-06-23 20:00:06 +02:00
Ruud
d10536a829 Remove path from getOptions 2014-06-23 20:00:00 +02:00
Ruud
1e7fa82e11 Merge branch 'refs/heads/develop' into desktop 2014-06-23 19:01:58 +02:00
Ruud
1d448f3d9c Merge branch 'refs/heads/develop' 2014-06-23 14:29:20 +02:00
Ruud
338b5f427a Merge branch 'refs/heads/develop' 2014-06-23 13:37:50 +02:00
Ruud
59e3e73c4c Merge branch 'refs/heads/develop' 2014-06-23 01:19:05 +02:00
Ruud
cb2614127c Merge branch 'refs/heads/develop' 2014-06-22 21:14:44 +02:00
Ruud
fdbd826917 Merge branch 'refs/heads/develop' 2014-06-22 20:35:30 +02:00
Ruud
31daf4915e Merge branch 'refs/heads/develop' 2014-06-20 21:31:48 +02:00
Ruud
4ca7691afd Merge branch 'refs/heads/develop' 2014-06-20 21:08:33 +02:00
Ruud
64d3ecd9b8 Merge branch 'refs/heads/develop' 2014-06-20 14:52:15 +02:00
Ruud
d55df3240f Merge branch 'refs/heads/develop' 2014-06-20 14:14:26 +02:00
Ruud
52214e4938 Merge branch 'refs/heads/develop' 2014-06-20 12:22:13 +02:00
Ruud
b45307e493 Merge branch 'refs/heads/develop' 2014-06-11 23:51:05 +02:00
Ruud
4320369448 Merge branch 'refs/heads/develop' 2014-06-11 10:15:31 +02:00
Ruud
f560dc093c Merge branch 'refs/heads/develop' 2014-06-10 22:54:14 +02:00
Ruud
d26a2b1480 Merge branch 'refs/heads/develop' 2014-06-07 20:44:49 +02:00
Ruud
e11b07b559 Don't save profile order twice 2014-06-06 17:26:45 +02:00
Ruud
b6ee8ef4d4 Merge branch 'refs/heads/develop' 2014-06-06 11:24:24 +02:00
Ruud
f80559d380 Merge branch 'refs/heads/develop' 2014-06-03 22:31:20 +02:00
Ruud
8530b00e7b Merge branch 'refs/heads/develop' 2014-06-03 17:18:11 +02:00
Ruud
5851e1e69f Merge branch 'refs/heads/develop' 2014-06-02 23:51:01 +02:00
Ruud
686bfd62eb Merge branch 'refs/heads/develop' 2014-06-02 15:10:29 +02:00
Ruud
9b82603c26 Merge branch 'refs/heads/develop' 2014-06-02 14:20:50 +02:00
Ruud
f41792915f Merge branch 'refs/heads/develop' 2014-06-02 12:59:47 +02:00
Ruud
2fa77fb610 Merge branch 'refs/heads/develop' 2014-06-02 10:40:07 +02:00
Ruud
e64d0e33fc Merge branch 'refs/heads/develop' 2014-06-01 14:31:39 +02:00
Ruud
b168643600 Merge branch 'refs/heads/develop'
Conflicts:
	couchpotato/core/helpers/variable.py
2014-05-31 22:50:02 +02:00
Ruud
240283405e variable 'year' referenced before assignment 2014-05-07 11:50:36 +02:00
Ruud
b69f8b7ed5 Files not properly sent to sabnzbd 2014-03-19 22:33:14 +01:00
Ruud
fbccba77a7 64Bit installer setup 2014-03-16 13:00:09 +01:00
Ruud
d3efda74b2 One up 2014-03-16 09:44:44 +01:00
Ruud
66b849cb29 Merge branch 'refs/heads/master' into desktop
Conflicts:
	version.py
2014-03-16 09:43:32 +01:00
Ruud
b19f98ef5b Merge branch 'refs/heads/develop' 2014-03-15 12:35:28 +01:00
Ruud
c389790cf2 Merge branch 'refs/heads/develop' 2014-03-03 22:19:29 +01:00
Ruud
d7445dfa80 Merge branch 'refs/heads/develop' 2014-02-26 14:00:56 +01:00
Ruud
36782768a4 Merge branch 'refs/heads/develop' 2014-02-25 21:37:29 +01:00
Ruud
2c9d487614 Update build url 2014-02-25 21:20:59 +01:00
Ruud
b9a724c8bb Merge branch 'refs/heads/develop' 2014-02-16 09:43:03 +01:00
Ruud
68d826ca1c Merge branch 'refs/heads/develop' 2014-02-15 19:48:07 +01:00
Ruud
d6921882e1 Merge branch 'refs/heads/develop' 2014-02-14 19:39:47 +01:00
Ruud
2cfff73486 Merge branch 'refs/heads/develop' 2014-01-18 19:54:32 +01:00
Ruud
0c7dda8d44 Merge branch 'refs/heads/develop' 2014-01-17 23:17:41 +01:00
Ruud
dbaa377770 version.master 2014-01-17 16:29:29 +01:00
Ruud
47d2b81d1c Merge branch 'refs/heads/develop' 2014-01-17 16:28:59 +01:00
Ruud
f79fcda27f Small one up 2013-11-17 21:22:24 +01:00
Ruud
cdbcad2238 Merge branch 'refs/heads/develop' into desktop 2013-11-17 21:20:30 +01:00
Ruud
5d913e87c3 One up! 2013-11-17 20:20:18 +01:00
Ruud
16f02bda27 Merge branch 'refs/heads/develop' into desktop 2013-11-17 20:03:22 +01:00
Ruud
8d108b92bf One Up 2013-09-23 21:48:12 +02:00
Ruud
46783028b1 Merge branch 'refs/heads/develop' into desktop 2013-09-23 21:36:45 +02:00
Ruud
d08c7c57a8 One up! 2013-09-20 17:46:54 +02:00
Ruud
eeeb845ef3 Simplify string before checking on imdb 2013-09-20 17:30:11 +02:00
Ruud
651a063f94 Fix about submenu 2013-09-20 16:33:01 +02:00
Ruud
f20aaa2d9d Hide IE clear button on search 2013-09-20 16:23:42 +02:00
Ruud
ba925ec191 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	couchpotato/core/plugins/suggestion/main.py
2013-09-20 16:12:40 +02:00
Ruud
3b7376fd18 One up 2013-07-06 01:01:26 +02:00
Ruud
c31b10c798 Ignore current suggested results 2013-07-06 00:49:11 +02:00
Ruud
acda664686 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2013-07-05 22:43:54 +02:00
Ruud
e2852407ea One up 2013-06-03 22:22:44 +02:00
Ruud
88e738c6cd Don't show double updater name 2013-06-03 22:22:35 +02:00
Ruud
eaae8bdb0b Merge branch 'refs/heads/develop' into desktop 2013-06-03 22:00:21 +02:00
Ruud
821f68909d One up 2013-05-05 21:19:10 +02:00
Ruud
2b8dfed475 Merge branch 'refs/heads/master' into desktop
Conflicts:
	version.py
2013-05-05 20:31:28 +02:00
Ruud
607b5ea766 Run exe after install 2013-03-19 21:22:07 +01:00
Ruud
88579cd71a One up 2013-03-19 20:52:07 +01:00
Ruud
6c57316ce6 Use https for changelog 2013-03-19 20:46:00 +01:00
Ruud
6702683da3 Merge branch 'refs/heads/develop' into desktop 2013-03-19 20:34:38 +01:00
Ruud
1ed58586a1 Force install install in AppData
Add images to installer
2013-03-18 23:56:54 +01:00
Ruud
f08ccd4fd8 One up installer 2013-03-17 22:34:04 +01:00
Ruud
312562a9f5 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2013-03-17 16:42:53 +01:00
Ruud
9e260a89af One up 2013-01-26 14:51:39 +01:00
Ruud
d233e4d22e Merge branch 'refs/heads/develop' into desktop 2013-01-26 13:54:56 +01:00
Ruud
23893dbcb9 Merge branch 'refs/heads/develop' into desktop 2013-01-25 20:13:58 +01:00
Ruud
506871b506 One up 2013-01-23 23:10:55 +01:00
Ruud
6115917660 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2013-01-23 22:57:07 +01:00
Ruud
21df8819d3 Merge branch 'refs/heads/develop' into desktop 2013-01-23 22:55:09 +01:00
Ruud
fb3f3e11f6 Merge branch 'refs/heads/develop' into desktop 2013-01-22 21:40:40 +01:00
Ruud
178c8942c3 Merge branch 'refs/heads/develop' into desktop 2013-01-14 19:54:22 +01:00
Ruud
51e747049d One up 2013-01-07 23:10:42 +01:00
Ruud
0582f7d694 Urlencode spotweb id. fix #1213 2013-01-07 23:10:06 +01:00
Ruud
fa7cac7538 Merge branch 'refs/heads/develop' into desktop 2013-01-07 22:41:55 +01:00
Ruud
9a314cfbc4 One up 2012-12-29 00:03:45 +01:00
Ruud
5941d0bf77 Add version to update url 2012-12-29 00:03:36 +01:00
Ruud
d326c1c25c Merge branch 'refs/heads/master' into desktop
Conflicts:
	version.py
2012-12-28 23:31:08 +01:00
Ruud
96472a9a8f One up 2012-12-16 23:51:58 +01:00
Ruud
27252561e2 Merge branch 'refs/heads/develop' into desktop 2012-12-16 23:51:24 +01:00
Ruud
c9e732651f One up 2012-12-01 12:16:58 +01:00
Ruud
7849e7170d Uninstall only create files, no wildcard *.* 2012-12-01 12:16:51 +01:00
Ruud
087894eb4e Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2012-12-01 11:50:08 +01:00
Ruud
25f1b8c7a7 Fedora init fix #1009 2012-11-02 18:32:15 +01:00
Ruud
e71da1f14d Use proper description for binary build. fix #1005 2012-11-02 18:24:13 +01:00
Ruud
938b14ba18 One up installer 2012-10-29 20:45:17 +01:00
Ruud
d6522d8f38 One up installer 2012-10-27 18:49:44 +02:00
Ruud
78eab890e7 Merge branch 'refs/heads/develop' into desktop 2012-10-27 18:25:36 +02:00
Ruud
1a56191f83 Don't unzip 2012-10-27 18:22:50 +02:00
Ruud
41c0f34d95 Properly restart 2012-10-27 18:22:40 +02:00
Ruud
37bf205d7a Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2012-10-27 11:56:57 +02:00
Ruud
aa1fa3eb9a Add description 2012-09-19 15:42:33 +02:00
Ruud
0e2f8a612c Extract zip after build, for testing 2012-09-19 15:29:07 +02:00
Ruud
465e7b2abc Merge branch 'refs/heads/develop' into desktop 2012-09-16 12:36:17 +02:00
Ruud
578fb45785 Installer 1 up 2012-09-16 11:35:56 +02:00
Ruud
96995bbbe5 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2012-09-16 10:45:19 +02:00
Ruud
4cfdafebbc Merge branch 'refs/heads/develop' into desktop 2012-09-14 13:15:47 +02:00
Ruud
b97acb8ef5 Merge branch 'refs/heads/develop' into desktop 2012-09-14 13:08:19 +02:00
Ruud
d68d2dfdb6 Updated installer 2012-09-09 21:48:38 +02:00
Ruud
39b269a454 Merge branch 'refs/heads/develop' into desktop 2012-09-09 17:32:47 +02:00
Ruud
ac081d3e10 Getting ready for build 2012-09-09 17:28:23 +02:00
Ruud
5d4efb60cf Merge branch 'refs/heads/develop' into desktop 2012-09-08 16:01:49 +02:00
Ruud
cc408b980c Merge branch 'refs/heads/develop' into desktop
Conflicts:
	couchpotato/core/_base/updater/main.py
2012-08-05 16:18:35 +02:00
Ruud
59590b3ac9 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	couchpotato/core/_base/updater/main.py
2012-07-14 00:35:00 +02:00
Ruud
ff759dacf3 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	couchpotato/core/_base/updater/main.py
2012-07-11 22:43:45 +02:00
Ruud
a328e44130 Merge branch 'desktop' of github.com:RuudBurger/CouchPotatoServer into desktop 2012-05-15 23:23:56 +02:00
Ruud
7924cac5f9 Update installer version 2012-05-15 23:21:24 +02:00
Ruud
1cef3b0c93 remove --nogit tag 2012-05-15 23:21:24 +02:00
Ruud
3cd59edc8b Import errors
File icon
2012-05-15 23:21:24 +02:00
Ruud
0d624af01d Working PNG 2012-05-15 23:21:24 +02:00
Ruud
a09132570c Change branch to desktop 2012-05-15 23:21:14 +02:00
Ruud
ee3fc38432 Better setup 2012-05-15 23:21:14 +02:00
Ruud
dbf0192c8e Inno setup, start 2012-05-15 23:21:14 +02:00
Ruud
6962cfc3f5 new Desktop runner 2012-05-15 23:21:14 +02:00
Ruud
e096ec3b5b Desktop files 2012-05-15 23:20:05 +02:00
Ruud
b30a74ae0c Merge branch 'refs/heads/develop' into desktop 2012-05-15 23:15:17 +02:00
Ruud
978eeb16c9 Update installer version 2012-05-15 23:14:20 +02:00
Ruud
e5c9d91657 Merge branch 'refs/heads/develop' into desktop 2012-05-15 22:27:22 +02:00
Ruud
fa81c3a07a Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2012-05-14 22:00:02 +02:00
Ruud
9cdd520d41 Merge branch 'refs/heads/develop' into desktop 2012-05-14 20:22:55 +02:00
Ruud
55d7898771 Merge branch 'refs/heads/develop' into desktop 2012-05-13 12:56:45 +02:00
Ruud
b8256bef97 Merge branch 'refs/heads/develop' into desktop 2012-05-12 00:35:52 +02:00
Ruud
5be9dc0b4a Merge branch 'refs/heads/develop' into desktop 2012-05-09 22:20:53 +02:00
Ruud
7d0be0cefb remove --nogit tag 2012-05-07 22:55:54 +02:00
Ruud
f7ce1edb13 Merge branch 'refs/heads/develop' into desktop 2012-05-07 22:44:01 +02:00
Ruud
5ad9280b60 Merge branch 'refs/heads/develop' into desktop 2012-05-07 22:27:55 +02:00
Ruud
2b353f1b20 Merge branch 'refs/heads/develop' into desktop 2012-05-04 17:29:15 +02:00
Ruud
75ab90b87b Merge branch 'refs/heads/develop' into desktop 2012-05-02 21:40:19 +02:00
Ruud
0219296120 Import errors
File icon
2012-05-02 21:34:45 +02:00
Ruud
20032b3a31 Working PNG 2012-05-01 07:35:44 +02:00
Ruud
ea9e9a8c90 Updater base 2012-05-01 07:35:27 +02:00
Ruud
f7b0ee145b Change branch to desktop 2012-04-30 21:37:04 +02:00
Ruud
cc866738ee Merge branch 'refs/heads/develop' into desktop 2012-04-30 21:32:56 +02:00
Ruud
eadccf6e33 Merge branch 'refs/heads/develop' into desktop 2012-04-29 00:00:25 +02:00
Ruud
b70b66e567 Merge branch 'refs/heads/develop' into desktop 2012-04-28 23:14:59 +02:00
Ruud
5b6792dc20 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	CouchPotato.py
	couchpotato/core/plugins/renamer/main.py
	couchpotato/core/plugins/trailer/__init__.py
2012-04-07 21:35:36 +02:00
Ruud
f498e7343a Better setup 2012-02-25 01:48:58 +01:00
Ruud
6962f441e6 Inno setup, start 2012-02-21 18:50:34 +01:00
Ruud
1def62b1b1 new Desktop runner 2012-02-19 17:13:37 +01:00
Ruud
a4a4a6a185 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	CouchPotato.py
2012-02-19 13:14:56 +01:00
Ruud
d4c9469c1a Remove nfo when not renaming as .orig.nfo 2012-02-19 12:53:55 +01:00
Ruud
3e2d4c5d7b Initial trailer support 2012-02-19 12:48:54 +01:00
Ruud
d03f711d69 kwargs in file.download for urlopen 2012-02-19 12:45:22 +01:00
Ruud
44dd8d9b96 Merge lists, not overwrite 2012-02-19 12:37:25 +01:00
Ruud
549a3be0d8 Merge branch 'refs/heads/develop' into desktop 2012-02-12 00:10:56 +01:00
Ruud
1bb2edf8ec Merge branch 'refs/heads/develop' into desktop 2012-02-11 23:33:14 +01:00
Ruud
84c6f36315 Desktop files 2012-02-11 23:06:14 +01:00
206 changed files with 4042 additions and 12041 deletions

View File

@@ -61,7 +61,7 @@ class Loader(object):
self.log = CPLog(__name__)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
hdlr = handlers.RotatingFileHandler(os.path.join(self.log_dir, 'error.log'), 'a', 500000, 10, encoding = 'utf-8')
hdlr = handlers.RotatingFileHandler(os.path.join(self.log_dir, 'error.log'), 'a', 500000, 10)
hdlr.setLevel(logging.CRITICAL)
hdlr.setFormatter(formatter)
self.log.logger.addHandler(hdlr)

241
Desktop.py Normal file
View File

@@ -0,0 +1,241 @@
from esky.util import appdir_from_executable #@UnresolvedImport
from threading import Thread
from version import VERSION
from wx.lib.softwareupdate import SoftwareUpdate
import os
import sys
import time
import webbrowser
import wx
# Include proper dirs
if hasattr(sys, 'frozen'):
import libs
base_path = os.path.dirname(os.path.dirname(os.path.abspath(libs.__file__)))
else:
base_path = os.path.dirname(os.path.abspath(__file__))
def icon():
    """Return the tray icon, preferring the Mac PNG when it is present."""
    # NOTE(review): paths are relative to the process working directory — confirm
    # the launcher chdirs next to the icon files.
    icon_file = 'icon_mac.png' if os.path.isfile('icon_mac.png') else 'icon_windows.png'
    return wx.Icon(icon_file, wx.BITMAP_TYPE_PNG)
lib_dir = os.path.join(base_path, 'libs')
sys.path.insert(0, base_path)
sys.path.insert(0, lib_dir)
from couchpotato.environment import Env
class TaskBarIcon(wx.TaskBarIcon):
    """System-tray icon with an Open / About / Quit popup menu."""

    TBMENU_OPEN = wx.NewId()
    TBMENU_SETTINGS = wx.NewId()
    TBMENU_EXIT = wx.ID_EXIT

    closed = False
    menu = False
    enabled = False

    def __init__(self, frame):
        wx.TaskBarIcon.__init__(self)
        self.frame = frame

        self.SetIcon(icon())

        # Either mouse button pops up the same menu.
        self.Bind(wx.EVT_TASKBAR_LEFT_UP, self.OnTaskBarClick)
        self.Bind(wx.EVT_TASKBAR_RIGHT_UP, self.OnTaskBarClick)

        self.Bind(wx.EVT_MENU, self.onOpen, id = self.TBMENU_OPEN)
        self.Bind(wx.EVT_MENU, self.onSettings, id = self.TBMENU_SETTINGS)
        self.Bind(wx.EVT_MENU, self.onTaskBarClose, id = self.TBMENU_EXIT)

    def OnTaskBarClick(self, evt):
        popup = self.CreatePopupMenu()
        self.PopupMenu(popup)
        popup.Destroy()

    def enable(self):
        # Called once the server is reachable: unlock the menu entries.
        self.enabled = True

        if self.menu:
            self.open_menu.Enable(True)
            self.setting_menu.Enable(True)
            self.open_menu.SetText('Open')

    def CreatePopupMenu(self):
        # The menu is built lazily on first use and then reused.
        if not self.menu:
            self.menu = wx.Menu()
            self.open_menu = self.menu.Append(self.TBMENU_OPEN, 'Open')
            self.setting_menu = self.menu.Append(self.TBMENU_SETTINGS, 'About')
            self.exit_menu = self.menu.Append(self.TBMENU_EXIT, 'Quit')

            # Until enable() fires the entries stay greyed out.
            if not self.enabled:
                self.open_menu.Enable(False)
                self.setting_menu.Enable(False)
                self.open_menu.SetText('Loading...')

        return self.menu

    def onOpen(self, event):
        webbrowser.open(self.frame.parent.getSetting('base_url'))

    def onSettings(self, event):
        webbrowser.open(self.frame.parent.getSetting('base_url') + 'settings/about/')

    def onTaskBarClose(self, evt):
        # Guard against the close handler running twice.
        if self.closed:
            return
        self.closed = True

        self.RemoveIcon()
        wx.CallAfter(self.frame.Close)

    def makeIcon(self, img):
        if "wxMSW" in wx.PlatformInfo:
            img = img.Scale(16, 16)
        elif "wxGTK" in wx.PlatformInfo:
            img = img.Scale(22, 22)
        # NOTE(review): CopyFromBitmap normally takes a bitmap argument — confirm
        # this helper is actually exercised anywhere.
        return wx.IconFromBitmap(img.CopyFromBitmap())
class MainFrame(wx.Frame):
    """Hidden main frame that owns the tray icon; the app has no real window."""

    def __init__(self, parent):
        # FRAME_NO_TASKBAR: the app lives in the system tray only.
        wx.Frame.__init__(self, None, style = wx.FRAME_NO_TASKBAR)
        self.parent = parent
        self.tbicon = TaskBarIcon(self)
class WorkerThread(Thread):
    """Daemon thread that runs the CouchPotato server for the desktop app."""

    def __init__(self, desktop):
        Thread.__init__(self)
        self.daemon = True
        self._desktop = desktop
        self.start()

    def run(self):
        # Get options via arg
        from couchpotato.runner import getOptions
        args = ['--quiet']
        self.options = getOptions(args)

        # Load settings
        settings = Env.get('settings')
        settings.setFile(self.options.config_file)

        # Create data dir if needed
        self.data_dir = os.path.expanduser(Env.setting('data_dir'))
        if self.data_dir == '':
            from couchpotato.core.helpers.variable import getDataDir
            self.data_dir = getDataDir()

        if not os.path.isdir(self.data_dir):
            os.makedirs(self.data_dir)

        # Create logging dir
        self.log_dir = os.path.join(self.data_dir, 'logs')
        if not os.path.isdir(self.log_dir):
            os.mkdir(self.log_dir)

        try:
            from couchpotato.runner import runCouchPotato
            runCouchPotato(self.options, base_path, args,
                data_dir = self.data_dir, log_dir = self.log_dir,
                Env = Env, desktop = self._desktop)
        except:
            # NOTE(review): failures are swallowed so the tray app can still shut
            # down below; the server logs its own errors.
            pass

        self._desktop.frame.Close()
        self._desktop.ExitMainLoop()
class CouchPotatoApp(wx.App, SoftwareUpdate):
    """wx application wrapper: tray UI, esky auto-updates and the server thread.

    Class-level defaults; `settings` and `events` are replaced/filled per
    instance via setSettings()/addEvents().
    """

    settings = {}
    events = {}
    restart = False
    closing = False
    triggered_onClose = False

    def OnInit(self):
        """Initialise the updater, the hidden frame and the CouchPotato thread.

        :return: True so wx continues running the main loop.
        """

        # Updater
        base_url = 'https://api.couchpota.to/updates/%s'
        self.InitUpdates(base_url % VERSION + '/', 'https://couchpota.to/updates/%s' % 'changelog.html',
            icon = icon())

        self.frame = MainFrame(self)
        self.frame.Bind(wx.EVT_CLOSE, self.onClose)

        # CouchPotato thread
        self.worker = WorkerThread(self)

        return True

    def onAppLoad(self):
        # Server is reachable: enable the tray menu entries.
        self.frame.tbicon.enable()

    def setSettings(self, settings = None):
        """Replace the settings dict the tray UI reads (e.g. base_url).

        Bug fix: the original used a mutable default argument (`settings = {}`);
        that dict is shared across calls and could be mutated through
        self.settings. `None` keeps the call signature backward-compatible.
        """
        self.settings = settings if settings is not None else {}

    def getSetting(self, name):
        return self.settings.get(name)

    def addEvents(self, events = None):
        """Merge callbacks (e.g. 'onClose') into the event table.

        Bug fix: same mutable-default issue as setSettings.
        """
        if not events:
            return
        for name in events.iterkeys():
            self.events[name] = events[name]

    def onClose(self, event):
        # First close request tears down the tray icon; guarded so repeated
        # EVT_CLOSE events are harmless.
        if not self.closing:
            self.closing = True
            self.frame.tbicon.onTaskBarClose(event)

        # Fire the registered shutdown callback exactly once.
        onClose = self.events.get('onClose')
        if onClose and not self.triggered_onClose:
            self.triggered_onClose = True
            onClose(event)

    def afterShutdown(self, restart = False):
        # Called by the server after it has stopped; `restart` makes the
        # __main__ block re-exec the binary.
        self.frame.Destroy()
        self.restart = restart
        self.ExitMainLoop()
if __name__ == '__main__':
    app = CouchPotatoApp(redirect = False)
    app.MainLoop()

    # Give background threads a moment to wind down before a possible re-exec.
    time.sleep(1)

    if app.restart:

        def appexe_from_executable(exepath):
            # Map the running interpreter/binary back to the app's executable.
            appdir = appdir_from_executable(exepath)
            exename = os.path.basename(exepath)

            if sys.platform == "darwin":
                # On OS X the real binary lives inside Contents/MacOS.
                if os.path.isdir(os.path.join(appdir, "Contents", "MacOS")):
                    return os.path.join(appdir, "Contents", "MacOS", exename)

            return os.path.join(appdir, exename)

        exe = appexe_from_executable(sys.executable)
        os.chdir(os.path.dirname(exe))
        # Replace the current process with a fresh instance.
        os.execv(exe, [exe] + sys.argv[1:])

View File

@@ -40,6 +40,8 @@ class WebHandler(BaseHandler):
return
try:
if route == 'robots.txt':
self.set_header('Content-Type', 'text/plain')
self.write(views[route]())
except:
log.error("Failed doing web request '%s': %s", (route, traceback.format_exc()))
@@ -60,6 +62,13 @@ def index():
addView('', index)
# Web view
def robots():
return 'User-agent: * \n' \
'Disallow: /'
addView('robots.txt', robots)
# API docs
def apiDocs():
routes = list(api.keys())

View File

@@ -7,6 +7,7 @@ import urllib
from couchpotato.core.helpers.request import getParams
from couchpotato.core.logger import CPLog
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, asynchronous
@@ -50,24 +51,22 @@ class NonBlockHandler(RequestHandler):
start, stop = api_nonblock[route]
self.stopper = stop
start(self.onNewMessage, last_id = self.get_argument('last_id', None))
start(self.sendData, last_id = self.get_argument('last_id', None))
def onNewMessage(self, response):
if self.request.connection.stream.closed():
self.on_connection_close()
return
def sendData(self, response):
if not self.request.connection.stream.closed():
try:
self.finish(response)
except:
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
try:
self.finish(response)
except:
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
def on_connection_close(self):
self.removeStopper()
def removeStopper(self):
if self.stopper:
self.stopper(self.onNewMessage)
self.stopper(self.sendData)
self.stopper = None
@@ -83,10 +82,11 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
# Blocking API handler
class ApiHandler(RequestHandler):
route = None
@asynchronous
def get(self, route, *args, **kwargs):
route = route.strip('/')
self.route = route = route.strip('/')
if not api.get(route):
self.write('API call doesn\'t seem to exist')
self.finish()
@@ -123,11 +123,15 @@ class ApiHandler(RequestHandler):
except:
log.error('Failed write error "%s": %s', (route, traceback.format_exc()))
api_locks[route].release()
self.unlock()
post = get
def taskFinished(self, result, route):
IOLoop.current().add_callback(self.sendData, result, route)
self.unlock()
def sendData(self, result, route):
if not self.request.connection.stream.closed():
try:
@@ -135,14 +139,12 @@ class ApiHandler(RequestHandler):
jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.set_header("Content-Type", "text/javascript")
self.finish()
self.set_header('Content-Type', 'text/javascript')
self.finish(str(jsonp_callback) + '(' + json.dumps(result) + ')')
elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1])
else:
self.write(result)
self.finish()
self.finish(result)
except UnicodeDecodeError:
log.error('Failed proper encode: %s', traceback.format_exc())
except:
@@ -150,7 +152,9 @@ class ApiHandler(RequestHandler):
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
api_locks[route].release()
def unlock(self):
try: api_locks[self.route].release()
except: pass
def addApiView(route, func, static = False, docs = None, **kwargs):

View File

@@ -181,13 +181,13 @@ class Core(Plugin):
return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key'))
def version(self):
ver = fireEvent('updater.info', single = True)
ver = fireEvent('updater.info', single = True) or {'version': {}}
if os.name == 'nt': platf = 'windows'
elif 'Darwin' in platform.platform(): platf = 'osx'
else: platf = 'linux'
return '%s - %s-%s - v2' % (platf, ver.get('version')['type'], ver.get('version')['hash'])
return '%s - %s-%s - v2' % (platf, ver.get('version').get('type') or 'unknown', ver.get('version').get('hash') or 'unknown')
def versionView(self, **kwargs):
return {
@@ -290,7 +290,7 @@ config = [{
},
{
'name': 'permission_file',
'default': '0755',
'default': '0644',
'label': 'File CHMOD',
'description': 'See Folder CHMOD description, but for files',
},

View File

@@ -79,7 +79,9 @@ class Updater(Plugin):
try:
if self.conf('notification'):
info = self.updater.info()
version_date = datetime.fromtimestamp(info['update_version']['date'])
version_date = 'the future!'
if info['update_version']['date']:
version_date = datetime.fromtimestamp(info['update_version']['date'])
fireEvent('updater.updated', 'Updated to a new version with hash "%s", this version is from %s' % (info['update_version']['hash'], version_date), data = info)
except:
log.error('Failed notifying for update: %s', traceback.format_exc())
@@ -97,7 +99,9 @@ class Updater(Plugin):
if self.updater.check():
if not self.available_notified and self.conf('notification') and not self.conf('automatic'):
info = self.updater.info()
version_date = datetime.fromtimestamp(info['update_version']['date'])
version_date = 'the future!'
if info['update_version']['date']:
version_date = datetime.fromtimestamp(info['update_version']['date'])
fireEvent('updater.available', message = 'A new update with hash "%s" is available, this version is from %s' % (info['update_version']['hash'], version_date), data = info)
self.available_notified = True
return True
@@ -205,19 +209,28 @@ class GitUpdater(BaseUpdater):
def getVersion(self):
if not self.version:
hash = None
date = None
branch = self.branch
try:
output = self.repo.getHead() # Yes, please
log.debug('Git version output: %s', output.hash)
self.version = {
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, self.repo.getCurrentBranch().name or self.branch, output.hash[:8], datetime.fromtimestamp(output.getDate())),
'hash': output.hash[:8],
'date': output.getDate(),
'type': 'git',
'branch': self.repo.getCurrentBranch().name
}
hash = output.hash[:8]
date = output.getDate()
branch = self.repo.getCurrentBranch().name
except Exception as e:
log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e)
return 'No GIT'
self.version = {
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, branch, hash or 'unknown_hash', datetime.fromtimestamp(date) if date else 'unknown_date'),
'hash': hash,
'date': date,
'type': 'git',
'branch': branch
}
return self.version

View File

@@ -621,6 +621,8 @@ class Database(object):
except OperationalError:
log.error('Migrating from faulty database, probably a (too) old version: %s', traceback.format_exc())
rename_old = True
except:
log.error('Migration failed: %s', traceback.format_exc())

View File

@@ -20,14 +20,31 @@ class Blackhole(DownloaderBase):
status_support = False
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
# The folder needs to exist
if not directory or not os.path.isdir(directory):
log.error('No directory set for blackhole %s download.', data.get('protocol'))
else:
try:
# Filedata can be empty, which probably means it a magnet link
if not filedata or len(filedata) < 50:
try:
if data.get('protocol') == 'torrent_magnet':
@@ -36,13 +53,16 @@ class Blackhole(DownloaderBase):
except:
log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
# If it's still empty, don't know what to do!
if not filedata or len(filedata) < 50:
log.error('No nzb/torrent available: %s', data.get('url'))
return False
# Create filename with imdb id and other nice stuff
file_name = self.createFileName(data, filedata, media)
full_path = os.path.join(directory, file_name)
# People want thinks nice and tidy, create a subdir
if self.conf('create_subdir'):
try:
new_path = os.path.splitext(full_path)[0]
@@ -53,6 +73,8 @@ class Blackhole(DownloaderBase):
log.error('Couldnt create sub dir, reverting to old one: %s', full_path)
try:
# Make sure the file doesn't exist yet, no need in overwriting it
if not os.path.isfile(full_path):
log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
with open(full_path, 'wb') as f:
@@ -74,6 +96,10 @@ class Blackhole(DownloaderBase):
return False
def test(self):
""" Test and see if the directory is writable
:return: boolean
"""
directory = self.conf('directory')
if directory and os.path.isdir(directory):
@@ -88,6 +114,10 @@ class Blackhole(DownloaderBase):
return False
def getEnabledProtocol(self):
""" What protocols is this downloaded used for
:return: list with protocols
"""
if self.conf('use_for') == 'both':
return super(Blackhole, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent':
@@ -96,6 +126,12 @@ class Blackhole(DownloaderBase):
return ['nzb']
def isEnabled(self, manual = False, data = None):
""" Check if protocol is used (and enabled)
:param manual: The user has clicked to download a link through the webUI
:param data: dict returned from provider
Contains the release information
:return: boolean
"""
if not data: data = {}
for_protocol = ['both']
if data and 'torrent' in data.get('protocol'):

View File

@@ -25,8 +25,18 @@ class Deluge(DownloaderBase):
drpc = None
def connect(self, reconnect = False):
""" Connect to the delugeRPC, re-use connection when already available
:param reconnect: force reconnect
:return: DelugeRPC instance
"""
# Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':')
# Force host assignment
if len(host) == 1:
host.append(80)
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
@@ -37,6 +47,20 @@ class Deluge(DownloaderBase):
return self.drpc
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -91,11 +115,21 @@ class Deluge(DownloaderBase):
return self.downloadReturnId(remote_torrent)
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True) and self.drpc.test():
return True
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Deluge download status.')

View File

@@ -0,0 +1,427 @@
from base64 import b16encode, b32decode, b64encode
from distutils.version import LooseVersion
from hashlib import sha1
import httplib
import json
import os
import re
import urllib2
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from bencode import bencode as benc, bdecode
log = CPLog(__name__)
autoload = 'Hadouken'
class Hadouken(DownloaderBase):
    """Downloader plugin that sends torrents to a Hadouken (>= v4.5.6) daemon."""

    protocol = ['torrent', 'torrent_magnet']
    hadouken_api = None

    def connect(self):
        """Build the HadoukenAPI client from the configured host and API key.

        :return: True when host, port and API key are configured, else False.
        """
        # Load host from config and split out port.
        host = cleanHost(self.conf('host'), protocol = False).split(':')

        # Robustness: a host configured without an explicit port would raise
        # IndexError on host[1] below; report it as a config error instead.
        if len(host) < 2 or not isInt(host[1]):
            log.error('Config properties are not filled in correctly, port is missing.')
            return False

        if not self.conf('api_key'):
            log.error('Config properties are not filled in correctly, API key is missing.')
            return False

        self.hadouken_api = HadoukenAPI(host[0], port = host[1], api_key = self.conf('api_key'))

        return True

    def download(self, data = None, media = None, filedata = None):
        """ Send a torrent/nzb file to the downloader

        :param data: dict returned from provider
            Contains the release information
        :param media: media dict with information
            Used for creating the filename when possible
        :param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and send to this function
            This is done to have failed checking before using the downloader, so the downloader
            doesn't need to worry about that
        :return: boolean
            On failure returns false; the downloader should log its own errors
        """
        if not media: media = {}
        if not data: data = {}

        log.debug("Sending '%s' (%s) to Hadouken.", (data.get('name'), data.get('protocol')))

        if not self.connect():
            return False

        torrent_params = {}
        if self.conf('label'):
            torrent_params['label'] = self.conf('label')

        torrent_filename = self.createFileName(data, filedata, media)

        if data.get('protocol') == 'torrent_magnet':
            torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
            torrent_params['trackers'] = self.torrent_trackers
            torrent_params['name'] = torrent_filename
        else:
            info = bdecode(filedata)['info']
            torrent_hash = sha1(benc(info)).hexdigest().upper()

        # Convert base 32 to hex
        if len(torrent_hash) == 32:
            torrent_hash = b16encode(b32decode(torrent_hash))

        # Send request to Hadouken
        if data.get('protocol') == 'torrent_magnet':
            self.hadouken_api.add_magnet_link(data.get('url'), torrent_params)
        else:
            self.hadouken_api.add_file(filedata, torrent_params)

        return self.downloadReturnId(torrent_hash)

    def test(self):
        """ Tests the given host:port and API key """
        if not self.connect():
            return False

        version = self.hadouken_api.get_version()
        if not version:
            log.error('Could not get Hadouken version.')
            return False

        # The minimum required version of Hadouken is 4.5.6.
        if LooseVersion(version) >= LooseVersion('4.5.6'):
            return True

        log.error('Hadouken v4.5.6 (or newer) required. Found v%s', version)
        return False

    def getAllDownloadStatus(self, ids):
        """ Get status of all active downloads

        :param ids: list of (mixed) downloader ids
            Used to match the releases for this downloader as there could be
            other downloaders active that it should ignore
        :return: list of releases
        """
        log.debug('Checking Hadouken download status.')

        if not self.connect():
            return []

        release_downloads = ReleaseDownloadList(self)

        queue = self.hadouken_api.get_by_hash_list(ids)
        if not queue:
            return []

        for torrent in queue:
            if torrent is None:
                continue

            torrent_filelist = self.hadouken_api.get_files_by_hash(torrent['InfoHash'])
            torrent_files = []
            save_path = torrent['SavePath']

            # The 'Path' key for each file_item contains
            # the full path to the single file relative to the
            # torrents save path.
            # For a single file torrent the result would be,
            # - Save path: "C:\Downloads"
            # - file_item['Path'] = "file1.iso"
            # Resulting path: "C:\Downloads\file1.iso"
            # For a multi file torrent the result would be,
            # - Save path: "C:\Downloads"
            # - file_item['Path'] = "dirname/file1.iso"
            # Resulting path: "C:\Downloads\dirname/file1.iso"
            for file_item in torrent_filelist:
                torrent_files.append(sp(os.path.join(save_path, file_item['Path'])))

            release_downloads.append({
                'id': torrent['InfoHash'].upper(),
                'name': torrent['Name'],
                'status': self.get_torrent_status(torrent),
                'seed_ratio': self.get_seed_ratio(torrent),
                'original_status': torrent['State'],
                'timeleft': -1,
                # Bug fix: was `len(torrent_files == 1)`, which evaluates
                # `torrent_files == 1` (False) and then raises TypeError on
                # len(bool). Intended: single-file torrents live directly in
                # save_path, multi-file torrents in a subfolder.
                'folder': sp(save_path if len(torrent_files) == 1 else os.path.join(save_path, torrent['Name'])),
                'files': torrent_files
            })

        return release_downloads

    def get_seed_ratio(self, torrent):
        """ Returns the seed ratio for a given torrent.

        Keyword arguments:
        torrent -- The torrent to calculate seed ratio for.
        """
        up = torrent['TotalUploadedBytes']
        down = torrent['TotalDownloadedBytes']

        # NOTE(review): under Python 2 this is integer division, so the ratio
        # is truncated — confirm whether a float ratio was intended.
        if up > 0 and down > 0:
            return up / down

        return 0

    def get_torrent_status(self, torrent):
        """ Returns the CouchPotato status for a given torrent.

        Keyword arguments:
        torrent -- The torrent to translate status for.
        """
        if torrent['IsSeeding'] and torrent['IsFinished'] and torrent['Paused']:
            return 'completed'

        if torrent['IsSeeding']:
            return 'seeding'

        return 'busy'

    def pause(self, release_download, pause = True):
        """ Pauses or resumes the torrent specified by the ID field
        in release_download.

        Keyword arguments:
        release_download -- The CouchPotato release_download to pause/resume.
        pause -- Boolean indicating whether to pause or resume.
        """
        if not self.connect():
            return False

        return self.hadouken_api.pause(release_download['id'], pause)

    def removeFailed(self, release_download):
        """ Removes a failed torrent and also remove the data associated with it.

        Keyword arguments:
        release_download -- The CouchPotato release_download to remove.
        """
        log.info('%s failed downloading, deleting...', release_download['name'])

        if not self.connect():
            return False

        return self.hadouken_api.remove(release_download['id'], remove_data = True)

    def processComplete(self, release_download, delete_files = False):
        """ Removes the completed torrent from Hadouken and optionally removes the data
        associated with it.

        Keyword arguments:
        release_download -- The CouchPotato release_download to remove.
        delete_files: Boolean indicating whether to remove the associated data.
        """
        log.debug('Requesting Hadouken to remove the torrent %s%s.',
            (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))

        if not self.connect():
            return False

        return self.hadouken_api.remove(release_download['id'], remove_data = delete_files)
class HadoukenAPI(object):
    """Thin JSON-RPC 2.0 client for the Hadouken web API."""

    def __init__(self, host = 'localhost', port = 7890, api_key = None):
        self.url = 'http://' + str(host) + ':' + str(port)
        self.api_key = api_key
        self.requestId = 0

        # Shared opener with a stable identity and JSON accept header.
        self.opener = urllib2.build_opener()
        self.opener.addheaders = [('User-agent', 'couchpotato-hadouken-client/1.0'), ('Accept', 'application/json')]

        if not api_key:
            log.error('API key missing.')

    def add_file(self, filedata, torrent_params):
        """ Add a file to Hadouken with the specified parameters.

        Keyword arguments:
        filedata -- The binary torrent data.
        torrent_params -- Additional parameters for the file.
        """
        # Torrent payloads travel base64-encoded inside the JSON body.
        return self._request({
            'method': 'torrents.addFile',
            'params': [b64encode(filedata), torrent_params]
        })

    def add_magnet_link(self, magnetLink, torrent_params):
        """ Add a magnet link to Hadouken with the specified parameters.

        Keyword arguments:
        magnetLink -- The magnet link to send.
        torrent_params -- Additional parameters for the magnet link.
        """
        return self._request({
            'method': 'torrents.addUrl',
            'params': [magnetLink, torrent_params]
        })

    def get_by_hash_list(self, infoHashList):
        """ Gets a list of torrents filtered by the given info hash list.

        Keyword arguments:
        infoHashList -- A list of info hashes.
        """
        return self._request({
            'method': 'torrents.getByInfoHashList',
            'params': [infoHashList]
        })

    def get_files_by_hash(self, infoHash):
        """ Gets a list of files for the torrent identified by the
        given info hash.

        Keyword arguments:
        infoHash -- The info hash of the torrent to return files for.
        """
        return self._request({
            'method': 'torrents.getFiles',
            'params': [infoHash]
        })

    def get_version(self):
        """ Gets the version, commitish and build date of Hadouken. """
        result = self._request({
            'method': 'core.getVersion',
            'params': None
        })

        return result['Version'] if result else False

    def pause(self, infoHash, pause):
        """ Pauses/unpauses the torrent identified by the given info hash.

        Keyword arguments:
        infoHash -- The info hash of the torrent to operate on.
        pause -- If true, pauses the torrent. Otherwise resumes.
        """
        return self._request({
            'method': 'torrents.pause' if pause else 'torrents.resume',
            'params': [infoHash]
        })

    def remove(self, infoHash, remove_data = False):
        """ Removes the torrent identified by the given info hash and
        optionally removes the data as well.

        Keyword arguments:
        infoHash -- The info hash of the torrent to remove.
        remove_data -- If true, removes the data associated with the torrent.
        """
        return self._request({
            'method': 'torrents.remove',
            'params': [infoHash, remove_data]
        })

    def _request(self, data):
        """Send one JSON-RPC call; return its 'result' or False on any error."""
        self.requestId += 1

        data['jsonrpc'] = '2.0'
        data['id'] = self.requestId

        req = urllib2.Request(self.url + '/jsonrpc', data = json.dumps(data))
        req.add_header('Authorization', 'Token ' + self.api_key)
        req.add_header('Content-Type', 'application/json')

        try:
            handle = self.opener.open(req)
            raw = handle.read()
            handle.close()

            parsed = json.loads(raw)
            if not 'error' in parsed.keys():
                return parsed['result']

            log.error('JSONRPC error, %s: %s', parsed['error']['code'], parsed['error']['message'])
        except httplib.InvalidURL as err:
            log.error('Invalid Hadouken host, check your config %s', err)
        except urllib2.HTTPError as err:
            if err.code == 401:
                log.error('Invalid Hadouken API key, check your config')
            else:
                log.error('Hadouken HTTPError: %s', err)
        except urllib2.URLError as err:
            log.error('Unable to connect to Hadouken %s', err)

        return False
# Settings schema for the Hadouken downloader, rendered on the
# "Downloaders" tab of the CouchPotato settings UI.
config = [{
'name': 'hadouken',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'hadouken',
'label': 'Hadouken',
'description': 'Use <a href="http://www.hdkn.net">Hadouken</a> (>= v4.5.6) to download torrents.',
'wizard': True,
'options': [
# Radio-grouped with the other torrent downloaders: enabling this
# one disables the rest of the 'torrent' group.
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'torrent'
},
{
'name': 'host',
'default': 'localhost:7890'
},
# Token sent as the Authorization header by HadoukenAPI.
{
'name': 'api_key',
'label': 'API key',
'type': 'password'
},
{
'name': 'label',
'description': 'Label to add torrent as.'
}
]
}
]
}]

View File

@@ -23,6 +23,20 @@ class NZBGet(DownloaderBase):
rpc = 'xmlrpc'
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
A failure returns False, and the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
@@ -71,6 +85,10 @@ class NZBGet(DownloaderBase):
return False
def test(self):
""" Check if connection works
:return: bool
"""
rpc = self.getRPC()
try:
@@ -91,6 +109,13 @@ class NZBGet(DownloaderBase):
return True
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking NZBGet download status.')
@@ -163,12 +188,12 @@ class NZBGet(DownloaderBase):
nzb_id = nzb['NZBID']
if nzb_id in ids:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed',
'original_status': nzb['Status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir'])
})

View File

@@ -24,6 +24,20 @@ class NZBVortex(DownloaderBase):
session_id = None
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -45,6 +59,10 @@ class NZBVortex(DownloaderBase):
return False
def test(self):
""" Check if connection works
:return: bool
"""
try:
login_result = self.login()
except:
@@ -53,6 +71,13 @@ class NZBVortex(DownloaderBase):
return login_result
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
raw_statuses = self.call('nzb')

View File

@@ -19,6 +19,20 @@ class Pneumatic(DownloaderBase):
status_support = False
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -63,6 +77,10 @@ class Pneumatic(DownloaderBase):
return False
def test(self):
""" Check if connection works
:return: bool
"""
directory = self.conf('directory')
if directory and os.path.isdir(directory):

View File

@@ -0,0 +1,68 @@
from .main import PutIO
def autoload():
    """Plugin entry point: build and return the put.io downloader."""
    return PutIO()
# Settings schema for the put.io downloader, rendered on the
# "Downloaders" tab of the CouchPotato settings UI.
config = [{
    'name': 'putio',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'putio',
            'label': 'put.io',
            'description': 'This will start a torrent download on <a href="http://put.io">Put.io</a>.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'torrent',
                },
                {
                    'name': 'oauth_token',
                    'label': 'oauth_token',
                    'description': 'This is the OAUTH_TOKEN from your putio API',
                    'advanced': True,
                },
                {
                    'name': 'folder',
                    # Typo fixes only ("first first" -> "first").
                    'description': ('The folder on putio where you want the upload to go', 'Will find the first folder that matches this name'),
                    'default': 0,
                },
                {
                    'name': 'callback_host',
                    'description': 'External reachable url to CP so put.io can do its thing',
                },
                {
                    'name': 'download',
                    'description': 'Set this to have CouchPotato download the file from Put.io',
                    'type': 'bool',
                    'default': 0,
                },
                {
                    'name': 'delete_file',
                    'description': ('Set this to remove the file from putio after successful download', 'Does nothing if you don\'t select download'),
                    'type': 'bool',
                    'default': 0,
                },
                {
                    'name': 'download_dir',
                    'type': 'directory',
                    'label': 'Download Directory',
                    'description': 'The Directory to download files to, does nothing if you don\'t select download',
                },
                {
                    'name': 'manual',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
                },
            ],
        }
    ],
}]

View File

@@ -0,0 +1,181 @@
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEventAsync
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from pio import api as pio
import datetime
log = CPLog(__name__)
autoload = 'Putiodownload'
class PutIO(DownloaderBase):
    """Downloader that hands torrents/magnets to the put.io web service.

    Optionally (the 'download' setting) put.io calls back into CP once the
    remote transfer finishes, and the file is then fetched to a local
    directory via the 'putio.download' event.
    """

    protocol = ['torrent', 'torrent_magnet']
    oauth_authenticate = 'https://api.couchpota.to/authorize/putio/'

    def __init__(self):
        # file_ids currently being fetched from put.io to local disk.
        # Instance attribute (was a shared mutable class attribute).
        self.downloading_list = []

        addApiView('downloader.putio.getfrom', self.getFromPutio, docs = {
            'desc': 'Allows you to download file from Put.io',
        })
        addApiView('downloader.putio.auth_url', self.getAuthorizationUrl)
        addApiView('downloader.putio.credentials', self.getCredentials)
        addEvent('putio.download', self.putioDownloader)

        return super(PutIO, self).__init__()

    def recursionFolder(self, client, folder = 0, tfolder = ''):
        """Depth-first search of the put.io folder tree for a folder named
        `tfolder`; returns its id, or 0 when not found."""
        for entry in client.File.list(folder):
            if entry.content_type == 'application/x-directory':
                if entry.name == tfolder:
                    return entry.id
                found = self.recursionFolder(client, entry.id, tfolder)
                if found != 0:
                    return found
        return 0

    def convertFolder(self, client, folder):
        """Translate a configured folder name into a put.io folder id.
        0 means the root folder."""
        if folder == 0:
            return 0
        return self.recursionFolder(client, 0, folder)

    def download(self, data = None, media = None, filedata = None):
        """Start a transfer on put.io for the release url in `data`.

        :param data: dict returned from provider, contains the release info
        :param media: media dict with information (unused here)
        :param filedata: downloaded torrent filedata (unused; put.io fetches
            the url itself)
        :return: download id wrapper, or whatever downloadReturnId yields
        """
        if not media: media = {}
        if not data: data = {}

        log.info('Sending "%s" to put.io', data.get('name'))
        url = data.get('url')

        client = pio.Client(self.conf('oauth_token'))
        putio_folder = self.convertFolder(client, self.conf('folder'))
        log.debug('putioFolder ID is %s', putio_folder)

        # Note: callback_host is NOT our address, it's the internet-reachable
        # host that put.io can call back to once the transfer completes.
        # The original applied .strip('/') to the literal key 'api_base'
        # (a no-op); the Env value is used unmodified, same runtime behaviour.
        callbackurl = None
        if self.conf('download'):
            callbackurl = 'http://' + self.conf('callback_host') + '%sdownloader.putio.getfrom/' % Env.get('api_base')

        resp = client.Transfer.add_url(url, callback_url = callbackurl, parent_id = putio_folder)
        log.debug('resp is %s', resp.id)
        return self.downloadReturnId(resp.id)

    def test(self):
        """Check the OAuth token by listing files; returns bool."""
        try:
            client = pio.Client(self.conf('oauth_token'))
            if client.File.list():
                return True
        except:
            log.info('Failed to get file listing, check OAUTH_TOKEN')
        return False

    def getAuthorizationUrl(self, host = None, **kwargs):
        """Build the couchpota.to OAuth bounce url for registering put.io."""
        callback_url = cleanHost(host) + '%sdownloader.putio.credentials/' % (Env.get('api_base').lstrip('/'))
        log.debug('callback_url is %s', callback_url)

        target_url = self.oauth_authenticate + "?target=" + callback_url
        log.debug('target_url is %s', target_url)

        return {
            'success': True,
            'url': target_url,
        }

    def getCredentials(self, **kwargs):
        """Store the OAuth token handed back by the authorize redirect."""
        try:
            oauth_token = kwargs.get('oauth')
        except:
            return 'redirect', Env.get('web_base') + 'settings/downloaders/'

        log.debug('oauth_token is: %s', oauth_token)
        self.conf('oauth_token', value = oauth_token)
        return 'redirect', Env.get('web_base') + 'settings/downloaders/'

    def getAllDownloadStatus(self, ids):
        """ Get status of all active downloads

        :param ids: list of (mixed) downloader ids
        :return: list of releases
        """
        log.debug('Checking putio download status.')

        client = pio.Client(self.conf('oauth_token'))
        transfers = client.Transfer.list()
        log.debug(transfers)

        release_downloads = ReleaseDownloadList(self)
        for t in transfers:
            if t.id not in ids:
                continue

            log.debug('downloading list is %s', self.downloading_list)

            status = 'busy'
            if t.status == "COMPLETED" and self.conf('download') == False:
                status = 'completed'
            elif t.status == "COMPLETED" and self.conf('download') == True:
                # Remote transfer done; still 'busy' while a local fetch runs.
                status = 'completed'
                if self.downloading_list:
                    status = 'busy'
                else:
                    # Guard against a race: put.io may not have fired the
                    # callback yet, so wait 5 minutes after finished_at.
                    finished = datetime.datetime.strptime(t.finished_at, "%Y-%m-%dT%H:%M:%S")
                    if datetime.datetime.utcnow() - finished < datetime.timedelta(minutes = 5):
                        status = 'busy'

            release_downloads.append({
                'id': t.id,
                'name': t.name,
                'status': status,
                'timeleft': t.estimated_time,
            })

        return release_downloads

    def putioDownloader(self, fid):
        """Event handler: fetch the finished put.io file `fid` to disk."""
        log.info('Put.io Real downloader called with file_id: %s', fid)

        client = pio.Client(self.conf('oauth_token'))
        log.debug('About to get file List')
        putio_folder = self.convertFolder(client, self.conf('folder'))
        log.debug('PutioFolderID is %s', putio_folder)

        downloaddir = self.conf('download_dir')
        for f in client.File.list(parent_id = putio_folder):
            if str(f.id) == str(fid):
                client.File.download(f, dest = downloaddir, delete_after_download = self.conf('delete_file'))
                # Once the download is complete, remove it from the running list.
                self.downloading_list.remove(fid)

        return True

    def getFromPutio(self, **kwargs):
        """API endpoint hit by put.io's callback; queues the local fetch."""
        try:
            file_id = str(kwargs.get('file_id'))
        except:
            return {
                'success': False,
            }

        log.info('Put.io Download has been called file_id is %s', file_id)
        if file_id not in self.downloading_list:
            self.downloading_list.append(file_id)
            fireEventAsync('putio.download', fid = file_id)
            return {
                'success': True,
            }

        return {
            'success': False,
        }

View File

@@ -0,0 +1,68 @@
// MooTools UI glue for the put.io downloader settings: injects a
// register/unregister button pair next to the provider's "test" button.
var PutIODownloader = new Class({
initialize: function(){
var self = this;
// Wait for the settings page before touching its fieldsets.
App.addEvent('loadSettings', self.addRegisterButton.bind(self));
},
addRegisterButton: function(){
var self = this;
var setting_page = App.getPage('Settings');
setting_page.addEvent('create', function(){
var fieldset = setting_page.tabs.downloaders.groups.putio,
l = window.location;
// Count non-empty text inputs; > 0 means an account is registered.
var putio_set = 0;
fieldset.getElements('input[type=text]').each(function(el){
putio_set += +(el.get('value') != '');
});
new Element('.ctrlHolder').adopt(
// Unregister button
(putio_set > 0) ?
[
self.unregister = new Element('a.button.red', {
'text': 'Unregister "'+fieldset.getElement('input[name*=oauth_token]').get('value')+'"',
'events': {
'click': function(){
// Clearing the token field and firing 'change' persists the removal.
fieldset.getElements('input[name*=oauth_token]').set('value', '').fireEvent('change');
self.unregister.destroy();
self.unregister_or.destroy();
}
}
}),
self.unregister_or = new Element('span[text=or]')
]
: null,
// Register button
new Element('a.button', {
'text': putio_set > 0 ? 'Register a different account' : 'Register your put.io account',
'events': {
'click': function(){
// Ask the backend for the OAuth bounce url, then navigate to it.
Api.request('downloader.putio.auth_url', {
'data': {
'host': l.protocol + '//' + l.hostname + (l.port ? ':' + l.port : '')
},
'onComplete': function(json){
window.location = json.url;
}
});
}
}
})
).inject(fieldset.getElement('.test_button'), 'before');
})
}
});
window.addEvent('domready', function(){
new PutIODownloader();
});

View File

@@ -41,12 +41,30 @@ class qBittorrent(DownloaderBase):
return self.qb
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect():
return True
return False
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -95,6 +113,14 @@ class qBittorrent(DownloaderBase):
return 'busy'
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking qBittorrent download status.')
if not self.connect():

View File

@@ -84,6 +84,10 @@ class rTorrent(DownloaderBase):
return self.rt
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True):
return True
@@ -94,6 +98,20 @@ class rTorrent(DownloaderBase):
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -161,6 +179,14 @@ class rTorrent(DownloaderBase):
return 'completed'
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking rTorrent download status.')
if not self.connect():

View File

@@ -21,6 +21,21 @@ class Sabnzbd(DownloaderBase):
protocol = ['nzb']
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -69,6 +84,11 @@ class Sabnzbd(DownloaderBase):
return False
def test(self):
""" Check if connection works
Return message if an old version of SAB is used
:return: bool
"""
try:
sab_data = self.call({
'mode': 'version',
@@ -89,6 +109,13 @@ class Sabnzbd(DownloaderBase):
return True
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking SABnzbd download status.')

View File

@@ -19,6 +19,21 @@ class Synology(DownloaderBase):
status_support = False
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -50,6 +65,10 @@ class Synology(DownloaderBase):
return self.downloadReturnId('') if response else False
def test(self):
""" Check if connection works
:return: bool
"""
host = cleanHost(self.conf('host'), protocol = False).split(':')
try:
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
@@ -118,7 +137,7 @@ class SynologyRPC(object):
def _req(self, url, args, files = None):
response = {'success': False}
try:
req = requests.post(url, data = args, files = files)
req = requests.post(url, data = args, files = files, verify = False)
req.raise_for_status()
response = json.loads(req.text)
if response['success']:

View File

@@ -34,6 +34,21 @@ class Transmission(DownloaderBase):
return self.trpc
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -78,19 +93,32 @@ class Transmission(DownloaderBase):
log.error('Failed sending torrent to Transmission')
return False
data = remote_torrent.get('torrent-added') or remote_torrent.get('torrent-duplicate')
# Change settings of added torrents
if torrent_params:
self.trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
self.trpc.set_torrent(data['hashString'], torrent_params)
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
return self.downloadReturnId(data['hashString'])
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect() and self.trpc.get_session():
return True
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Transmission download status.')
@@ -119,6 +147,8 @@ class Transmission(DownloaderBase):
status = 'failed'
elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] == 16 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] in [5, 6]:
status = 'seeding'

View File

@@ -51,6 +51,21 @@ class uTorrent(DownloaderBase):
return self.utorrent_api
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -120,6 +135,10 @@ class uTorrent(DownloaderBase):
return self.downloadReturnId(torrent_hash)
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect():
build_version = self.utorrent_api.get_build()
if not build_version:
@@ -131,6 +150,13 @@ class uTorrent(DownloaderBase):
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking uTorrent download status.')

View File

@@ -37,27 +37,19 @@ def toUnicode(original, *args):
except:
try:
detected = detect(original)
if detected.get('encoding') == 'utf-8':
return original.decode('utf-8')
try:
if detected.get('confidence') > 0.8:
return original.decode(detected.get('encoding'))
except:
pass
return ek(original, *args)
except:
raise
except:
log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc()))
ascii_text = str(original).encode('string_escape')
return toUnicode(ascii_text)
return 'ERROR DECODING STRING'
def toUTF8(original):
try:
if isinstance(original, str) and len(original) > 0:
# Try to detect
detected = detect(original)
return original.decode(detected.get('encoding')).encode('utf-8')
else:
return original
except:
#log.error('Failed encoding to UTF8: %s', traceback.format_exc())
raise
def ss(original, *args):
@@ -103,7 +95,7 @@ def ek(original, *args):
if isinstance(original, (str, unicode)):
try:
from couchpotato.environment import Env
return original.decode(Env.get('encoding'))
return original.decode(Env.get('encoding'), 'ignore')
except UnicodeDecodeError:
raise

View File

@@ -1,51 +0,0 @@
import os
from chardet import detect
from couchpotato import Env
fs_enc = Env.get('fs_encoding')
def list_dir(path, full_path = True):
"""
List directory don't error when it doesn't exist
"""
path = unicode_path(path)
if os.path.isdir(path):
for f in os.listdir(path):
if full_path:
yield join(path, f)
else:
yield f
def join(*args):
"""
Join path, encode properly before joining
"""
return os.path.join(*[safe(x) for x in args])
def unicode_path(path):
"""
Convert back to unicode
:param path: path string
"""
if isinstance(path, str):
detected = detect(path)
print detected
path = path.decode(detected.get('encoding'))
path = path.decode('unicode_escape')
return path
def safe(path):
if isinstance(path, unicode):
return path.encode('unicode_escape')
return path

View File

@@ -1,6 +1,5 @@
import logging
import re
import traceback
class CPLog(object):
@@ -55,19 +54,19 @@ class CPLog(object):
def safeMessage(self, msg, replace_tuple = ()):
from couchpotato.core.helpers.encoding import ss, toUTF8
from couchpotato.core.helpers.encoding import ss, toUnicode
msg = toUTF8(msg)
msg = ss(msg)
try:
if isinstance(replace_tuple, tuple):
msg = msg % tuple([toUTF8(x) for x in list(replace_tuple)])
msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)])
elif isinstance(replace_tuple, dict):
msg = msg % dict((k, toUTF8(v)) for k, v in replace_tuple.iteritems())
msg = msg % dict((k, ss(v) if not isinstance(v, (int, float)) else v) for k, v in replace_tuple.iteritems())
else:
msg = msg % toUTF8(replace_tuple)
except:
self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, traceback.format_exc()))
msg = msg % ss(replace_tuple)
except Exception as e:
self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))
self.setup()
if not self.is_develop:
@@ -84,4 +83,4 @@ class CPLog(object):
except:
pass
return toUTF8(msg)
return toUnicode(msg)

View File

@@ -1,9 +1,10 @@
import os
import traceback
from couchpotato import CPLog
from couchpotato import CPLog, md5
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getExt
from couchpotato.core.plugins.base import Plugin
import six
@@ -92,7 +93,15 @@ class MediaBase(Plugin):
if not isinstance(image, (str, unicode)):
continue
if file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
# Check if it has top image
filename = '%s.%s' % (md5(image), getExt(image))
existing = existing_files.get(file_type, [])
has_latest = False
for x in existing:
if filename in x:
has_latest = True
if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
file_path = fireEvent('file.download', url = image, single = True)
if file_path:
existing_files[file_type] = [toUnicode(file_path)]

View File

@@ -456,6 +456,11 @@ class MediaPlugin(MediaBase):
deleted = True
elif new_media_status:
media['status'] = new_media_status
# Remove profile (no use for in manage)
if new_media_status == 'done':
media['profile_id'] = None
db.update(media)
fireEvent('media.untag', media['_id'], 'recent', single = True)
@@ -491,7 +496,7 @@ class MediaPlugin(MediaBase):
}
})
def restatus(self, media_id, tag_recent = True):
def restatus(self, media_id, tag_recent = True, allowed_restatus = None):
try:
db = get_db()
@@ -526,7 +531,7 @@ class MediaPlugin(MediaBase):
m['status'] = previous_status
# Only update when status has changed
if previous_status != m['status']:
if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus):
db.update(m)
# Tag media as recent

View File

@@ -94,6 +94,8 @@ class Provider(Plugin):
try:
data = XMLTree.fromstring(ss(data))
return self.getElements(data, item_path)
except XMLTree.ParseError:
log.error('Invalid XML returned, check "%s" manually for issues', url)
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

View File

@@ -68,8 +68,12 @@ class Base(NZBProvider, RSS):
if not date:
date = self.getTextElement(nzb, 'pubDate')
nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
name = self.getTextElement(nzb, 'title')
detail_url = self.getTextElement(nzb, 'guid')
nzb_id = detail_url.split('/')[-1:].pop()
if '://' not in detail_url:
detail_url = (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id)
if not name:
continue
@@ -103,7 +107,7 @@ class Base(NZBProvider, RSS):
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
'detail_url': (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id),
'detail_url': detail_url,
'content': self.getTextElement(nzb, 'description'),
'description': description,
'score': host['extra_score'],
@@ -183,7 +187,7 @@ class Base(NZBProvider, RSS):
return 'try_next'
try:
data = self.urlopen(url, show_error = False)
data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
self.limits_reached[host] = False
return data
except HTTPError as e:

View File

@@ -1,13 +1,9 @@
from urlparse import urlparse, parse_qs
import time
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from dateutil.parser import parse
log = CPLog(__name__)
@@ -16,27 +12,19 @@ log = CPLog(__name__)
class Base(NZBProvider, RSS):
urls = {
'search': 'https://rss.omgwtfnzbs.org/rss-search.php?%s',
'detail_url': 'https://omgwtfnzbs.org/details.php?id=%s',
'search': 'https://api.omgwtfnzbs.org/json/?%s',
}
http_time_between_calls = 1 # Seconds
cat_ids = [
([15], ['dvdrip']),
([15], ['dvdrip', 'scr', 'r5', 'tc', 'ts', 'cam']),
([15, 16], ['brrip']),
([16], ['720p', '1080p', 'bd50']),
([17], ['dvdr']),
]
cat_backup_id = 'movie'
def search(self, movie, quality):
if quality['identifier'] in fireEvent('quality.pre_releases', single = True):
return []
return super(Base, self).search(movie, quality)
def _searchOnTitle(self, title, movie, quality, results):
q = '%s %s' % (title, movie['info']['year'])
@@ -47,22 +35,20 @@ class Base(NZBProvider, RSS):
'api': self.conf('api_key', default = ''),
})
nzbs = self.getRSSData(self.urls['search'] % params)
nzbs = self.getJsonData(self.urls['search'] % params)
for nzb in nzbs:
if isinstance(nzbs, list):
for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0]
results.append({
'id': nzb_id,
'name': toUnicode(self.getTextElement(nzb, 'title')),
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'],
'detail_url': self.urls['detail_url'] % nzb_id,
'description': self.getTextElement(nzb, 'description')
})
results.append({
'id': nzb.get('nzbid'),
'name': toUnicode(nzb.get('release')),
'age': self.calculateAge(tryInt(nzb.get('usenetage'))),
'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024,
'url': nzb.get('getnzb'),
'detail_url': nzb.get('details'),
'description': nzb.get('weblink')
})
config = [{

View File

@@ -13,11 +13,11 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'http://www.bit-hdtv.com/',
'login': 'http://www.bit-hdtv.com/takelogin.php',
'login_check': 'http://www.bit-hdtv.com/messages.php',
'detail': 'http://www.bit-hdtv.com/details.php?id=%s',
'search': 'http://www.bit-hdtv.com/torrents.php?',
'test': 'https://www.bit-hdtv.com/',
'login': 'https://www.bit-hdtv.com/takelogin.php',
'login_check': 'https://www.bit-hdtv.com/messages.php',
'detail': 'https://www.bit-hdtv.com/details.php?id=%s',
'search': 'https://www.bit-hdtv.com/torrents.php?',
}
# Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
@@ -93,7 +93,7 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'BiT-HDTV',
'description': '<a href="http://bit-hdtv.com">BiT-HDTV</a>',
'description': '<a href="https://bit-hdtv.com">BiT-HDTV</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC',
'options': [

View File

@@ -0,0 +1,130 @@
import re
import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://hdaccess.net/',
'detail': 'https://hdaccess.net/details.php?id=%s',
'search': 'https://hdaccess.net/searchapi.php?apikey=%s&username=%s&imdbid=%s&internal=%s',
'download': 'https://hdaccess.net/grab.php?torrent=%s&apikey=%s',
}
http_time_between_calls = 1 # Seconds
def _search(self, movie, quality, results):
data = self.getJsonData(self.urls['search'] % (self.conf('apikey'), self.conf('username'), getIdentifier(movie), self.conf('internal_only')))
if data:
try:
#for result in data[]:
for key, result in data.iteritems():
if tryInt(result['total_results']) == 0:
return
torrentscore = self.conf('extra_score')
releasegroup = result['releasegroup']
resolution = result['resolution']
encoding = result['encoding']
freeleech = tryInt(result['freeleech'])
seeders = tryInt(result['seeders'])
torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)
if freeleech > 0 and self.conf('prefer_internal'):
torrent_desc += '/ Internal'
torrentscore += 200
if seeders == 0:
torrentscore = 0
name = result['release_name']
year = tryInt(result['year'])
results.append({
'id': tryInt(result['torrentid']),
'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
'url': self.urls['download'] % (result['torrentid'], self.conf('apikey')),
'detail_url': self.urls['detail'] % result['torrentid'],
'size': tryInt(result['size']),
'seeders': tryInt(result['seeders']),
'leechers': tryInt(result['leechers']),
'age': tryInt(result['age']),
'score': torrentscore
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
config = [{
'name': 'hdaccess',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'HDAccess',
'wizard': True,
'description': '<a href="https://hdaccess.net">HDAccess</a>',
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAADuUlEQVQ4yz3T209bdQAH8O/vnNNzWno5FIpAKZdSLi23gWMDtumWuSXOyzJj9M1kyIOPS1xiYuKe9GUPezZZnGIiMTqTxS1bdIuYkG2MWKBAKYVszOgKFkrbA+259HfO+fli/PwPHzI+Pg5CCEAI2VcUlEsl1tHdU7P5bGOkWChEaaUCwvHpmkD93POn6bwgCMQGAMYYYwyCruuQnE7SPzjIstvb8l+bm5fXkokJSmlQEkUQAIpSRH5vd0tyum7I/sA1Z5VH2ctmiGWZjHw4McE1NAZtQ9fD25kXt1VN7es7dNjuGRjiJFeVpWo6slsZPhF/Ys/PPeIs2056ff7zIOS5rpU5/viJEwwEnu3Mi18dojjw0aWP6amz57h9RSE/35zinq2nuGjvIQwOj7K2SKeZWkk0auXSSZ+/ZopSy+CbW1pQKpWu6Jr2/qVPPqWRjm6HWi6Tm999g3RyGbndLCqGgVBrO3F7fHykK0YX47NNtGLYlBq/c+H2iD+3k704dHQUDcFmQVXLyP6zhfTqCl45fQYjx17FemoJunoAk1bQFGoVhkdPwNC0ix2dMT+3llodM02rKdo7gN3dHAEhuH/vNgDg3Pl3cPaNt2GZJpYX5lBbFwClBukfGobL5WrayW6NccVCISY4HIQxYts2Q3J5CXOPHuLlo6NoCoXQ2hbG0JFRpJYWcVDIQ5ZlyL5qW5b9hNlWjKsYBgzDgKppMCoGHty7A0orOHbyNNweL+obGnDm9TdhWSYS8Vn4a2shOZ0QJRGSKIHjeGGtWNhjqqpyG+k04k8eozPai9ZwByavf4kfpyZxZGwMfYOHsbwQx34hB5dL4syKweRq/xpXHwzNapqWSSYWMDszzYqFPEaOn4KiKJiZfoCZ6d8Am+GtC++iXCpjaf4P9vefT8HzfKarp3eWRKMxCILwuWXSz977YIK2RTodDoGH1+OG1+tDlbsKkuiAJEngeWBjNUUnv7rucIiOLyzTvMKJTgnVtbVXLctK3L31g+NAUajL5bEptaDpOnTdgGkzVHl9drms0ju3fnJIkphoaQtfbQiFwAcCAY5wnCE5Xff3i8XX4o9nGksH+8zl9hAGZlWMCivkc9z0L3fZ999+LTCGZKi55YJTFHfye3sc6e/vB88LpK6+iWlqSS4WcpcNXZtwOp3B6mo/REmCSSkEgd+qq3vpRkt75Fp9Y1BZWZwnhq4zEovF/u/MATAti4U7umvyu9kR27aikihC9vvTnV2xufVUMu/2uIksy/9tZvgX49fLmAMx3bsAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
'description': 'Enter your site username.',
},
{
'name': 'apikey',
'default': '',
'label': 'API Key',
'description': 'Enter your site api key. This can be find on <a href="https://hdaccess.net/usercp.php?action=security">Profile Security</a>',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 0,
'description': 'Will not be (re)moved until this seed ratio is met. HDAccess minimum is 1:1.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 0,
'description': 'Will not be (re)moved until this seed time (in hours) is met. HDAccess minimum is 48 hours.',
},
{
'name': 'prefer_internal',
'advanced': True,
'type': 'bool',
'default': 1,
'description': 'Favors internal releases over non-internal releases.',
},
{
'name': 'internal_only',
'advanced': True,
'label': 'Internal Only',
'type': 'bool',
'default': False,
'description': 'Only download releases marked as HDAccess internal',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]

View File

@@ -29,6 +29,9 @@ class Base(TorrentProvider):
}
post_data.update(params)
if self.conf('internal_only'):
post_data.update({'origin': [1]})
try:
result = self.getJsonData(self.urls['api'], data = json.dumps(post_data))
@@ -110,6 +113,14 @@ config = [{
'default': 0,
'description': 'Starting score for each release found via this provider.',
},
{
'name': 'internal_only',
'advanced': True,
'label': 'Internal Only',
'type': 'bool',
'default': False,
'description': 'Only download releases marked as HDBits internal'
}
],
},
],

View File

@@ -14,11 +14,11 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://www.iptorrents.com/',
'base_url': 'https://www.iptorrents.com',
'login': 'https://www.iptorrents.com/torrents/',
'login_check': 'https://www.iptorrents.com/inbox.php',
'search': 'https://www.iptorrents.com/torrents/?%s%%s&q=%s&qf=ti&p=%%d',
'test': 'https://iptorrents.eu/',
'base_url': 'https://iptorrents.eu',
'login': 'https://iptorrents.eu/torrents/',
'login_check': 'https://iptorrents.eu/inbox.php',
'search': 'https://iptorrents.eu/torrents/?%s%%s&q=%s&qf=ti&p=%%d',
}
http_time_between_calls = 1 # Seconds
@@ -120,7 +120,7 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'IPTorrents',
'description': '<a href="http://www.iptorrents.com">IPTorrents</a>',
'description': '<a href="https://iptorrents.eu">IPTorrents</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=',
'options': [

View File

@@ -42,6 +42,7 @@ class Base(TorrentProvider):
link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
torrent_id = link['href'].replace('details?id=', '')
@@ -51,7 +52,7 @@ class Base(TorrentProvider):
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
'seeders': tryInt(result.find('td', attrs = {'class': 'ttr_seeders'}).find('a').string),
'seeders': tryInt(seeders.string) if seeders else 0,
'leechers': tryInt(leechers.string) if leechers else 0,
'get_more_info': self.getMoreInfo,
})

View File

@@ -1,7 +1,7 @@
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -56,11 +56,12 @@ class Base(TorrentProvider):
full_id = link['href'].replace('details.php?id=', '')
torrent_id = full_id[:6]
name = toUnicode(link.get('title', link.contents[0]).encode('ISO-8859-1')).strip()
results.append({
'id': torrent_id,
'name': link.contents[0],
'url': self.urls['download'] % (torrent_id, link.contents[0]),
'name': name,
'url': self.urls['download'] % (torrent_id, name),
'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
'seeders': tryInt(cells[8].find('span').contents[0]),

View File

@@ -1,3 +1,4 @@
import re
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -8,12 +9,12 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'http://www.td.af/',
'login': 'http://www.td.af/torrents/',
'login_check': 'http://www.torrentday.com/userdetails.php',
'detail': 'http://www.td.af/details.php?id=%s',
'search': 'http://www.td.af/V3/API/API.php',
'download': 'http://www.td.af/download.php/%s/%s',
'test': 'https://torrentday.eu/',
'login': 'https://torrentday.eu/torrents/',
'login_check': 'https://torrentday.eu/userdetails.php',
'detail': 'https://torrentday.eu/details.php?id=%s',
'search': 'https://torrentday.eu/V3/API/API.php',
'download': 'https://torrentday.eu/download.php/%s/%s',
}
http_time_between_calls = 1 # Seconds
@@ -55,6 +56,10 @@ class Base(TorrentProvider):
}
def loginSuccess(self, output):
often = re.search('You tried too often, please wait .*</div>', output)
if often:
raise Exception(often.group(0)[:-6].strip())
return 'Password not correct' not in output
def loginCheckSuccess(self, output):
@@ -68,7 +73,7 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentDay',
'description': '<a href="http://www.td.af/">TorrentDay</a>',
'description': '<a href="https://torrentday.eu/">TorrentDay</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=',
'options': [

View File

@@ -17,7 +17,7 @@ class Base(TorrentProvider):
'login': 'https://www.torrentleech.org/user/account/login/',
'login_check': 'https://torrentleech.org/user/messages',
'detail': 'https://www.torrentleech.org/torrent/%s',
'search': 'https://www.torrentleech.org/torrents/browse/index/query/%s/categories/%d',
'search': 'https://www.torrentleech.org/torrents/browse/index/query/%s/categories/%s',
'download': 'https://www.torrentleech.org%s',
}

View File

@@ -13,12 +13,12 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'http://torrentshack.eu/',
'login': 'http://torrentshack.eu/login.php',
'login_check': 'http://torrentshack.eu/inbox.php',
'detail': 'http://torrentshack.eu/torrent/%s',
'search': 'http://torrentshack.eu/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
'download': 'http://torrentshack.eu/%s',
'test': 'https://theshack.us.to/',
'login': 'https://theshack.us.to/login.php',
'login_check': 'https://theshack.us.to/inbox.php',
'detail': 'https://theshack.us.to/torrent/%s',
'search': 'https://theshack.us.to/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
'download': 'https://theshack.us.to/%s',
}
http_time_between_calls = 1 # Seconds
@@ -42,6 +42,7 @@ class Base(TorrentProvider):
link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent
url = result.find('td', attrs = {'class': 'torrent_td'}).find('a')
size = result.find('td', attrs = {'class': 'size'}).contents[0].strip('\n ')
tds = result.find_all('td')
results.append({
@@ -49,7 +50,7 @@ class Base(TorrentProvider):
'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['download'] % link['href'],
'size': self.parseSize(result.find_all('td')[5].string),
'size': self.parseSize(size),
'seeders': tryInt(tds[len(tds)-2].string),
'leechers': tryInt(tds[len(tds)-1].string),
})

View File

@@ -22,12 +22,12 @@ class Base(TorrentMagnetProvider, RSS):
http_time_between_calls = 0
def _search(self, media, quality, results):
def _searchOnTitle(self, title, media, quality, results):
search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search']
# Create search parameters
search_params = self.buildUrl(media)
search_params = self.buildUrl(title, media, quality)
smin = quality.get('size_min')
smax = quality.get('size_max')

View File

@@ -2,28 +2,25 @@ import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentMagnetProvider):
class Base(TorrentProvider):
urls = {
'test': '%s/api',
'search': '%s/api/list.json?keywords=%s&quality=%s',
'detail': '%s/api/movie.json?id=%s'
'test': '%s/api/v2',
'search': '%s/api/v2/list_movies.json?limit=50&query_term=%s'
}
http_time_between_calls = 1 # seconds
proxy_list = [
'http://yify.unlocktorrent.com',
'http://yify-torrents.com.come.in',
'http://yts.re',
'http://yts.im'
'http://yify-torrents.im',
'https://yts.re',
'https://yts.wf',
'https://yts.im',
]
def search(self, movie, quality):
@@ -39,28 +36,31 @@ class Base(TorrentMagnetProvider):
if not domain:
return
search_url = self.urls['search'] % (domain, getIdentifier(movie), quality['identifier'])
search_url = self.urls['search'] % (domain, getIdentifier(movie))
data = self.getJsonData(search_url)
data = data.get('data')
if data and data.get('MovieList'):
if isinstance(data, dict) and data.get('movies'):
try:
for result in data.get('MovieList'):
for result in data.get('movies'):
if result['Quality'] and result['Quality'] not in result['MovieTitle']:
title = result['MovieTitle'] + ' BrRip ' + result['Quality']
else:
title = result['MovieTitle'] + ' BrRip'
for release in result.get('torrents', []):
results.append({
'id': result['MovieID'],
'name': title,
'url': result['TorrentMagnetUrl'],
'detail_url': self.urls['detail'] % (domain, result['MovieID']),
'size': self.parseSize(result['Size']),
'seeders': tryInt(result['TorrentSeeds']),
'leechers': tryInt(result['TorrentPeers']),
})
if release['quality'] and release['quality'] not in result['title_long']:
title = result['title_long'] + ' BRRip ' + release['quality']
else:
title = result['title_long'] + ' BRRip'
results.append({
'id': release['hash'],
'name': title,
'url': release['url'],
'detail_url': result['url'],
'size': self.parseSize(release['size']),
'seeders': tryInt(release['seeds']),
'leechers': tryInt(release['peers']),
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

View File

@@ -65,7 +65,7 @@ class MovieBase(MovieTypeBase):
return False
elif not params.get('info'):
try:
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), single = True)
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True)
if not is_movie:
msg = 'Can\'t add movie, seems to be a TV show.'
log.error(msg)

View File

@@ -696,7 +696,7 @@ MA.Readd = new Class({
if(movie_done || snatched && snatched > 0)
self.el = new Element('a.readd', {
'title': 'Readd the movie and mark all previous snatched/downloaded as ignored',
'title': 'Re-add the movie and mark all previous snatched/downloaded as ignored',
'events': {
'click': self.doReadd.bind(self)
}

View File

@@ -264,3 +264,11 @@
height: 40px;
}
@media all and (max-width: 480px) {
.toggle_menu h2 {
font-size: 16px;
text-align: center;
height: 30px;
}
}

View File

@@ -44,11 +44,12 @@ var Charts = new Class({
if( Cookie.read('suggestions_charts_menu_selected') === 'charts'){
self.show();
self.fireEvent.delay(0, self, 'created');
}
else
self.el.hide();
self.fireEvent.delay(0, self, 'created');
},
fill: function(json){

View File

@@ -0,0 +1,89 @@
import re
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'CrowdAI'
class CrowdAI(Automation, RSS):
interval = 1800
def getIMDBids(self):
movies = []
urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))
for url in urls:
if not urls[url]:
continue
rss_movies = self.getRSSData(url)
for movie in rss_movies:
description = self.getTextElement(movie, 'description')
grabs = 0
for item in movie:
if item.attrib.get('name') == 'grabs':
grabs = item.attrib.get('value')
break
if int(grabs) > tryInt(self.conf('number_grabs')):
title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
year = re.match(r'.*Year: (\d{4}).*', description).group(1)
imdb = self.search(title, year)
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies
config = [{
'name': 'crowdai',
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'crowdai_automation',
'label': 'CrowdAI',
'description': 'Imports from any newznab powered NZB providers RSS feed depending on the number of grabs per movie. Go to your newznab site and find the RSS section. Then copy the copy paste the link under "Movies > x264 feed" here.',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_urls_use',
'label': 'Use',
'default': '1',
},
{
'name': 'automation_urls',
'label': 'url',
'type': 'combined',
'combine': ['automation_urls_use', 'automation_urls'],
'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100',
},
{
'name': 'number_grabs',
'default': '500',
'label': 'Grab threshold',
'description': 'Number of grabs required',
},
],
},
],
}]

View File

@@ -48,11 +48,12 @@ class Letterboxd(Automation):
soup = BeautifulSoup(self.getHTMLData(self.url % username))
for movie in soup.find_all('a', attrs = {'class': 'frame'}):
match = removeEmpty(self.pattern.split(movie['title']))
for movie in soup.find_all('li', attrs = {'class': 'poster-container'}):
img = movie.find('img', movie)
title = img.get('alt')
movies.append({
'title': match[0],
'year': match[1]
'title': title
})
return movies

View File

@@ -39,15 +39,14 @@ class Rottentomatoes(Automation, RSS):
if result:
log.info2('Something smells...')
rating = tryInt(self.getTextElement(movie, rating_tag))
name = result.group(0)
print rating, tryInt(self.conf('tomatometer_percent'))
if rating < tryInt(self.conf('tomatometer_percent')):
log.info2('%s seems to be rotten...', name)
else:
log.info2('Found %s fresh enough movies, enqueuing: %s', (rating, name))
log.info2('Found %s with fresh rating %s', (name, rating))
year = datetime.datetime.now().strftime("%Y")
imdb = self.search(name, year)

View File

@@ -69,12 +69,15 @@ class CouchPotatoApi(MovieProvider):
name_enc = base64.b64encode(ss(name))
return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders())
def isMovie(self, identifier = None):
def isMovie(self, identifier = None, adding = False):
if not identifier:
return
data = self.getJsonData(self.urls['is_movie'] % identifier, headers = self.getRequestHeaders())
url = self.urls['is_movie'] % identifier
url += '?adding=1' if adding else ''
data = self.getJsonData(url, headers = self.getRequestHeaders())
if data:
return data.get('is_movie', True)

View File

@@ -4,6 +4,7 @@ from couchpotato import tryInt
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
from requests import HTTPError
log = CPLog(__name__)
@@ -32,12 +33,14 @@ class FanartTV(MovieProvider):
try:
url = self.urls['api'] % identifier
fanart_data = self.getJsonData(url)
fanart_data = self.getJsonData(url, show_error = False)
if fanart_data:
log.debug('Found images for %s', fanart_data.get('name'))
images = self._parseMovie(fanart_data)
except HTTPError as e:
log.debug('Failed getting extra art for %s: %s',
(identifier, e))
except:
log.error('Failed getting extra art for %s: %s',
(identifier, traceback.format_exc()))

View File

@@ -2,6 +2,7 @@ import json
import re
import traceback
from couchpotato import Env
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString
@@ -17,8 +18,8 @@ autoload = 'OMDBAPI'
class OMDBAPI(MovieProvider):
urls = {
'search': 'http://www.omdbapi.com/?%s',
'info': 'http://www.omdbapi.com/?i=%s',
'search': 'http://www.omdbapi.com/?type=movie&%s',
'info': 'http://www.omdbapi.com/?type=movie&i=%s',
}
http_time_between_calls = 0
@@ -38,7 +39,8 @@ class OMDBAPI(MovieProvider):
}
cache_key = 'omdbapi.cache.%s' % q
cached = self.getCache(cache_key, self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}), timeout = 3)
url = self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')})
cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
if cached:
result = self.parseMovie(cached)
@@ -56,7 +58,7 @@ class OMDBAPI(MovieProvider):
return {}
cache_key = 'omdbapi.cache.%s' % identifier
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3)
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
if cached:
result = self.parseMovie(cached)

View File

@@ -1,11 +1,10 @@
import traceback
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, ss, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
import tmdb3
log = CPLog(__name__)
@@ -13,54 +12,66 @@ autoload = 'TheMovieDb'
class TheMovieDb(MovieProvider):
MAX_EXTRATHUMBS = 4
http_time_between_calls = .35
configuration = {
'images': {
'secure_base_url': 'https://image.tmdb.org/t/p/',
},
}
def __init__(self):
addEvent('info.search', self.search, priority = 3)
addEvent('movie.search', self.search, priority = 3)
addEvent('movie.info', self.getInfo, priority = 3)
addEvent('movie.info_by_tmdb', self.getInfo)
addEvent('app.load', self.config)
# Configure TMDB settings
tmdb3.set_key(self.conf('api_key'))
tmdb3.set_cache('null')
def config(self):
configuration = self.request('configuration')
if configuration:
self.configuration = configuration
def search(self, q, limit = 12):
def search(self, q, limit = 3):
""" Find movie by name """
if self.isDisabled():
return False
search_string = simplifyString(q)
cache_key = 'tmdb.cache.%s.%s' % (search_string, limit)
results = self.getCache(cache_key)
log.debug('Searching for movie: %s', q)
if not results:
log.debug('Searching for movie: %s', q)
raw = None
try:
name_year = fireEvent('scanner.name_year', q, single = True)
raw = self.request('search/movie', {
'query': name_year.get('name', q),
'year': name_year.get('year'),
'search_type': 'ngram' if limit > 1 else 'phrase'
}, return_key = 'results')
except:
log.error('Failed searching TMDB for "%s": %s', (q, traceback.format_exc()))
raw = None
results = []
if raw:
try:
raw = tmdb3.searchMovie(search_string)
except:
log.error('Failed searching TMDB for "%s": %s', (search_string, traceback.format_exc()))
nr = 0
results = []
if raw:
try:
nr = 0
for movie in raw:
parsed_movie = self.parseMovie(movie, extended = False)
if parsed_movie:
results.append(parsed_movie)
for movie in raw:
results.append(self.parseMovie(movie, extended = False))
nr += 1
if nr == limit:
break
nr += 1
if nr == limit:
break
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
self.setCache(cache_key, results)
return results
except SyntaxError as e:
log.error('Failed to parse XML response: %s', e)
return False
return results
except SyntaxError as e:
log.error('Failed to parse XML response: %s', e)
return False
return results
@@ -69,101 +80,91 @@ class TheMovieDb(MovieProvider):
if not identifier:
return {}
cache_key = 'tmdb.cache.%s%s' % (identifier, '.ex' if extended else '')
result = self.getCache(cache_key)
result = self.parseMovie({
'id': identifier
}, extended = extended)
if not result:
try:
log.debug('Getting info: %s', cache_key)
# noinspection PyArgumentList
movie = tmdb3.Movie(identifier)
try: exists = movie.title is not None
except: exists = False
if exists:
result = self.parseMovie(movie, extended = extended)
self.setCache(cache_key, result)
else:
result = {}
except:
log.error('Failed getting info for %s: %s', (identifier, traceback.format_exc()))
return result
return result or {}
def parseMovie(self, movie, extended = True):
cache_key = 'tmdb.cache.%s%s' % (movie.id, '.ex' if extended else '')
movie_data = self.getCache(cache_key)
# Do request, append other items
movie = self.request('movie/%s' % movie.get('id'), {
'append_to_response': 'alternative_titles' + (',images,casts' if extended else '')
})
if not movie:
return
if not movie_data:
# Images
poster = self.getImage(movie, type = 'poster', size = 'w154')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original') if extended else []
# Images
poster = self.getImage(movie, type = 'poster', size = 'w154')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original', n = self.MAX_EXTRATHUMBS, skipfirst = True)
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {},
'extra_thumbs': extra_thumbs
}
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {},
'extra_thumbs': extra_thumbs
}
# Genres
try:
genres = [genre.get('name') for genre in movie.get('genres', [])]
except:
genres = []
# Genres
try:
genres = [genre.name for genre in movie.genres]
except:
genres = []
# 1900 is the same as None
year = str(movie.get('release_date') or '')[:4]
if not movie.get('release_date') or year == '1900' or year.lower() == 'none':
year = None
# 1900 is the same as None
year = str(movie.releasedate or '')[:4]
if not movie.releasedate or year == '1900' or year.lower() == 'none':
year = None
# Gather actors data
actors = {}
if extended:
# Gather actors data
actors = {}
if extended:
for cast_item in movie.cast:
try:
actors[toUnicode(cast_item.name)] = toUnicode(cast_item.character)
images['actors'][toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original')
except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
# Full data
cast = movie.get('casts', {}).get('cast', [])
movie_data = {
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.id,
'titles': [toUnicode(movie.title)],
'original_title': movie.originaltitle,
'images': images,
'imdb': movie.imdb,
'runtime': movie.runtime,
'released': str(movie.releasedate),
'year': tryInt(year, None),
'plot': movie.overview,
'genres': genres,
'collection': getattr(movie.collection, 'name', None),
'actor_roles': actors
}
for cast_item in cast:
try:
actors[toUnicode(cast_item.get('name'))] = toUnicode(cast_item.get('character'))
images['actors'][toUnicode(cast_item.get('name'))] = self.getImage(cast_item, type = 'profile', size = 'original')
except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
movie_data = dict((k, v) for k, v in movie_data.items() if v)
movie_data = {
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.get('id'),
'titles': [toUnicode(movie.get('title'))],
'original_title': movie.get('original_title'),
'images': images,
'imdb': movie.get('imdb_id'),
'runtime': movie.get('runtime'),
'released': str(movie.get('release_date')),
'year': tryInt(year, None),
'plot': movie.get('overview'),
'genres': genres,
'collection': getattr(movie.get('belongs_to_collection'), 'name', None),
'actor_roles': actors
}
# Add alternative names
if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']:
movie_data['titles'].append(movie_data['original_title'])
movie_data = dict((k, v) for k, v in movie_data.items() if v)
if extended:
for alt in movie.alternate_titles:
alt_name = alt.title
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
movie_data['titles'].append(alt_name)
# Add alternative names
if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']:
movie_data['titles'].append(movie_data['original_title'])
# Cache movie parsed
self.setCache(cache_key, movie_data)
# Add alternative titles
alternate_titles = movie.get('alternative_titles', {}).get('titles', [])
for alt in alternate_titles:
alt_name = alt.get('title')
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
movie_data['titles'].append(alt_name)
return movie_data
@@ -171,36 +172,41 @@ class TheMovieDb(MovieProvider):
image_url = ''
try:
image_url = getattr(movie, type).geturl(size = size)
path = movie.get('%s_path' % type)
image_url = '%s%s%s' % (self.configuration['images']['secure_base_url'], size, path)
except:
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_url
def getMultImages(self, movie, type = 'backdrops', size = 'original', n = -1, skipfirst = False):
"""
If n < 0, return all images. Otherwise return n images.
If n > len(getattr(movie, type)), then return all images.
If skipfirst is True, then it will skip getattr(movie, type)[0]. This
is because backdrops[0] is typically backdrop.
"""
def getMultImages(self, movie, type = 'backdrops', size = 'original'):
image_urls = []
try:
images = getattr(movie, type)
if n < 0 or n > len(images):
num_images = len(images)
else:
num_images = n
for i in range(int(skipfirst), num_images + int(skipfirst)):
image_urls.append(images[i].geturl(size = size))
for image in movie.get('images', {}).get(type, [])[1:5]:
image_urls.append(self.getImage(image, 'file', size))
except:
log.debug('Failed getting %i %s.%s for "%s"', (n, type, size, ss(str(movie))))
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_urls
def request(self, call = '', params = {}, return_key = None):
params = dict((k, v) for k, v in params.items() if v)
params = tryUrlencode(params)
try:
url = 'http://api.themoviedb.org/3/%s?api_key=%s%s' % (call, self.conf('api_key'), '&%s' % params if params else '')
data = self.getJsonData(url, show_error = False)
except:
log.debug('Movie not found: %s, %s', (call, params))
data = None
if data and return_key and return_key in data:
data = data.get(return_key)
return data
def isDisabled(self):
if self.conf('api_key') == '':
log.error('No API key provided.')

View File

@@ -11,7 +11,7 @@ autoload = 'Bitsoup'
class Bitsoup(MovieProvider, Base):
cat_ids = [
([17], ['3d']),
([41], ['720p', '1080p']),
([80], ['720p', '1080p']),
([20], ['dvdr']),
([19], ['brrip', 'dvdrip']),
]

View File

@@ -0,0 +1,11 @@
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.hdaccess import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
log = CPLog(__name__)
autoload = 'HDAccess'
class HDAccess(MovieProvider, Base):
pass

View File

@@ -13,7 +13,7 @@ class IPTorrents(MovieProvider, Base):
([87], ['3d']),
([48], ['720p', '1080p', 'bd50']),
([72], ['cam', 'ts', 'tc', 'r5', 'scr']),
([7,48], ['dvdrip', 'brrip']),
([7, 48, 20], ['dvdrip', 'brrip']),
([6], ['dvdr']),
]

View File

@@ -16,12 +16,12 @@ class TorrentLeech(MovieProvider, Base):
([9], ['ts', 'tc']),
([10], ['r5', 'scr']),
([11], ['dvdrip']),
([14], ['brrip']),
([13, 14], ['brrip']),
([12], ['dvdr']),
]
def buildUrl(self, title, media, quality):
return (
tryUrlencode(title.replace(':', '')),
self.getCatId(quality)[0]
','.join([str(x) for x in self.getCatId(quality)])
)

View File

@@ -22,8 +22,8 @@ class TorrentShack(MovieProvider, Base):
# Movies-SD Pack - 983 (not included)
cat_ids = [
([970], ['bd50']),
([300], ['720p', '1080p']),
([970, 320], ['bd50']),
([300, 320], ['720p', '1080p']),
([350], ['dvdr']),
([400], ['brrip', 'dvdrip']),
]

View File

@@ -1,6 +1,5 @@
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.torrent.torrentz import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
@@ -11,5 +10,5 @@ autoload = 'Torrentz'
class Torrentz(MovieProvider, Base):
def buildUrl(self, media):
return tryUrlencode('"%s"' % fireEvent('library.query', media, single = True))
def buildUrl(self, title, media, quality):
return tryUrlencode('"%s %s"' % (title, media['info']['year']))

View File

@@ -12,7 +12,7 @@ autoload = 'RottenTomatoes'
class RottenTomatoes(UserscriptBase):
includes = ['*://www.rottentomatoes.com/m/*/']
includes = ['*://www.rottentomatoes.com/m/*']
excludes = ['*://www.rottentomatoes.com/m/*/*/']
version = 2

View File

@@ -166,7 +166,8 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
'quality': q_identifier,
'finish': profile['finish'][index],
'wait_for': tryInt(profile['wait_for'][index]),
'3d': profile['3d'][index] if profile.get('3d') else False
'3d': profile['3d'][index] if profile.get('3d') else False,
'minimum_score': profile.get('minimum_score', 1),
}
could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year'])
@@ -202,13 +203,6 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
quality['custom'] = quality_custom
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
results_count = len(results)
total_result_count += results_count
if results_count == 0:
log.debug('Nothing found for %s in %s', (default_title, quality['label']))
# Keep track of releases found outside ETA window
outside_eta_results += results_count if could_not_be_released else 0
# Check if movie isn't deleted while searching
if not fireEvent('media.get', movie.get('_id'), single = True):
@@ -216,11 +210,17 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
# Add them to this movie releases list
found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
results_count = len(found_releases)
total_result_count += results_count
if results_count == 0:
log.debug('Nothing found for %s in %s', (default_title, quality['label']))
# Keep track of releases found outside ETA window
outside_eta_results += results_count if could_not_be_released else 0
# Don't trigger download, but notify user of available releases
if could_not_be_released:
if results_count > 0:
log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title))
if could_not_be_released and results_count > 0:
log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title))
# Try find a valid result and download it
if (force_download or not could_not_be_released or always_search) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True):
@@ -394,8 +394,9 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
log.info('Trying next release for: %s', getTitle(media))
self.single(media, manual = manual, force_download = force_download)
return True
return True
return False
except:
log.error('Failed searching for next release: %s', traceback.format_exc())
return False

View File

@@ -51,8 +51,8 @@ var SuggestList = new Class({
self.show();
else
self.hide();
self.fireEvent('created');
self.fireEvent.delay(0, self, 'created');
},

View File

@@ -14,6 +14,7 @@ from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from .index import NotificationIndex, NotificationUnreadIndex
from couchpotato.environment import Env
from tornado.ioloop import IOLoop
log = CPLog(__name__)
@@ -110,11 +111,11 @@ class CoreNotifier(Notification):
if limit_offset:
splt = splitString(limit_offset)
limit = splt[0]
offset = 0 if len(splt) is 1 else splt[1]
results = db.get_many('notification', limit = limit, offset = offset, with_doc = True)
limit = tryInt(splt[0])
offset = tryInt(0 if len(splt) is 1 else splt[1])
results = db.all('notification', limit = limit, offset = offset, with_doc = True)
else:
results = db.get_many('notification', limit = 200, with_doc = True)
results = db.all('notification', limit = 200, with_doc = True)
notifications = []
for n in results:
@@ -148,16 +149,15 @@ class CoreNotifier(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
n = {
'_t': 'notification',
'time': int(time.time()),
}
try:
db = get_db()
data['notification_type'] = listener if listener else 'unknown'
n = {
'_t': 'notification',
'time': int(time.time()),
'message': toUnicode(message)
}
n['message'] = toUnicode(message)
if data.get('sticky'):
n['sticky'] = True
@@ -170,7 +170,7 @@ class CoreNotifier(Notification):
return True
except:
log.error('Failed notify: %s', traceback.format_exc())
log.error('Failed notify "%s": %s', (n, traceback.format_exc()))
def frontend(self, type = 'notification', data = None, message = None):
if not data: data = {}
@@ -190,7 +190,7 @@ class CoreNotifier(Notification):
while len(self.listeners) > 0 and not self.shuttingDown():
try:
listener, last_id = self.listeners.pop()
listener({
IOLoop.current().add_callback(listener, {
'success': True,
'result': [notification],
})

View File

@@ -1,68 +0,0 @@
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from pynmwp import PyNMWP
import six
log = CPLog(__name__)
autoload = 'NotifyMyWP'
class NotifyMyWP(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
keys = splitString(self.conf('api_key'))
p = PyNMWP(keys, self.conf('dev_key'))
response = p.push(application = self.default_title, event = message, description = message, priority = self.conf('priority'), batch_mode = len(keys) > 1)
for key in keys:
if not response[key]['Code'] == six.u('200'):
log.error('Could not send notification to NotifyMyWindowsPhone (%s). %s', (key, response[key]['message']))
return False
return response
config = [{
'name': 'notifymywp',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'notifymywp',
'label': 'Windows Phone',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'api_key',
'description': 'Multiple keys seperated by a comma. Maximum of 5.'
},
{
'name': 'dev_key',
'advanced': True,
},
{
'name': 'priority',
'default': 0,
'type': 'dropdown',
'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)],
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]

View File

@@ -23,6 +23,26 @@ config = [{
'default': 'localhost',
'description': 'Hostname/IP, default localhost'
},
{
'name': 'username',
'label': 'Username',
'default': '',
'description': 'Required for myPlex'
},
{
'name': 'password',
'label': 'Password',
'default': '',
'type': 'password',
'description': 'Required for myPlex'
},
{
'name': 'auth_token',
'label': 'Auth Token',
'default': '',
'advanced': True,
'description': 'Required for myPlex'
},
{
'name': 'clients',
'default': '',

View File

@@ -35,11 +35,46 @@ class PlexServer(object):
if path.startswith('/'):
path = path[1:]
data = self.plex.urlopen('%s/%s' % (
self.createHost(self.plex.conf('media_server'), port = 32400),
path
))
#Maintain support for older Plex installations without myPlex
if not self.plex.conf('auth_token') and not self.plex.conf('username') and not self.plex.conf('password'):
data = self.plex.urlopen('%s/%s' % (
self.createHost(self.plex.conf('media_server'), port = 32400),
path
))
else:
#Fetch X-Plex-Token if it doesn't exist but a username/password do
if not self.plex.conf('auth_token') and (self.plex.conf('username') and self.plex.conf('password')):
import urllib2, base64
log.info("Fetching a new X-Plex-Token from plex.tv")
username = self.plex.conf('username')
password = self.plex.conf('password')
req = urllib2.Request("https://plex.tv/users/sign_in.xml", data="")
authheader = "Basic %s" % base64.encodestring('%s:%s' % (username, password))[:-1]
req.add_header("Authorization", authheader)
req.add_header("X-Plex-Product", "Couchpotato Notifier")
req.add_header("X-Plex-Client-Identifier", "b3a6b24dcab2224bdb101fc6aa08ea5e2f3147d6")
req.add_header("X-Plex-Version", "1.0")
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log.info("Error fetching token from plex.tv")
try:
auth_tree = etree.parse(response)
token = auth_tree.findall(".//authentication-token")[0].text
self.plex.conf('auth_token', token)
except (ValueError, IndexError) as e:
log.info("Error parsing plex.tv response: " + ex(e))
#Add X-Plex-Token header for myPlex support workaround
data = self.plex.urlopen('%s/%s?X-Plex-Token=%s' % (
self.createHost(self.plex.conf('media_server'), port = 32400),
path,
self.plex.conf('auth_token')
))
if data_type == 'xml':
return etree.fromstring(data)
else:

View File

@@ -0,0 +1,68 @@
import traceback
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Webhook'
class Webhook(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
post_data = {
'message': toUnicode(message)
}
if getIdentifier(data):
post_data.update({
'imdb_id': getIdentifier(data)
})
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
try:
self.urlopen(self.conf('url'), headers = headers, data = post_data, show_error = False)
return True
except:
log.error('Webhook notification failed: %s', traceback.format_exc())
return False
config = [{
'name': 'webhook',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'webhook',
'label': 'Webhook',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'url',
'description': 'The URL to send notification data to when '
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
}
]
}
]
}]

View File

@@ -39,7 +39,7 @@ class Plugin(object):
_locks = {}
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20130519 Firefox/24.0'
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'
http_last_use = {}
http_time_between_calls = 0
http_failed_request = {}
@@ -206,7 +206,7 @@ class Plugin(object):
if self.http_failed_disabled[host] > (time.time() - 900):
log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
if not show_error:
raise Exception('Disabled calls to %s for 15 minutes because so many failed requests')
raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host)
else:
return ''
else:
@@ -279,7 +279,7 @@ class Plugin(object):
wait = (last_use - now) + self.http_time_between_calls
if wait > 0:
log.debug('Waiting for %s, %d seconds', (self.getName(), wait))
log.debug('Waiting for %s, %d seconds', (self.getName(), max(1, wait)))
time.sleep(min(wait, 30))
def beforeCall(self, handler):

View File

@@ -87,6 +87,7 @@ class FileBrowser(Plugin):
try:
dirs = self.getDirectories(path = path, show_hidden = show_hidden)
except:
log.error('Failed getting directory "%s" : %s', (path, traceback.format_exc()))
dirs = []
parent = os.path.dirname(path.rstrip(os.path.sep))

View File

@@ -1,9 +1,9 @@
import codecs
import os
import re
import traceback
from couchpotato.api import addApiView
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@@ -103,8 +103,9 @@ class Logging(Plugin):
if not os.path.isfile(path):
break
f = codecs.open(path, 'r', 'utf-8')
raw_lines = self.toList(f.read())
f = open(path, 'r')
log_content = toUnicode(f.read())
raw_lines = self.toList(log_content)
raw_lines.reverse()
brk = False
@@ -130,7 +131,7 @@ class Logging(Plugin):
def toList(self, log_content = ''):
logs_raw = log_content.split('[0m\n')
logs_raw = toUnicode(log_content).split('[0m\n')
logs = []
for log_line in logs_raw:

View File

@@ -123,7 +123,7 @@ class Manage(Plugin):
fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder)
onFound = self.createAddToLibrary(folder, added_identifiers)
fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, on_found = onFound, single = True)
fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, check_file_date = False, on_found = onFound, single = True)
# Break if CP wants to shut down
if self.shuttingDown():

View File

@@ -86,6 +86,7 @@ class ProfilePlugin(Plugin):
'label': toUnicode(kwargs.get('label')),
'order': tryInt(kwargs.get('order', 999)),
'core': kwargs.get('core', False),
'minimum_score': tryInt(kwargs.get('minimum_score', 1)),
'qualities': [],
'wait_for': [],
'stop_after': [],
@@ -217,6 +218,7 @@ class ProfilePlugin(Plugin):
'label': toUnicode(profile.get('label')),
'order': order,
'qualities': profile.get('qualities'),
'minimum_score': 1,
'finish': [],
'wait_for': [],
'stop_after': [],

View File

@@ -51,6 +51,11 @@
margin: 0 5px !important;
}
.profile .wait_for .minimum_score_input {
width: 40px !important;
text-align: left;
}
.profile .types {
padding: 0;
margin: 0 20px 0 -4px;

View File

@@ -53,12 +53,21 @@ var Profile = new Class({
}),
new Element('span', {'text':'day(s) for a better quality '}),
new Element('span.advanced', {'text':'and keep searching'}),
// "After a checked quality is found and downloaded, continue searching for even better quality releases for the entered number of days."
new Element('input.inlay.xsmall.stop_after_input.advanced', {
'type':'text',
'value': data.stop_after && data.stop_after.length > 0 ? data.stop_after[0] : 0
}),
new Element('span.advanced', {'text':'day(s) for a better (checked) quality.'})
new Element('span.advanced', {'text':'day(s) for a better (checked) quality.'}),
// Minimum score of
new Element('span.advanced', {'html':'<br/>Releases need a minimum score of'}),
new Element('input.advanced.inlay.xsmall.minimum_score_input', {
'size': 4,
'type':'text',
'value': data.minimum_score || 1
})
)
);
@@ -126,6 +135,7 @@ var Profile = new Class({
'label' : self.el.getElement('.quality_label input').get('value'),
'wait_for' : self.el.getElement('.wait_for_input').get('value'),
'stop_after' : self.el.getElement('.stop_after_input').get('value'),
'minimum_score' : self.el.getElement('.minimum_score_input').get('value'),
'types': []
};

View File

@@ -30,10 +30,10 @@ class QualityPlugin(Plugin):
{'identifier': 'dvdr', 'size': (3000, 10000), 'median_size': 4500, 'label': 'DVD-R', 'alternative': ['br2dvd', ('dvd', 'r')], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'median_size': 1500, 'label': 'DVD-Rip', 'width': 720, 'alternative': [('dvd', 'rip')], 'allow': [], 'ext':['avi'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'median_size': 700, 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr', 'webrip', ('web', 'rip')], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': []},
{'identifier': 'r5', 'size': (600, 1000), 'median_size': 700, 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr', '720p'], 'ext':[]},
{'identifier': 'tc', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': ['720p'], 'ext':[]},
{'identifier': 'ts', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': ['720p'], 'ext':[]},
{'identifier': 'cam', 'size': (600, 1000), 'median_size': 700, 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': ['720p'], 'ext':[]}
{'identifier': 'r5', 'size': (600, 1000), 'median_size': 700, 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr', '720p', '1080p'], 'ext':[]},
{'identifier': 'tc', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': ['720p', '1080p'], 'ext':[]},
{'identifier': 'ts', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': ['720p', '1080p'], 'ext':[]},
{'identifier': 'cam', 'size': (600, 1000), 'median_size': 700, 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': ['720p', '1080p'], 'ext':[]}
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
threed_tags = {
@@ -240,7 +240,7 @@ class QualityPlugin(Plugin):
# Add additional size score if only 1 size validated
if len(size_scores) == 1:
self.calcScore(score, size_scores[0], 8)
self.calcScore(score, size_scores[0], 7)
del size_scores
# Return nothing if all scores are <= 0
@@ -278,6 +278,8 @@ class QualityPlugin(Plugin):
'ext': 5,
}
scored_on = []
# Check alt and tags
for tag_type in ['identifier', 'alternative', 'tags', 'label']:
qualities = quality.get(tag_type, [])
@@ -289,10 +291,13 @@ class QualityPlugin(Plugin):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type)
if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words:
if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words and ss(alt.lower()) not in scored_on:
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type)
# Don't score twice on same tag
scored_on.append(ss(alt).lower())
# Check extention
for ext in quality.get('ext', []):
if ext == extension:
@@ -485,6 +490,8 @@ class QualityPlugin(Plugin):
'Movie Name (2015).mp4': {'size': 6500, 'quality': 'brrip'},
'Movie Name.2014.720p Web-Dl Aac2.0 h264-ReleaseGroup': {'size': 3800, 'quality': 'brrip'},
'Movie Name.2014.720p.WEBRip.x264.AC3-ReleaseGroup': {'size': 3000, 'quality': 'scr'},
'Movie.Name.2014.1080p.HDCAM.-.ReleaseGroup': {'size': 5300, 'quality': 'cam'},
'Movie.Name.2014.720p.HDSCR.4PARTS.MP4.AAC.ReleaseGroup': {'size': 2401, 'quality': 'scr'},
}
correct = 0

View File

@@ -187,7 +187,7 @@ class Release(Plugin):
release['files'] = dict((k, [toUnicode(x) for x in v]) for k, v in group['files'].items() if v)
db.update(release)
fireEvent('media.restatus', media['_id'], single = True)
fireEvent('media.restatus', media['_id'], allowed_restatus = ['done'], single = True)
return True
except:
@@ -389,8 +389,8 @@ class Release(Plugin):
log.info('Ignored: %s', rel['name'])
continue
if rel['score'] <= 0:
log.info('Ignored, score "%s" to low: %s', (rel['score'], rel['name']))
if rel['score'] < quality_custom.get('minimum_score'):
log.info('Ignored, score "%s" to low, need at least "%s": %s', (rel['score'], quality_custom.get('minimum_score'), rel['name']))
continue
if rel['size'] <= 50:
@@ -441,7 +441,6 @@ class Release(Plugin):
for rel in search_results:
rel_identifier = md5(rel['url'])
found_releases.append(rel_identifier)
release = {
'_t': 'release',
@@ -482,6 +481,9 @@ class Release(Plugin):
# Update release in search_results
rel['status'] = rls.get('status')
if rel['status'] == 'available':
found_releases.append(rel_identifier)
return found_releases
except:
log.error('Failed: %s', traceback.format_exc())

View File

@@ -35,6 +35,7 @@ class Renamer(Plugin):
'desc': 'For the renamer to check for new files to rename in a folder',
'params': {
'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'},
'to_folder': {'desc': 'Optional: The folder to move releases to. Leave empty for default folder.'},
'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'},
'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'},
@@ -44,6 +45,13 @@ class Renamer(Plugin):
},
})
addApiView('renamer.progress', self.getProgress, docs = {
'desc': 'Get the progress of current renamer scan',
'return': {'type': 'object', 'example': """{
'progress': False || True,
}"""},
})
addEvent('renamer.scan', self.scan)
addEvent('renamer.check_snatched', self.checkSnatched)
@@ -67,11 +75,17 @@ class Renamer(Plugin):
return True
def getProgress(self, **kwargs):
return {
'progress': self.renaming_started
}
def scanView(self, **kwargs):
async = tryInt(kwargs.get('async', 0))
base_folder = kwargs.get('base_folder')
media_folder = sp(kwargs.get('media_folder'))
to_folder = kwargs.get('to_folder')
# Backwards compatibility, to be removed after a few versions :)
if not media_folder:
@@ -95,13 +109,13 @@ class Renamer(Plugin):
})
fire_handle = fireEvent if not async else fireEventAsync
fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download)
fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download, to_folder = to_folder)
return {
'success': True
}
def scan(self, base_folder = None, release_download = None):
def scan(self, base_folder = None, release_download = None, to_folder = None):
if not release_download: release_download = {}
if self.isDisabled():
@@ -115,7 +129,9 @@ class Renamer(Plugin):
base_folder = sp(self.conf('from'))
from_folder = sp(self.conf('from'))
to_folder = sp(self.conf('to'))
if not to_folder:
to_folder = sp(self.conf('to'))
# Get media folder to process
media_folder = sp(release_download.get('folder'))
@@ -220,10 +236,14 @@ class Renamer(Plugin):
nfo_name = self.conf('nfo_name')
separator = self.conf('separator')
if len(file_name) == 0:
log.error('Please fill in the filename option under renamer settings. Forcing it on <original>.<ext> to keep the same name as source file.')
file_name = '<original>.<ext>'
cd_keys = ['<cd>','<cd_nr>', '<original>']
if not any(x in folder_name for x in cd_keys) and not any(x in file_name for x in cd_keys):
log.error('Missing `cd` or `cd_nr` in the renamer. This will cause multi-file releases of being renamed to the same file.'
'Force adding it')
log.error('Missing `cd` or `cd_nr` in the renamer. This will cause multi-file releases of being renamed to the same file. '
'Please add it in the renamer settings. Force adding it for now.')
file_name = '%s %s' % ('<cd>', file_name)
# Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader.
@@ -791,7 +811,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
dest = sp(dest)
try:
if os.path.exists(dest):
if os.path.exists(dest) and os.path.isfile(dest):
raise Exception('Destination "%s" already exists' % dest)
move_type = self.conf('file_action')
@@ -865,7 +885,9 @@ Remove it if you want it to be renamed (again, or at least let it try again)
#If information is not available, we don't want the tag in the filename
replaced = replaced.replace('<' + x + '>', '')
replaced = self.replaceDoubles(replaced.lstrip('. '))
if self.conf('replace_doubles'):
replaced = self.replaceDoubles(replaced.lstrip('. '))
for x, r in replacements.items():
if x in ['thename', 'namethe']:
replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r))
@@ -1322,6 +1344,14 @@ config = [{
'type': 'choice',
'options': rename_options
},
{
'advanced': True,
'name': 'replace_doubles',
'type': 'bool',
'label': 'Clean Name',
'description': ('Attempt to clean up double separaters due to missing data for fields.','Sometimes this eliminates wanted white space (see <a href="https://github.com/RuudBurger/CouchPotatoServer/issues/2782">#2782</a>).'),
'default': True
},
{
'name': 'unrar',
'type': 'bool',

View File

@@ -131,7 +131,7 @@ class Scanner(Plugin):
addEvent('scanner.name_year', self.getReleaseNameYear)
addEvent('scanner.partnumber', self.getPartNumber)
def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, on_found = None):
def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, check_file_date = True, on_found = None):
folder = sp(folder)
@@ -145,7 +145,6 @@ class Scanner(Plugin):
# Scan all files of the folder if no files are set
if not files:
check_file_date = True
try:
files = []
for root, dirs, walk_files in os.walk(folder, followlinks=True):

View File

@@ -16,7 +16,7 @@ autoload = 'Subtitle'
class Subtitle(Plugin):
services = ['opensubtitles', 'thesubdb', 'subswiki', 'podnapisi']
services = ['opensubtitles', 'thesubdb', 'subswiki', 'subscenter']
def __init__(self):
addEvent('renamer.before', self.searchSingle)

View File

@@ -157,7 +157,15 @@ class Settings(object):
values[section] = {}
for option in self.p.items(section):
(option_name, option_value) = option
is_password = False
try: is_password = self.types[section][option_name] == 'password'
except: pass
values[section][option_name] = self.get(option_name, section)
if is_password and values[section][option_name]:
values[section][option_name] = len(values[section][option_name]) * '*'
return values
def save(self):

View File

@@ -14,7 +14,6 @@ class Env(object):
''' Environment variables '''
_app = None
_encoding = 'UTF-8'
_fs_encoding = 'UTF-8'
_debug = False
_dev = False
_settings = Settings()

View File

@@ -86,7 +86,6 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
encoding = 'UTF-8'
Env.set('encoding', encoding)
Env.set('fs_encoding', sys.getfilesystemencoding())
# Do db stuff
db_path = sp(os.path.join(data_dir, 'database'))
@@ -117,7 +116,8 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
# Delete non zip files
if len(ints) != 1:
os.remove(os.path.join(root, backup_file))
try: os.remove(os.path.join(root, backup_file))
except: pass
else:
existing_backups.append((int(ints[0]), backup_file))
else:
@@ -205,7 +205,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
logger.addHandler(hdlr)
# To file
hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = 'utf-8')
hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding'))
hdlr2.setFormatter(formatter)
logger.addHandler(hdlr2)
@@ -244,11 +244,13 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
# Basic config
host = Env.setting('host', default = '0.0.0.0')
# app.debug = development
host6 = Env.setting('host6', default = '::')
config = {
'use_reloader': reloader,
'port': tryInt(Env.setting('port', default = 5050)),
'host': host if host and len(host) > 0 else '0.0.0.0',
'host6': host6 if host6 and len(host6) > 0 else '::',
'ssl_cert': Env.setting('ssl_cert', default = None),
'ssl_key': Env.setting('ssl_key', default = None),
}
@@ -331,6 +333,10 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
while try_restart:
try:
server.listen(config['port'], config['host'])
try: server.listen(config['port'], config['host6'])
except: log.info2('Tried to bind to IPV6 but failed')
loop.start()
server.close_all_connections()
server.stop()

View File

@@ -54,16 +54,22 @@
},
pushState: function(e){
if((!e.meta && Browser.platform.mac) || (!e.control && !Browser.platform.mac)){
var self = this;
if((!e.meta && self.isMac()) || (!e.control && !self.isMac())){
(e).preventDefault();
var url = e.target.get('href');
if(History.getPath() != url)
// Middle click
if(e.event && e.event.button == 1)
window.open(url);
else if(History.getPath() != url)
History.push(url);
}
},
isMac: function(){
return Browser.platform.mac
return Browser.platform == 'mac'
},
createLayout: function(){
@@ -325,11 +331,12 @@
},
openDerefered: function(e, el){
var self = this;
(e).stop();
var url = 'http://www.dereferer.org/?' + el.get('href');
if(el.get('target') == '_blank' || (e.meta && Browser.platform.mac) || (e.control && !Browser.platform.mac))
if(el.get('target') == '_blank' || (e.meta && self.isMac()) || (e.control && !self.isMac()))
window.open(url);
else
window.location = url;

View File

@@ -117,7 +117,7 @@ var AboutSettingTab = new Class({
var self = this;
var date = new Date(json.version.date * 1000);
self.version_text.set('text', json.version.hash + (json.version.date ? ' ('+date.toLocaleString()+')' : ''));
self.updater_type.set('text', json.version.type + ', ' + json.branch);
self.updater_type.set('text', (json.version.type != json.branch) ? (json.version.type + ', ' + json.branch) : json.branch);
}
});

View File

@@ -886,6 +886,9 @@ Option.Directory = new Class({
'text': 'Selected folder is empty'
}).inject(self.dir_list)
//fix for webkit type browsers to refresh the dom for the file browser
//http://stackoverflow.com/questions/3485365/how-can-i-force-webkit-to-redraw-repaint-to-propagate-style-changes
self.dir_list.setStyle('webkitTransform', 'scale(1)');
self.caretAtEnd();
},

View File

@@ -6,6 +6,7 @@
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="mobile-web-app-capable" content="yes">
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="robots" content="noindex, nofollow" />
{% for url in fireEvent('clientscript.get_styles', as_html = True, location = 'front', single = True) %}
<link rel="stylesheet" href="{{ Env.get('web_base') }}{{ url }}" type="text/css">{% end %}

BIN
icon.icns Normal file

Binary file not shown.

BIN
icon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 345 KiB

BIN
icon_mac.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 435 B

BIN
icon_windows.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 367 B

View File

@@ -33,8 +33,8 @@ DESC=CouchPotato
##
## CP_USER= #$RUN_AS, username to run couchpotato under, the default is couchpotato
## CP_HOME= #$APP_PATH, the location of couchpotato.py, the default is /opt/couchpotato
## CP_DATA= #$DATA_DIR, the location of couchpotato.db, cache, logs, the default is /var/couchpotato
## CP_PIDFILE= #$PID_FILE, the location of couchpotato.pid, the default is /var/run/couchpotato.pid
## CP_DATA= #$DATA_DIR, the location of couchpotato.db, cache, logs, the default is /var/opt/couchpotato
## CP_PIDFILE= #$PID_FILE, the location of couchpotato.pid, the default is /var/run/couchpotato/couchpotato.pid
## PYTHON_BIN= #$DAEMON, the location of the python binary, the default is /usr/bin/python
## CP_OPTS= #$EXTRA_DAEMON_OPTS, extra cli option for couchpotato, i.e. " --config_file=/home/couchpotato/couchpotato.ini"
## SSD_OPTS= #$EXTRA_SSD_OPTS, extra start-stop-daemon option like " --group=users"
@@ -51,10 +51,10 @@ RUN_AS=${CP_USER-couchpotato}
APP_PATH=${CP_HOME-/opt/couchpotato/}
# Data directory where couchpotato.db, cache and logs are stored
DATA_DIR=${CP_DATA-/var/couchpotato}
DATA_DIR=${CP_DATA-/var/opt/couchpotato}
# Path to store PID file
PID_FILE=${CP_PIDFILE-/var/run/couchpotato.pid}
PID_FILE=${CP_PIDFILE-/var/run/couchpotato/couchpotato.pid}
# path to python bin
DAEMON=${PYTHON_BIN-/usr/bin/python}
@@ -95,6 +95,8 @@ fi
case "$1" in
start)
touch $PID_FILE
chown $RUN_AS $PID_FILE
echo "Starting $DESC"
start-stop-daemon -d $APP_PATH -c $RUN_AS $EXTRA_SSD_OPTS --start --pidfile $PID_FILE --exec $DAEMON -- $DAEMON_OPTS
;;

52
installer.iss Normal file
View File

@@ -0,0 +1,52 @@
#define MyAppName "CouchPotato"
#define MyAppVer "2.6.3"
#define MyAppBit "win32"
//#define MyAppBit "win-amd64"
[Setup]
AppName={#MyAppName}
AppVersion=2
AppVerName={#MyAppName}
DefaultDirName={userappdata}\{#MyAppName}\application
DisableProgramGroupPage=yes
DisableDirPage=yes
UninstallDisplayIcon=./icon.ico
SetupIconFile=./icon.ico
OutputDir=./dist
OutputBaseFilename={#MyAppName}-{#MyAppVer}.{#MyAppBit}.installer
AppPublisher=Your Mom
AppPublisherURL=http://couchpota.to
PrivilegesRequired=none
WizardSmallImageFile=installer_icon.bmp
WizardImageFile=installer_banner.bmp
UsePreviousAppDir=no
[Messages]
WelcomeLabel1=Installing [name]!
WelcomeLabel2=This wizard will install [name] to your AppData folder. It does this so it can use the build in updater without needing admin rights.
[CustomMessages]
LaunchProgram=Launch {#MyAppName} right now.
[Files]
Source: "./dist/{#MyAppName}-{#MyAppVer}.{#MyAppBit}/*"; Flags: recursesubdirs; DestDir: "{app}"
[Icons]
Name: "{commonprograms}\{#MyAppName}"; Filename: "{app}\{#MyAppName}.exe"
Name: "{userstartup}\{#MyAppName}"; Filename: "{app}\{#MyAppName}.exe"; Tasks: startup
[Tasks]
Name: "startup"; Description: "Run {#MyAppName} at startup"; Flags: unchecked
[Run]
Filename: {app}\{#MyAppName}.exe; Description: {cm:LaunchProgram,{#MyAppName}}; Flags: nowait postinstall skipifsilent
[UninstallDelete]
Type: filesandordirs; Name: "{app}\appdata"
Type: filesandordirs; Name: "{app}\Microsoft.VC90.CRT"
Type: filesandordirs; Name: "{app}\updates"
Type: filesandordirs; Name: "{app}\CouchPotato*"
Type: filesandordirs; Name: "{app}\python27.dll"
Type: filesandordirs; Name: "{app}\unins000.dat"
Type: filesandordirs; Name: "{app}\unins000.exe"

BIN
installer_banner.bmp Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 151 KiB

BIN
installer_icon.bmp Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.6 KiB

View File

@@ -15,7 +15,7 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.2.1"
__version__ = "2.3.0"
from sys import version_info

View File

@@ -12,34 +12,68 @@ Example::
If no paths are provided, it takes its input from stdin.
"""
from io import open
from sys import argv, stdin
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(file, name='stdin'):
"""Return a string describing the probable encoding of a file."""
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in file:
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (name,
result['encoding'],
result['confidence'])
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '%s: no result' % name
return '{0}: no result'.format(name)
def main():
if len(argv) <= 1:
print(description_of(stdin))
else:
for path in argv[1:]:
with open(path, 'rb') as f:
print(description_of(f, path))
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':

View File

@@ -177,6 +177,12 @@ class JapaneseContextAnalysis:
return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
def __init__(self):
self.charset_name = "SHIFT_JIS"
def get_charset_name(self):
return self.charset_name
def get_order(self, aBuf):
if not aBuf:
return -1, 1
@@ -184,6 +190,8 @@ class SJISContextAnalysis(JapaneseContextAnalysis):
first_char = wrap_ord(aBuf[0])
if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
charLen = 2
if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
self.charset_name = "CP932"
else:
charLen = 1

View File

@@ -129,11 +129,11 @@ class Latin1Prober(CharSetProber):
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._mFreqCounter[3] / total)
- (self._mFreqCounter[1] * 20.0 / total))
confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
/ total)
if confidence < 0.0:
confidence = 0.0
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence = confidence * 0.5
confidence = confidence * 0.73
return confidence

View File

@@ -353,7 +353,7 @@ SJIS_cls = (
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
@@ -369,9 +369,8 @@ SJIS_cls = (
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7
4,4,4,4,4,0,0,0 # f8 - ff
)
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_st = (
@@ -571,5 +570,3 @@ UTF8SMModel = {'classTable': UTF8_cls,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
# flake8: noqa

View File

@@ -47,7 +47,7 @@ class SJISProber(MultiByteCharSetProber):
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)

View File

@@ -71,9 +71,9 @@ class UniversalDetector:
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM:
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}

Some files were not shown because too many files have changed in this diff Show More