Compare commits

..

770 Commits

Author SHA1 Message Date
Ruud
d4ed4791bf Merge branch 'develop' into redesign 2015-02-22 18:30:47 +01:00
Ruud
adb744a526 Don't show double updater type 2015-02-22 17:42:29 +01:00
Ruud
0f82cda811 Remove podnapisi from subtitle list 2015-02-22 16:09:22 +01:00
Ruud
0d6c3c8ecb Yify, only use data when available 2015-02-22 16:06:07 +01:00
Ruud
6598f53fd4 Quality check improve 2015-02-22 15:55:54 +01:00
Ruud
6b8458d87f Hadouken apikey check not using correct settingskey
fix #4674
2015-02-22 14:49:37 +01:00
Ruud
99a0621238 Use keep-alive connection 2015-02-22 14:30:50 +01:00
Ruud Burger
c52666309a Merge pull request #4676 from peerster/develop
Update torrentshack with new URL
2015-02-22 13:39:21 +01:00
Ruud
84a458d40b Add user-agent and type to omdbapi 2015-02-22 13:06:29 +01:00
Ruud
f8631c6d53 Add extra category for TorrentLeech
fix #4683
2015-02-21 21:29:37 +01:00
Ruud
b19b0775c7 Force update to new poster on refresh
fix #4671
2015-02-20 22:16:12 +01:00
peerster
2dc1c1dd38 Update torrentshack with new URL 2015-02-19 20:07:22 +01:00
Ruud
7db8b233c8 Don't decode string if confidence isn't high enough 2015-02-18 17:21:24 +01:00
Ruud
427c77a9ef Remove podnapisi 2015-02-15 19:23:45 +01:00
Ruud
94c3969f10 Use https for yify proxy 2015-02-10 20:52:15 +01:00
Ruud
debd1855dd Move Yify to v2 2015-02-10 20:47:19 +01:00
Ruud
9f77597c11 Torrentz search on title
fix #4510
2015-02-10 17:15:53 +01:00
Ruud
afc9039625 Also search lower qualities on OMGWTF
fix #4527
2015-02-10 16:50:53 +01:00
Ruud
920d3cb44e Don't verify SYNO downloader thingymajig
fix #4641
2015-02-10 16:27:13 +01:00
Ruud
b1fc8ad862 Letterboxed new html markup
fix #4640
2015-02-10 16:21:32 +01:00
Ruud
11b9bc39ab Show tried too often error for TD 2015-02-10 15:40:55 +01:00
Ruud
6dcb3f3bf2 Change bitsoup category id
fixes #4629
2015-02-10 14:55:22 +01:00
Ruud
ce768f45c5 Make RottenTomato logging more clear
close #4618
2015-02-10 14:36:54 +01:00
Ruud
9b91d1d6c0 Remove favor, link to api key page 2015-02-10 14:10:55 +01:00
Ruud
d9c7a97604 Merge branch 'develop' of git://github.com/jonnyboy/CouchPotatoServer into jonnyboy-develop 2015-02-10 14:03:06 +01:00
Ruud
0fd01aa697 Cleanup 2015-02-10 14:01:51 +01:00
Ruud
58615e6f9b Merge branch 'develop' of git://github.com/grasshide/CouchPotatoServer into grasshide-develop 2015-02-10 13:54:13 +01:00
Ruud
2277322e57 Traceback import missing 2015-02-10 13:47:22 +01:00
Ruud Burger
18020e609e Merge pull request #4479 from sjlu/develop
Adding the ability to receive notifications through Webhooks
2015-02-10 13:19:59 +01:00
Ruud
6a31b920ac Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2015-02-10 13:15:43 +01:00
Ruud
c1266a36e4 Re-use recursion code 2015-02-10 13:15:08 +01:00
Ruud
578effc538 Merge branch 'develop' of git://github.com/dumaresq/CouchPotatoServer into dumaresq-develop 2015-02-10 13:09:12 +01:00
Ruud Burger
d881120013 Merge pull request #4513 from starkers/remotes/origin/develop
added touch and chown to the $PID_FILE
2015-02-10 13:07:25 +01:00
Ruud Burger
da5318033a Merge pull request #4380 from mannkind/develop
Initial support for Plex Media Server w/Plex Home
2015-02-10 13:04:27 +01:00
Ruud Burger
31df5bce01 Merge pull request #4612 from maikhorma/maikhorma-#2782
Simple workaround for #2782
2015-02-10 13:02:24 +01:00
Ruud
d5622b7cba Remove www from torrentday domain 2015-02-10 13:01:19 +01:00
Ruud Burger
26ad1b354f Merge pull request #4552 from coolius/patch-1
Update torrentday url
2015-02-10 12:53:52 +01:00
Ruud
7a616a81f7 Remove www from iptorrents 2015-02-10 12:52:05 +01:00
Ruud Burger
275aefc3cc Merge pull request #4553 from coolius/patch-2
Update iptorrents url
2015-02-10 12:51:02 +01:00
Ruud Burger
2b32490f72 Merge pull request #4649 from sammy2142/patch-1
Update kickass url from kickass.so to kickass.to
2015-02-10 12:49:16 +01:00
sammy2142
7b9043c16b Update kickass url from kickass.so to kickass.to
Kickass has reverted back to the .to domain as the .so domain was seized:
http://torrentfreak.com/kickasstorrents-taken-domain-name-seizure-150209/
2015-02-10 11:11:30 +00:00
maikhorma
cf83f99be0 Updated UI
Tried to make it a bit cleaner.
2015-02-01 15:28:05 -05:00
maikhorma
fb8a66d207 Shortcut to address #2782
Until there is a more elegant solution to avoid unwanted white space
trimming, this will let users disable that feature if it is not
something they need.
2015-02-01 14:43:16 -05:00
Ruud
e8a3645bc6 Log failed folder getting 2015-02-01 12:18:31 +01:00
Ruud
592e40993c Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2015-01-31 10:32:24 +01:00
Ruud
b00e69e222 TorrentBytes cut off longer titles
fix #4590
2015-01-31 10:32:15 +01:00
Ruud
c9b4c8167f Actually include host in log 2015-01-28 11:35:26 +01:00
coolius
cdb9cfe756 Update iptorrents.py
Updated iptorrents url to blockade-free iptorrents.eu
2015-01-19 17:18:56 +00:00
coolius
e52f50b204 Update torrentday.py
Updated torrentday url to blockade-free torrentday.eu
2015-01-19 17:17:31 +00:00
Ruud
770c2be14c Create detail url if permalink is false 2015-01-17 13:04:47 +01:00
Ruud
ab61961a64 Use detail url 2015-01-14 16:59:29 +01:00
Ruud
6aca799bbb Newznab: use guid for detail url 2015-01-14 16:55:30 +01:00
David Stark
89836be1d1 added touch and chown to the $PID_FILE 2015-01-12 17:37:26 +01:00
Andrew Dumaresq
20e1283627 better way to find the folder 2015-01-11 11:57:14 -05:00
Andrew Dumaresq
ee8406e026 Minor text change 2015-01-11 11:45:29 -05:00
Andrew Dumaresq
514941b785 Merge branch 'develop' of https://github.com/dumaresq/CouchPotatoServer into develop 2015-01-11 11:42:52 -05:00
Ruud
1510e37652 Update Tornado 2015-01-11 16:18:22 +01:00
Ruud
e1e39cd3f4 Update requests 2015-01-11 16:17:33 +01:00
Ruud
e1bb8c5419 Update Chardet 2015-01-11 16:15:52 +01:00
Ruud
17fa33a496 Update user agent 2015-01-11 00:25:58 +01:00
Ruud
601f0b54cf Send CP header when downloading from newznab 2015-01-11 00:25:51 +01:00
dumaresq
51d44bfc3e Merge pull request #1 from RuudBurger/develop
Develop
2015-01-10 17:01:43 -05:00
Ruud
12148217a2 Log failed notification 2015-01-10 13:41:17 +01:00
Ruud
132fa12ef4 Late list not loaded on home 2015-01-10 12:17:47 +01:00
Ruud
1827c2e4cd Don't parse omgwtfnzb if no results are returned 2015-01-10 12:17:30 +01:00
Ruud
f423bca06b Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2015-01-09 20:13:42 +01:00
Ruud
e7b089edf5 Give better XML issues 2015-01-09 20:13:17 +01:00
Ruud Burger
b8b7d94a6a Merge pull request #4456 from dumaresq/develop
Bug fixes and new features for putio
2015-01-09 20:08:30 +01:00
Ruud
2c080fec3d TorrentBytes nbsp issue
fix #4026
2015-01-08 16:56:38 +01:00
Ruud
4c68566c77 Use new OMGWTFNZB api
fix #4471
2015-01-08 14:59:53 +01:00
Steven Lu
a3af784c18 Adding the ability to receive notifications through Webhooks 2015-01-06 18:47:19 -05:00
grasshide
ac6f295c93 New algorithm to use some kind of crowd logic on newznab powered
providers.
2015-01-05 15:00:40 +01:00
Andrew Dumaresq
2c72cd7d9f Added new folder option and fixed bug in callback URL 2015-01-04 17:10:40 -05:00
Andrew Dumaresq
d012dc5c85 Added new folder option 2015-01-04 17:10:16 -05:00
Andrew Dumaresq
038b4c63ee Updated to follow putio API changes 2015-01-04 17:09:36 -05:00
Ruud Burger
17e37996c4 Add remux category for TorrentShack
close #4427
2015-01-02 18:18:08 +01:00
jonnyboy
9318e19347 New torrent search provider hdaccess.net 2014-12-31 08:21:58 -05:00
Ruud
045c8f4dc8 Trailer 2014-12-28 11:29:41 +01:00
Ruud
02e25a9e25 Releases 2014-12-28 00:53:37 +01:00
Ruud
819f619297 Search 2014-12-27 21:10:33 +01:00
Ruud
c303789817 Merge branch 'develop' into redesign 2014-12-27 14:02:15 +01:00
Ruud
8f4e03d04b Use detected encoding
#4388
2014-12-27 13:46:25 +01:00
Ruud
229d67c086 Don't toUnicode loop 2014-12-22 22:01:47 +01:00
Dustin Brewer
d84897ff33 Initial support for Plex Media Server w/Plex Home 2014-12-21 16:18:11 -08:00
Ruud
387a711538 TorrentBytes not encoding name
fix #4377
2014-12-21 21:14:38 +01:00
Ruud
7a1b914824 Return nonblock results in main thread 2014-12-21 20:19:53 +01:00
Ruud
5e62801666 Send data through finish not write 2014-12-21 20:19:30 +01:00
Ruud
00d887153f Return data in main thread 2014-12-21 19:39:16 +01:00
Ruud
1a2d79f719 Merge branch 'develop' into redesign 2014-12-21 14:49:25 +01:00
Ruud
6d5882001a Notification.list not returning anything
fix #4348
2014-12-20 22:32:18 +01:00
Ruud
4a6b45c65c SCC not finding seeders 2014-12-20 22:24:00 +01:00
Ruud
b0d1fe5c33 Return false if no media is found on try_next
fix #4345
2014-12-20 22:17:43 +01:00
Ruud
a6e49098c8 Add robots.txt 2014-12-20 22:15:27 +01:00
Ruud
ffcd36cbf4 IOLoop callback hanging 2014-12-20 21:45:15 +01:00
Ruud
3bf2d844a0 Release api lock on connection close or finish
fix #4372
2014-12-20 20:13:49 +01:00
Ruud
dd24eb8893 Revert "Give response back to the main thread on api calls"
This reverts commit 576bcb9f4b.

Conflicts:
	couchpotato/api.py
2014-12-20 18:49:35 +01:00
Ruud
ac382d5131 Search and login 2014-12-20 10:49:01 +01:00
Ruud
abc9e78027 Merge branch 'develop' into redesign 2014-12-19 14:16:36 +01:00
Ruud
538f51dd5b Log ipv6 failed bind 2014-12-19 14:16:20 +01:00
Ruud
c94d79cc6c Popups over pages 2014-12-19 14:14:20 +01:00
Ruud
9883a7a85a Merge branch 'develop' into redesign 2014-12-19 09:11:54 +01:00
Ruud
eea9f40501 Use current 2014-12-19 09:01:52 +01:00
Ruud
576bcb9f4b Give response back to the main thread on api calls
fix #4337
2014-12-19 08:57:24 +01:00
Ruud
f4a486c47b Menu 2014-12-18 13:31:12 +01:00
Ruud
80cf144e8b Don't load async 2014-12-18 09:58:52 +01:00
Ruud
cf5a774313 Don't destroy events 2014-12-18 09:56:10 +01:00
Ruud
b9b77042dc Remove Async 2014-12-18 09:55:58 +01:00
Ruud
9e96aa14b7 Update Mootools 2014-12-18 09:55:48 +01:00
Ruud
6a0220b496 Filters 2014-12-17 22:00:43 +01:00
Ruud
02ff0acc64 Update page actions 2014-12-17 17:29:54 +01:00
Ruud
ae6affdb52 Movie details page 2014-12-17 17:10:40 +01:00
Ruud
a08df704be Update fonts 2014-12-17 17:10:25 +01:00
Ruud
af9a47d528 Add dev packages 2014-12-17 14:20:13 +01:00
Ruud Burger
62c5365329 Merge pull request #4356 from rtaibah/ReaddTypoFix
Change Readd in tooltip to Re-add. Former is confusing and not an Englis...
2014-12-17 11:36:53 +01:00
Rami Taibah
ddf575a86e Change Readd in tooltip to Re-add. Former is confusing and not an English word 2014-12-17 13:00:54 +03:00
Ruud
0155c8de2d Movie lists 2014-12-16 23:55:26 +01:00
Ruud Burger
6b9383ce92 Merge pull request #4342 from mano3m/develop_fixsize
Fix TorrentShack size
2014-12-16 07:52:08 +01:00
mano3m
cb8d24ef1f Fix TorrentShack size 2014-12-15 22:26:29 +01:00
Ruud
5bfdb121df Merge branch 'develop' into redesign 2014-12-14 13:05:47 +01:00
Ruud
814ddfb79f Don't return password fields
fix #4300
2014-12-14 12:33:28 +01:00
Ruud
766f819c0b Userscript for RT not parsing URL correctly 2014-12-14 12:06:03 +01:00
Ruud
b8b6024592 Styling 2014-12-14 12:04:26 +01:00
Ruud
d77cfb3e69 Start CP via grunt 2014-12-05 22:30:19 +01:00
Ruud
858d8b4291 Ignore vendor scripts 2014-12-05 15:14:36 +01:00
Ruud
3852fc720d Remove scss lib 2014-12-05 15:13:40 +01:00
Ruud
5145618c39 Damn semicolons 2014-12-05 14:44:12 +01:00
Ruud
d6cfcae45b Move to vendor folder 2014-12-05 11:29:25 +01:00
Ruud
5609536f46 Cleanup 2014-12-05 11:19:00 +01:00
Ruud
f992c00eb7 Remove unused 2014-12-04 23:31:45 +01:00
Ruud
87086a0336 Rename to scss 2014-12-04 23:22:14 +01:00
Ruud
62cb57f217 Concat 2014-12-03 23:30:14 +01:00
Ruud
2a0e46fe00 Dev tools 2014-12-03 23:19:22 +01:00
Ruud
1f7555e8fd Merge branch 'develop' into redesign
Conflicts:
	couchpotato/templates/login.html
2014-12-03 20:46:11 +01:00
Ruud
ff43df9ef1 Comments comments comments 2014-12-02 15:38:55 +01:00
Ruud
2e907e93e7 Whiteline 2014-12-02 12:02:49 +01:00
Ruud
4d329d6a36 Revert "Remove torrentleech"
This reverts commit dacc3d8f47.
2014-12-02 11:45:17 +01:00
Ruud
752191bc23 Comments 2014-12-02 11:43:10 +01:00
Ruud
1d73fd9d7e Import optimize 2014-12-02 11:15:29 +01:00
Ruud
79688c412a Merge branch 'develop' of git://github.com/hadouken/CouchPotatoServer into hadouken-develop 2014-12-02 11:07:54 +01:00
Ruud
fc1c95fefb Description 2014-12-01 23:00:59 +01:00
Ruud
6a174716af underscored variables 2014-12-01 22:52:10 +01:00
Ruud
defe256f1b Correct url 2014-12-01 16:52:43 +01:00
Ruud
8a5f154d9e Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-12-01 16:52:04 +01:00
Ruud
fe56a69e8f Put.IO cleanup 2014-12-01 16:49:27 +01:00
Ruud
c6d326f973 Move put.io API 2014-12-01 15:50:03 +01:00
Ruud
9e5f670feb Merge branch 'putio' of git://github.com/dumaresq/CouchPotatoServer into develop 2014-12-01 15:42:43 +01:00
Ruud Burger
9ebacf8816 Merge pull request #4258 from glibix/develop
Transmission status 16 is for "Stopped". So we need to detect a download...
2014-12-01 15:41:41 +01:00
Ruud
df2d7ec9c2 Remove debug code 2014-12-01 15:39:33 +01:00
Ruud
ddab74582b Merge branch 'develop' of git://github.com/psaab/CouchPotatoServer into psaab-develop 2014-12-01 15:28:11 +01:00
Ruud
2801079bc8 Merge branch 'develop_3845' of git://github.com/voidstarstar/CouchPotatoServer into voidstarstar-develop_3845 2014-12-01 15:14:26 +01:00
Ruud Burger
1deb49b524 Merge pull request #4261 from voidstarstar/develop_4211
Added renamer.progress API function. Fixes #4211.
2014-12-01 15:12:21 +01:00
Ruud Burger
49d550f652 Merge pull request #4270 from sammy2142/patch-1
Update Kickass url to https://kickass.so
2014-11-30 22:05:36 +01:00
sammy2142
1a43ce6ecc Update Kickass url to https://kickass.so
Kickass has recently changed its web address from https://kickass.to 
to https://kickass.so
2014-11-30 20:48:31 +00:00
voidstarstar
15a0131587 Added renamer.progress API function. Fixes #4211.
This function reports the status of the renamer.
Progress value True means the renamer is currently running.
Progress value False means the renamer is not currently running.
2014-11-27 21:51:30 -05:00
voidstarstar
0dca34958c Added a parameter to the renamer API. Fixes #3845.
The renamer now has a new 'to_folder' parameter.
This parameter specifies where movies are moved to.
2014-11-27 21:43:19 -05:00
Mathew Paret
4b231e36ea Merge branch 'feature/3967_add_imdb_link_to_tweet' into develop 2014-11-27 18:16:41 +05:30
Mathew Paret
52478a00db Revert "Feature #3967 - Added IMDB link to download complete tweet"
This reverts commit 87338760ad.
2014-11-27 18:13:41 +05:30
Mathew Paret
e177766270 Merge branch 'feature/3967_add_imdb_link_to_tweet' into develop 2014-11-27 18:06:38 +05:30
Ruud Burger
ff8da7c8f8 Merge pull request #4068 from ofir123/subscenter_support
Added support for subscenter.
2014-11-26 21:51:14 +01:00
Ruud Burger
89c8c5a0c7 Merge pull request #4203 from rkokkelk/develop
Fix startup script Debian/Ubuntu
2014-11-26 21:48:40 +01:00
Ruud
38c6266f9c Use single quotes 2014-11-26 21:47:39 +01:00
Ruud Burger
16f8e7e123 Merge pull request #4205 from kamillus/develop
adding a fix to handle missing directories in the file browser in webkit browsers
2014-11-26 21:46:10 +01:00
Ruud Burger
7110c7a11f Merge pull request #4249 from clinton-hall/patch-1
NZBGet 13 includes more status information
2014-11-25 07:55:39 +01:00
Clinton Hall
6d79f316a6 NZBGet 13 includes more status information
nzb['Status'] returns total (SUCESS/ALL) status and also failed status in V13+
This is particularly important when using fake detector scripts or stopping download due to health checks etc.
http://nzbget.net/RPC_API_reference#Method_.22history.22
https://couchpota.to/forum/viewtopic.php?f=5&t=4644
2014-11-25 11:02:47 +10:30
Paul Saab
c1b6811b8a Tornado requires two sockets to support IPv6
Tornado sets setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
to force IPv6 sockets to only be used for IPv6 connections.  create a
separate socket to allow for CouchPotato to be used over IPv6.
2014-11-17 22:54:56 -08:00
Kamil
7d7b76b2e9 adding a fix to handle missing directories in the file browser in webkit browsers 2014-11-10 20:18:38 -05:00
Roy Kokkelkoren
657aa52fa7 Merge branch 'develop' of https://github.com/rkokkelk/CouchPotatoServer into develop 2014-11-10 15:40:41 +01:00
Roy Kokkelkoren
8e9ef8db39 Merge remote-tracking branch 'upstream/develop' into develop 2014-11-10 15:39:45 +01:00
test
92a0096b54 Merge remote-tracking branch 'upstream/develop' into develop 2014-11-10 08:29:27 -06:00
Mathew Paret
87338760ad Feature #3967 - Added IMDB link to download complete tweet 2014-11-10 18:47:37 +05:30
Mathew Paret
28019b0a09 Transmission status 16 is for "Stopped". So we need to detect a download as completed even if it is stopped but percent done is 100 2014-11-10 18:39:58 +05:30
Ruud Burger
248b007f4a Merge pull request #4094 from georgewhewell/hdbits-add-internal-only
add option for internal-only for hdbits provider
2014-11-09 14:39:50 +01:00
Ruud Burger
9e31c59de8 Merge pull request #4188 from DjSlash/patch-2
Update SSL protocol for Deluge connections
2014-11-09 14:37:23 +01:00
Ruud
269e785888 Yify, don't include quality in search
fix #4190
2014-11-09 14:30:22 +01:00
Ruud
3669aef42d is_movie param 2014-11-09 14:14:06 +01:00
Ruud
1087eb3a06 Add adding parameter to is_movie 2014-11-09 14:10:23 +01:00
Rutger van Sleen
43af80a137 Update SSL protocol for Deluge connections
Since Deluge 1.3.10 the SSL protocol is updated to TLSv1 instead of SSLv3. This resulted in CP not being able to add new torrents. Link to change in Deluge: http://git.deluge-torrent.org/deluge/commit/?h=1.3-stable&id=26f5be17609a8312c4ba06aa120ed208cd7876f2
2014-11-06 14:33:38 +01:00
Roy Kokkelkoren
0766a27a71 Fixed bug in init.d script which prevented the writing of the PID file.
Altered default value of DATA_DIR to /var/opt/couchpotato in order to comply to linux file structure
2014-11-06 04:40:39 -06:00
Ruud
a12f049d14 Bit-HDTV http -> https
fix #3570
2014-11-01 17:30:11 +01:00
Ruud
6afe2fd9cf IPTorrents webdl category
fix #4150
2014-11-01 15:36:54 +01:00
Viktor Elofsson
61f634a21e Refactored Hadouken downloader. 2014-10-21 16:52:28 +02:00
Ruud
02b6659235 Don't show ignored in ETA message 2014-10-20 20:46:07 +02:00
Ruud
dacc3d8f47 Remove torrentleech 2014-10-19 22:47:29 +02:00
Ruud
4f140bb1ac Remove print 2014-10-19 22:41:50 +02:00
Ruud
3dffaa7075 Update Tornado 2014-10-19 16:13:03 +02:00
georgewhewell
d626fda710 add option for internal only for hdbits provider 2014-10-17 15:14:44 +01:00
Ruud
51c8de0fc3 Force filename renamer setting 2014-10-16 22:39:39 +02:00
Ruud
4f23ccc284 Also rename on old version database
fix #4083
2014-10-15 21:28:52 +02:00
Ruud
a6ff34a47f Only check exists on file 2014-10-14 22:49:19 +02:00
Viktor Elofsson
b40d1f3463 Merge pull request #1 from RuudBurger/develop
Sync upstream
2014-10-14 13:18:48 +02:00
Ruud
f1a2d960bc Make tab font smaller 2014-10-13 13:24:57 +02:00
Ruud
4e7069e0c6 Deluge, allow "no port"
fix #4055
2014-10-13 13:20:21 +02:00
Ruud
477a47e45e Don't show fanart error on future movies 2014-10-13 13:12:44 +02:00
Ruud
a3264240ab Don't error out on future movies 2014-10-13 13:10:38 +02:00
Ofir Brukner
1030d0d748 Added support for subscenter.
Updated both plugin and lib.
2014-10-13 00:50:29 +03:00
Ruud
f9d9fffedb Don't ss int or float 2014-10-12 16:41:48 +02:00
Ruud
6b4e9a3fac Remove chardet from requests library 2014-10-11 22:57:57 +02:00
Ruud Burger
6787289846 Merge pull request #4054 from joshka/patch-1
Fix default permissions on files to remove execute bits on files #4053
2014-10-11 18:04:08 +02:00
Joshua McKinney
d31a2e2768 Fix default permissions on files to remove execute bits on files #4053 2014-10-12 03:01:20 +11:00
Ruud
c992680209 Meta and Middle click not triggering new tab 2014-10-11 15:35:56 +02:00
Ruud
65f0dc25d2 Allow 1080p in shitty quality releases 2014-10-11 14:32:12 +02:00
Ruud
b616af3a83 Make minimum scoring editable
fix #4042
2014-10-10 23:16:58 +02:00
Ruud
ca13107330 Ignore exceptions on removing db_backup stuff 2014-10-10 22:46:35 +02:00
Ruud
c7ce18f8c2 Better error message for missing cd number 2014-10-10 15:13:58 +02:00
Ruud
b6f288a522 Close request connection 2014-10-09 23:10:28 +02:00
Ruud
cb48ca03df Remove profile when marking movies done 2014-10-08 23:05:04 +02:00
Ruud
7b6641d709 Never restatus "down" when adding release 2014-10-08 23:00:11 +02:00
Ruud
3c12a2c4bf Don't restatus movies to active when scanning manage section 2014-10-08 22:35:56 +02:00
Ruud
259e2bc61c Don't skip unpacking on manage scan 2014-10-08 21:58:34 +02:00
Ruud
9f6e4cc2fa Remove NotifyMyWP 2014-10-08 20:46:11 +02:00
Ruud
a763957334 Log minimum 1 second wait 2014-10-07 22:53:57 +02:00
Ruud
06293dc0a2 Simplify tmdb provider 2014-10-07 22:50:22 +02:00
Ruud
38a5d967dd Remove tmdb3 lib 2014-10-07 21:40:41 +02:00
Ruud
4cdb9bc81d Remove tmdb3 dependency 2014-10-07 21:40:32 +02:00
Ruud
2104cb2839 Always try to return version string 2014-10-07 20:30:51 +02:00
Ruud
d4a4bd40a8 Always return version info 2014-10-07 20:14:17 +02:00
Ruud
ba47d7eea7 Use torrent-duplicate if returned from Transmission
fix #4014
2014-10-07 11:46:42 +02:00
Ruud
c9638ec3fa Encode templ download destination 2014-10-05 18:47:30 +02:00
Ruud
14d636d098 Return image filepath in unicode 2014-10-05 18:46:44 +02:00
Ruud
e1d4df7937 Give api encoding error log 2014-10-05 18:46:16 +02:00
Ruud
e08d06ba31 Update chardet 2014-10-05 14:40:27 +02:00
Ruud
984ee7580d Force unicode on file loading for guessit 2014-10-05 13:53:57 +02:00
Ruud
ab118ea580 Windows special chars not returning any folders 2014-10-05 13:38:22 +02:00
Ruud
f897eebb41 Allow regex in required and ignored words
close #3376
2014-10-05 12:37:00 +02:00
Ruud
755873c5e7 Don't ss int and float 2014-10-05 12:33:13 +02:00
Ruud
dbc254efbe Also encode log with names tuples 2014-10-05 11:15:55 +02:00
Ruud
9de8ed2dee Don't migrate old databases 2014-10-05 11:05:56 +02:00
Ruud
230b7f47cc Always save encode logger stuff 2014-10-05 00:41:20 +02:00
Ruud
58878d8a0f Replace non existing chars 2014-10-05 00:18:22 +02:00
Ruud
d9bb1bfbfb - Debug code 2014-10-04 22:57:18 +02:00
Ruud
f8674f9baa Grammar 2014-10-04 22:17:58 +02:00
Ruud
20f1076037 Don't load suggestions when chartview active 2014-10-04 21:48:46 +02:00
Ruud
e84f2aa04c Don't load charts if suggestion tab is enabled 2014-10-04 17:47:21 +02:00
Ruud
01f70051f8 For people who can't read good 2014-10-04 17:37:25 +02:00
Ruud
492f69b149 Actually use the smtp port from settings
fix #4003
2014-10-04 16:53:40 +02:00
Ruud
2270b2a28b Don't force parser for trailer searching 2014-10-04 16:50:01 +02:00
Ruud
b5a0418a36 Make available space check optional
fix #3973
2014-10-04 16:26:33 +02:00
Ruud
e595722139 Safari hanging on password input creation
Fix #3997
2014-10-04 15:37:18 +02:00
Ruud
78ba855c68 Add CP tag only when renamer or unique tag is enabled. 2014-10-04 14:33:51 +02:00
Ruud
158f638fb9 No need for double replace 2014-10-04 14:07:38 +02:00
Viktor Elofsson
2e52c8124a Implemented a downloader for Hadouken. 2014-10-02 20:34:43 +02:00
Ruud
5bea9dd04f Always return safestring on renamer replace 2014-09-29 16:24:57 +02:00
Ruud
910393d00e Allow original without cd name 2014-09-26 15:33:38 +02:00
Ruud
4b66b0ea07 Add NZBVortex group support
fix #1279
2014-09-23 22:36:01 +02:00
Ruud
543226450c NZBVortex status checking 2014-09-23 22:22:27 +02:00
Ruud
b9dbadda0b Add randomstring support to cptag 2014-09-23 22:00:27 +02:00
Ruud
7cb214d8a2 Don't force send host with every request 2014-09-23 20:33:26 +02:00
Ruud
f6d4ddbe80 NZBVortex, create unique ID 2014-09-23 17:40:21 +02:00
Ruud
faefd7a5b5 Trakt notifier always enabled 2014-09-23 16:54:24 +02:00
Ruud
8f02b0eea0 API documentation updates
close #3955
2014-09-23 12:36:46 +02:00
Ruud
39d0f91de2 Add permission calculator link
#3953
2014-09-23 12:27:09 +02:00
Ruud
b3d75cb485 Check if file got moved successfully on move/copy
close #3893
2014-09-23 10:02:36 +02:00
Ruud
17b940a271 Allow 5 redirects 2014-09-23 00:38:44 +02:00
Ruud
3338b72d1f Stop endless redirect loop
fix #3931
2014-09-23 00:38:36 +02:00
Ruud
70ca31a265 Only allow single redirect for now
fix #3931
2014-09-23 00:29:48 +02:00
Ruud
d7f43c2cf8 Make minimum seeders configurable
fix #3202
2014-09-22 22:15:13 +02:00
Ruud
b1f88c1c48 Allow https for Transmission
close #3880
2014-09-22 21:53:27 +02:00
Ruud
6fa6d530ec Remove older backup folders 2014-09-22 21:45:43 +02:00
Ruud
11e7fb23ca Stream larger file download
fix #2488
2014-09-22 21:38:53 +02:00
Ruud
da9d2b5ed8 Check free diskspace before starting moving files
fix #3893
2014-09-22 21:03:13 +02:00
Ruud Burger
2599bac1a4 Merge pull request #3944 from softcat/develop
Fixed filmstarts.de provider
2014-09-21 23:12:25 +02:00
Ruud
0bae509311 Use github img url for Growl notification
fix #1363
2014-09-21 22:29:52 +02:00
Ruud
2fa7834e6e Allow typing in directory setting
closes #479
2014-09-21 22:17:09 +02:00
Ruud
2deb6ee6a7 Trakt not moving movie to collection
fix #3018
2014-09-21 15:57:29 +02:00
Ruud
0d166025d0 Safestring before base encode 2014-09-21 15:13:50 +02:00
Ruud
7861416dc5 Don't write over files already renamed 2014-09-21 10:43:53 +02:00
Ruud
2639c5e9ad Force add <cd> if not set
fix #1811
2014-09-21 10:40:29 +02:00
Andrew Dumaresq
8de5fcdac6 fixed button name 2014-09-20 19:39:35 -04:00
Andrew Dumaresq
4aa9801be4 general code cleanup 2014-09-20 19:39:12 -04:00
Ruud
c4db4ace13 Log move, copy, link 2014-09-21 00:09:56 +02:00
Ruud
db367a80d1 Do proper cleanup after rename 2014-09-20 23:58:54 +02:00
softcat
3093b21555 Fixed filmstarts.de provider 2014-09-20 17:57:51 +02:00
dumaresq
3e58378490 figured out how to make the check work better 2014-09-19 21:41:58 -04:00
Andrew Dumaresq
2c40db3074 removed un-needed variable 2014-09-19 20:28:03 -04:00
dumaresq
fba228fd9d fixing check function 2014-09-19 20:26:54 -04:00
Andrew Dumaresq
ef2b8e88b4 better download checking 2014-09-19 07:07:23 -04:00
Ruud
9b62e32da8 Symlink failing on encode
fix #3371
2014-09-19 10:42:48 +02:00
Ruud
a0b3ee8186 Safe encode path names in renamer
fix #3425
2014-09-19 10:39:54 +02:00
Ruud
d70da1edce TorrentShack use correct columns
fix #3940
2014-09-19 10:20:02 +02:00
Ruud
7c674b3aab Re-add movie giving rev conflict
fix #3939
2014-09-19 10:03:02 +02:00
Ruud
98540f2fcd Make sure original_folder isn't empty
fix #3747
2014-09-19 00:15:40 +02:00
Ruud
2f0e197320 Mark faulty movies done 2014-09-19 00:15:18 +02:00
Ruud
db49585818 Renamer doesn't loop over all movies properly 2014-09-18 21:32:31 +02:00
Ruud
160bc1a5c4 Always release lock 2014-09-18 20:57:46 +02:00
Ruud
8e23b02653 Stop on 429 code 2014-09-18 20:19:49 +02:00
Ruud
41e69aeac3 Delete nzbindex provider 2014-09-18 19:52:10 +02:00
Ruud
be30200a18 Use correct arg 2014-09-18 17:57:10 +02:00
Ruud
387650d040 Don't tag recent for fixed ignored movies 2014-09-18 17:51:21 +02:00
Ruud
052d64eb39 Force restatus on ignored movies 2014-09-18 17:45:55 +02:00
Ruud
a3a8a820fe release.update_status not triggered on frontend 2014-09-18 16:49:44 +02:00
Ruud
1b724b5606 Media got tagged with ignored, instead of release 2014-09-18 16:04:21 +02:00
Ruud
5fc9d7182c Hide urllib3 error
closes #3887
2014-09-18 14:26:34 +02:00
Ruud
c948216e33 I need to watch more Sesame Street..
26 letters in the alphabet + # for numbers is 27..
2014-09-18 14:17:00 +02:00
Andrew Dumaresq
c77b270fa8 Cleaned up OAUTH and made the download async 2014-09-18 06:00:09 -04:00
Ruud
035b99bc8a Don't use event when not needed 2014-09-17 23:06:59 +02:00
Ruud
f74b837faa Ignore RecordDeleted in release for media call 2014-09-17 23:05:01 +02:00
Ruud
4c198f7116 Ignore RecordDeleted on notification getter
fix #3888
2014-09-17 22:51:27 +02:00
Ruud
76322c0145 Don't save data in notification 2014-09-17 21:50:41 +02:00
Ruud
12150c5efc Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-09-17 12:59:29 +02:00
Ruud
4a9452672a Use status_code to stop requesting url 2014-09-17 12:59:24 +02:00
Ruud
f7eeaf3eda Locking mechanism 2014-09-16 22:44:33 +02:00
Ruud
002ce4d4e1 Ignore deleted media documents 2014-09-16 22:05:54 +02:00
Ruud
80df57f2b6 Delete corrupted documents 2014-09-16 22:00:54 +02:00
Ruud
0358378cae Fix marshal data corrupted documents 2014-09-16 15:16:32 +02:00
Ruud
fa054b6b34 Migration: Don't fail on missing release file 2014-09-16 13:55:44 +02:00
Ruud Burger
4b9e226cc6 Merge pull request #3884 from botez/develop
Update iptorrents.py
2014-09-11 13:58:18 +02:00
Ruud
ca24bf031c Change quality test 2014-09-08 19:21:31 +02:00
dumaresq
872a4f4650 Worked on getting OAuth and adding download status 2014-09-07 17:59:16 -04:00
Ruud
af8806e292 Move webrip to scr alternative 2014-09-07 21:50:20 +02:00
Ruud
4f646094b5 Add quality test 2014-09-07 21:50:08 +02:00
Ruud
6e8503cfc5 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-09-07 21:43:16 +02:00
Ruud
4879bc6251 Move hdtv & hdrip to alternative for brrips 2014-09-07 21:42:45 +02:00
Ruud
ab253f9030 Add quality test 2014-09-07 21:41:59 +02:00
Ruud
bc6d197004 Don't score identifier quality.guess twice 2014-09-07 21:40:54 +02:00
Troy Olson
1de457fa8d Update iptorrents.py
Adds another URL parameter when searching on iptorrents for brrip.  This allows it to find brrip that are also classified as 720p/1080p.

See this thread for info:
https://couchpota.to/forum/viewtopic.php?f=5&t=4032&sid=7892abbaaca9dad8bd3cc27cafb7fd33

And this prior pull request for more info:
https://github.com/RuudBurger/CouchPotatoServer/pull/3696
2014-09-03 12:33:04 -07:00
Ruud
9e564c49b3 Update library: requests 2014-09-03 16:47:03 +02:00
Ruud
50a150f570 Update library: httplib2 2014-09-03 16:47:03 +02:00
Ruud
8d55b0c92a Assume non-3d if quality guess fails 2014-09-02 21:11:54 +02:00
Ruud
5a2df62462 Add quality test 2014-09-01 22:51:55 +02:00
Ruud
9d21dd9196 Newznab download status code not caught properly 2014-09-01 22:13:49 +02:00
Ruud
3b34196901 Also untag active movies 2014-09-01 14:57:59 +02:00
Ruud
bad26026ae Clean done after week, not 3 days 2014-09-01 14:50:42 +02:00
Ruud
6e455e62d5 Remove debug print 2014-09-01 14:49:46 +02:00
Ruud
c97bd38c83 Import cleanup 2014-09-01 14:23:26 +02:00
Ruud
356322c5b1 Use named argument for with_status calls 2014-09-01 13:40:14 +02:00
Ruud
9dbb477dd8 Use latest found release if files are the same 2014-08-31 23:11:28 +02:00
Ruud
089609d5d2 Give higher penalty for allowed identifiers 2014-08-31 22:19:41 +02:00
Ruud
487ddf1c25 Don't check message in dev mode 2014-08-31 22:19:09 +02:00
Ruud
83b4c17969 Better quality guessing based on size 2014-08-31 16:26:25 +02:00
Ruud
08c381cf0d Use video metadata titles for scoring 2014-08-31 15:03:51 +02:00
Ruud
286f14a6d2 Return titles included in video headers 2014-08-31 15:03:10 +02:00
Ruud
0b14fe5454 Look at all releases for restatus "done" 2014-08-31 15:02:35 +02:00
Ruud
c5a0d521d1 Simplify coming soon / late listing 2014-08-31 01:11:40 +02:00
Ruud
4a1f70da09 ConnectionError not caught properly 2014-08-31 00:23:06 +02:00
Ruud
87e97cd8a5 Only show poster when available
fix #3866
2014-08-30 17:06:51 +02:00
Ruud
e0dffe20a4 Update lib: Tornado 2014-08-30 14:01:12 +02:00
Ruud
73d37584ad Cleanup import 2014-08-30 14:00:57 +02:00
Ruud Burger
5fd3e86624 Merge pull request #3857 from MLWALK3R/develop
TPB & Kickass proxy update
Use https for torrentleech
2014-08-29 12:39:44 +02:00
Ruud
d0f1e7c6a3 Update put.io code 2014-08-29 12:30:31 +02:00
Ruud
53e7e383a3 put.io rename 2014-08-29 11:38:28 +02:00
Ruud
c06e1f3135 Merge branch 'develop' of git://github.com/dumaresq/CouchPotatoServer into dumaresq-develop 2014-08-29 11:37:49 +02:00
Ruud
b0ff526c95 Improved quality matching
fixes #3829
2014-08-28 16:33:59 +02:00
Ruud
3cfe90d581 break backlog on error 2014-08-28 15:25:25 +02:00
Ruud
1d60d9caf1 Blu-ray backlog not working
fixes #3826
2014-08-28 15:23:01 +02:00
Michael Walker
8e0d1520e8 Removed URL
URL had a captcha
2014-08-28 12:44:24 +01:00
Ruud
b07f91d6a5 Wrap exceptions around score calculations
Fix #3859
2014-08-28 13:41:41 +02:00
Michael Walker
43af091b02 SSL'd Links
SSL'd Links
2014-08-28 02:13:22 +01:00
Michael Walker
5f0543ba42 Updated URL's
Removed dead links
2014-08-28 02:10:30 +01:00
Michael Walker
ef8cd1aa40 URL Fix
Domain is missing WWW record, causing an issue.
2014-08-28 01:59:37 +01:00
Michael Walker
e01fe51b9e Merge pull request #1 from RuudBurger/develop
Develop
2014-08-28 01:58:20 +01:00
Ruud
afa782194d Remove ending seperator 2014-08-27 22:05:16 +02:00
Ruud
77e602f359 Use proper bitsoup variable 2014-08-27 21:45:53 +02:00
Ruud
a6063b0665 Merge branch 'fuzeman-develop_tv_sync' into develop 2014-08-27 21:44:43 +02:00
Ruud
9a7e4ea500 Merge branch 'develop_tv_sync' of git://github.com/fuzeman/CouchPotatoServer into fuzeman-develop_tv_sync 2014-08-27 21:44:09 +02:00
Ruud
1daedb7259 Update bitsoup tables
closes #3807
2014-08-27 20:07:27 +02:00
Ruud Burger
8e82e976f1 Merge pull request #3809 from dkboy/develop
Update bitsoup.py table order
2014-08-27 19:48:40 +02:00
Ruud
8b445ac9f9 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-08-27 19:46:53 +02:00
Ruud
91c24105cc Dockers readme install 2014-08-27 19:45:46 +02:00
Ruud Burger
13df26851e Merge pull request #3836 from clinton-hall/patch-1
Ignore separator at end of string. Fixes #3823
2014-08-27 19:41:37 +02:00
Ruud
ca58d25785 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-08-27 19:40:47 +02:00
Ruud
42d728f71e Update ubuntu start script
closes #3846
2014-08-27 19:40:28 +02:00
Ruud Burger
659960899e Merge pull request #3848 from xombiemp/develop
Fix brrip search parameters for passthepopcorn
2014-08-27 19:38:43 +02:00
Ruud Burger
d40b052cbc Merge pull request #3852 from MLWALK3R/patch-2
Updated TPB URL's
2014-08-27 19:38:11 +02:00
Ruud
282f6fb73a Only get fanart with extended info 2014-08-27 19:35:52 +02:00
Ruud
416c9eabde Update charts every 3 days 2014-08-27 19:35:32 +02:00
Michael Walker
b4a15f344d Updated URL's
Replaced TPB links with working URL's
2014-08-26 15:53:10 +01:00
Andrew Parker
c545c9aab1 Couchpotato will identify a release with 'hdtv' in the name as brrip. The passthepopcorn provider searched for brrip with the parameters media=Blu-ray. This created a condition where a 720p hdtv movie would not be snatched because the 720p quality would skip it due to being marked as 'brrip', and br-rip quality would not find it because it was not from a Blu-ray source.
This changes the search parameters for brrip to resolution=anyhd to remove the requirement that its source media is Blu-ray.
2014-08-25 09:43:06 -06:00
dumaresq
bb73cb8eec Fixed missing library 2014-08-24 18:19:01 -04:00
Clinton Hall
c0492a41d9 Ignore separator at end of string. Fixes #3823 2014-08-22 11:58:29 +09:30
seedzero
cfd92b8268 Documentation added for media type .list & .delete APIs 2014-08-20 15:00:09 +12:00
Dean Gardiner
436883a96d Fixed media.types & addSingleListView
addSingleCharView, addSingleDeleteView

Conflicts:

	couchpotato/core/media/show/_base/main.py
2014-08-20 14:59:57 +12:00
seedzero
c381b719b1 Stop movie searcher searching for TV shows and
hosing episodes
2014-08-20 14:59:11 +12:00
Dean Gardiner
81d4d9a4e2 Changed "media.with_identifiers" to remove "No media found with..." messages 2014-08-20 14:58:55 +12:00
Dean Gardiner
e2df3a4dfd Added children to "library.related" 2014-08-20 14:58:27 +12:00
Dean Gardiner
7df92f2882 Fix possible dashboard error, add "types" parameter to "media.with_status", limit suggestions to movies (for now) 2014-08-20 14:58:04 +12:00
Dean Gardiner
072b6d09fa Renamed "[media].update_info" event to "[media].update" 2014-08-20 14:57:35 +12:00
Dean Gardiner
3869e350bf Added "media_id" parameter to "library.tree" event 2014-08-20 14:57:16 +12:00
Dean Gardiner
058846f54f Added "find" helper function 2014-08-20 14:57:02 +12:00
Dean Gardiner
cd836f3660 Include releases in "library.tree" 2014-08-20 14:56:36 +12:00
Dean Gardiner
d75f58f5ec Fixed "library.related" and "libary.tree" to work with "show.episode", 'show.season" media types 2014-08-20 14:56:13 +12:00
Dean Gardiner
f2b0d3f80b Switched "library.tree" to use "media_children" index 2014-08-20 14:55:56 +12:00
Dean Gardiner
a366d57278 Added "library.tree" event/api call 2014-08-20 14:55:34 +12:00
Dean Gardiner
a821d85bf2 Fixed MediaBase.getPoster(), switched MovieBase to use this generic method 2014-08-20 14:55:07 +12:00
Dean Gardiner
a1ce3e0d6b Added "library.root" event, fixes to "matcher", "release" and "score" to use "library.root" + handle missing "year" 2014-08-20 14:54:24 +12:00
Dean Gardiner
e7be5c7809 Added "library.related" event and "library.query", "library.related" API calls 2014-08-20 14:52:08 +12:00
dumaresq
5acab98025 fixed hardcoded directory 2014-08-19 19:32:00 -04:00
dumaresq
ed6a46e9c0 Added putioDownloader 2014-08-17 16:28:47 -04:00
dkboy
89f3b6624e Update bitsoup.py
They've rearranged the table
2014-08-17 14:52:43 +12:00
Ruud Burger
3546f29caf Merge pull request #3771 from fenduru/ptp-freeleech
Give higher weight to freeleech torrents
2014-08-16 13:32:45 +02:00
Ruud
e3414fe91f Remove movie if no releases are left after delete 2014-08-16 13:29:34 +02:00
Ruud
bdadd00d93 Don't add & on url creation 2014-08-16 12:44:52 +02:00
Ruud
dd7de31e9f Update TorrentShack url
fix #3797
2014-08-14 21:28:17 +02:00
fenduru
6897dab647 Give higher weight to freeleech torrents 2014-08-09 11:20:23 -04:00
Ruud
accf19bb26 Different log level 2014-08-01 13:33:59 +02:00
Ruud
4126007cac Don't download 0 seed torrents
fix #3728
2014-08-01 13:29:39 +02:00
Ruud
9f12fe2636 Add edge meta for IE
fix #3727
2014-08-01 13:23:00 +02:00
Ruud
9fb348f3a4 Don't try to ignore None release 2014-08-01 13:21:15 +02:00
Ruud
e749d132cd Better message 2014-07-28 16:12:54 +02:00
Ruud Burger
bed9458604 Update contributing.md 2014-07-28 16:06:38 +02:00
Ruud
7984ee9fcf Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-07-28 16:02:49 +02:00
Ruud
69e3e36fae Update contribute 2014-07-28 16:01:27 +02:00
Ruud
456563eab0 Update log post text 2014-07-28 16:01:16 +02:00
Ruud
7b6fa4f0e5 Transmission failed to download 2014-07-27 21:28:06 +02:00
Ruud
cd1dc39ef2 Add bd50 torrentleech 2014-07-27 21:14:31 +02:00
Ruud
0771aeac3b Update nzbgeek api url 2014-07-20 16:20:12 +02:00
Ruud
cd0afd20e5 Always reconnect Transmission. fix #3631 2014-07-17 23:58:51 +02:00
Ruud
324920cd8c Don't migrate when db is closed 2014-07-17 23:25:35 +02:00
Ruud
12cda35494 Try fix migration failure from 2.5.1 2014-07-17 22:24:48 +02:00
Ruud
4291e2233d Reloader class 2014-07-13 12:23:27 +02:00
Ruud
6ccbad031f Use own style reloader 2014-07-13 12:23:14 +02:00
Ruud
d1dfed2833 Merge branch 'refs/heads/develop' into redesign 2014-07-12 20:39:26 +02:00
Ruud Burger
1c1af9f90c Merge pull request #3620 from peerster/develop
Include instructions for Linux with systemd
2014-07-12 20:21:23 +02:00
Ruud
687221f035 Update last_edit when tagging with recent 2014-07-12 19:38:49 +02:00
peerster
a99d52392f Another layout fix 2014-07-12 16:40:24 +02:00
peerster
bd6690b159 layout fix 2014-07-12 16:34:41 +02:00
peerster
b13df16b53 Added systemd specific instructions 2014-07-12 16:29:42 +02:00
Ruud
06f49be090 Don't error out on media.get. fix #3611 2014-07-11 16:47:15 +02:00
Ruud
0b48ad5084 Change fanart api url 2014-07-11 16:24:45 +02:00
Ruud
32ce93d2e9 Encode video path 2014-07-11 10:30:32 +02:00
Ruud
e0479e79bd AwesomeHD not returning proper size. fix #3587 2014-07-08 20:24:18 +02:00
Ruud
04e22b3966 XBMC error > info2 2014-07-08 20:09:24 +02:00
Ruud
3986de4ebc Merge branch 'refs/heads/develop' into redesign 2014-07-06 22:38:51 +02:00
Ruud
40a5ce087b Better label for Pushbullet settings 2014-07-06 22:19:45 +02:00
Ruud
330e15bbcb Snatched not giving enough data to show notification. fix #3564 2014-07-04 20:36:48 +02:00
Ruud
d201d9fff9 Allow change of file move action 2014-07-02 19:02:55 +02:00
Ruud
f765794c99 Don't add managed to no-process renamer list. fix #3538 2014-07-01 21:00:05 +02:00
Ruud
34320e617d Unrar extract time options. closes #2733 2014-06-30 22:47:41 +02:00
Ruud
169ddeef5d Allow custom unrar path. fix #3460 2014-06-30 22:38:18 +02:00
Ruud
33ad4c22c7 Try make unrar executable for user 2014-06-30 21:58:19 +02:00
Ruud
265f90fe69 Unrar cleanup 2014-06-30 21:36:32 +02:00
Ruud
099b72ed27 Allow 720p in lower qualities. fix #3539 2014-06-30 16:28:45 +02:00
Ruud
d20c0ee37e Remove Smackdown from defaults 2014-06-29 23:34:48 +02:00
Ruud
f6030a333a Don't remove pyc files when using desktop updater 2014-06-29 23:33:23 +02:00
Ruud
4cbc089de2 Log subfolder errors in renamer 2014-06-29 10:51:33 +02:00
Ruud
c45c04659f Use html parser for hdtrailers 2014-06-29 10:24:19 +02:00
Ruud
61a9037835 Don't error out if XBMC is turned off. fix #3515 2014-06-29 09:49:48 +02:00
Ruud
ad33c0bcca Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-06-28 23:58:21 +02:00
Ruud
7afc524a9f Freespace Windows not working. fix #3535 2014-06-28 23:58:02 +02:00
Ruud Burger
c5a4bc9a1b Merge pull request #3534 from mano3m/develop_fix_torrentshack
Fix torrentshack
2014-06-28 23:55:47 +02:00
Ruud
1c0178dbaf Fix 'ignore' mis-tagging 2014-06-28 23:54:52 +02:00
Ruud
dbf7feca3e Properly delete from manage 2014-06-28 23:46:28 +02:00
mano3m
d92de8ec4e Fix torrentshack 2014-06-28 23:43:16 +02:00
Ruud Burger
8347da5a58 Merge pull request #3529 from genial123/api-fix
Finish non-existent API calls instead of timing out
2014-06-28 23:17:37 +02:00
Ruud
59e248d7de Wrong argument split. fix #3518 2014-06-28 22:57:42 +02:00
genial123
12e556e1d1 Finish non-existent API calls instead of timing out 2014-06-28 08:19:05 +02:00
Ruud
14d3ab93da Add mp4 quality brrip 2014-06-25 23:22:35 +02:00
Ruud
e27ece512f Use release quality, not identifier to match downloaded release 2014-06-25 23:08:37 +02:00
Ruud
b88d8efc8d Allow 720p in cam quality. fix #3512 2014-06-25 22:56:40 +02:00
Ruud
9ec4c2837e Don't put original title first 2014-06-25 18:20:42 +02:00
Ruud
ffc3fc9ec4 Check for broken indexes and reindex if needed 2014-06-25 18:16:45 +02:00
Ruud
a566b4f428 Setup property index with database module 2014-06-25 16:13:35 +02:00
Ruud
69819460f3 Add zoink.it for torrent caching 2014-06-25 09:27:23 +02:00
Ruud
24a8cb41fe Keep previous status if restatus check fails 2014-06-24 22:01:27 +02:00
Ruud
1de0443492 Get default "stop after" if it isn't set yet. fix #3499 2014-06-24 21:56:46 +02:00
Ruud
bb19b380b4 Don't start CP when less then 100MB is available. fix #3502 2014-06-24 21:20:13 +02:00
Ruud
b6b936ddf3 Use other name guess. fix #3501 2014-06-24 20:50:25 +02:00
Ruud
b00b6acba8 Profile don't save. fix #3437 2014-06-24 20:18:51 +02:00
Ruud
3941076c06 Forgot to add the separator to test 2014-06-24 10:09:34 +02:00
Ruud
7401201af2 Add subfolder path test 2014-06-24 10:02:43 +02:00
Ruud
5c586fbf30 Update isSubFolder test 2014-06-24 10:02:14 +02:00
Ruud
5c891b7e8e Try next on failed trailer download 2014-06-23 23:47:30 +02:00
Ruud
5425fcae9e Manually get with_status releases 2014-06-23 23:40:36 +02:00
Ruud
4008cce12f Manually get media with status 2014-06-23 23:37:17 +02:00
Ruud
d227105527 Make keep search advanced 2014-06-23 22:04:42 +02:00
Ruud
508649e6b6 Optimize import 2014-06-23 21:51:14 +02:00
Ruud
b4e25d4345 Indent fixes 2014-06-23 21:50:23 +02:00
Ruud
733f925c75 Merge branch 'refs/heads/mano3m-develop_wait_for_better' into develop 2014-06-23 21:49:21 +02:00
mano3m
40e910192e Fix tagging 2014-06-23 21:14:00 +02:00
mano3m
424a3cd892 Clean-up 2014-06-23 21:13:59 +02:00
mano3m
9f6036c8d6 Redo status update for media 2014-06-23 21:13:58 +02:00
mano3m
5af5749d4a Catch missing deleted profile error
@RuudBurger should we reset the profile of the media to default or None
in case this happens or leave it the way it is?
2014-06-23 21:10:11 +02:00
mano3m
f01449f14c Rename scanned files for done media properly 2014-06-23 21:10:10 +02:00
mano3m
03dff14ee9 Massive bug fix 2014-06-23 21:10:09 +02:00
mano3m
e55302592a Improve description 2014-06-23 21:10:09 +02:00
mano3m
dbeaab052d Wait before marking media as done 2014-06-23 21:10:08 +02:00
Ruud
9f07dd5a21 Reindex after full scan. fix #3492 2014-06-23 20:46:26 +02:00
Ruud
b933cd8718 Delete when total releases was 0 2014-06-23 20:40:04 +02:00
Ruud
8d85dde2c6 Don't use empty name_year return for moviemeter. fix #3493 2014-06-23 16:19:40 +02:00
Ruud
eaaa8dc834 Only try other if it's different 2014-06-23 14:15:00 +02:00
Ruud
5350dbf0ce Filter out extended and try other result on determine media. fix #3489 2014-06-23 14:13:32 +02:00
Ruud
28ffad10ab Standardize path for list directory api call. #3487 2014-06-23 13:43:33 +02:00
Ruud
a37517bf6a Use ssl startup options. fix #3490
Thanks @sjmcinness
2014-06-23 13:37:13 +02:00
Ruud
fab9b96c8e Keep done releases when removing from wanted/dashboard. fix #3488 2014-06-23 13:16:05 +02:00
Ruud
50d6882a98 Close all attached after start 2014-06-23 01:17:06 +02:00
Ruud
94064ac7da Rework restart methods 2014-06-23 01:09:32 +02:00
Ruud
1c5f19a68a Better reload hook name 2014-06-22 23:41:31 +02:00
Ruud
a26abd0dbb Don't use nonblock requests results if empty 2014-06-22 23:39:43 +02:00
Ruud
fb9080c18a Except value error 2014-06-22 23:38:51 +02:00
Ruud
15980471b0 Create api lock on the fly 2014-06-22 22:41:56 +02:00
Ruud
b11bb9cdac Catch missing profile in restatus 2014-06-22 21:32:52 +02:00
Ruud
474cd45fc5 Reset profile
to default when old one is empty or doesn't exist anymore
2014-06-22 21:14:31 +02:00
Ruud
0b6843a1b9 Force readd not adding with proper profile 2014-06-22 20:58:56 +02:00
Ruud
fdcdf07fa6 Untag on delete from dashboard 2014-06-22 20:35:19 +02:00
Ruud
5617953d39 Mark as done missing. #3472 2014-06-22 20:23:47 +02:00
Ruud
964144996f Advanced not hidden. 2014-06-22 16:17:11 +02:00
Ruud
37214dd413 Put Pushover in config. close #3480 2014-06-22 16:15:36 +02:00
Ruud
5a08fed0b6 Manage release_id not assigned. fix #3479 2014-06-22 15:53:43 +02:00
Ruud
443866ef04 Use default title for search query. fix #3477 2014-06-21 18:50:36 +02:00
Ruud
96275adaff Use always search and ignore ETA. fix #3475 2014-06-21 18:44:09 +02:00
Ruud
33884deb6c Send single Pushbullet when no device is selected. fix #3471 2014-06-20 21:29:43 +02:00
Ruud
7db291fc93 Show all in wizard 2014-06-20 21:07:17 +02:00
Ruud
9df14bd55a Cleanup provider lists 2014-06-20 21:04:24 +02:00
Ruud
1e183625c9 Description update 2014-06-20 20:45:26 +02:00
Ruud
643be19711 Update descriptions 2014-06-20 20:45:17 +02:00
Ruud
21a1770f3f Nzb icons 2014-06-20 18:14:08 +02:00
Ruud
07063d855a Add icons to torrent providers 2014-06-20 17:35:15 +02:00
Ruud
cf95e417f1 Delete publichd 2014-06-20 17:11:11 +02:00
Ruud
3f92ed0ea0 Don't autodownload releases with no file size. fix #3467 2014-06-20 14:33:20 +02:00
Ruud
578b74f2c0 Fix PushBullet url. fix #3470 2014-06-20 14:14:10 +02:00
Ruud
8e17b9aea5 Remove BoxCar 2014-06-20 14:12:43 +02:00
Ruud
6f766aae8c Tag and untag dashboard media 2014-06-20 12:13:54 +02:00
Ruud
5797348bb3 Update tag index 2014-06-20 12:10:40 +02:00
Ruud
57ca5067ff Insert themoviedb original_title in by default. 2014-06-19 16:48:30 +02:00
Ruud
e8ff8a41de Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-06-19 14:15:53 +02:00
Ruud
0b5dfe826a Fix BitSoup parsing. close #3465 2014-06-19 14:15:42 +02:00
Ruud
67fbcc8238 Tag filter index 2014-06-17 23:32:27 +02:00
Ruud
dd61c7dc21 Compact DB every 7 days if needed 2014-06-17 16:27:22 +02:00
Ruud
3786b5435f Only remove movie title from 3d words check 2014-06-17 15:36:18 +02:00
Ruud
1857e047b0 Remove moviename words when scanning for 3d tags. fix #3395 2014-06-17 15:22:20 +02:00
Ruud
648ac7793f Add multiple 3d tags to clean regex 2014-06-17 15:21:30 +02:00
Ruud
664ce6421f Try only parse filename for release name 2014-06-17 15:21:08 +02:00
Ruud
cfb77a1076 Don't use extension to test for quality tags. fix #3457 2014-06-17 14:22:42 +02:00
Ruud
f65ddbbb9e Encode environments args in html 2014-06-16 22:39:02 +02:00
Ruud
76126271fc Don't add default profile if status is done 2014-06-16 22:06:39 +02:00
Ruud
3faece0b4c Don't log already deleted releases. 2014-06-16 21:21:55 +02:00
Ruud
530d3cd91e Update rentals URL 2014-06-15 22:37:18 +02:00
Ruud
e659aba176 Clean .pyc files before starting 2014-06-15 22:22:34 +02:00
Ruud
a196a499ae Only cache qualities if list length is correct 2014-06-15 22:13:01 +02:00
Ruud
58bd9cd7a1 Unable to hide & reorder profiles. fix #3437 2014-06-15 14:59:02 +02:00
Ruud
9dd9f850c6 Treat seeding as "done" 2014-06-14 18:59:52 +02:00
Ruud
cbecb74307 Show ETA on soon list. fix #2702 2014-06-14 18:57:27 +02:00
Ruud
8ae1e58614 Don't call parent init for synoindex 2014-06-11 23:34:17 +02:00
Ruud
83e8ae392d Don't create a new "done" release on rename. fix #3250 2014-06-11 23:17:59 +02:00
Ruud
c0297f10cb Force download on "best release" selection 2014-06-11 22:24:11 +02:00
Ruud
41052ae508 Use same before ETA message 2014-06-11 22:14:13 +02:00
Ruud
2d243d51e4 Ignore ETA on manual refresh 2014-06-11 22:03:38 +02:00
Ruud
fdec80f676 Set last_force_eta time 2014-06-11 21:21:32 +02:00
Ruud
5d3b0deb4d Simpler progress update 2014-06-11 21:10:20 +02:00
Ruud
f68c356944 Update title index 2014-06-11 21:07:02 +02:00
Ruud
553f8d6ccd Ignore ETA every 7 days on search 2014-06-11 17:05:08 +02:00
Ruud
60fb3e33ae Sony PS3 metadata 2014-06-11 15:22:07 +02:00
Ruud
9b7c1db509 Sony PS3 metadata 2014-06-11 15:20:21 +02:00
Ruud
963ce356fb MediaBrowser metadata 2014-06-11 15:19:25 +02:00
Ruud
dcd0364ecc Re-use tiny scroller for webkit 2014-06-11 14:59:06 +02:00
Ruud
a2da428777 Chart css cleanup
Tiny webkit scroll
2014-06-11 14:40:22 +02:00
Ruud
876c602710 Code cleanup 2014-06-11 12:29:31 +02:00
Ruud
79cb716ced Update Mootools 2014-06-11 10:34:52 +02:00
Ruud
ba9c975335 Allow empty quality 2014-06-11 10:11:36 +02:00
Ruud
ef407bcb3c Don't clear pyc when develop 2014-06-11 09:53:52 +02:00
Ruud
2898a066fe Prevent threading from GC before proper close. fix #3420 2014-06-11 09:49:30 +02:00
Ruud
7950c4bdb4 Update fedora service init 2014-06-11 09:34:06 +02:00
Ruud
2499012d88 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-06-11 09:31:43 +02:00
Ruud
7788669de1 Fill in profiles & qualities when they are empty. fix #3396 2014-06-11 09:31:29 +02:00
Ruud
d7f6fad3dd Unicode filenames before saving release. fix #3383 2014-06-10 22:51:15 +02:00
Ruud
699c562d34 Return default resolution if nothing found 2014-06-10 22:47:23 +02:00
Ruud
36d8225389 Trigger search change on paste. fix #3416 2014-06-10 21:02:48 +02:00
Ruud
17ba9ee96b Allow full library refresh interval. fix #2807 2014-06-10 13:54:31 +02:00
Ruud
2769fc28d3 Catch RecordNotFound error. fix #3373 2014-06-10 13:40:51 +02:00
Ruud
f5f3cfba50 More general logs 2014-06-10 11:14:19 +02:00
Ruud
1b1c77d225 Use magnetprovider for yify #3406 2014-06-10 11:13:18 +02:00
Ruud
cfc49e286b Allowed datadir giving false positive. fix #3399 2014-06-08 11:57:53 +02:00
Ruud
a2b3677c59 Settings.save doc update. closes #3391 2014-06-07 08:48:11 +02:00
Ruud
e5cfafdb00 Update Tornado 3.2.2 2014-06-06 22:25:16 +02:00
Ruud
bff05925e8 Only allow 3d tag as single word, not partial. fix #3368 2014-06-06 21:37:16 +02:00
Ruud
05f4b2b8ce Allow full scan and quick scan separately 2014-06-06 21:36:35 +02:00
Ruud
2eac294643 Allow already deleted releases 2014-06-06 20:42:01 +02:00
Ruud
f6789f79ea Import cleanup 2014-06-06 20:14:57 +02:00
Ruud
0b5976bdb1 Catch HTTPError properly in trailer search. fix #3388 2014-06-06 18:51:51 +02:00
Ruud
7d2b2b9809 Metadata fixes 2014-06-06 18:09:17 +02:00
Ruud
cce92dc1f8 Don't test for redirect. fix #3381 2014-06-06 18:04:13 +02:00
Ruud
fa7e59e842 Don't save profile order twice 2014-06-06 17:26:54 +02:00
Ruud
8635f0ddb2 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-06-06 11:23:46 +02:00
Ruud
c90a423012 Disable SSL verification 2014-06-06 11:23:21 +02:00
Ruud
f0daee669b Add opener to env 2014-06-06 10:53:08 +02:00
Ruud Burger
d252b660f5 Merge pull request #3362 from clinton-hall/patch-1
Fix instructions for Ubuntu
2014-06-04 08:18:41 +02:00
Clinton Hall
e717a49c0c Fix instructions for Ubuntu 2014-06-04 13:29:20 +09:30
Ruud
426155e65c Add extra score if size is unique. fix #3344 2014-06-03 23:34:32 +02:00
Ruud
6b9b446e3d Quality guess keyerror. fix #3347 2014-06-03 22:53:30 +02:00
Ruud
ab2b2cfe6e Cleaner empty dir cleanup 2014-06-03 22:49:12 +02:00
Ruud
4b236c6ed6 Only cleanup source folders 2014-06-03 22:25:17 +02:00
Ruud
2396fadf04 Remove debug print 2014-06-03 20:25:57 +02:00
Ruud
a3bffb5867 Use searchOnTitle for TorrentLeech. fix #3351 2014-06-03 17:32:53 +02:00
Ruud
1b44fc40af Properly delete from late-list. fix #3350 2014-06-03 17:26:38 +02:00
Ruud
b894139ca1 Make full path for logs 2014-06-03 16:54:21 +02:00
Ruud
daa0662869 XMPP was importing itself 2014-06-03 16:54:08 +02:00
Ruud
81de9529c3 Force folder creation on startup 2014-06-03 16:52:44 +02:00
Ruud
6b06caf00d Api call release lock never triggered 2014-06-02 22:57:45 +02:00
Ruud
9370366112 Don't limit fanart calls 2014-06-02 22:57:27 +02:00
Ruud
32bcf6e615 Requests 2.3.1 2014-06-02 22:36:09 +02:00
Ruud
aa804471a7 Prioritize image info 2014-06-02 22:27:56 +02:00
Ruud
681d8b1ddc Simplify fanart provider 2014-06-02 22:23:26 +02:00
Ruud
c82b1f51e3 Get messages from last 7 days, not just unread. fix #3331 2014-06-02 14:44:09 +02:00
Ruud
6d048e0003 Don't try to parse faulty IMDB page 2014-06-02 14:35:06 +02:00
Ruud
0314910bbe Don't migrate empty library items 2014-06-02 14:11:26 +02:00
Ruud
3bd831782c Release lock inside thread 2014-06-02 14:02:55 +02:00
Ruud
40f01dca6f Use async request for all api calls 2014-06-02 13:31:18 +02:00
Ruud
8dead66b58 Migration fixes 2014-06-02 12:59:21 +02:00
Ruud
18807191c0 Don't reindex on startup 2014-06-01 17:36:34 +02:00
Ruud
9d9630a27a Sorted backup files 2014-06-01 16:46:21 +02:00
Ruud
8ac851555d Can't trigger same api call
Thread never closes
2014-06-01 16:14:53 +02:00
Ruud
27f331a1fc Don't verify ssl for downloaders 2014-06-01 14:30:45 +02:00
Ruud
e6b4d32506 IMDB Watchlist count was off 2014-06-01 11:37:57 +02:00
Ruud
a28ee58a1f Remove digestauth header 2014-06-01 00:26:37 +02:00
Ruud
47749c2d73 Transmission login failed. #1110 2014-06-01 00:23:46 +02:00
Ruud
d6d0ff724a Change label 2014-06-01 00:11:09 +02:00
Ruud
ba65700aad Use textarea value for log posting 2014-05-31 23:46:34 +02:00
Ruud
84a7cfe07d Add CP version by default in logs 2014-05-31 22:09:18 +02:00
Ruud
9ccd4a5e84 Shutdown logging 2014-05-31 21:56:37 +02:00
Ruud
616434a00f Delay release cleanup 2014-05-31 21:31:06 +02:00
Ruud
4cf62f73da Use proper conf variable 2014-05-31 19:51:58 +02:00
Ruud
0145aecab4 data['size'] sometimes doesn't exist 2014-05-31 13:53:45 +02:00
Ruud
6c4184d1f5 Use minimal requirements for popular movie automation 2014-05-31 13:37:30 +02:00
Ruud
9d011b42a9 Moved PopularMovies automation to single file 2014-05-31 13:30:40 +02:00
Ruud
bf81b5cacc Move automation provider 2014-05-31 13:24:23 +02:00
Ruud
8d2b6e4097 Merge branch 'refs/heads/sjlu-develop' into develop 2014-05-31 13:22:18 +02:00
Ruud
50d8399f09 Merge branch 'develop' of git://github.com/sjlu/CouchPotatoServer into sjlu-develop 2014-05-31 13:21:58 +02:00
Ruud
bc99b77dbe Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-05-31 13:18:48 +02:00
Ruud
1c7edc9487 TPB and Kickass proxy update 2014-05-31 13:18:11 +02:00
Ruud Burger
90c06fb3c9 Merge pull request #3290 from kackar/patch-4
Update contributing.md
2014-05-31 13:14:17 +02:00
Ruud Burger
10a04c16ba Merge pull request #3289 from kackar/patch-3
Update README.md
2014-05-31 13:13:29 +02:00
Ruud
90a618bd7e Allow search on imdb urls 2014-05-31 12:33:35 +02:00
Ruud
b630b84ab0 Get proper poster image from tmdb 2014-05-31 12:09:55 +02:00
Ruud
a5ee362fc0 Remove scandir lib, use os.walk 2014-05-31 11:53:24 +02:00
Ruud
7c0870b6b8 Capitalize 2014-05-29 20:44:15 +02:00
Ruud
a42264b280 Tornado 3.2.1 2014-05-29 20:09:16 +02:00
Ruud
e714604ec0 Requests 2.3.0 2014-05-29 20:01:22 +02:00
Ruud
c094120f04 Do verify requests 2014-05-29 19:50:36 +02:00
Ruud
6691c8ddd7 Convert underscore method to proper camelcase 2014-05-29 19:43:33 +02:00
kackar
013705c318 Update contributing.md 2014-05-25 21:40:49 +02:00
kackar
bda6f92a4d Update README.md 2014-05-25 21:34:59 +02:00
Ruud
7ceb8dc79c ILoveTorrents search fix 2014-05-19 21:49:08 +02:00
Ruud
7f48210c97 Use libs as import 2014-05-19 21:16:58 +02:00
Ruud
23c440cd58 Merge branch 'refs/heads/Boehemyth-develop' into develop 2014-05-19 21:07:56 +02:00
Ruud
0097167dec Fanart PR cleanup 2014-05-19 21:07:51 +02:00
Ruud
21e5f156bb Merge branch 'develop' of git://github.com/Boehemyth/CouchPotatoServer into Boehemyth-develop 2014-05-19 19:41:31 +02:00
Ruud
08f55314d5 Re-use imdb page parser 2014-05-19 19:25:16 +02:00
Ruud
577bf09859 Add fallback imdb filter.
BeautifulSoup fails to load elements based on ID for the html imdb returns...
Hacky way of filtering out the correct elements.
2014-05-19 13:35:44 +02:00
Ruud Burger
c446cd2fb0 Merge pull request #3252 from mano3m/develop_notif
Add 3D to download notification. fixes #3242
2014-05-19 10:28:43 +02:00
Ruud
d80fe99609 Load subpages 2014-05-13 22:37:00 +02:00
mano3m
06a8414f12 Add 3D to download notification. fixes #3242 2014-05-13 20:23:43 +02:00
Ruud
43b6e3ac07 Use proper extend 2014-05-11 20:46:00 +02:00
Ruud
58acd53a9a Merge branch 'refs/heads/develop' into redesign
Conflicts:
	couchpotato/static/scripts/couchpotato.js
2014-05-11 20:25:15 +02:00
Ruud
1ac01456a9 Don't show snatched movies in late section. fix #3013 2014-05-11 20:18:50 +02:00
Ruud
b86853f06f More path encoding 2014-05-11 19:36:22 +02:00
Ruud
311a2798dd Revert "Encode before path join"
This reverts commit b87c00c041.
2014-05-11 18:58:14 +02:00
Ruud
fe9998fb9d Revert "Don't re-encode by filesystem encoding"
This reverts commit d5e19db5e6.
2014-05-11 18:53:13 +02:00
Ruud
ce648c5d35 Post mass edit commands 2014-05-11 18:35:38 +02:00
Ruud
5a2a9bbf9a Loop through release files properly 2014-05-11 17:40:55 +02:00
Ruud
0f8ab05fd4 Clean up managed movies 2014-05-11 17:40:43 +02:00
Ruud
b87c00c041 Encode before path join 2014-05-11 17:18:27 +02:00
Ruud
8999f51dc9 Make quality guess debug message 2014-05-11 16:07:23 +02:00
Ruud
d5e19db5e6 Don't re-encode by filesystem encoding 2014-05-11 16:03:27 +02:00
Ruud
675bee83ca Path encode 2014-05-11 16:02:49 +02:00
Ruud
33e5dd1fdb Speed up log highlight
Allow reverse selection
2014-05-11 00:52:55 +02:00
Ruud Burger
4ff2794c83 Merge pull request #3234 from mikke89/fix-providers
Fix searches on torrentday and sceneaccess
2014-05-11 00:30:13 +02:00
Ruud
81f9302da1 Use super threaded db connection 2014-05-11 00:19:05 +02:00
Ruud
93f4b8b537 Don't log non existing properties 2014-05-11 00:14:56 +02:00
mikke89
0587d2f8db Fix searches on torrentday and sceneaccess 2014-05-10 22:26:41 +02:00
Ruud
6ba25b5468 Better highlight 2014-05-10 15:58:29 +02:00
Ruud
cc10969506 Keep log filter
Pre-fill in issue when possible
2014-05-10 15:53:27 +02:00
Ruud
c2eb50a7ee Log reporting 2014-05-10 13:23:13 +02:00
Dan Boehm
33d24068fd Merge remote-tracking branch 'upstream/develop' into develop 2014-05-09 13:04:36 -05:00
Ruud
3a4c191b11 Make logs filterable 2014-05-09 16:53:25 +02:00
Ruud
e06b4ccb3f Ignore "wait for" for all if 1 is old enough 2014-05-09 15:53:37 +02:00
Ruud
3c6b86ea28 Delay first search 2014-05-09 15:53:07 +02:00
Ruud
c4a9a13d6c Don't continue searching lower qualities if correct one is found 2014-05-09 14:30:06 +02:00
Ruud
c0f1a3c603 Show chart scrollbar only on hover 2014-05-09 12:14:29 +02:00
Ruud
9d3425061a Resize thumbnail-less soon movies 2014-05-09 12:04:07 +02:00
Ruud
c2dcd2f67d Use the "wait for" option properly. fix #3224 2014-05-09 11:46:32 +02:00
Ruud
24b822aecd Info2 log 2014-05-09 11:44:58 +02:00
Ruud
a7d3de766f Don't migrate release if quality doesn't exist 2014-05-09 00:58:52 +02:00
Ruud
b56c897e4b Don't give negative score for non matching size 2014-05-08 23:45:35 +02:00
Ruud
df14032107 Add offset for log partial 2014-05-08 16:59:41 +02:00
Ruud
66b4821f7f Profile references before assigned. fix #3220 2014-05-08 16:40:27 +02:00
Ruud
d301cde266 Newznab custom tag wasn't used. fix #3219 2014-05-08 16:37:36 +02:00
Ruud
0590a0d722 Update log api 2014-05-08 16:30:58 +02:00
Ruud
fc71a03a12 Just loop over log array 2014-05-08 16:27:13 +02:00
Ruud
923c794e39 Logs return list 2014-05-08 16:16:42 +02:00
Ruud
e7fbff5b3f Only remove non-existing releases only once 2014-05-08 15:40:24 +02:00
Ruud
1bd556fbb3 Close DB on shutdown 2014-05-08 15:39:40 +02:00
Ruud
18a870f8c3 Log if no quality is found 2014-05-08 14:57:26 +02:00
Ruud
3e2a2c3bee Remove unused variable 2014-05-08 14:52:03 +02:00
Ruud
73e74881a6 Always return handler 2014-05-08 14:51:53 +02:00
Ruud
b37112600e Only cache ignored proxies for 1 day 2014-05-08 14:51:42 +02:00
Ruud
6172ce4960 Use contains other quality in log 2014-05-08 14:51:26 +02:00
Ruud
3d277e1c01 Quality scoring and tests 2014-05-08 14:51:12 +02:00
Ruud
b3b13899f1 Return found qualities 2014-05-08 14:50:59 +02:00
Ruud
7c4a59539a Return brrip in Yify provider 2014-05-08 14:50:04 +02:00
Ruud
e6dfb3da16 Allow last year's films to be searched after April 2014-05-07 00:47:50 +02:00
Ruud
8e220ededa Bitsoup: Allow param in search url 2014-05-07 00:25:18 +02:00
Ruud
11126f8083 forceDefaults priority 2014-05-07 00:23:08 +02:00
Ruud
ac8a13db22 Remove orphaned releases 2014-05-06 23:49:34 +02:00
Ruud
5ab10ff97a Change dognzb default url 2014-05-06 22:24:58 +02:00
Ruud
f3b0346ba2 Use encoding as backup 2014-05-06 21:38:08 +02:00
Ruud
96c94f97f4 Filter out tvshows in charts 2014-05-06 20:35:00 +02:00
Ruud
192c0200e5 Disable top 250 chart 2014-05-06 20:06:06 +02:00
Ruud
03ae8f459c Merge branch 'refs/heads/mano3m-develop_higherq' into develop 2014-05-06 16:08:47 +02:00
Ruud
377fdd9e5e Use correct event 2014-05-06 16:08:36 +02:00
Ruud
daec7d20fe Merge branch 'develop_higherq' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_higherq 2014-05-06 16:00:16 +02:00
Ruud
66a149590b Chart fixes 2014-05-06 15:51:09 +02:00
Ruud
1b6f010df2 Merge branch 'refs/heads/mano3m-develop_rentals' into develop 2014-05-06 15:39:44 +02:00
Ruud
7e4bc29b59 Chart cleanup 2014-05-06 15:39:41 +02:00
Ruud
0284fa9b0a Load correct beautifulsoup module 2014-05-06 14:31:24 +02:00
Ruud
e5bcea59b5 Merge branch 'develop_rentals' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_rentals 2014-05-06 14:14:41 +02:00
Ruud
16f603ced2 Cast float 2014-05-05 22:42:14 +02:00
Ruud
bdcb3b7e33 Merge branch 'refs/heads/mano3m-develop_3D_stuff' into develop 2014-05-05 22:36:45 +02:00
Ruud
0def6fcfe3 Cleanup PR 2014-05-05 22:36:41 +02:00
Ruud
75a352fef3 Merge branch 'develop_3D_stuff' of git://github.com/mano3m/CouchPotatoServer into mano3m-develop_3D_stuff 2014-05-05 22:13:36 +02:00
Ruud
07eb1f7f4c Allow page ordering 2014-05-05 22:01:11 +02:00
Ruud
8e35c02763 Cleanup searchOnTitle queries 2014-05-05 21:18:30 +02:00
Ruud
c1f6d9a858 Merge branch 'refs/heads/mikke89-dev-torrentsearch' into develop 2014-05-05 20:45:04 +02:00
Ruud
3e20a3bac7 Merge branch 'dev-torrentsearch' of git://github.com/mikke89/CouchPotatoServer into mikke89-dev-torrentsearch 2014-05-05 20:44:34 +02:00
Ruud
818570fd2d Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2014-05-05 20:42:22 +02:00
Ruud
05a97a19ab Mixins 2014-05-05 20:41:29 +02:00
Ruud
db23f5cdef Update 2014-05-05 20:40:55 +02:00
Ruud
bcd2d22fbf Update bitsoup searchOnTitle 2014-05-05 20:38:47 +02:00
mikke89
ffc99cd4f4 Fix false positives from Sceneaccess and Torrentday 2014-05-04 23:23:45 +02:00
Ruud Burger
bb56750c1a Merge pull request #3143 from mano3m/develop_charts
Improve charts
2014-05-04 14:50:15 +02:00
Ruud Burger
b08d587a22 Merge pull request #3166 from harrv/develop
Added an mpaa_only rename replacement token
2014-05-04 13:58:37 +02:00
Ruud Burger
47f4132b39 Merge pull request #3188 from rfgamaral/duplicate_subtitle_identifier
Avoid duplicate subtitle language identifier
2014-05-04 13:48:06 +02:00
Ruud Burger
faefab5554 Merge pull request #3189 from rfgamaral/force_download_subtitles
Force download all subtitle languages
2014-05-04 13:44:39 +02:00
Ruud Burger
243a033055 Merge pull request #3192 from mano3m/develop_yifi
Fix Yifi
2014-05-04 13:42:52 +02:00
Ruud Burger
db1eeaae38 Merge pull request #3193 from mano3m/develop_api
Add Blu-ray to Bit HDTV provider
2014-05-04 13:41:17 +02:00
mano3m
8c2960e891 Add Blu-ray to Bit HDTV provider
Fixes #3171
2014-05-04 11:14:54 +02:00
mano3m
d6a86e8616 correct caps 2014-05-04 10:11:25 +02:00
mano3m
5260f42378 Fix Yifi
this should fix #3114
2014-05-04 10:03:30 +02:00
Ricardo Amaral
84f28f3c54 Add advanced option to force download all languages 2014-05-03 17:50:12 +01:00
Ricardo Amaral
860b6793fb Prevent duplicate subtitle language identifier 2014-05-03 17:44:03 +01:00
harrv
df03409d7a Added an mpaa_only rename replacement token
The mpaa replacement token includes certifications from around the world. If the user wishes to limit the values to one of 'G', 'PG', 'PG-13', 'R', 'NC-17' or 'Not Rated' they can use the added mpaa_only replacement token. The original mpaa replacement token remains unchanged.
2014-04-27 00:47:38 -06:00
Dan Boehm
6a81f2241d Added option to run the Artwork Downloader addon during XBMC notify.
This option will only work in XBMCv12 (Frodo) or later.  It also requires the Artwork Downloader
Addon.

Since XBMC's API doesn't support notifications over HTML, there is no way for couchpotato to know
when the Library Scan is complete.  Since running the Artwork Downloader before the movie has
been scanned won't solve anything, a delay timer can be adjusted to suit the user's needs.

Squashed commit of the following:

commit bd60ed585f77cc40c31fd67d4ae732e0845d31ab
Merge: fcb092e b113a4d
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Thu Apr 24 14:26:24 2014 -0500

    Merge branch 'fanarttv' into artdlnotify

commit b113a4def197a9ca8545bde9f5081c0591b93b36
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Thu Apr 24 14:24:12 2014 -0500

    Bug-fix and code cleanup.

    Fixed a bug where the movie.info event would crash if there aren't any pictures to scrape in
    fanart.tv.

commit fcb092e776e00ceabea016b3c26d9394e32d72b0
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Thu Apr 24 14:21:27 2014 -0500

    Option to run the artwork downloader addon during XBMC notify.

commit adf7a4675d472e9e95a316c6cccc681a52804f13
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 16:15:03 2014 -0500

    Added support for extrafanart.
    Also, the main fanart will be taken from fanart.tv unless one
    does not exist.

commit 1791e46c8602f40bb56fe0cf7ecb0607f35b4b12
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 15:13:14 2014 -0500

    Couchpotato now downloads extrathumbs from the extra tmdb backdrops if they exist.

    This commit made some major changes to the core image creation functionality that
    makes writing multiple images to folders possible.

commit c0858807873749dbc928c0260037138f51f894ca
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 12:18:53 2014 -0500

    Bug Fix & Implemented functionality to select bluray or dvd disc images.

    Currently, only blurays will be selected, unless there are no blurays.
    However, if a mechanism for determining the quality of the release is
    implemented, it would be simple to make this selection based on the
    quality.

commit 786751371d243f53d0f5c6f2c38d92288d8608ba
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 10:59:25 2014 -0500

    Fixed a bug where non-HD clearart and logos couldn't be downloaded.

commit feda8df483d13b5a5df3a869f25de8f2c7e6ffe3
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 10:12:31 2014 -0500

    Fixed some problems that were missed with the previous merge.

commit 5ddab6c40e69a5accc6c0336cd7485920ff82d8f
Merge: 7273abf ff46aa0
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 10:02:11 2014 -0500

    Merge branch 'develop' into fanarttv

    Conflicts:
    	couchpotato/core/media/movie/providers/info/themoviedb.py
    	couchpotato/core/providers/metadata/xbmc/__init__.py

commit 7273abf827735cf245711c3d3199a6a173a964aa
Author: dan <dan@DBoehm-Arch.danboehm>
Date:   Thu Feb 27 13:29:57 2014 -0600

    Downloads extra artwork from fanart.tv

    Downloads occur with correct filenaming when XBMC metadata is generated,
    but the image URLs are selected when the movie.info event is called.

commit 9080d9d749c7e1ddbdc78f7b37a3c5f83195d580
Author: dan <dan@DBoehm-Arch.danboehm>
Date:   Wed Feb 26 16:31:37 2014 -0600

    Added basic functionality for fanarttv provider.

    This should be mostly done and is based on the tvdb provider.

commit 1b39b246c2a9d65f9ef86c4e150a12d893e362c0
Author: dan <dan@DBoehm-Arch.danboehm>
Date:   Wed Feb 26 14:50:17 2014 -0600

    Updated fanarttv library with the correct package hierarchy
    (libs.fanarttv).

commit 8abb7c8f8ad3347900debb9f6a6d5a7acb7df396
Author: dan <dan@DBoehm-Arch.danboehm>
Date:   Wed Feb 26 13:12:48 2014 -0600

    Added fanart.tv API python library (lib.fanarttv).

    The upstream for this library is at
    https://github.com/z4r/python-fanart.
2014-04-24 15:02:29 -05:00
Dan Boehm
5ce817cee6 Support for downloading extra artwork from Fanart.tv (resolves #1023).
New image types include:
* clearart
* discart
* extrathumbs
* extrafanart
* logo
* banner
* landscape (16:9 Thumb)

There are a couple things that should be noted:
1. Only English images will be downloaded.
2. The fanart image is now downloaded from Fanart.tv if it can find one, otherwise it uses TMDB
like it used to.  This is because the images on Fanart.tv tend to be higher resolutions &
quality.
3. Since multiple extrathumbs and extrafanarts are downloaded into a subdirectory, subdirectories
are now supported for metadata file names.  The subdirectories will be automatically created if
they don't exist.
4. Bluray discart will always be preferred over DVD.  Ideally, it would prefer DVD versions for
SD quality movies, but I couldn't find an easy way to determine the quality from within the
plugin.  I suspect major changes would be needed to the plugin system in general in order to get
this to work.  If a user cares about the distinction, the best work-around is to not download
these in Couchpotato and run the Artwork Downloader addon from within XBMC.
5. A maximum of 4 extrathumbs and 20 extrafanarts will be downloaded.

Squashed commit of the following:

commit b113a4def197a9ca8545bde9f5081c0591b93b36
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Thu Apr 24 14:24:12 2014 -0500

    Bug-fix and code cleanup.

    Fixed a bug where the movie.info event would crash if there aren't any pictures to scrape in
    fanart.tv.

commit adf7a4675d472e9e95a316c6cccc681a52804f13
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 16:15:03 2014 -0500

    Added support for extrafanart.
    Also, the main fanart will be taken from fanart.tv unless one
    does not exist.

commit 1791e46c8602f40bb56fe0cf7ecb0607f35b4b12
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 15:13:14 2014 -0500

    Couchpotato now downloads extrathumbs from the extra tmdb backdrops if they exist.

    This commit made some major changes to the core image creation functionality that
    makes writing multiple images to folders possible.

commit c0858807873749dbc928c0260037138f51f894ca
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 12:18:53 2014 -0500

    Bug Fix & Implemented functionality to select bluray or dvd disc images.

    Currently, only blurays will be selected, unless there are no blurays.
    However, if a mechanism for determining the quality of the release is
    implemented, it would be simple to make this selection based on the
    quality.

commit 786751371d243f53d0f5c6f2c38d92288d8608ba
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 10:59:25 2014 -0500

    Fixed a bug where non-HD clearart and logos couldn't be downloaded.

commit feda8df483d13b5a5df3a869f25de8f2c7e6ffe3
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 10:12:31 2014 -0500

    Fixed some problems that were missed with the previous merge.

commit 5ddab6c40e69a5accc6c0336cd7485920ff82d8f
Merge: 7273abf ff46aa0
Author: Dan Boehm <dboehm.dev@gmail.com>
Date:   Wed Apr 23 10:02:11 2014 -0500

    Merge branch 'develop' into fanarttv

    Conflicts:
    	couchpotato/core/media/movie/providers/info/themoviedb.py
    	couchpotato/core/providers/metadata/xbmc/__init__.py

commit 7273abf827735cf245711c3d3199a6a173a964aa
Author: dan <dan@DBoehm-Arch.danboehm>
Date:   Thu Feb 27 13:29:57 2014 -0600

    Downloads extra artwork from fanart.tv

    Downloads occur with correct filenaming when XBMC metadata is generated,
    but the image URLs are selected when the movie.info event is called.

commit 9080d9d749c7e1ddbdc78f7b37a3c5f83195d580
Author: dan <dan@DBoehm-Arch.danboehm>
Date:   Wed Feb 26 16:31:37 2014 -0600

    Added basic functionality for fanarttv provider.

    This should be mostly done and is based on the tvdb provider.

commit 1b39b246c2a9d65f9ef86c4e150a12d893e362c0
Author: dan <dan@DBoehm-Arch.danboehm>
Date:   Wed Feb 26 14:50:17 2014 -0600

    Updated fanarttv library with the correct package hierarchy
    (libs.fanarttv).

commit 8abb7c8f8ad3347900debb9f6a6d5a7acb7df396
Author: dan <dan@DBoehm-Arch.danboehm>
Date:   Wed Feb 26 13:12:48 2014 -0600

    Added fanart.tv API python library (lib.fanarttv).

    The upstream for this library is at
    https://github.com/z4r/python-fanart.
2014-04-24 15:00:04 -05:00
mano3m
7cdf124f9d Improve charts
- Add a max height to each chart with a scrollbar
- Add advanced options to hide chart items already in wanted or library
(note that this can be done more efficiently...)
2014-04-21 16:18:51 +02:00
Ruud Burger
ff46aa0226 Merge pull request #3138 from mano3m/develop_kat
Add verified only option for kat
2014-04-21 15:21:37 +02:00
mano3m
669e331f6c Ruud's comments 2014-04-21 12:15:05 +02:00
mano3m
4179ba642b Various Fixes 2014-04-21 00:26:23 +02:00
mano3m
00954d98f7 Improve scanner
- Fix the disc and tag removal: they received the filename with capse
- add default metadata when resolution is known
2014-04-21 00:26:21 +02:00
mano3m
037e77860b Add 3D type to renamer (e.g. SBS, Half OU, etc) 2014-04-21 00:26:20 +02:00
mano3m
47e187449d Add use of size to scanner
And check if snatched quality is the same as what we detected
2014-04-21 00:26:19 +02:00
mano3m
06e9afbe69 Improve quality self test 2014-04-21 00:26:18 +02:00
mano3m
bfe8aa5f5f Add size to quality guessing
And cleanup searcher
2014-04-21 00:26:17 +02:00
mano3m
e51ddd7a50 BR-Disk detection fixes 2014-04-21 00:26:16 +02:00
mano3m
442552c024 fix debug msg 2014-04-21 00:26:16 +02:00
mano3m
ce4806df64 Add 3D renamer option 2014-04-21 00:26:15 +02:00
mano3m
0c2e65c92b Check for better quality
Actually check the quality profile order and determine:
- if the searcher needs to search for a certain quality
- if the renamer needs to rename a certain qualoty release

Fixes #3122
2014-04-21 00:25:27 +02:00
mano3m
b01aa2b385 Add verified only option for kat
Fixes ##3137
2014-04-20 22:54:43 +02:00
Ruud Burger
2e04890756 Merge pull request #3087 from softcat/develop
Added filmstarts.de userscript
2014-04-20 10:20:59 +02:00
Ruud Burger
1657857b4a Merge pull request #3131 from mano3m/develop_prefix
Add 'A' and 'An' to 'The' prefix
2014-04-20 10:20:25 +02:00
mano3m
72383592ba Clean-up 2014-04-20 10:18:44 +02:00
Ruud Burger
d093f935f9 Merge pull request #3130 from mano3m/develop_binsearch
Simplify binsearch result string
2014-04-20 10:09:02 +02:00
Ruud Burger
8cc7d101aa Merge pull request #3119 from mano3m/develop_tagging
Only tag existing files
2014-04-20 10:07:43 +02:00
Ruud Burger
f39eebbd22 Merge pull request #3118 from mano3m/develop_spotweb
Add password searching in spots from spotweb
2014-04-20 10:07:16 +02:00
Ruud Burger
3ac8bc738a Merge pull request #3105 from mano3m/develop_standardize_renamer
Use more standardized codec/source names
2014-04-20 10:06:46 +02:00
mano3m
0eac041a26 Add 'A' and 'An' to 'The' prefix
This was bothering me for a long time now ;) We do put The at the end
but not A nor An. Fixed now :)
2014-04-19 22:20:17 +02:00
mano3m
ab0f5daaf3 Simplify binsearch result string
fixes #3099
2014-04-19 21:17:44 +02:00
mano3m
b59a0f82ab Add IMDB rentals list to charts
This should add the IMDB rentals list to the charts and imdb automation.
This is actually a nice list as you can download the movies right away
instead of waiting until they release like with the rest of the imdb
charts.

The problem is that this does not work. And frankly I gave up. When I
type this in my python command window it works:

'''
from bs4 import BeautifulSoup
import urllib2

data = urllib2.urlopen('http://www.imdb.com/boxoffice/rentals')
html = BeautifulSoup(data)
result_div = html.find('div', attrs = {'id': 'main'})
'''

Then result_div contains the list of movies. In the code from this PR
result_div becomes None....?!?!?! @Ruudburger please help before I jump
off my building ;)
2014-04-19 21:13:32 +02:00
mano3m
9b75e6af5c Only tag existing files
Fixes #3088
2014-04-15 21:35:16 +02:00
mano3m
aa37f2b0ef Add password searching in spots from spotweb 2014-04-15 19:23:07 +02:00
Steven Lu
d22237a5cc Adding in a new source for automation. 2014-04-14 23:50:10 -04:00
Ruud Burger
26f5e8aa4b Merge pull request #3109 from mano3m/develop_provider
Provider fixes
2014-04-14 09:06:07 +02:00
Ruud Burger
9072c6cae0 Merge pull request #3112 from jonnsl/bluray_chart
Don't show duplicated results in the blu-ray releases chart.
2014-04-14 09:01:22 +02:00
Jonnathan
8739c1197f Don't show duplicated results in the blu-ray releases chart. 2014-04-14 03:56:59 -03:00
mano3m
a477973862 Provider fixes
Fixes #3097 #3086  #3106
2014-04-13 15:48:25 +02:00
mano3m
95ce26d261 Use more standardized codec/source names
Fixes #999
2014-04-12 18:27:59 +02:00
Ruud
85163443e3 Re-use original paths 2014-04-09 19:54:01 +02:00
Ruud
6ea49405f4 Make clientside ordered 2014-04-09 19:29:30 +02:00
Ruud
4776cef473 Reinit css 2014-04-09 16:08:10 +02:00
Ruud
e8fe9da602 Livereload css 2014-04-09 16:07:59 +02:00
Joel Kåberg
8c934c1ca8 Merge pull request #3080 from fuzeman/feature/dev_rtorrent
[rtorrent] fixed how torrent status is determined
2014-04-09 14:48:18 +02:00
softcat
349d7d4866 Added filmstarts.de userscript 2014-04-08 13:44:03 +02:00
Dean Gardiner
f1ea8fa693 [rtorrent] fixed how torrent status is determined 2014-04-06 22:24:27 +12:00
413 changed files with 24731 additions and 29554 deletions

2
.gitignore vendored
View File

@@ -3,3 +3,5 @@
/_source/
.project
.pydevproject
node_modules
.tmp

View File

@@ -10,7 +10,6 @@ import socket
import subprocess
import sys
import traceback
import time
# Root path
base_path = dirname(os.path.abspath(__file__))
@@ -19,7 +18,12 @@ base_path = dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(base_path, 'libs'))
from couchpotato.environment import Env
from couchpotato.core.helpers.variable import getDataDir
from couchpotato.core.helpers.variable import getDataDir, removePyc
# Remove pyc files before dynamic load (sees .pyc files regular .py modules)
removePyc(base_path)
class Loader(object):
@@ -29,7 +33,7 @@ class Loader(object):
# Get options via arg
from couchpotato.runner import getOptions
self.options = getOptions(base_path, sys.argv[1:])
self.options = getOptions(sys.argv[1:])
# Load settings
settings = Env.get('settings')
@@ -50,7 +54,7 @@ class Loader(object):
# Create logging dir
self.log_dir = os.path.join(self.data_dir, 'logs');
if not os.path.isdir(self.log_dir):
os.mkdir(self.log_dir)
os.makedirs(self.log_dir)
# Logging
from couchpotato.core.logger import CPLog
@@ -67,10 +71,11 @@ class Loader(object):
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
from couchpotato.core.event import addEvent
addEvent('app.after_shutdown', self.afterShutdown)
addEvent('app.do_shutdown', self.setRestart)
def afterShutdown(self, restart):
def setRestart(self, restart):
self.do_restart = restart
return True
def onExit(self, signal, frame):
from couchpotato.core.event import fireEvent
@@ -98,7 +103,6 @@ class Loader(object):
# Release log files and shutdown logger
logging.shutdown()
time.sleep(3)
args = [sys.executable] + [os.path.join(base_path, os.path.basename(__file__))] + sys.argv[1:]
subprocess.Popen(args)

121
Gruntfile.js Normal file
View File

@@ -0,0 +1,121 @@
'use strict';
module.exports = function(grunt){
require('time-grunt')(grunt);
// Configurable paths
var config = {
tmp: '.tmp',
base: 'couchpotato',
css_dest: 'couchpotato/static/style/combined.min.css'
};
grunt.initConfig({
// Project settings
config: config,
// Make sure code styles are up to par and there are no obvious mistakes
jshint: {
options: {
reporter: require('jshint-stylish'),
unused: false,
camelcase: false,
devel: true
},
all: [
'<%= config.base %>/{,**/}*.js',
'!<%= config.base %>/static/scripts/vendor/{,**/}*.js'
]
},
// Compiles Sass to CSS and generates necessary files if requested
sass: {
options: {
compass: true,
update: true
},
server: {
files: [{
expand: true,
cwd: '<%= config.base %>/',
src: ['**/*.scss'],
dest: '<%= config.tmp %>/styles/',
ext: '.css'
}]
}
},
// Add vendor prefixed styles
autoprefixer: {
options: {
browsers: ['> 1%', 'Android >= 2.1', 'Chrome >= 21', 'Explorer >= 7', 'Firefox >= 17', 'Opera >= 12.1', 'Safari >= 6.0']
},
dist: {
files: [{
expand: true,
cwd: '<%= config.tmp %>/styles/',
src: '{,**/}*.css',
dest: '<%= config.tmp %>/styles/'
}]
}
},
cssmin: {
dist: {
files: {
'<%= config.css_dest %>': ['<%= config.tmp %>/styles/**/*.css']
}
}
},
shell: {
runCouchPotato: {
command: 'python CouchPotato.py'
}
},
// COOL TASKS ==============================================================
watch: {
scss: {
files: ['<%= config.base %>/**/*.{scss,sass}'],
tasks: ['sass:server', 'autoprefixer', 'cssmin']
},
js: {
files: [
'<%= config.base %>/**/*.js'
],
tasks: ['jshint']
},
livereload: {
options: {
livereload: 35729
},
files: [
'<%= config.css_dest %>'
]
}
},
concurrent: {
options: {
logConcurrentOutput: true
},
tasks: ['shell:runCouchPotato', 'sass:server', 'autoprefixer', 'cssmin', 'watch']
}
});
grunt.loadNpmTasks('grunt-contrib-jshint');
//grunt.loadNpmTasks('grunt-contrib-uglify');
grunt.loadNpmTasks('grunt-contrib-sass');
grunt.loadNpmTasks('grunt-contrib-cssmin');
grunt.loadNpmTasks('grunt-contrib-watch');
grunt.loadNpmTasks('grunt-autoprefixer');
grunt.loadNpmTasks('grunt-concurrent');
grunt.loadNpmTasks('grunt-shell');
grunt.registerTask('default', ['concurrent']);
};

View File

@@ -17,9 +17,9 @@ Windows, see [the CP forum](http://couchpota.to/forum/showthread.php?tid=14) for
* Open up `Git Bash` (or CMD) and go to the folder you want to install CP. Something like Program Files.
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`.
* You can now start CP via `CouchPotatoServer\CouchPotato.py` to start
* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
OSx:
OS X:
* If you're on Leopard (10.5) install Python 2.6+: [Python 2.6.5](http://www.python.org/download/releases/2.6.5/)
* Install [GIT](http://git-scm.com/)
@@ -27,20 +27,27 @@ OSx:
* Go to your App folder `cd /Applications`
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
* Then do `python CouchPotatoServer/CouchPotato.py`
* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
Linux (ubuntu / debian):
Linux:
* Install [GIT](http://git-scm.com/) with `apt-get install git-core`
* (Ubuntu / Debian) Install [GIT](http://git-scm.com/) with `apt-get install git-core`
* (Fedora / CentOS) Install [GIT](http://git-scm.com/) with `yum install git`
* 'cd' to the folder of your choosing.
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
* Then do `python CouchPotatoServer/CouchPotato.py` to start
* To run on boot copy the init script. `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
* Change the paths inside the init script. `sudo nano /etc/init.d/couchpotato`
* Make it executable. `sudo chmod +x /etc/init.d/couchpotato`
* Add it to defaults. `sudo update-rc.d couchpotato defaults`
* Open your browser and go to: `http://localhost:5050/`
* (Ubuntu / Debian) To run on boot copy the init script `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
* (Ubuntu / Debian) Copy the default paths file `sudo cp CouchPotatoServer/init/ubuntu.default /etc/default/couchpotato`
* (Ubuntu / Debian) Change the paths inside the default file `sudo nano /etc/default/couchpotato`
* (Ubuntu / Debian) Make it executable `sudo chmod +x /etc/init.d/couchpotato`
* (Ubuntu / Debian) Add it to defaults `sudo update-rc.d couchpotato defaults`
* (systemd) To run on boot copy the systemd config `sudo cp CouchPotatoServer/init/couchpotato.fedora.service /etc/systemd/system/couchpotato.service`
* (systemd) Update the systemd config file with your user and path to CouchPotato.py
* (systemd) Enable it at boot with `sudo systemctl enable couchpotato`
* Open your browser and go to `http://localhost:5050/`
Docker:
* You can use [razorgirl's Dockerfile](https://github.com/razorgirl/docker-couchpotato) to quickly build your own isolated app container. It's based on the Linux instructions above. For more info about Docker check out the [official website](https://www.docker.com).
FreeBSD :

45
config.rb Normal file
View File

@@ -0,0 +1,45 @@
# First, require any additional compass plugins installed on your system.
# require 'zen-grids'
require 'susy'
# require 'breakpoint'
# Toggle this between :development and :production when deploying the CSS to the
# live server. Development mode will retain comments and spacing from the
# original Sass source and adds line numbering comments for easier debugging.
environment = :development
# environment = :development
# In development, we can turn on the FireSass-compatible debug_info.
firesass = false
# firesass = true
# Location of the your project's resources.
# Set this to the root of your project. All resource locations above are
# considered to be relative to this path.
http_path = "/"
# To use relative paths to assets in your compiled CSS files, set this to true.
# relative_assets = true
##
## You probably don't need to edit anything below this.
##
sass_dir = "./"
css_dir = "./static/style_compiled"
# You can select your preferred output style here (can be overridden via the command line):
# output_style = :expanded or :nested or :compact or :compressed
output_style = (environment == :development) ? :expanded : :compressed
# To disable debugging comments that display the original location of your selectors. Uncomment:
# line_comments = false
# Pass options to sass. For development, we turn on the FireSass-compatible
# debug_info if the firesass config variable above is true.
sass_options = (environment == :development && firesass == true) ? {:debug_info => true} : {}

View File

@@ -13,6 +13,8 @@ Lastly, for anything related to CouchPotato, feel free to stop by the [forum](ht
## Issues
Issues are intended for reporting bugs and weird behaviour or suggesting improvements to CouchPotatoServer.
Before you submit an issue, please go through the following checklist:
* **FILL IN ALL THE FIELDS ASKED FOR**
* **POST MORE THAN A SINGLE LINE LOG**, if you do, you'd better have a easy reproducable bug
* Search through existing issues (*including closed issues!*) first: you might be able to get your answer there.
* Double check your issue manually, because it could be an external issue.
* Post logs with your issue: Without seeing what is going on, the developers can't reproduce the error.
@@ -22,15 +24,17 @@ Before you submit an issue, please go through the following checklist:
* What providers are you using? (While your logs include these, scanning through hundreds of lines of logs isn't our hobby)
* Post the logs from the *config* directory, please do not copy paste the UI. Use pastebin to store these logs!
* Give a short step by step of how to reproduce the error.
* What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed then when you use CP on OSX or Windows.
* What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed than when you use CP on OS X or Windows.
* Your issue might be marked with the "can't reproduce" tag. Don't ask why your issue was closed if it says so in the tag.
* If you're running on a NAS (QNAP, Austor etc..) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!!
The more relevant information you can provide, the more likely it is the issue will be resolved rather than closed.
* If you're running on a NAS (QNAP, Austor, Synology etc.) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!
* Do not "bump" issues with "Any updates on this" or whatever. Yes I've seen it, you don't have to remind me of it. There will be an update when the code is done or I need information. If you feel the need to do so, you'd better have more info on the issue.
The more relevant information you provide, the more likely that your issue will be resolved.
If you don't follow any of the checks above, I'll close the issue. If you are wondering why (and ask) I'll block you from posting new issues and the repo.
## Pull Requests
Pull requests are intended for contributing code or documentation to the project. Before you submit a pull request, consider the following:
* Make sure your pull request is made for the *develop* branch (or relevant feature branch).
* Have you tested your PR? If not, why?
* Does your PR have any limitations we should know of?
* Does your PR have any limitations I should know of?
* Is your PR up-to-date with the branch you're trying to push into?

View File

@@ -1,3 +1,7 @@
import os
import time
import traceback
from couchpotato.api import api_docs, api_docs_missing, api
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import md5, tryInt
@@ -5,9 +9,6 @@ from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from tornado import template
from tornado.web import RequestHandler, authenticated
import os
import time
import traceback
log = CPLog(__name__)
@@ -39,13 +40,15 @@ class WebHandler(BaseHandler):
return
try:
if route == 'robots.txt':
self.set_header('Content-Type', 'text/plain')
self.write(views[route]())
except:
log.error("Failed doing web request '%s': %s", (route, traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
def addView(route, func, static = False):
def addView(route, func):
views[route] = func
@@ -59,6 +62,13 @@ def index():
addView('', index)
# Web view
def robots():
return 'User-agent: * \n' \
'Disallow: /'
addView('robots.txt', robots)
# API docs
def apiDocs():
routes = list(api.keys())

View File

@@ -7,9 +7,8 @@ import urllib
from couchpotato.core.helpers.request import getParams
from couchpotato.core.logger import CPLog
from tornado.gen import coroutine
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, asynchronous
import tornado
log = CPLog(__name__)
@@ -28,10 +27,18 @@ def run_async(func):
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def run_handler(route, kwargs, callback = None):
try:
res = api[route](**kwargs)
callback(res, route)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
callback({'success': False, 'error': 'Failed returning results'}, route)
# NonBlock API handler
class NonBlockHandler(RequestHandler):
@@ -44,24 +51,22 @@ class NonBlockHandler(RequestHandler):
start, stop = api_nonblock[route]
self.stopper = stop
start(self.onNewMessage, last_id = self.get_argument('last_id', None))
start(self.sendData, last_id = self.get_argument('last_id', None))
def onNewMessage(self, response):
if self.request.connection.stream.closed():
self.on_connection_close()
return
def sendData(self, response):
if not self.request.connection.stream.closed():
try:
self.finish(response)
except:
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
try:
self.finish(response)
except:
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
def on_connection_close(self):
self.removeStopper()
def removeStopper(self):
if self.stopper:
self.stopper(self.onNewMessage)
self.stopper(self.sendData)
self.stopper = None
@@ -77,14 +82,20 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
# Blocking API handler
class ApiHandler(RequestHandler):
route = None
@coroutine
@asynchronous
def get(self, route, *args, **kwargs):
route = route.strip('/')
self.route = route = route.strip('/')
if not api.get(route):
self.write('API call doesn\'t seem to exist')
self.finish()
return
# Create lock if it doesn't exist
if route in api_locks and not api_locks.get(route):
api_locks[route] = threading.Lock()
api_locks[route].acquire()
try:
@@ -102,36 +113,49 @@ class ApiHandler(RequestHandler):
except: pass
# Add async callback handler
@run_async
def run_handler(callback):
try:
res = api[route](**kwargs)
callback(res)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
callback({'success': False, 'error': 'Failed returning results'})
result = yield tornado.gen.Task(run_handler)
# Check JSONP callback
jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.set_header("Content-Type", "text/javascript")
elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1])
else:
self.write(result)
run_handler(route, kwargs, callback = self.taskFinished)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
try:
self.write({'success': False, 'error': 'Failed returning results'})
self.finish()
except:
log.error('Failed write error "%s": %s', (route, traceback.format_exc()))
api_locks[route].release()
self.unlock()
post = get
def taskFinished(self, result, route):
IOLoop.current().add_callback(self.sendData, result, route)
self.unlock()
def sendData(self, result, route):
if not self.request.connection.stream.closed():
try:
# Check JSONP callback
jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback:
self.set_header('Content-Type', 'text/javascript')
self.finish(str(jsonp_callback) + '(' + json.dumps(result) + ')')
elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1])
else:
self.finish(result)
except UnicodeDecodeError:
log.error('Failed proper encode: %s', traceback.format_exc())
except:
log.debug('Failed doing request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
def unlock(self):
try: api_locks[self.route].release()
except: pass
def addApiView(route, func, static = False, docs = None, **kwargs):

View File

@@ -8,7 +8,7 @@ import webbrowser
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import cleanHost, md5
from couchpotato.core.helpers.variable import cleanHost, md5, isSubFolder
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
@@ -71,13 +71,14 @@ class Core(Plugin):
return value if value and len(value) > 3 else uuid4().hex
def checkDataDir(self):
if Env.get('app_dir') in Env.get('data_dir'):
if isSubFolder(Env.get('data_dir'), Env.get('app_dir')):
log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.')
return True
def cleanUpFolders(self):
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False)
only_clean = ['couchpotato', 'libs', 'init']
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False, only_clean = only_clean)
def available(self, **kwargs):
return {
@@ -90,7 +91,11 @@ class Core(Plugin):
def shutdown():
self.initShutdown()
IOLoop.current().add_callback(shutdown)
if IOLoop.current()._closing:
shutdown()
else:
IOLoop.current().add_callback(shutdown)
return 'shutdown'
@@ -113,7 +118,7 @@ class Core(Plugin):
self.shutdown_started = True
fireEvent('app.do_shutdown')
fireEvent('app.do_shutdown', restart = restart)
log.debug('Every plugin got shutdown event')
loop = True
@@ -138,8 +143,11 @@ class Core(Plugin):
log.debug('Safe to shutdown/restart')
loop = IOLoop.current()
try:
IOLoop.current().stop()
if not loop._closing:
loop.stop()
except RuntimeError:
pass
except:
@@ -173,13 +181,13 @@ class Core(Plugin):
return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key'))
def version(self):
ver = fireEvent('updater.info', single = True)
ver = fireEvent('updater.info', single = True) or {'version': {}}
if os.name == 'nt': platf = 'windows'
elif 'Darwin' in platform.platform(): platf = 'osx'
else: platf = 'linux'
return '%s - %s-%s - v2' % (platf, ver.get('version')['type'], ver.get('version')['hash'])
return '%s - %s-%s - v2' % (platf, ver.get('version').get('type') or 'unknown', ver.get('version').get('hash') or 'unknown')
def versionView(self, **kwargs):
return {
@@ -278,13 +286,13 @@ config = [{
'name': 'permission_folder',
'default': '0755',
'label': 'Folder CHMOD',
'description': 'Can be either decimal (493) or octal (leading zero: 0755)',
'description': 'Can be either decimal (493) or octal (leading zero: 0755). <a target="_blank" href="http://permissions-calculator.org/">Calculate the correct value</a>',
},
{
'name': 'permission_file',
'default': '0755',
'default': '0644',
'label': 'File CHMOD',
'description': 'Same as Folder CHMOD but for files',
'description': 'See Folder CHMOD description, but for files',
},
],
},

View File

@@ -1,6 +1,5 @@
import os
import re
import traceback
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import ss
@@ -8,8 +7,6 @@ from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from minify.cssmin import cssmin
from minify.jsmin import jsmin
from tornado.web import StaticFileHandler
@@ -22,30 +19,26 @@ class ClientScript(Plugin):
core_static = {
'style': [
'style/main.css',
'style/uniform.generic.css',
'style/uniform.css',
'style/settings.css',
'style/combined.min.css',
],
'script': [
'scripts/library/mootools.js',
'scripts/library/mootools_more.js',
'scripts/vendor/mootools.js',
'scripts/vendor/mootools_more.js',
'scripts/vendor/form_replacement/form_check.js',
'scripts/vendor/form_replacement/form_radio.js',
'scripts/vendor/form_replacement/form_dropdown.js',
'scripts/vendor/form_replacement/form_selectoption.js',
'scripts/vendor/Array.stableSort.js',
'scripts/vendor/history.js',
'scripts/library/uniform.js',
'scripts/library/form_replacement/form_check.js',
'scripts/library/form_replacement/form_radio.js',
'scripts/library/form_replacement/form_dropdown.js',
'scripts/library/form_replacement/form_selectoption.js',
'scripts/library/question.js',
'scripts/library/scrollspy.js',
'scripts/library/spin.js',
'scripts/library/Array.stableSort.js',
'scripts/library/async.js',
'scripts/couchpotato.js',
'scripts/api.js',
'scripts/library/history.js',
'scripts/page.js',
'scripts/block.js',
'scripts/block/navigation.js',
'scripts/block/header.js',
'scripts/block/footer.js',
'scripts/block/menu.js',
'scripts/page/home.js',
@@ -54,8 +47,9 @@ class ClientScript(Plugin):
],
}
urls = {'style': {}, 'script': {}}
minified = {'style': {}, 'script': {}}
watches = {}
original_paths = {'style': {}, 'script': {}}
paths = {'style': {}, 'script': {}}
comment = {
'style': '/*** %s:%d ***/\n',
@@ -74,8 +68,7 @@ class ClientScript(Plugin):
addEvent('clientscript.get_styles', self.getStyles)
addEvent('clientscript.get_scripts', self.getScripts)
if not Env.get('dev'):
addEvent('app.load', self.minify)
addEvent('app.load', self.compile)
self.addCore()
@@ -91,7 +84,7 @@ class ClientScript(Plugin):
else:
self.registerStyle(core_url, file_path, position = 'front')
def minify(self):
def compile(self):
# Create cache dir
cache = Env.get('cache_dir')
@@ -102,47 +95,43 @@ class ClientScript(Plugin):
for file_type in ['style', 'script']:
ext = 'js' if file_type is 'script' else 'css'
positions = self.paths.get(file_type, {})
positions = self.original_paths.get(file_type, {})
for position in positions:
files = positions.get(position)
self._minify(file_type, files, position, position + '.' + ext)
self._compile(file_type, files, position, position + '.' + ext)
def _minify(self, file_type, files, position, out):
def _compile(self, file_type, paths, position, out):
cache = Env.get('cache_dir')
out_name = out
out = os.path.join(cache, 'minified', out_name)
minified_dir = os.path.join(cache, 'minified')
data_combined = ''
new_paths = []
for x in paths:
file_path, url_path = x
raw = []
for file_path in files:
f = open(file_path, 'r').read()
if file_type == 'script':
data = jsmin(f)
else:
data = self.prefix(f)
data = cssmin(data)
data = data.replace('../images/', '../static/images/')
data = data.replace('../fonts/', '../static/fonts/')
data = data.replace('../../static/', '../static/') # Replace inside plugins
if not Env.get('dev'):
data = f
raw.append({'file': file_path, 'date': int(os.path.getmtime(file_path)), 'data': data})
data_combined += self.comment.get(file_type) % (ss(file_path), int(os.path.getmtime(file_path)))
data_combined += data + '\n\n'
else:
new_paths.append(x)
# Combine all files together with some comments
data = ''
for r in raw:
data += self.comment.get(file_type) % (ss(r.get('file')), r.get('date'))
data += r.get('data') + '\n\n'
if not Env.get('dev'):
self.createFile(out, data.strip())
out_path = os.path.join(minified_dir, out_name)
self.createFile(out_path, data_combined.strip())
if not self.minified.get(file_type):
self.minified[file_type] = {}
if not self.minified[file_type].get(position):
self.minified[file_type][position] = []
minified_url = 'minified/%s?%s' % (out_name, tryInt(os.path.getmtime(out)))
new_paths.append((out_path, {'url': minified_url}))
minified_url = 'minified/%s?%s' % (out_name, tryInt(os.path.getmtime(out)))
self.minified[file_type][position].append(minified_url)
self.paths[file_type][position] = new_paths
def getStyles(self, *args, **kwargs):
return self.get('style', *args, **kwargs)
@@ -150,22 +139,12 @@ class ClientScript(Plugin):
def getScripts(self, *args, **kwargs):
return self.get('script', *args, **kwargs)
def get(self, type, as_html = False, location = 'head'):
def get(self, type, location = 'head'):
if type in self.paths and location in self.paths[type]:
paths = self.paths[type][location]
return [x[1] for x in paths]
data = '' if as_html else []
try:
try:
if not Env.get('dev'):
return self.minified[type][location]
except:
pass
return self.urls[type][location]
except:
log.error('Error getting minified %s, %s: %s', (type, location, traceback.format_exc()))
return data
return []
def registerStyle(self, api_path, file_path, position = 'head'):
self.register(api_path, file_path, 'style', position)
@@ -177,36 +156,10 @@ class ClientScript(Plugin):
api_path = '%s?%s' % (api_path, tryInt(os.path.getmtime(file_path)))
if not self.urls[type].get(location):
self.urls[type][location] = []
self.urls[type][location].append(api_path)
if not self.original_paths[type].get(location):
self.original_paths[type][location] = []
self.original_paths[type][location].append((file_path, api_path))
if not self.paths[type].get(location):
self.paths[type][location] = []
self.paths[type][location].append(file_path)
prefix_properties = ['border-radius', 'transform', 'transition', 'box-shadow']
prefix_tags = ['ms', 'moz', 'webkit']
def prefix(self, data):
trimmed_data = re.sub('(\t|\n|\r)+', '', data)
new_data = ''
colon_split = trimmed_data.split(';')
for splt in colon_split:
curl_split = splt.strip().split('{')
for curly in curl_split:
curly = curly.strip()
for prop in self.prefix_properties:
if curly[:len(prop) + 1] == prop + ':':
for tag in self.prefix_tags:
new_data += ' -%s-%s; ' % (tag, curly)
new_data += curly + (' { ' if len(curl_split) > 1 else ' ')
new_data += '; '
new_data = new_data.replace('{ ;', '; ').replace('} ;', '} ')
return new_data
self.paths[type][location].append((file_path, api_path))

View File

@@ -25,6 +25,7 @@ class DownloaderBase(Provider):
status_support = True
torrent_sources = [
'https://zoink.it/torrent/%s.torrent',
'http://torrage.com/torrent/%s.torrent',
'https://torcache.net/torrent/%s.torrent',
]
@@ -72,6 +73,9 @@ class DownloaderBase(Provider):
return
return self.download(data = data, media = media, filedata = filedata)
def download(self, *args, **kwargs):
return False
def _getAllDownloadStatus(self, download_ids):
if self.isDisabled(manual = True, data = {}):
return

View File

@@ -16,8 +16,8 @@ var DownloadersBase = new Class({
var setting_page = App.getPage('Settings');
setting_page.addEvent('create', function(){
Object.each(setting_page.tabs.downloaders.groups, self.addTestButton.bind(self))
})
Object.each(setting_page.tabs.downloaders.groups, self.addTestButton.bind(self));
});
},
@@ -40,22 +40,23 @@ var DownloadersBase = new Class({
button.set('text', button_name);
var message;
if(json.success){
var message = new Element('span.success', {
message = new Element('span.success', {
'text': 'Connection successful'
}).inject(button, 'after')
}).inject(button, 'after');
}
else {
var msg_text = 'Connection failed. Check logs for details.';
if(json.hasOwnProperty('msg')) msg_text = json.msg;
var message = new Element('span.failed', {
message = new Element('span.failed', {
'text': msg_text
}).inject(button, 'after')
}).inject(button, 'after');
}
(function(){
message.destroy();
}).delay(3000)
}).delay(3000);
}
});
}

View File

@@ -33,9 +33,9 @@ class Scheduler(Plugin):
except:
pass
def doShutdown(self):
def doShutdown(self, *args, **kwargs):
self.stop()
return super(Scheduler, self).doShutdown()
return super(Scheduler, self).doShutdown(*args, **kwargs)
def stop(self):
if self.started:

View File

@@ -10,13 +10,13 @@ from threading import RLock
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import removePyc
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from dateutil.parser import parse
from git.repository import LocalRepository
from scandir import scandir
import version
from six.moves import filter
@@ -142,9 +142,11 @@ class Updater(Plugin):
'success': success
}
def doShutdown(self):
self.updater.deletePyc(show_logs = False)
return super(Updater, self).doShutdown()
def doShutdown(self, *args, **kwargs):
if not Env.get('dev') and not Env.get('desktop'):
removePyc(Env.get('app_dir'), show_logs = False)
return super(Updater, self).doShutdown(*args, **kwargs)
class BaseUpdater(Plugin):
@@ -180,30 +182,6 @@ class BaseUpdater(Plugin):
def check(self):
pass
def deletePyc(self, only_excess = True, show_logs = True):
for root, dirs, files in scandir.walk(ss(Env.get('app_dir'))):
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
for excess_pyc_file in excess_pyc_files:
full_path = os.path.join(root, excess_pyc_file)
if show_logs: log.debug('Removing old PYC file: %s', full_path)
try:
os.remove(full_path)
except:
log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
class GitUpdater(BaseUpdater):
@@ -227,19 +205,28 @@ class GitUpdater(BaseUpdater):
def getVersion(self):
if not self.version:
hash = None
date = None
branch = self.branch
try:
output = self.repo.getHead() # Yes, please
log.debug('Git version output: %s', output.hash)
self.version = {
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, self.repo.getCurrentBranch().name or self.branch, output.hash[:8], datetime.fromtimestamp(output.getDate())),
'hash': output.hash[:8],
'date': output.getDate(),
'type': 'git',
'branch': self.repo.getCurrentBranch().name
}
hash = output.hash[:8]
date = output.getDate()
branch = self.repo.getCurrentBranch().name
except Exception as e:
log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e)
return 'No GIT'
self.version = {
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, branch, hash or 'unknown_hash', datetime.fromtimestamp(date) if date else 'unknown_date'),
'hash': hash,
'date': date,
'type': 'git',
'branch': branch
}
return self.version
@@ -322,17 +309,18 @@ class SourceUpdater(BaseUpdater):
return False
def replaceWith(self, path):
app_dir = ss(Env.get('app_dir'))
data_dir = ss(Env.get('data_dir'))
path = sp(path)
app_dir = Env.get('app_dir')
data_dir = Env.get('data_dir')
# Get list of files we want to overwrite
self.deletePyc()
removePyc(app_dir)
existing_files = []
for root, subfiles, filenames in scandir.walk(app_dir):
for root, subfiles, filenames in os.walk(app_dir):
for filename in filenames:
existing_files.append(os.path.join(root, filename))
for root, subfiles, filenames in scandir.walk(path):
for root, subfiles, filenames in os.walk(path):
for filename in filenames:
fromfile = os.path.join(root, filename)
tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, ''))

View File

@@ -27,7 +27,7 @@ var UpdaterBase = new Class({
App.trigger('message', ['No updates available']);
}
}
})
});
},
@@ -50,8 +50,8 @@ var UpdaterBase = new Class({
self.message.destroy();
}
}
})
}, (timeout || 0))
});
}, (timeout || 0));
},
@@ -84,7 +84,7 @@ var UpdaterBase = new Class({
'click': self.doUpdate.bind(self)
}
})
).inject(document.body)
).inject(document.body);
},
doUpdate: function(){
@@ -96,7 +96,7 @@ var UpdaterBase = new Class({
if(json.success)
self.updating();
else
App.unBlockPage()
App.unBlockPage();
}
});
},

View File

@@ -2,12 +2,15 @@ import json
import os
import time
import traceback
from sqlite3 import OperationalError
from CodernityDB.database import RecordNotFound
from CodernityDB.index import IndexException, IndexNotFoundException, IndexConflict
from couchpotato import CPLog
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getImdb, tryInt
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode, sp
from couchpotato.core.helpers.variable import getImdb, tryInt, randomString
log = CPLog(__name__)
@@ -15,19 +18,25 @@ log = CPLog(__name__)
class Database(object):
indexes = []
indexes = None
db = None
def __init__(self):
self.indexes = {}
addApiView('database.list_documents', self.listDocuments)
addApiView('database.reindex', self.reindex)
addApiView('database.compact', self.compact)
addApiView('database.document.update', self.updateDocument)
addApiView('database.document.delete', self.deleteDocument)
addEvent('database.setup.after', self.startup_compact)
addEvent('database.setup_index', self.setupIndex)
addEvent('database.delete_corrupted', self.deleteCorrupted)
addEvent('app.migrate', self.migrate)
addEvent('app.after_shutdown', self.close)
def getDB(self):
@@ -37,28 +46,50 @@ class Database(object):
return self.db
def close(self, **kwargs):
self.getDB().close()
def setupIndex(self, index_name, klass):
self.indexes.append(index_name)
self.indexes[index_name] = klass
db = self.getDB()
# Category index
index_instance = klass(db.path, index_name)
try:
db.add_index(index_instance)
db.reindex_index(index_name)
except:
previous = db.indexes_names[index_name]
previous_version = previous._version
current_version = klass._version
# Only edit index if versions are different
if previous_version < current_version:
log.debug('Index "%s" already exists, updating and reindexing', index_name)
db.destroy_index(previous)
# Make sure store and bucket don't exist
exists = []
for x in ['buck', 'stor']:
full_path = os.path.join(db.path, '%s_%s' % (index_name, x))
if os.path.exists(full_path):
exists.append(full_path)
if index_name not in db.indexes_names:
# Remove existing buckets if index isn't there
for x in exists:
os.unlink(x)
# Add index (will restore buckets)
db.add_index(index_instance)
db.reindex_index(index_name)
else:
# Previous info
previous = db.indexes_names[index_name]
previous_version = previous._version
current_version = klass._version
# Only edit index if versions are different
if previous_version < current_version:
log.debug('Index "%s" already exists, updating and reindexing', index_name)
db.destroy_index(previous)
db.add_index(index_instance)
db.reindex_index(index_name)
except:
log.error('Failed adding index %s: %s', (index_name, traceback.format_exc()))
def deleteDocument(self, **kwargs):
@@ -118,6 +149,17 @@ class Database(object):
return results
def deleteCorrupted(self, _id, traceback_error = ''):
db = self.getDB()
try:
log.debug('Deleted corrupted document "%s": %s', (_id, traceback_error))
corrupted = db.get('id', _id, with_storage = False)
db._delete_id_index(corrupted.get('_id'), corrupted.get('_rev'), None)
except:
log.debug('Failed deleting corrupted: %s', traceback.format_exc())
def reindex(self, **kwargs):
success = True
@@ -132,20 +174,108 @@ class Database(object):
'success': success
}
def compact(self, **kwargs):
def compact(self, try_repair = True, **kwargs):
success = False
db = self.getDB()
# Removing left over compact files
db_path = sp(db.path)
for f in os.listdir(sp(db.path)):
for x in ['_compact_buck', '_compact_stor']:
if f[-len(x):] == x:
os.unlink(os.path.join(db_path, f))
success = True
try:
db = self.getDB()
start = time.time()
size = float(db.get_db_details().get('size', 0))
log.debug('Compacting database, current size: %sMB', round(size/1048576, 2))
db.compact()
new_size = float(db.get_db_details().get('size', 0))
log.debug('Done compacting database in %ss, new size: %sMB, saved: %sMB', (round(time.time()-start, 2), round(new_size/1048576, 2), round((size-new_size)/1048576, 2)))
success = True
except (IndexException, AttributeError):
if try_repair:
log.error('Something wrong with indexes, trying repair')
# Remove all indexes
old_indexes = self.indexes.keys()
for index_name in old_indexes:
try:
db.destroy_index(index_name)
except IndexNotFoundException:
pass
except:
log.error('Failed removing old index %s', index_name)
# Add them again
for index_name in self.indexes:
klass = self.indexes[index_name]
# Category index
index_instance = klass(db.path, index_name)
try:
db.add_index(index_instance)
db.reindex_index(index_name)
except IndexConflict:
pass
except:
log.error('Failed adding index %s', index_name)
raise
self.compact(try_repair = False)
else:
log.error('Failed compact: %s', traceback.format_exc())
except:
log.error('Failed compact: %s', traceback.format_exc())
success = False
return {
'success': success
}
# Compact on start
def startup_compact(self):
from couchpotato import Env
db = self.getDB()
# Try fix for migration failures on desktop
if Env.get('desktop'):
try:
list(db.all('profile', with_doc = True))
except RecordNotFound:
failed_location = '%s_failed' % db.path
old_db = os.path.join(Env.get('data_dir'), 'couchpotato.db.old')
if not os.path.isdir(failed_location) and os.path.isfile(old_db):
log.error('Corrupt database, trying migrate again')
db.close()
# Rename database folder
os.rename(db.path, '%s_failed' % db.path)
# Rename .old database to try another migrate
os.rename(old_db, old_db[:-4])
fireEventAsync('app.restart')
else:
log.error('Migration failed and couldn\'t recover database. Please report on GitHub, with this message.')
db.reindex()
return
# Check size and compact if needed
size = db.get_db_details().get('size')
prop_name = 'last_db_compact'
last_check = int(Env.prop(prop_name, default = 0))
if size > 26214400 and last_check < time.time()-604800: # 25MB / 7 days
self.compact()
Env.prop(prop_name, value = int(time.time()))
def migrate(self):
from couchpotato import Env
@@ -182,301 +312,328 @@ class Database(object):
}
migrate_data = {}
rename_old = False
c = conn.cursor()
try:
for ml in migrate_list:
migrate_data[ml] = {}
rows = migrate_list[ml]
c = conn.cursor()
try:
c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml))
except:
# ignore faulty destination_id database
if ml == 'category':
migrate_data[ml] = {}
for ml in migrate_list:
migrate_data[ml] = {}
rows = migrate_list[ml]
try:
c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml))
except:
# ignore faulty destination_id database
if ml == 'category':
migrate_data[ml] = {}
else:
rename_old = True
raise
for p in c.fetchall():
columns = {}
for row in migrate_list[ml]:
columns[row] = p[rows.index(row)]
if not migrate_data[ml].get(p[0]):
migrate_data[ml][p[0]] = columns
else:
if not isinstance(migrate_data[ml][p[0]], list):
migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
migrate_data[ml][p[0]].append(columns)
conn.close()
log.info('Getting data took %s', time.time() - migrate_start)
db = self.getDB()
if not db.opened:
return
# Use properties
properties = migrate_data['properties']
log.info('Importing %s properties', len(properties))
for x in properties:
property = properties[x]
Env.prop(property.get('identifier'), property.get('value'))
# Categories
categories = migrate_data.get('category', [])
log.info('Importing %s categories', len(categories))
category_link = {}
for x in categories:
c = categories[x]
new_c = db.insert({
'_t': 'category',
'order': c.get('order', 999),
'label': toUnicode(c.get('label', '')),
'ignored': toUnicode(c.get('ignored', '')),
'preferred': toUnicode(c.get('preferred', '')),
'required': toUnicode(c.get('required', '')),
'destination': toUnicode(c.get('destination', '')),
})
category_link[x] = new_c.get('_id')
# Profiles
log.info('Importing profiles')
new_profiles = db.all('profile', with_doc = True)
new_profiles_by_label = {}
for x in new_profiles:
# Remove default non core profiles
if not x['doc'].get('core'):
db.delete(x['doc'])
else:
raise
new_profiles_by_label[x['doc']['label']] = x['_id']
for p in c.fetchall():
columns = {}
for row in migrate_list[ml]:
columns[row] = p[rows.index(row)]
profiles = migrate_data['profile']
profile_link = {}
for x in profiles:
p = profiles[x]
if not migrate_data[ml].get(p[0]):
migrate_data[ml][p[0]] = columns
exists = new_profiles_by_label.get(p.get('label'))
# Update existing with order only
if exists and p.get('core'):
profile = db.get('id', exists)
profile['order'] = tryInt(p.get('order'))
profile['hide'] = p.get('hide') in [1, True, 'true', 'True']
db.update(profile)
profile_link[x] = profile.get('_id')
else:
if not isinstance(migrate_data[ml][p[0]], list):
migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
migrate_data[ml][p[0]].append(columns)
conn.close()
log.info('Getting data took %s', time.time() - migrate_start)
db = self.getDB()
# Use properties
properties = migrate_data['properties']
log.info('Importing %s properties', len(properties))
for x in properties:
property = properties[x]
Env.prop(property.get('identifier'), property.get('value'))
# Categories
categories = migrate_data.get('category', [])
log.info('Importing %s categories', len(categories))
category_link = {}
for x in categories:
c = categories[x]
new_c = db.insert({
'_t': 'category',
'order': c.get('order', 999),
'label': toUnicode(c.get('label', '')),
'ignored': toUnicode(c.get('ignored', '')),
'preferred': toUnicode(c.get('preferred', '')),
'required': toUnicode(c.get('required', '')),
'destination': toUnicode(c.get('destination', '')),
})
category_link[x] = new_c.get('_id')
# Profiles
log.info('Importing profiles')
new_profiles = db.all('profile', with_doc = True)
new_profiles_by_label = {}
for x in new_profiles:
# Remove default non core profiles
if not x['doc'].get('core'):
db.delete(x['doc'])
else:
new_profiles_by_label[x['doc']['label']] = x['_id']
profiles = migrate_data['profile']
profile_link = {}
for x in profiles:
p = profiles[x]
exists = new_profiles_by_label.get(p.get('label'))
# Update existing with order only
if exists and p.get('core'):
profile = db.get('id', exists)
profile['order'] = tryInt(p.get('order'))
profile['hide'] = p.get('hide') in [1, True, 'true', 'True']
db.update(profile)
profile_link[x] = profile.get('_id')
else:
new_profile = {
'_t': 'profile',
'label': p.get('label'),
'order': int(p.get('order', 999)),
'core': p.get('core', False),
'qualities': [],
'wait_for': [],
'finish': []
}
types = migrate_data['profiletype']
for profile_type in types:
p_type = types[profile_type]
if types[profile_type]['profile_id'] == p['id']:
new_profile['finish'].append(p_type['finish'])
new_profile['wait_for'].append(p_type['wait_for'])
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
new_profile.update(db.insert(new_profile))
profile_link[x] = new_profile.get('_id')
# Qualities
log.info('Importing quality sizes')
new_qualities = db.all('quality', with_doc = True)
new_qualities_by_identifier = {}
for x in new_qualities:
new_qualities_by_identifier[x['doc']['identifier']] = x['_id']
qualities = migrate_data['quality']
quality_link = {}
for x in qualities:
q = qualities[x]
q_id = new_qualities_by_identifier[q.get('identifier')]
quality = db.get('id', q_id)
quality['order'] = q.get('order')
quality['size_min'] = tryInt(q.get('size_min'))
quality['size_max'] = tryInt(q.get('size_max'))
db.update(quality)
quality_link[x] = quality
# Titles
titles = migrate_data['librarytitle']
titles_by_library = {}
for x in titles:
title = titles[x]
if title.get('default'):
titles_by_library[title.get('libraries_id')] = title.get('title')
# Releases
releaseinfos = migrate_data['releaseinfo']
for x in releaseinfos:
info = releaseinfos[x]
# Skip if release doesn't exist for this info
if not migrate_data['release'].get(info.get('release_id')):
continue
if not migrate_data['release'][info.get('release_id')].get('info'):
migrate_data['release'][info.get('release_id')]['info'] = {}
migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value')
releases = migrate_data['release']
releases_by_media = {}
for x in releases:
release = releases[x]
if not releases_by_media.get(release.get('movie_id')):
releases_by_media[release.get('movie_id')] = []
releases_by_media[release.get('movie_id')].append(release)
# Type ids
types = migrate_data['filetype']
type_by_id = {}
for t in types:
type = types[t]
type_by_id[type.get('id')] = type
# Media
log.info('Importing %s media items', len(migrate_data['movie']))
statuses = migrate_data['status']
libraries = migrate_data['library']
library_files = migrate_data['library_files__file_library']
releases_files = migrate_data['release_files__file_release']
all_files = migrate_data['file']
poster_type = migrate_data['filetype']['poster']
medias = migrate_data['movie']
for x in medias:
m = medias[x]
status = statuses.get(m['status_id']).get('identifier')
l = libraries[m['library_id']]
# Only migrate wanted movies, Skip if no identifier present
if not getImdb(l.get('identifier')): continue
profile_id = profile_link.get(m['profile_id'])
category_id = category_link.get(m['category_id'])
title = titles_by_library.get(m['library_id'])
releases = releases_by_media.get(x, [])
info = json.loads(l.get('info', ''))
files = library_files.get(m['library_id'], [])
if not isinstance(files, list):
files = [files]
added_media = fireEvent('movie.add', {
'info': info,
'identifier': l.get('identifier'),
'profile_id': profile_id,
'category_id': category_id,
'title': title
}, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True)
if not added_media:
log.error('Failed adding media %s: %s', (l.get('identifier'), info))
continue
added_media['files'] = added_media.get('files', {})
for f in files:
ffile = all_files[f.get('file_id')]
# Only migrate posters
if ffile.get('type_id') == poster_type.get('id'):
if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')):
added_media['files']['image_poster'] = [ffile.get('path')]
break
if 'image_poster' in added_media['files']:
db.update(added_media)
for rel in releases:
empty_info = False
if not rel.get('info'):
empty_info = True
rel['info'] = {}
quality = quality_link[rel.get('quality_id')]
release_status = statuses.get(rel.get('status_id')).get('identifier')
if rel['info'].get('download_id'):
status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True']
rel['info']['download_info'] = {
'id': rel['info'].get('download_id'),
'downloader': rel['info'].get('download_downloader'),
'status_support': status_support,
new_profile = {
'_t': 'profile',
'label': p.get('label'),
'order': int(p.get('order', 999)),
'core': p.get('core', False),
'qualities': [],
'wait_for': [],
'finish': []
}
# Add status to keys
rel['info']['status'] = release_status
if not empty_info:
fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True)
else:
release = {
'_t': 'release',
'identifier': rel.get('identifier'),
'media_id': added_media.get('_id'),
'quality': quality.get('identifier'),
'status': release_status,
'last_edit': int(time.time()),
'files': {}
}
types = migrate_data['profiletype']
for profile_type in types:
p_type = types[profile_type]
if types[profile_type]['profile_id'] == p['id']:
if p_type['quality_id']:
new_profile['finish'].append(p_type['finish'])
new_profile['wait_for'].append(p_type['wait_for'])
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
# Add downloader info if provided
try:
release['download_info'] = rel['info']['download_info']
del rel['download_info']
except:
pass
if len(new_profile['qualities']) > 0:
new_profile.update(db.insert(new_profile))
profile_link[x] = new_profile.get('_id')
else:
log.error('Corrupt profile list for "%s", using default.', p.get('label'))
# Add files
release_files = releases_files.get(rel.get('id'), [])
if not isinstance(release_files, list):
release_files = [release_files]
# Qualities
log.info('Importing quality sizes')
new_qualities = db.all('quality', with_doc = True)
new_qualities_by_identifier = {}
for x in new_qualities:
new_qualities_by_identifier[x['doc']['identifier']] = x['_id']
if len(release_files) == 0:
qualities = migrate_data['quality']
quality_link = {}
for x in qualities:
q = qualities[x]
q_id = new_qualities_by_identifier[q.get('identifier')]
quality = db.get('id', q_id)
quality['order'] = q.get('order')
quality['size_min'] = tryInt(q.get('size_min'))
quality['size_max'] = tryInt(q.get('size_max'))
db.update(quality)
quality_link[x] = quality
# Titles
titles = migrate_data['librarytitle']
titles_by_library = {}
for x in titles:
title = titles[x]
if title.get('default'):
titles_by_library[title.get('libraries_id')] = title.get('title')
# Releases
releaseinfos = migrate_data['releaseinfo']
for x in releaseinfos:
info = releaseinfos[x]
# Skip if release doesn't exist for this info
if not migrate_data['release'].get(info.get('release_id')):
continue
if not migrate_data['release'][info.get('release_id')].get('info'):
migrate_data['release'][info.get('release_id')]['info'] = {}
migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value')
releases = migrate_data['release']
releases_by_media = {}
for x in releases:
release = releases[x]
if not releases_by_media.get(release.get('movie_id')):
releases_by_media[release.get('movie_id')] = []
releases_by_media[release.get('movie_id')].append(release)
# Type ids
types = migrate_data['filetype']
type_by_id = {}
for t in types:
type = types[t]
type_by_id[type.get('id')] = type
# Media
log.info('Importing %s media items', len(migrate_data['movie']))
statuses = migrate_data['status']
libraries = migrate_data['library']
library_files = migrate_data['library_files__file_library']
releases_files = migrate_data['release_files__file_release']
all_files = migrate_data['file']
poster_type = migrate_data['filetype']['poster']
medias = migrate_data['movie']
for x in medias:
m = medias[x]
status = statuses.get(m['status_id']).get('identifier')
l = libraries.get(m['library_id'])
# Only migrate wanted movies, Skip if no identifier present
if not l or not getImdb(l.get('identifier')): continue
profile_id = profile_link.get(m['profile_id'])
category_id = category_link.get(m['category_id'])
title = titles_by_library.get(m['library_id'])
releases = releases_by_media.get(x, [])
info = json.loads(l.get('info', ''))
files = library_files.get(m['library_id'], [])
if not isinstance(files, list):
files = [files]
added_media = fireEvent('movie.add', {
'info': info,
'identifier': l.get('identifier'),
'profile_id': profile_id,
'category_id': category_id,
'title': title
}, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True)
if not added_media:
log.error('Failed adding media %s: %s', (l.get('identifier'), info))
continue
added_media['files'] = added_media.get('files', {})
for f in files:
ffile = all_files[f.get('file_id')]
# Only migrate posters
if ffile.get('type_id') == poster_type.get('id'):
if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')):
added_media['files']['image_poster'] = [ffile.get('path')]
break
if 'image_poster' in added_media['files']:
db.update(added_media)
for rel in releases:
empty_info = False
if not rel.get('info'):
empty_info = True
rel['info'] = {}
quality = quality_link.get(rel.get('quality_id'))
if not quality:
continue
for f in release_files:
rfile = all_files[f.get('file_id')]
file_type = type_by_id.get(rfile.get('type_id')).get('identifier')
release_status = statuses.get(rel.get('status_id')).get('identifier')
if not release['files'].get(file_type):
release['files'][file_type] = []
if rel['info'].get('download_id'):
status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True']
rel['info']['download_info'] = {
'id': rel['info'].get('download_id'),
'downloader': rel['info'].get('download_downloader'),
'status_support': status_support,
}
release['files'][file_type].append(rfile.get('path'))
# Add status to keys
rel['info']['status'] = release_status
if not empty_info:
fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True)
else:
release = {
'_t': 'release',
'identifier': rel.get('identifier'),
'media_id': added_media.get('_id'),
'quality': quality.get('identifier'),
'status': release_status,
'last_edit': int(time.time()),
'files': {}
}
try:
rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc']
rls.update(release)
db.update(rls)
except:
db.insert(release)
# Add downloader info if provided
try:
release['download_info'] = rel['info']['download_info']
del rel['download_info']
except:
pass
# Add files
release_files = releases_files.get(rel.get('id'), [])
if not isinstance(release_files, list):
release_files = [release_files]
if len(release_files) == 0:
continue
for f in release_files:
rfile = all_files.get(f.get('file_id'))
if not rfile:
continue
file_type = type_by_id.get(rfile.get('type_id')).get('identifier')
if not release['files'].get(file_type):
release['files'][file_type] = []
release['files'][file_type].append(rfile.get('path'))
try:
rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc']
rls.update(release)
db.update(rls)
except:
db.insert(release)
log.info('Total migration took %s', time.time() - migrate_start)
log.info('=' * 30)
rename_old = True
except OperationalError:
log.error('Migrating from faulty database, probably a (too) old version: %s', traceback.format_exc())
rename_old = True
except:
log.error('Migration failed: %s', traceback.format_exc())
log.info('Total migration took %s', time.time() - migrate_start)
log.info('=' * 30)
# rename old database
log.info('Renaming old database to %s ', old_db + '.old')
os.rename(old_db, old_db + '.old')
if rename_old:
random = randomString()
log.info('Renaming old database to %s ', '%s.%s_old' % (old_db, random))
os.rename(old_db, '%s.%s_old' % (old_db, random))
if os.path.isfile(old_db + '-wal'):
os.rename(old_db + '-wal', old_db + '-wal.old')
if os.path.isfile(old_db + '-shm'):
os.rename(old_db + '-shm', old_db + '-shm.old')
if os.path.isfile(old_db + '-wal'):
os.rename(old_db + '-wal', '%s-wal.%s_old' % (old_db, random))
if os.path.isfile(old_db + '-shm'):
os.rename(old_db + '-shm', '%s-shm.%s_old' % (old_db, random))

View File

@@ -20,14 +20,31 @@ class Blackhole(DownloaderBase):
status_support = False
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
# The folder needs to exist
if not directory or not os.path.isdir(directory):
log.error('No directory set for blackhole %s download.', data.get('protocol'))
else:
try:
# Filedata can be empty, which probably means it a magnet link
if not filedata or len(filedata) < 50:
try:
if data.get('protocol') == 'torrent_magnet':
@@ -36,13 +53,16 @@ class Blackhole(DownloaderBase):
except:
log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
# If it's still empty, don't know what to do!
if not filedata or len(filedata) < 50:
log.error('No nzb/torrent available: %s', data.get('url'))
return False
# Create filename with imdb id and other nice stuff
file_name = self.createFileName(data, filedata, media)
full_path = os.path.join(directory, file_name)
# People want thinks nice and tidy, create a subdir
if self.conf('create_subdir'):
try:
new_path = os.path.splitext(full_path)[0]
@@ -53,6 +73,8 @@ class Blackhole(DownloaderBase):
log.error('Couldnt create sub dir, reverting to old one: %s', full_path)
try:
# Make sure the file doesn't exist yet, no need in overwriting it
if not os.path.isfile(full_path):
log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
with open(full_path, 'wb') as f:
@@ -74,6 +96,10 @@ class Blackhole(DownloaderBase):
return False
def test(self):
""" Test and see if the directory is writable
:return: boolean
"""
directory = self.conf('directory')
if directory and os.path.isdir(directory):
@@ -88,6 +114,10 @@ class Blackhole(DownloaderBase):
return False
def getEnabledProtocol(self):
""" What protocols is this downloaded used for
:return: list with protocols
"""
if self.conf('use_for') == 'both':
return super(Blackhole, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent':
@@ -96,6 +126,12 @@ class Blackhole(DownloaderBase):
return ['nzb']
def isEnabled(self, manual = False, data = None):
""" Check if protocol is used (and enabled)
:param manual: The user has clicked to download a link through the webUI
:param data: dict returned from provider
Contains the release information
:return: boolean
"""
if not data: data = {}
for_protocol = ['both']
if data and 'torrent' in data.get('protocol'):

View File

@@ -25,8 +25,18 @@ class Deluge(DownloaderBase):
drpc = None
def connect(self, reconnect = False):
""" Connect to the delugeRPC, re-use connection when already available
:param reconnect: force reconnect
:return: DelugeRPC instance
"""
# Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':')
# Force host assignment
if len(host) == 1:
host.append(80)
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
@@ -37,6 +47,20 @@ class Deluge(DownloaderBase):
return self.drpc
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -91,11 +115,21 @@ class Deluge(DownloaderBase):
return self.downloadReturnId(remote_torrent)
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True) and self.drpc.test():
return True
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Deluge download status.')

View File

@@ -0,0 +1,427 @@
from base64 import b16encode, b32decode, b64encode
from distutils.version import LooseVersion
from hashlib import sha1
import httplib
import json
import os
import re
import urllib2
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from bencode import bencode as benc, bdecode
# Module-level logger for this downloader plugin.
log = CPLog(__name__)
# Plugin loader hook: name of the class to instantiate from this module.
autoload = 'Hadouken'
# Downloader plugin that sends torrents/magnet links to a Hadouken
# (http://www.hdkn.net) instance over its JSON-RPC web API.
class Hadouken(DownloaderBase):
# Release protocols this downloader accepts.
protocol = ['torrent', 'torrent_magnet']
# Lazily created HadoukenAPI client; set up in connect().
hadouken_api = None
def connect(self):
    """Build the HadoukenAPI client from the configured host and API key.

    :return: True when host/port and API key look valid, otherwise False.
    """
    # Config stores 'host:port'; index 1 must hold a numeric port.
    parts = cleanHost(self.conf('host'), protocol = False).split(':')
    api_key = self.conf('api_key')

    if not isInt(parts[1]):
        log.error('Config properties are not filled in correctly, port is missing.')
        return False

    if not api_key:
        log.error('Config properties are not filled in correctly, API key is missing.')
        return False

    self.hadouken_api = HadoukenAPI(parts[0], port = parts[1], api_key = api_key)

    return True
def download(self, data = None, media = None, filedata = None):
    """Send a torrent or magnet link to Hadouken.

    :param data: dict returned from provider
        Contains the release information
    :param media: media dict with information
        Used for creating the filename when possible
    :param filedata: downloaded torrent filedata
        The file gets downloaded in the searcher and sent to this function
        so failed checking can happen before the downloader runs
    :return: boolean
        One failure returns False, but the downloader should log its own errors
    """
    data = data or {}
    media = media or {}

    log.debug("Sending '%s' (%s) to Hadouken.", (data.get('name'), data.get('protocol')))

    if not self.connect():
        return False

    is_magnet = data.get('protocol') == 'torrent_magnet'

    torrent_params = {}
    if self.conf('label'):
        torrent_params['label'] = self.conf('label')

    torrent_filename = self.createFileName(data, filedata, media)

    if is_magnet:
        # Pull the info hash straight out of the magnet URI.
        torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
        torrent_params['trackers'] = self.torrent_trackers
        torrent_params['name'] = torrent_filename
    else:
        # Hash the bencoded info dict to obtain the canonical info hash.
        info = bdecode(filedata)['info']
        torrent_hash = sha1(benc(info)).hexdigest().upper()

    # Magnet URIs may carry a base32-encoded hash; normalise it to hex.
    if len(torrent_hash) == 32:
        torrent_hash = b16encode(b32decode(torrent_hash))

    # Hand the release over to Hadouken.
    if is_magnet:
        self.hadouken_api.add_magnet_link(data.get('url'), torrent_params)
    else:
        self.hadouken_api.add_file(filedata, torrent_params)

    return self.downloadReturnId(torrent_hash)
def test(self):
    """Test the given host:port and API key (requires Hadouken >= 4.5.6).

    :return: boolean
    """
    if not self.connect():
        return False

    version = self.hadouken_api.get_version()

    if not version:
        log.error('Could not get Hadouken version.')
        return False

    # Anything older than 4.5.6 lacks the API features we rely on.
    if LooseVersion(version) < LooseVersion('4.5.6'):
        log.error('Hadouken v4.5.6 (or newer) required. Found v%s', version)
        return False

    return True
def getAllDownloadStatus(self, ids):
    """ Get status of all active downloads

    :param ids: list of (mixed) downloader ids
        Used to match the releases for this downloader as there could be
        other downloaders active that it should ignore
    :return: list of releases
    """

    log.debug('Checking Hadouken download status.')

    if not self.connect():
        return []

    release_downloads = ReleaseDownloadList(self)

    queue = self.hadouken_api.get_by_hash_list(ids)

    if not queue:
        return []

    for torrent in queue:
        if torrent is None:
            continue

        torrent_filelist = self.hadouken_api.get_files_by_hash(torrent['InfoHash'])

        torrent_files = []
        save_path = torrent['SavePath']

        # Each file entry's 'Path' is relative to the torrent's save path:
        # - single file torrent: Path = "file1.iso"   -> "C:\Downloads\file1.iso"
        # - multi file torrent:  Path = "dir/file1.iso" -> "C:\Downloads\dir/file1.iso"
        for file_item in torrent_filelist:
            torrent_files.append(sp(os.path.join(save_path, file_item['Path'])))

        release_downloads.append({
            'id': torrent['InfoHash'].upper(),
            'name': torrent['Name'],
            'status': self.get_torrent_status(torrent),
            'seed_ratio': self.get_seed_ratio(torrent),
            'original_status': torrent['State'],
            'timeleft': -1,
            # BUG FIX: was `len(torrent_files == 1)`, which compared the
            # list against 1 (always False) and then crashed inside len().
            # Single-file torrents live directly in save_path; multi-file
            # torrents get a subfolder named after the torrent.
            'folder': sp(save_path if len(torrent_files) == 1 else os.path.join(save_path, torrent['Name'])),
            'files': torrent_files
        })

    return release_downloads
def get_seed_ratio(self, torrent):
    """ Returns the seed ratio (uploaded / downloaded) for a given torrent.

    Keyword arguments:
    torrent -- The torrent to calculate seed ratio for.

    :return: ratio as a float, or 0 when nothing was transferred yet.
    """
    up = torrent['TotalUploadedBytes']
    down = torrent['TotalDownloadedBytes']

    if up > 0 and down > 0:
        # BUG FIX: use true division -- under Python 2 the original integer
        # division truncated e.g. a ratio of 1.5 down to 1.
        return float(up) / down

    return 0
def get_torrent_status(self, torrent):
    """ Translate a Hadouken torrent state into a CouchPotato status.

    Keyword arguments:
    torrent -- The torrent to translate status for.

    :return: 'completed', 'seeding' or 'busy'.
    """
    if not torrent['IsSeeding']:
        return 'busy'

    # Seeding; a finished and paused torrent counts as done.
    if torrent['IsFinished'] and torrent['Paused']:
        return 'completed'

    return 'seeding'
def pause(self, release_download, pause = True):
    """ Pause or resume the torrent identified by release_download['id'].

    Keyword arguments:
    release_download -- The CouchPotato release_download to pause/resume.
    pause -- Boolean indicating whether to pause or resume.
    """
    connected = self.connect()
    if not connected:
        return False

    return self.hadouken_api.pause(release_download['id'], pause)
def removeFailed(self, release_download):
    """ Remove a failed torrent, deleting its downloaded data as well.

    Keyword arguments:
    release_download -- The CouchPotato release_download to remove.
    """
    log.info('%s failed downloading, deleting...', release_download['name'])

    connected = self.connect()
    if not connected:
        return False

    return self.hadouken_api.remove(release_download['id'], remove_data = True)
def processComplete(self, release_download, delete_files = False):
    """ Remove a completed torrent from Hadouken, optionally with its data.

    Keyword arguments:
    release_download -- The CouchPotato release_download to remove.
    delete_files -- Boolean indicating whether to remove the associated data.
    """
    log.debug('Requesting Hadouken to remove the torrent %s%s.',
              (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))

    connected = self.connect()
    if not connected:
        return False

    return self.hadouken_api.remove(release_download['id'], remove_data = delete_files)
class HadoukenAPI(object):
def __init__(self, host = 'localhost', port = 7890, api_key = None):
self.url = 'http://' + str(host) + ':' + str(port)
self.api_key = api_key
self.requestId = 0;
self.opener = urllib2.build_opener()
self.opener.addheaders = [('User-agent', 'couchpotato-hadouken-client/1.0'), ('Accept', 'application/json')]
if not api_key:
log.error('API key missing.')
def add_file(self, filedata, torrent_params):
""" Add a file to Hadouken with the specified parameters.
Keyword arguments:
filedata -- The binary torrent data.
torrent_params -- Additional parameters for the file.
"""
data = {
'method': 'torrents.addFile',
'params': [b64encode(filedata), torrent_params]
}
return self._request(data)
def add_magnet_link(self, magnetLink, torrent_params):
""" Add a magnet link to Hadouken with the specified parameters.
Keyword arguments:
magnetLink -- The magnet link to send.
torrent_params -- Additional parameters for the magnet link.
"""
data = {
'method': 'torrents.addUrl',
'params': [magnetLink, torrent_params]
}
return self._request(data)
def get_by_hash_list(self, infoHashList):
""" Gets a list of torrents filtered by the given info hash list.
Keyword arguments:
infoHashList -- A list of info hashes.
"""
data = {
'method': 'torrents.getByInfoHashList',
'params': [infoHashList]
}
return self._request(data)
def get_files_by_hash(self, infoHash):
""" Gets a list of files for the torrent identified by the
given info hash.
Keyword arguments:
infoHash -- The info hash of the torrent to return files for.
"""
data = {
'method': 'torrents.getFiles',
'params': [infoHash]
}
return self._request(data)
def get_version(self):
""" Gets the version, commitish and build date of Hadouken. """
data = {
'method': 'core.getVersion',
'params': None
}
result = self._request(data)
if not result:
return False
return result['Version']
def pause(self, infoHash, pause):
""" Pauses/unpauses the torrent identified by the given info hash.
Keyword arguments:
infoHash -- The info hash of the torrent to operate on.
pause -- If true, pauses the torrent. Otherwise resumes.
"""
data = {
'method': 'torrents.pause',
'params': [infoHash]
}
if not pause:
data['method'] = 'torrents.resume'
return self._request(data)
def remove(self, infoHash, remove_data = False):
""" Removes the torrent identified by the given info hash and
optionally removes the data as well.
Keyword arguments:
infoHash -- The info hash of the torrent to remove.
remove_data -- If true, removes the data associated with the torrent.
"""
data = {
'method': 'torrents.remove',
'params': [infoHash, remove_data]
}
return self._request(data)
def _request(self, data):
self.requestId += 1
data['jsonrpc'] = '2.0'
data['id'] = self.requestId
request = urllib2.Request(self.url + '/jsonrpc', data = json.dumps(data))
request.add_header('Authorization', 'Token ' + self.api_key)
request.add_header('Content-Type', 'application/json')
try:
f = self.opener.open(request)
response = f.read()
f.close()
obj = json.loads(response)
if not 'error' in obj.keys():
return obj['result']
log.error('JSONRPC error, %s: %s', obj['error']['code'], obj['error']['message'])
except httplib.InvalidURL as err:
log.error('Invalid Hadouken host, check your config %s', err)
except urllib2.HTTPError as err:
if err.code == 401:
log.error('Invalid Hadouken API key, check your config')
else:
log.error('Hadouken HTTPError: %s', err)
except urllib2.URLError as err:
log.error('Unable to connect to Hadouken %s', err)
return False
# Settings definition rendered in the downloaders tab of the web UI.
config = [{
    'name': 'hadouken',
    'groups': [{
        'tab': 'downloaders',
        'list': 'download_providers',
        'name': 'hadouken',
        'label': 'Hadouken',
        'description': 'Use <a href="http://www.hdkn.net">Hadouken</a> (>= v4.5.6) to download torrents.',
        'wizard': True,
        'options': [{
            'name': 'enabled',
            'default': 0,
            'type': 'enabler',
            'radio_group': 'torrent'
        }, {
            'name': 'host',
            'default': 'localhost:7890'
        }, {
            'name': 'api_key',
            'label': 'API key',
            'type': 'password'
        }, {
            'name': 'label',
            'description': 'Label to add torrent as.'
        }]
    }]
}]

View File

@@ -23,6 +23,20 @@ class NZBGet(DownloaderBase):
rpc = 'xmlrpc'
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -71,6 +85,10 @@ class NZBGet(DownloaderBase):
return False
def test(self):
""" Check if connection works
:return: bool
"""
rpc = self.getRPC()
try:
@@ -91,6 +109,13 @@ class NZBGet(DownloaderBase):
return True
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking NZBGet download status.')
@@ -163,12 +188,12 @@ class NZBGet(DownloaderBase):
nzb_id = nzb['NZBID']
if nzb_id in ids:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed',
'original_status': nzb['Status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir'])
})

View File

@@ -1,16 +1,10 @@
from base64 import b64encode
from urllib2 import URLError
import os
from uuid import uuid4
import hashlib
import httplib
import json
import os
import socket
import ssl
import sys
import time
import traceback
import urllib2
from requests import HTTPError
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, sp
@@ -30,23 +24,45 @@ class NZBVortex(DownloaderBase):
session_id = None
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
# Send the nzb
try:
nzb_filename = self.createFileName(data, filedata, media)
self.call('nzb/add', files = {'file': (nzb_filename, filedata)})
nzb_filename = self.createFileName(data, filedata, media, unique_tag = True)
response = self.call('nzb/add', files = {'file': (nzb_filename, filedata, 'application/octet-stream')}, parameters = {
'name': nzb_filename,
'groupname': self.conf('group')
})
time.sleep(10)
raw_statuses = self.call('nzb')
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if os.path.basename(nzb['nzbFileName']) == nzb_filename][0]
return self.downloadReturnId(nzb_id)
if response and response.get('result', '').lower() == 'ok':
return self.downloadReturnId(nzb_filename)
log.error('Something went wrong sending the NZB file. Response: %s', response)
return False
except:
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
return False
def test(self):
""" Check if connection works
:return: bool
"""
try:
login_result = self.login()
except:
@@ -55,12 +71,20 @@ class NZBVortex(DownloaderBase):
return login_result
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
raw_statuses = self.call('nzb')
release_downloads = ReleaseDownloadList(self)
for nzb in raw_statuses.get('nzbs', []):
if nzb['id'] in ids:
nzb_id = os.path.basename(nzb['nzbFileName'])
if nzb_id in ids:
# Check status
status = 'busy'
@@ -70,7 +94,8 @@ class NZBVortex(DownloaderBase):
status = 'failed'
release_downloads.append({
'id': nzb['id'],
'temp_id': nzb['id'],
'id': nzb_id,
'name': nzb['uiTitle'],
'status': status,
'original_status': nzb['state'],
@@ -85,7 +110,7 @@ class NZBVortex(DownloaderBase):
log.info('%s failed downloading, deleting...', release_download['name'])
try:
self.call('nzb/%s/cancel' % release_download['id'])
self.call('nzb/%s/cancel' % release_download['temp_id'])
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False
@@ -114,7 +139,7 @@ class NZBVortex(DownloaderBase):
log.error('Login failed, please check you api-key')
return False
def call(self, call, parameters = None, repeat = False, auth = True, *args, **kwargs):
def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs):
# Login first
if not parameters: parameters = {}
@@ -127,19 +152,20 @@ class NZBVortex(DownloaderBase):
params = tryUrlencode(parameters)
url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api/' + call
url = cleanHost(self.conf('host')) + 'api/' + call
try:
data = self.urlopen('%s?%s' % (url, params), *args, **kwargs)
data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs)
if data:
return json.loads(data)
except URLError as e:
if hasattr(e, 'code') and e.code == 403:
return data
except HTTPError as e:
sc = e.response.status_code
if sc == 403:
# Try login and do again
if not repeat:
if not is_repeat:
self.login()
return self.call(call, parameters = parameters, repeat = True, **kwargs)
return self.call(call, parameters = parameters, is_repeat = True, **kwargs)
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
except:
@@ -151,13 +177,12 @@ class NZBVortex(DownloaderBase):
if not self.api_level:
url = cleanHost(self.conf('host')) + 'api/app/apilevel'
try:
data = self.urlopen(url, show_error = False)
self.api_level = float(json.loads(data).get('apilevel'))
except URLError as e:
if hasattr(e, 'code') and e.code == 403:
data = self.call('app/apilevel', auth = False)
self.api_level = float(data.get('apilevel'))
except HTTPError as e:
sc = e.response.status_code
if sc == 403:
log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher')
else:
log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1))
@@ -169,29 +194,6 @@ class NZBVortex(DownloaderBase):
return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()
class HTTPSConnection(httplib.HTTPSConnection):
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if sys.version_info < (2, 6, 7):
if hasattr(self, '_tunnel_host'):
self.sock = sock
self._tunnel()
else:
if self._tunnel_host:
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version = ssl.PROTOCOL_TLSv1)
class HTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return self.do_open(HTTPSConnection, req)
config = [{
'name': 'nzbvortex',
'groups': [
@@ -211,20 +213,18 @@ config = [{
},
{
'name': 'host',
'default': 'localhost:4321',
'description': 'Hostname with port. Usually <strong>localhost:4321</strong>',
},
{
'name': 'ssl',
'default': 1,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
'default': 'https://localhost:4321',
'description': 'Hostname with port. Usually <strong>https://localhost:4321</strong>',
},
{
'name': 'api_key',
'label': 'Api Key',
},
{
'name': 'group',
'label': 'Group',
'description': 'The group CP places the nzb in. Make sure to create it in NZBVortex.',
},
{
'name': 'manual',
'default': False,

View File

@@ -19,6 +19,20 @@ class Pneumatic(DownloaderBase):
status_support = False
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
@@ -63,6 +77,10 @@ class Pneumatic(DownloaderBase):
return False
def test(self):
""" Check if connection works
:return: bool
"""
directory = self.conf('directory')
if directory and os.path.isdir(directory):

View File

@@ -0,0 +1,68 @@
from .main import PutIO
def autoload():
    """Plugin loader entry point; returns the PutIO downloader instance."""
    return PutIO()
# Settings definition rendered in the downloaders tab of the web UI.
config = [{
    'name': 'putio',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'putio',
            'label': 'put.io',
            'description': 'This will start a torrent download on <a href="http://put.io">Put.io</a>.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'torrent',
                },
                {
                    'name': 'oauth_token',
                    'label': 'oauth_token',
                    'description': 'This is the OAUTH_TOKEN from your putio API',
                    'advanced': True,
                },
                {
                    'name': 'folder',
                    # Typo fix: was 'the first first folder'.
                    'description': ('The folder on putio where you want the upload to go', 'Will find the first folder that matches this name'),
                    'default': 0,
                },
                {
                    'name': 'callback_host',
                    'description': 'External reachable url to CP so put.io can do it\'s thing',
                },
                {
                    'name': 'download',
                    'description': 'Set this to have CouchPotato download the file from Put.io',
                    'type': 'bool',
                    'default': 0,
                },
                {
                    'name': 'delete_file',
                    # Typo fix: 'sucessful' -> 'successful'.
                    'description': ('Set this to remove the file from putio after successful download', 'Does nothing if you don\'t select download'),
                    'type': 'bool',
                    'default': 0,
                },
                {
                    'name': 'download_dir',
                    'type': 'directory',
                    'label': 'Download Directory',
                    'description': 'The Directory to download files to, does nothing if you don\'t select download',
                },
                {
                    'name': 'manual',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
                },
            ],
        }
    ],
}]

View File

@@ -0,0 +1,181 @@
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEventAsync
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from pio import api as pio
import datetime
# Module-level logger for this downloader plugin.
log = CPLog(__name__)
# Plugin loader hook: name of the class to instantiate from this module.
autoload = 'Putiodownload'
# Downloader that pushes torrents/magnet links to the Put.io cloud service.
class PutIO(DownloaderBase):
# Release protocols this downloader accepts.
protocol = ['torrent', 'torrent_magnet']
# NOTE(review): class-level mutable list, shared across all instances --
# confirm single-instance usage before relying on per-instance state here.
downloading_list = []
# CouchPotato proxy endpoint that handles the put.io OAuth flow.
oauth_authenticate = 'https://api.couchpota.to/authorize/putio/'
def __init__(self):
addApiView('downloader.putio.getfrom', self.getFromPutio, docs = {
'desc': 'Allows you to download file from prom Put.io',
})
addApiView('downloader.putio.auth_url', self.getAuthorizationUrl)
addApiView('downloader.putio.credentials', self.getCredentials)
addEvent('putio.download', self.putioDownloader)
return super(PutIO, self).__init__()
# This is a recusive function to check for the folders
def recursionFolder(self, client, folder = 0, tfolder = ''):
files = client.File.list(folder)
for f in files:
if f.content_type == 'application/x-directory':
if f.name == tfolder:
return f.id
else:
result = self.recursionFolder(client, f.id, tfolder)
if result != 0:
return result
return 0
# This will check the root for the folder, and kick of recusively checking sub folder
def convertFolder(self, client, folder):
if folder == 0:
return 0
else:
return self.recursionFolder(client, 0, folder)
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" to put.io', data.get('name'))
url = data.get('url')
client = pio.Client(self.conf('oauth_token'))
putioFolder = self.convertFolder(client, self.conf('folder'))
log.debug('putioFolder ID is %s', putioFolder)
# It might be possible to call getFromPutio from the renamer if we can then we don't need to do this.
# Note callback_host is NOT our address, it's the internet host that putio can call too
callbackurl = None
if self.conf('download'):
callbackurl = 'http://' + self.conf('callback_host') + '%sdownloader.putio.getfrom/' %Env.get('api_base'.strip('/'))
resp = client.Transfer.add_url(url, callback_url = callbackurl, parent_id = putioFolder)
log.debug('resp is %s', resp.id);
return self.downloadReturnId(resp.id)
def test(self):
try:
client = pio.Client(self.conf('oauth_token'))
if client.File.list():
return True
except:
log.info('Failed to get file listing, check OAUTH_TOKEN')
return False
def getAuthorizationUrl(self, host = None, **kwargs):
callback_url = cleanHost(host) + '%sdownloader.putio.credentials/' % (Env.get('api_base').lstrip('/'))
log.debug('callback_url is %s', callback_url)
target_url = self.oauth_authenticate + "?target=" + callback_url
log.debug('target_url is %s', target_url)
return {
'success': True,
'url': target_url,
}
def getCredentials(self, **kwargs):
try:
oauth_token = kwargs.get('oauth')
except:
return 'redirect', Env.get('web_base') + 'settings/downloaders/'
log.debug('oauth_token is: %s', oauth_token)
self.conf('oauth_token', value = oauth_token);
return 'redirect', Env.get('web_base') + 'settings/downloaders/'
def getAllDownloadStatus(self, ids):
log.debug('Checking putio download status.')
client = pio.Client(self.conf('oauth_token'))
transfers = client.Transfer.list()
log.debug(transfers);
release_downloads = ReleaseDownloadList(self)
for t in transfers:
if t.id in ids:
log.debug('downloading list is %s', self.downloading_list)
if t.status == "COMPLETED" and self.conf('download') == False :
status = 'completed'
# So check if we are trying to download something
elif t.status == "COMPLETED" and self.conf('download') == True:
# Assume we are done
status = 'completed'
if not self.downloading_list:
now = datetime.datetime.utcnow()
date_time = datetime.datetime.strptime(t.finished_at,"%Y-%m-%dT%H:%M:%S")
# We need to make sure a race condition didn't happen
if (now - date_time) < datetime.timedelta(minutes=5):
# 5 minutes haven't passed so we wait
status = 'busy'
else:
# If we have the file_id in the downloading_list mark it as busy
if str(t.file_id) in self.downloading_list:
status = 'busy'
else:
status = 'busy'
release_downloads.append({
'id' : t.id,
'name': t.name,
'status': status,
'timeleft': t.estimated_time,
})
return release_downloads
def putioDownloader(self, fid):
log.info('Put.io Real downloader called with file_id: %s',fid)
client = pio.Client(self.conf('oauth_token'))
log.debug('About to get file List')
putioFolder = self.convertFolder(client, self.conf('folder'))
log.debug('PutioFolderID is %s', putioFolder)
files = client.File.list(parent_id=putioFolder)
downloaddir = self.conf('download_dir')
for f in files:
if str(f.id) == str(fid):
client.File.download(f, dest = downloaddir, delete_after_download = self.conf('delete_file'))
# Once the download is complete we need to remove it from the running list.
self.downloading_list.remove(fid)
return True
def getFromPutio(self, **kwargs):
try:
file_id = str(kwargs.get('file_id'))
except:
return {
'success' : False,
}
log.info('Put.io Download has been called file_id is %s', file_id)
if file_id not in self.downloading_list:
self.downloading_list.append(file_id)
fireEventAsync('putio.download',fid = file_id)
return {
'success': True,
}
return {
'success': False,
}

View File

@@ -0,0 +1,68 @@
// Settings-page helper for the Put.io downloader: injects OAuth
// register/unregister buttons into the downloader's settings fieldset.
var PutIODownloader = new Class({

	initialize: function(){
		var self = this;
		// Defer until the settings page exists in the DOM
		App.addEvent('loadSettings', self.addRegisterButton.bind(self));
	},

	addRegisterButton: function(){
		var self = this;

		var setting_page = App.getPage('Settings');
		setting_page.addEvent('create', function(){

			var fieldset = setting_page.tabs.downloaders.groups.putio,
				l = window.location;

			// Count filled-in text inputs; > 0 means an account is registered
			var putio_set = 0;
			fieldset.getElements('input[type=text]').each(function(el){
				putio_set += +(el.get('value') !== '');
			});

			new Element('.ctrlHolder').adopt(

				// Unregister button
				(putio_set > 0) ?
					[
						self.unregister = new Element('a.button.red', {
							'text': 'Unregister "'+fieldset.getElement('input[name*=oauth_token]').get('value')+'"',
							'events': {
								'click': function(){
									// Clear the stored token and drop both buttons
									fieldset.getElements('input[name*=oauth_token]').set('value', '').fireEvent('change');

									self.unregister.destroy();
									self.unregister_or.destroy();
								}
							}
						}),
						self.unregister_or = new Element('span[text=or]')
					]
				: null,

				// Register button: ask the backend for the OAuth url, then redirect
				new Element('a.button', {
					'text': putio_set > 0 ? 'Register a different account' : 'Register your put.io account',
					'events': {
						'click': function(){
							Api.request('downloader.putio.auth_url', {
								'data': {
									// Pass our own externally visible origin as the callback host
									'host': l.protocol + '//' + l.hostname + (l.port ? ':' + l.port : '')
								},
								'onComplete': function(json){
									window.location = json.url;
								}
							});
						}
					}
				})

			).inject(fieldset.getElement('.test_button'), 'before');
		});
	}
});

window.addEvent('domready', function(){
	new PutIODownloader();
});

View File

@@ -41,12 +41,30 @@ class qBittorrent(DownloaderBase):
return self.qb
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect():
return True
return False
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
A failure returns False, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
@@ -95,6 +113,14 @@ class qBittorrent(DownloaderBase):
return 'busy'
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking qBittorrent download status.')
if not self.connect():

View File

@@ -5,14 +5,12 @@ from urlparse import urlparse
import os
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import cleanHost, splitString
from couchpotato.core.logger import CPLog
from bencode import bencode, bdecode
from rtorrent import RTorrent
from scandir import scandir
log = CPLog(__name__)
@@ -86,6 +84,10 @@ class rTorrent(DownloaderBase):
return self.rt
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True):
return True
@@ -96,6 +98,20 @@ class rTorrent(DownloaderBase):
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
A failure returns False, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
@@ -154,21 +170,23 @@ class rTorrent(DownloaderBase):
return False
def getTorrentStatus(self, torrent):
if torrent.hashing or torrent.hash_checking or torrent.message:
return 'busy'
if not torrent.complete:
return 'busy'
if not torrent.open:
return 'completed'
if torrent.state and torrent.active:
if torrent.open:
return 'seeding'
return 'busy'
return 'completed'
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking rTorrent download status.')
if not self.connect():
@@ -244,7 +262,7 @@ class rTorrent(DownloaderBase):
if torrent.is_multi_file() and torrent.directory.endswith(torrent.name):
# Remove empty directories bottom up
try:
for path, _, _ in scandir.walk(torrent.directory, topdown = False):
for path, _, _ in os.walk(sp(torrent.directory), topdown = False):
os.rmdir(path)
except OSError:
log.info('Directory "%s" contains extra files, unable to remove', torrent.directory)

View File

@@ -21,6 +21,21 @@ class Sabnzbd(DownloaderBase):
protocol = ['nzb']
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
A failure returns False, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
@@ -69,6 +84,11 @@ class Sabnzbd(DownloaderBase):
return False
def test(self):
""" Check if connection works
Return message if an old version of SAB is used
:return: bool
"""
try:
sab_data = self.call({
'mode': 'version',
@@ -89,6 +109,13 @@ class Sabnzbd(DownloaderBase):
return True
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking SABnzbd download status.')

View File

@@ -19,6 +19,21 @@ class Synology(DownloaderBase):
status_support = False
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
A failure returns False, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
@@ -50,6 +65,10 @@ class Synology(DownloaderBase):
return self.downloadReturnId('') if response else False
def test(self):
""" Check if connection works
:return: bool
"""
host = cleanHost(self.conf('host'), protocol = False).split(':')
try:
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
@@ -90,6 +109,7 @@ class SynologyRPC(object):
self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port)
self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port)
self.sid = None
self.username = username
self.password = password
self.destination = destination
@@ -117,7 +137,7 @@ class SynologyRPC(object):
def _req(self, url, args, files = None):
response = {'success': False}
try:
req = requests.post(url, data = args, files = files)
req = requests.post(url, data = args, files = files, verify = False)
req.raise_for_status()
response = json.loads(req.text)
if response['success']:

View File

@@ -23,19 +23,32 @@ class Transmission(DownloaderBase):
log = CPLog(__name__)
trpc = None
def connect(self, reconnect = False):
def connect(self):
# Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':')
host = cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1)
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if not self.trpc or reconnect:
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password'))
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password'))
return self.trpc
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
A failure returns False, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
@@ -80,19 +93,32 @@ class Transmission(DownloaderBase):
log.error('Failed sending torrent to Transmission')
return False
data = remote_torrent.get('torrent-added') or remote_torrent.get('torrent-duplicate')
# Change settings of added torrents
if torrent_params:
self.trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
self.trpc.set_torrent(data['hashString'], torrent_params)
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
return self.downloadReturnId(data['hashString'])
def test(self):
if self.connect(True) and self.trpc.get_session():
""" Check if connection works
:return: bool
"""
if self.connect() and self.trpc.get_session():
return True
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Transmission download status.')
@@ -121,6 +147,8 @@ class Transmission(DownloaderBase):
status = 'failed'
elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] == 16 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] in [5, 6]:
status = 'seeding'
@@ -164,18 +192,18 @@ class Transmission(DownloaderBase):
class TransmissionRPC(object):
"""TransmissionRPC lite library"""
def __init__(self, host = 'localhost', port = 9091, rpc_url = 'transmission', username = None, password = None):
def __init__(self, host = 'http://localhost', port = 9091, rpc_url = 'transmission', username = None, password = None):
super(TransmissionRPC, self).__init__()
self.url = 'http://' + host + ':' + str(port) + '/' + rpc_url + '/rpc'
self.url = host + ':' + str(port) + '/' + rpc_url + '/rpc'
self.tag = 0
self.session_id = 0
self.session = {}
if username and password:
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager))
password_manager.add_password(realm = 'Transmission', uri = self.url, user = username, passwd = password)
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager))
opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
urllib2.install_opener(opener)
elif username or password:
@@ -276,8 +304,8 @@ config = [{
},
{
'name': 'host',
'default': 'localhost:9091',
'description': 'Hostname with port. Usually <strong>localhost:9091</strong>',
'default': 'http://localhost:9091',
'description': 'Hostname with port. Usually <strong>http://localhost:9091</strong>',
},
{
'name': 'rpc_url',

View File

@@ -51,6 +51,21 @@ class uTorrent(DownloaderBase):
return self.utorrent_api
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
A failure returns False, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
@@ -120,6 +135,10 @@ class uTorrent(DownloaderBase):
return self.downloadReturnId(torrent_hash)
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect():
build_version = self.utorrent_api.get_build()
if not build_version:
@@ -131,6 +150,13 @@ class uTorrent(DownloaderBase):
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking uTorrent download status.')
@@ -168,7 +194,7 @@ class uTorrent(DownloaderBase):
status = 'busy'
if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
status = 'seeding'
elif (torrent[1] & self.status_flags['ERROR']):
elif torrent[1] & self.status_flags['ERROR']:
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'
@@ -229,7 +255,6 @@ class uTorrentAPI(object):
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager))
self.opener.add_handler(urllib2.HTTPDigestAuthHandler(password_manager))
elif username or password:
log.debug('User or password missing, not using authentication.')
self.token = self.get_token()

View File

@@ -90,7 +90,7 @@ def fireEvent(name, *args, **kwargs):
else:
e = Event(name = name, threads = 10, exc_info = True, traceback = True, lock = threading.RLock())
e = Event(name = name, threads = 10, exc_info = True, traceback = True)
for event in events[name]:
e.handle(event['handler'], priority = event['priority'])

View File

@@ -5,6 +5,7 @@ import re
import traceback
import unicodedata
from chardet import detect
from couchpotato.core.logger import CPLog
import six
@@ -35,13 +36,19 @@ def toUnicode(original, *args):
return six.text_type(original, *args)
except:
try:
detected = detect(original)
try:
if detected.get('confidence') > 0.8:
return original.decode(detected.get('encoding'))
except:
pass
return ek(original, *args)
except:
raise
except:
log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc()))
ascii_text = str(original).encode('string_escape')
return toUnicode(ascii_text)
return 'ERROR DECODING STRING'
def ss(original, *args):
@@ -52,7 +59,10 @@ def ss(original, *args):
return u_original.encode(Env.get('encoding'))
except Exception as e:
log.debug('Failed ss encoding char, force UTF8: %s', e)
return u_original.encode('UTF-8')
try:
return u_original.encode(Env.get('encoding'), 'replace')
except:
return u_original.encode('utf-8', 'replace')
def sp(path, *args):
@@ -78,14 +88,14 @@ def sp(path, *args):
# Replace *NIX ambiguous '//' at the beginning of a path with '/' (crashes guessit)
path = re.sub('^//', '/', path)
return toUnicode(path)
return path
def ek(original, *args):
if isinstance(original, (str, unicode)):
try:
from couchpotato.environment import Env
return original.decode(Env.get('encoding'))
return original.decode(Env.get('encoding'), 'ignore')
except UnicodeDecodeError:
raise

113
couchpotato/core/helpers/variable.py Normal file → Executable file
View File

@@ -1,4 +1,5 @@
import collections
import ctypes
import hashlib
import os
import platform
@@ -6,8 +7,9 @@ import random
import re
import string
import sys
import traceback
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp
from couchpotato.core.logger import CPLog
import six
from six.moves import map, zip, filter
@@ -39,11 +41,11 @@ def symlink(src, dst):
def getUserDir():
try:
import pwd
os.environ['HOME'] = pwd.getpwuid(os.geteuid()).pw_dir
os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir)
except:
pass
return os.path.expanduser('~')
return sp(os.path.expanduser('~'))
def getDownloadDir():
@@ -290,9 +292,14 @@ def dictIsSubset(a, b):
return all([k in b and b[k] == v for k, v in a.items()])
# Returns True if sub_folder is the same as or inside base_folder
def isSubFolder(sub_folder, base_folder):
# Returns True if sub_folder is the same as or inside base_folder
return base_folder and sub_folder and ss(os.path.normpath(base_folder).rstrip(os.path.sep) + os.path.sep) in ss(os.path.normpath(sub_folder).rstrip(os.path.sep) + os.path.sep)
if base_folder and sub_folder:
base = sp(os.path.realpath(base_folder)) + os.path.sep
subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
return os.path.commonprefix([subfolder, base]) == base
return False
# From SABNZBD
@@ -307,3 +314,99 @@ def scanForPassword(name):
if m:
return m.group(1).strip('. '), m.group(2).strip()
# Matches an underscore followed by a lowercase letter (snake_case joints).
under_pat = re.compile(r'_([a-z])')


def underscoreToCamel(name):
    """Convert an underscore_separated name to camelCase."""
    def _upper(match):
        return match.group(1).upper()

    return under_pat.sub(_upper, name)
def removePyc(folder, only_excess = True, show_logs = True):
    """Delete stale *.pyc files under `folder`, then prune empty directories.

    :param only_excess: when True, only remove .pyc files whose matching .py
        source no longer exists; otherwise remove every .pyc found
    :param show_logs: log each removed file at debug level
    """

    folder = sp(folder)

    for root, dirs, files in os.walk(folder):

        source_names = set([name for name in files if name.endswith('.py')])
        candidates = [name for name in files if name.endswith('.pyc')]
        if only_excess:
            # Keep only compiled files whose .py counterpart is gone
            candidates = [name for name in candidates if name[:-1] not in source_names]

        for stale_name in candidates:
            full_path = os.path.join(root, stale_name)
            if show_logs: log.debug('Removing old PYC file: %s', full_path)
            try:
                os.remove(full_path)
            except:
                log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))

        # Remove directories that are now (or already were) empty
        for dir_name in dirs:
            full_path = os.path.join(root, dir_name)
            if len(os.listdir(full_path)) == 0:
                try:
                    os.rmdir(full_path)
                except:
                    log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
def getFreeSpace(directories):
    """Return disk-space info for one directory or a list of directories.

    For a single directory returns its [total, free] entry; for a list
    returns a dict mapping each directory to its entry (None for
    non-directories).  On POSIX the values are in megabytes; on Windows they
    are raw byte counts (kept as-is for backward compatibility with existing
    callers — TODO confirm before unifying units).
    """

    single = not isinstance(directories, (tuple, list))
    if single:
        directories = [directories]

    free_space = {}
    for folder in directories:

        size = None
        if os.path.isdir(folder):
            if os.name == 'nt':
                _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
                                 ctypes.c_ulonglong()
                if sys.version_info >= (3,) or isinstance(folder, unicode):
                    fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW  #@UndefinedVariable
                else:
                    fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA  #@UndefinedVariable
                ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
                if ret == 0:
                    raise ctypes.WinError()
                # Bug fix: this used to `return [total.value, free.value]`
                # immediately, which ignored `single` and silently dropped any
                # remaining directories when a list was passed.
                size = [total.value, free.value]
            else:
                s = os.statvfs(folder)
                size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]

        if single: return size
        free_space[folder] = size

    return free_space
def getSize(paths):
    """Return the combined size of the given path(s), in megabytes.

    :param paths: a single path or a list/tuple of paths; directories are
        traversed recursively, plain files are counted directly
    """

    single = not isinstance(paths, (tuple, list))
    if single:
        paths = [paths]

    total_size = 0
    for path in paths:
        path = sp(path)

        if os.path.isdir(path):
            # Bug fix: total_size used to be reset to 0 here, discarding the
            # sizes of any previously processed paths in the list.
            for dirpath, _, filenames in os.walk(path):
                for f in filenames:
                    total_size += os.path.getsize(sp(os.path.join(dirpath, f)))
        elif os.path.isfile(path):
            total_size += os.path.getsize(path)

    return total_size / 1048576  # MB
def find(func, iterable):
    """Return the first element of `iterable` for which `func` is truthy,
    or None when no element matches."""
    return next((candidate for candidate in iterable if func(candidate)), None)

View File

@@ -25,6 +25,12 @@ class CPLog(object):
self.Env = Env
self.is_develop = Env.get('dev')
from couchpotato.core.event import addEvent
addEvent('app.after_shutdown', self.close)
def close(self, *args, **kwargs):
logging.shutdown()
def info(self, msg, replace_tuple = ()):
self.logger.info(self.addContext(msg, replace_tuple))
@@ -53,15 +59,14 @@ class CPLog(object):
msg = ss(msg)
try:
msg = msg % replace_tuple
except:
try:
if isinstance(replace_tuple, tuple):
msg = msg % tuple([ss(x) for x in list(replace_tuple)])
else:
msg = msg % ss(replace_tuple)
except Exception as e:
self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))
if isinstance(replace_tuple, tuple):
msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)])
elif isinstance(replace_tuple, dict):
msg = msg % dict((k, ss(v) if not isinstance(v, (int, float)) else v) for k, v in replace_tuple.iteritems())
else:
msg = msg % ss(replace_tuple)
except Exception as e:
self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))
self.setup()
if not self.is_develop:

37
couchpotato/core/media/__init__.py Normal file → Executable file
View File

@@ -1,9 +1,10 @@
import os
import traceback
from couchpotato import get_db, CPLog
from couchpotato import CPLog, md5
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getExt
from couchpotato.core.plugins.base import Plugin
import six
@@ -25,11 +26,10 @@ class MediaBase(Plugin):
def onComplete():
try:
db = get_db()
media = fireEvent('media.get', media_id, single = True)
event_name = '%s.searcher.single' % media.get('type')
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id))
if media:
event_name = '%s.searcher.single' % media.get('type')
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True)
except:
log.error('Failed creating onComplete: %s', traceback.format_exc())
@@ -40,9 +40,9 @@ class MediaBase(Plugin):
def notifyFront():
try:
media = fireEvent('media.get', media_id, single = True)
event_name = '%s.update' % media.get('type')
fireEvent('notify.frontend', type = event_name, data = media)
if media:
event_name = '%s.update' % media.get('type')
fireEvent('notify.frontend', type = event_name, data = media)
except:
log.error('Failed creating onComplete: %s', traceback.format_exc())
@@ -66,10 +66,13 @@ class MediaBase(Plugin):
return def_title or 'UNKNOWN'
def getPoster(self, image_urls, existing_files):
image_type = 'poster'
def getPoster(self, media, image_urls):
if 'files' not in media:
media['files'] = {}
# Remove non-existing files
existing_files = media['files']
image_type = 'poster'
file_type = 'image_%s' % image_type
# Make existing unique
@@ -90,10 +93,18 @@ class MediaBase(Plugin):
if not isinstance(image, (str, unicode)):
continue
if file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
# Check if it has top image
filename = '%s.%s' % (md5(image), getExt(image))
existing = existing_files.get(file_type, [])
has_latest = False
for x in existing:
if filename in x:
has_latest = True
if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
file_path = fireEvent('file.download', url = image, single = True)
if file_path:
existing_files[file_type] = [file_path]
existing_files[file_type] = [toUnicode(file_path)]
break
else:
break

110
couchpotato/core/media/_base/library/main.py Normal file → Executable file
View File

@@ -1,10 +1,47 @@
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.library.base import LibraryBase
log = CPLog(__name__)
class Library(LibraryBase):
def __init__(self):
addEvent('library.title', self.title)
addEvent('library.related', self.related)
addEvent('library.tree', self.tree)
addEvent('library.root', self.root)
addApiView('library.query', self.queryView)
addApiView('library.related', self.relatedView)
addApiView('library.tree', self.treeView)
def queryView(self, media_id, **kwargs):
db = get_db()
media = db.get('id', media_id)
return {
'result': fireEvent('library.query', media, single = True)
}
def relatedView(self, media_id, **kwargs):
db = get_db()
media = db.get('id', media_id)
return {
'result': fireEvent('library.related', media, single = True)
}
def treeView(self, media_id, **kwargs):
db = get_db()
media = db.get('id', media_id)
return {
'result': fireEvent('library.tree', media, single = True)
}
def title(self, library):
return fireEvent(
@@ -16,3 +53,76 @@ class Library(LibraryBase):
include_identifier = False,
single = True
)
def related(self, media):
result = {self.key(media['type']): media}
db = get_db()
cur = media
while cur and cur.get('parent_id'):
cur = db.get('id', cur['parent_id'])
result[self.key(cur['type'])] = cur
children = db.get_many('media_children', media['_id'], with_doc = True)
for item in children:
key = self.key(item['doc']['type']) + 's'
if key not in result:
result[key] = []
result[key].append(item['doc'])
return result
def root(self, media):
db = get_db()
cur = media
while cur and cur.get('parent_id'):
cur = db.get('id', cur['parent_id'])
return cur
def tree(self, media = None, media_id = None):
db = get_db()
if media:
result = media
elif media_id:
result = db.get('id', media_id, with_doc = True)
else:
return None
# Find children
items = db.get_many('media_children', result['_id'], with_doc = True)
keys = []
# Build children arrays
for item in items:
key = self.key(item['doc']['type']) + 's'
if key not in result:
result[key] = {}
elif type(result[key]) is not dict:
result[key] = {}
if key not in keys:
keys.append(key)
result[key][item['_id']] = fireEvent('library.tree', item['doc'], single = True)
# Unique children
for key in keys:
result[key] = result[key].values()
# Include releases
result['releases'] = fireEvent('release.for_media', result['_id'], single = True)
return result
def key(self, media_type):
    """Return the last dotted component of a media type string
    (e.g. 'show.season' -> 'season')."""
    return media_type.rsplit('.', 1)[-1]

View File

@@ -40,7 +40,7 @@ class Matcher(MatcherBase):
return False
def correctTitle(self, chain, media):
root_library = media['library']['root_library']
root = fireEvent('library.root', media, single = True)
if 'show_name' not in chain.info or not len(chain.info['show_name']):
log.info('Wrong: missing show name in parsed result')
@@ -50,10 +50,10 @@ class Matcher(MatcherBase):
chain_words = [x.lower() for x in chain.info['show_name']]
# Build a list of possible titles of the media we are searching for
titles = root_library['info']['titles']
titles = root['info']['titles']
# Add year suffix titles (will result in ['<name_one>', '<name_one> <suffix_one>', '<name_two>', ...])
suffixes = [None, root_library['info']['year']]
suffixes = [None, root['info']['year']]
titles = [
title + ((' %s' % suffix) if suffix else '')

View File

@@ -99,7 +99,7 @@ from couchpotato.core.helpers.encoding import simplifyString"""
class TitleIndex(TreeBasedIndex):
_version = 2
_version = 4
custom_header = """from CodernityDB.tree_index import TreeBasedIndex
from string import ascii_letters
@@ -123,16 +123,16 @@ from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""
nr_prefix = '' if title and len(title) > 0 and title[0] in ascii_letters else '#'
title = simplifyString(title)
for prefix in ['the ']:
for prefix in ['the ', 'an ', 'a ']:
if prefix == title[:len(prefix)]:
title = title[len(prefix):]
break
return str(nr_prefix + title).ljust(32, '_')[:32]
return str(nr_prefix + title).ljust(32, ' ')[:32]
class StartsWithIndex(TreeBasedIndex):
_version = 2
_version = 3
custom_header = """from CodernityDB.tree_index import TreeBasedIndex
from string import ascii_letters
@@ -153,7 +153,7 @@ from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""
title = toUnicode(title)
title = simplifyString(title)
for prefix in ['the ']:
for prefix in ['the ', 'an ', 'a ']:
if prefix == title[:len(prefix)]:
title = title[len(prefix):]
break
@@ -176,3 +176,24 @@ class MediaChildrenIndex(TreeBasedIndex):
if data.get('_t') == 'media' and data.get('parent_id'):
return data.get('parent_id'), None
class MediaTagIndex(MultiTreeBasedIndex):
    """Multi-key index mapping each tag of a media document to the document.

    Keys are 32-char md5 hexdigests of the tag strings, so any tag value
    fits the fixed '32s' key format.
    """

    _version = 2

    custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex"""

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '32s'
        super(MediaTagIndex, self).__init__(*args, **kwargs)

    def make_key_value(self, data):
        # Only index media documents that actually carry tags
        if data.get('_t') == 'media' and data.get('tags') and len(data.get('tags', [])) > 0:
            unique_keys = {self.make_key(tag) for tag in data.get('tags', [])}
            return list(unique_keys), None

    def make_key(self, key):
        return md5(key).hexdigest()

245
couchpotato/core/media/_base/media/main.py Normal file → Executable file
View File

@@ -1,6 +1,9 @@
from datetime import timedelta
import time
import traceback
from string import ascii_lowercase
from CodernityDB.database import RecordNotFound, RecordDeleted
from couchpotato import tryInt, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
@@ -8,7 +11,7 @@ from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, getImdb, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex, MediaTagIndex
log = CPLog(__name__)
@@ -20,6 +23,7 @@ class MediaPlugin(MediaBase):
'media': MediaIndex,
'media_search_title': TitleSearchIndex,
'media_status': MediaStatusIndex,
'media_tag': MediaTagIndex,
'media_by_type': MediaTypeIndex,
'media_title': TitleIndex,
'media_startswith': StartsWithIndex,
@@ -39,15 +43,15 @@ class MediaPlugin(MediaBase):
'desc': 'List media',
'params': {
'type': {'type': 'string', 'desc': 'Media type to filter on.'},
'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
'search': {'desc': 'Search movie title'},
'status': {'type': 'array or csv', 'desc': 'Filter media by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter media by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the media list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all media starting with the letter "a"'},
'search': {'desc': 'Search media title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any movies returned or not,
'empty': bool, any media returned or not,
'media': array, media found,
}"""}
})
@@ -73,6 +77,7 @@ class MediaPlugin(MediaBase):
addEvent('app.load', self.addSingleListView, priority = 100)
addEvent('app.load', self.addSingleCharView, priority = 100)
addEvent('app.load', self.addSingleDeleteView, priority = 100)
addEvent('app.load', self.cleanupFaults)
addEvent('media.get', self.get)
addEvent('media.with_status', self.withStatus)
@@ -80,6 +85,20 @@ class MediaPlugin(MediaBase):
addEvent('media.list', self.list)
addEvent('media.delete', self.delete)
addEvent('media.restatus', self.restatus)
addEvent('media.tag', self.tag)
addEvent('media.untag', self.unTag)
# Wrongly tagged media files
def cleanupFaults(self):
    """One-off cleanup: move media wrongly left in 'ignored' status back to 'done'.

    Best-effort — a single failing document must not abort the sweep.
    """
    medias = fireEvent('media.with_status', 'ignored', single = True) or []

    db = get_db()
    for media in medias:
        try:
            media['status'] = 'done'
            db.update(media)
        except:
            # Was a silent `pass`; keep best-effort semantics but leave a trace
            # so repeated failures are diagnosable.
            log.debug('Failed cleanup of media %s: %s', (media.get('_id'), traceback.format_exc()))
def refresh(self, id = '', **kwargs):
handlers = []
@@ -102,13 +121,12 @@ class MediaPlugin(MediaBase):
try:
media = get_db().get('id', media_id)
event = '%s.update_info' % media.get('type')
event = '%s.update' % media.get('type')
def handler():
fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
if handler:
return handler
return handler
except:
log.error('Refresh handler for non existing media: %s', traceback.format_exc())
@@ -120,25 +138,30 @@ class MediaPlugin(MediaBase):
def get(self, media_id):
    """Fetch a single media document by internal id or IMDB id.

    *media_id* may be an internal document id or a string containing an
    IMDB id. Attaches 'category' (best-effort) and 'releases' before
    returning. Returns None when the record is missing/deleted; any other
    error is re-raised.

    NOTE(review): this span contained interleaved old AND new diff lines
    (duplicated lookups and returns); reconstructed to the new revision.
    """
    try:
        db = get_db()

        imdb_id = getImdb(str(media_id))

        if imdb_id:
            media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
        else:
            media = db.get('id', media_id)

        if media:

            # Attach category (best-effort: missing category is not fatal)
            try: media['category'] = db.get('id', media.get('category_id'))
            except: pass

            media['releases'] = fireEvent('release.for_media', media['_id'], single = True)

        return media
    except (RecordNotFound, RecordDeleted):
        log.error('Media with id "%s" not found', media_id)
    except:
        raise
def getView(self, id = None, **kwargs):
@@ -149,30 +172,45 @@ class MediaPlugin(MediaBase):
'media': media,
}
def withStatus(self, status, types = None, with_doc = True):
    """Yield media with any of the given status values, optionally filtered by type.

    *status* may be a single value or a list/tuple. When *with_doc* is True,
    each index hit is resolved to its full document; deleted/missing records
    are skipped, and corrupted records trigger a 'database.delete_corrupted'
    event. Otherwise the raw index entries are yielded.

    NOTE(review): this span contained interleaved old AND new diff lines
    (old signature and loop body alongside the new ones); reconstructed to
    the new revision.
    """
    db = get_db()

    if types and not isinstance(types, (list, tuple)):
        types = [types]

    status = list(status if isinstance(status, (list, tuple)) else [status])

    for s in status:
        for ms in db.get_many('media_status', s):
            if with_doc:
                try:
                    doc = db.get('id', ms['_id'])

                    # Apply the optional type filter on the resolved document
                    if types and doc.get('type') not in types:
                        continue

                    yield doc
                except (RecordDeleted, RecordNotFound):
                    log.debug('Record not found, skipping: %s', ms['_id'])
                except (ValueError, EOFError):
                    fireEvent('database.delete_corrupted', ms.get('_id'), traceback_error = traceback.format_exc(0))
            else:
                yield ms
def withIdentifiers(self, identifiers, with_doc = False):
    """Return the first media found for any of the given identifiers, else False.

    *identifiers* maps identifier type to value (e.g. {'imdb': 'tt0123456'});
    lookups use the '<type>-<value>' key in the 'media' index.

    NOTE(review): this span contained both the old two-line lookup and the new
    single-return lookup from the diff; reconstructed to the new revision.
    """
    db = get_db()

    for x in identifiers:
        try:
            return db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
        except:
            # Not found under this identifier; try the next one
            pass

    log.debug('No media found with identifiers: %s', identifiers)
    return False
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, starts_with = None, search = None):
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None):
db = get_db()
@@ -183,6 +221,8 @@ class MediaPlugin(MediaBase):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
if with_tags and not isinstance(with_tags, (list, tuple)):
with_tags = [with_tags]
# query media ids
if types:
@@ -209,11 +249,17 @@ class MediaPlugin(MediaBase):
# Add search filters
if starts_with:
filter_by['starts_with'] = set()
starts_with = toUnicode(starts_with.lower())[0]
starts_with = starts_with if starts_with in ascii_lowercase else '#'
filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]
# Add tag filter
if with_tags:
filter_by['with_tags'] = set()
for tag in with_tags:
for x in db.get_many('media_tag', tag):
filter_by['with_tags'].add(x['_id'])
# Filter with search query
if search:
filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]
@@ -249,6 +295,10 @@ class MediaPlugin(MediaBase):
media = fireEvent('media.get', media_id, single = True)
# Skip if no media has been found
if not media:
continue
# Merge releases with movie dict
medias.append(media)
@@ -266,6 +316,7 @@ class MediaPlugin(MediaBase):
release_status = splitString(kwargs.get('release_status')),
status_or = kwargs.get('status_or') is not None,
limit_offset = kwargs.get('limit_offset'),
with_tags = splitString(kwargs.get('with_tags')),
starts_with = kwargs.get('starts_with'),
search = kwargs.get('search')
)
@@ -280,9 +331,22 @@ class MediaPlugin(MediaBase):
def addSingleListView(self):
for media_type in fireEvent('media.types', merge = True):
def tempList(*args, **kwargs):
return self.listView(types = media_type, **kwargs)
addApiView('%s.list' % media_type, tempList)
tempList = lambda *args, **kwargs : self.listView(type = media_type, **kwargs)
addApiView('%s.list' % media_type, tempList, docs = {
'desc': 'List media',
'params': {
'status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the ' + media_type + ' list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all ' + media_type + 's starting with the letter "a"'},
'search': {'desc': 'Search ' + media_type + ' title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any """ + media_type + """s returned or not,
'media': array, media found,
}"""}
})
def availableChars(self, types = None, status = None, release_status = None):
@@ -328,7 +392,7 @@ class MediaPlugin(MediaBase):
if x['_id'] in media_ids:
chars.add(x['key'])
if len(chars) == 25:
if len(chars) == 27:
break
return list(chars)
@@ -349,8 +413,7 @@ class MediaPlugin(MediaBase):
def addSingleCharView(self):
for media_type in fireEvent('media.types', merge = True):
def tempChar(*args, **kwargs):
return self.charView(types = media_type, **kwargs)
tempChar = lambda *args, **kwargs : self.charView(type = media_type, **kwargs)
addApiView('%s.available_chars' % media_type, tempChar)
def delete(self, media_id, delete_from = None):
@@ -361,13 +424,18 @@ class MediaPlugin(MediaBase):
media = db.get('id', media_id)
if media:
deleted = False
media_releases = fireEvent('release.for_media', media['_id'], single = True)
if delete_from == 'all':
# Delete connected releases
for release in media_releases:
db.delete(release)
db.delete(media)
deleted = True
else:
media_releases = fireEvent('release.for_media', media['_id'], single = True)
total_releases = len(media_releases)
total_deleted = 0
new_media_status = None
@@ -379,16 +447,23 @@ class MediaPlugin(MediaBase):
total_deleted += 1
new_media_status = 'done'
elif delete_from == 'manage':
if release.get('status') == 'done':
if release.get('status') == 'done' or media.get('status') == 'done':
db.delete(release)
total_deleted += 1
if (total_releases == total_deleted and media['status'] != 'active') or (delete_from == 'wanted' and media['status'] == 'active'):
if (total_releases == total_deleted) or (total_releases == 0 and not new_media_status) or (not new_media_status and delete_from == 'late'):
db.delete(media)
deleted = True
elif new_media_status:
media['status'] = new_media_status
# Remove profile (no use for in manage)
if new_media_status == 'done':
media['profile_id'] = None
db.update(media)
fireEvent('media.untag', media['_id'], 'recent', single = True)
else:
fireEvent('media.restatus', media.get('_id'), single = True)
@@ -412,11 +487,16 @@ class MediaPlugin(MediaBase):
def addSingleDeleteView(self):
for media_type in fireEvent('media.types', merge = True):
def tempDelete(*args, **kwargs):
return self.deleteView(types = media_type, *args, **kwargs)
addApiView('%s.delete' % media_type, tempDelete)
tempDelete = lambda *args, **kwargs : self.deleteView(type = media_type, **kwargs)
addApiView('%s.delete' % media_type, tempDelete, docs = {
'desc': 'Delete a ' + media_type + ' from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete ' + media_type + ' from this page', 'type': 'string: all (default), wanted, manage'},
}
})
def restatus(self, media_id):
def restatus(self, media_id, tag_recent = True, allowed_restatus = None):
try:
db = get_db()
@@ -428,24 +508,77 @@ class MediaPlugin(MediaBase):
if not m['profile_id']:
m['status'] = 'done'
else:
move_to_wanted = True
m['status'] = 'active'
profile = db.get('id', m['profile_id'])
media_releases = fireEvent('release.for_media', m['_id'], single = True)
try:
profile = db.get('id', m['profile_id'])
media_releases = fireEvent('release.for_media', m['_id'], single = True)
done_releases = [release for release in media_releases if release.get('status') == 'done']
for q_identifier in profile['qualities']:
index = profile['qualities'].index(q_identifier)
if done_releases:
for release in media_releases:
if q_identifier == release['quality'] and (release.get('status') == 'done' and profile['finish'][index]):
move_to_wanted = False
# Check if we are finished with the media
for release in done_releases:
if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, timedelta(seconds = time.time() - release['last_edit']).days, single = True):
m['status'] = 'done'
break
m['status'] = 'active' if move_to_wanted else 'done'
elif previous_status == 'done':
m['status'] = 'done'
except RecordNotFound:
log.debug('Failed restatus, keeping previous: %s', traceback.format_exc())
m['status'] = previous_status
# Only update when status has changed
if previous_status != m['status']:
if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus):
db.update(m)
# Tag media as recent
if tag_recent:
self.tag(media_id, 'recent', update_edited = True)
return m['status']
except:
log.error('Failed restatus: %s', traceback.format_exc())
def tag(self, media_id, tag, update_edited = False):
    """Add *tag* to the media document's tag list; returns True on success.

    When *update_edited* is True, also bumps the document's 'last_edit'
    timestamp. The document is only written when the tag was not present.

    NOTE(review): the diff residue left BOTH the old 'Failed restatus' and
    new 'Failed tagging' log lines in the except branch; kept only the new one.
    """
    try:
        db = get_db()
        m = db.get('id', media_id)

        if update_edited:
            m['last_edit'] = int(time.time())

        tags = m.get('tags') or []
        if tag not in tags:
            tags.append(tag)
            m['tags'] = tags
            db.update(m)

        return True
    except:
        log.error('Failed tagging: %s', traceback.format_exc())

    return False
def unTag(self, media_id, tag):
    """Remove *tag* from the media document's tag list; returns True on success.

    Tags are deduplicated (via a set) while removing; the document is only
    written when the tag was actually present.
    """
    try:
        db = get_db()
        doc = db.get('id', media_id)

        current = doc.get('tags') or []
        if tag in current:
            # Deduplicate, then drop the requested tag
            remaining = list(set(current))
            remaining.remove(tag)

            doc['tags'] = remaining
            db.update(doc)

        return True
    except:
        log.error('Failed untagging: %s', traceback.format_exc())

    return False

View File

@@ -88,10 +88,16 @@ class Provider(Plugin):
if data and len(data) > 0:
try:
data = XMLTree.fromstring(ss(data))
data = XMLTree.fromstring(data)
return self.getElements(data, item_path)
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
try:
data = XMLTree.fromstring(ss(data))
return self.getElements(data, item_path)
except XMLTree.ParseError:
log.error('Invalid XML returned, check "%s" manually for issues', url)
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return []
@@ -125,6 +131,9 @@ class YarrProvider(Provider):
else:
return []
def buildUrl(self, *args, **kwargs):
pass
def login(self):
# Check if we are still logged in every hour
@@ -177,7 +186,7 @@ class YarrProvider(Provider):
try:
return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False)
except:
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc()))
return 'try_next'
@@ -200,7 +209,7 @@ class YarrProvider(Provider):
self._search(media, quality, results)
# Search possible titles
else:
media_title = fireEvent('library.query', media, single = True)
media_title = fireEvent('library.query', media, include_year = False, single = True)
for title in possibleTitles(media_title):
self._searchOnTitle(title, media, quality, results)
@@ -298,7 +307,7 @@ class ResultList(list):
old_score = new_result['score']
new_result['score'] = int(old_score * is_correct_weight)
log.info('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
log.info2('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
is_correct_weight,
old_score,
new_result['score']

View File

@@ -2,7 +2,7 @@ import re
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.helpers.variable import tryInt, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
@@ -50,8 +50,8 @@ class Base(NZBProvider):
def extra_check(item):
parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
total = tryInt(parts.group('total'))
parts = tryInt(parts.group('parts'))
total = float(tryInt(parts.group('total')))
parts = float(tryInt(parts.group('parts')))
if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))):
log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
@@ -65,7 +65,7 @@ class Base(NZBProvider):
results.append({
'id': nzb_id,
'name': title.text,
'name': simplifyString(title.text),
'age': tryInt(age),
'size': self.parseSize(size_match.group('size')),
'url': self.urls['download'] % nzb_id,
@@ -100,6 +100,7 @@ config = [{
'name': 'binsearch',
'description': 'Free provider, less accurate. See <a href="https://www.binsearch.info/">BinSearch</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAATklEQVQY02NwQAMMWAXOnz+PKvD//3/CAvM//z+fgiwAAs+RBab4PP//vwbFjPlAffgEChzOo2r5fBuIfRAC5w8D+QUofkkp8MHjOWQAAM3Sbogztg2wAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -1,8 +1,7 @@
from urllib2 import HTTPError
from urlparse import urlparse
import time
import traceback
import urllib2
import re
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.rss import RSS
@@ -12,6 +11,7 @@ from couchpotato.core.media._base.providers.base import ResultList
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
from requests import HTTPError
log = CPLog(__name__)
@@ -20,10 +20,11 @@ log = CPLog(__name__)
class Base(NZBProvider, RSS):
urls = {
'detail': 'details&id=%s',
'detail': 'details/%s',
'download': 't=get&id=%s'
}
passwords_regex = 'password|wachtwoord'
limits_reached = {}
http_time_between_calls = 1 # Seconds
@@ -43,10 +44,8 @@ class Base(NZBProvider, RSS):
def _searchOnHost(self, host, media, quality, results):
query = self.buildUrl(media, host['api_key'])
url = '%s&%s' % (self.getUrl(host['host']), query)
query = self.buildUrl(media, host)
url = '%s%s' % (self.getUrl(host['host']), query)
nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
for nzb in nzbs:
@@ -69,8 +68,12 @@ class Base(NZBProvider, RSS):
if not date:
date = self.getTextElement(nzb, 'pubDate')
nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
name = self.getTextElement(nzb, 'title')
detail_url = self.getTextElement(nzb, 'guid')
nzb_id = detail_url.split('/')[-1:].pop()
if '://' not in detail_url:
detail_url = (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id)
if not name:
continue
@@ -79,6 +82,23 @@ class Base(NZBProvider, RSS):
if spotter:
name_extra = spotter
description = ''
if "@spot.net" in nzb_id:
try:
# Get details for extended description to retrieve passwords
query = self.buildDetailsUrl(nzb_id, host['api_key'])
url = '%s%s' % (self.getUrl(host['host']), query)
nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0]
description = self.getTextElement(nzb_details, 'description')
# Extract a password from the description
password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$', description, flags = re.I).group(1)
if password:
name += ' {{%s}}' % password.strip()
except:
log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))
results.append({
'id': nzb_id,
'provider_extra': urlparse(host['host']).hostname or host['host'],
@@ -87,8 +107,9 @@ class Base(NZBProvider, RSS):
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)),
'detail_url': detail_url,
'content': self.getTextElement(nzb, 'description'),
'description': description,
'score': host['extra_score'],
})
@@ -166,24 +187,16 @@ class Base(NZBProvider, RSS):
return 'try_next'
try:
# Get final redirected url
log.debug('Checking %s for redirects.', url)
req = urllib2.Request(url)
req.add_header('User-Agent', self.user_agent)
res = urllib2.urlopen(req)
finalurl = res.geturl()
if finalurl != url:
log.debug('Redirect url used: %s', finalurl)
data = self.urlopen(finalurl, show_error = False)
data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
self.limits_reached[host] = False
return data
except HTTPError as e:
if e.code == 503:
sc = e.response.status_code
if sc in [503, 429]:
response = e.read().lower()
if 'maximum api' in response or 'download limit' in response:
if sc == 429 or 'maximum api' in response or 'download limit' in response:
if not self.limits_reached.get(host):
log.error('Limit reached for newznab provider: %s', host)
log.error('Limit reached / to many requests for newznab provider: %s', host)
self.limits_reached[host] = time.time()
return 'try_next'
@@ -191,6 +204,15 @@ class Base(NZBProvider, RSS):
return 'try_next'
def buildDetailsUrl(self, nzb_id, api_key):
    """Build the newznab 'details' query string for the given nzb id and api key."""
    params = {
        't': 'details',
        'id': nzb_id,
        'apikey': api_key,
    }
    return tryUrlencode(params)
config = [{
'name': 'newznab',
@@ -203,8 +225,9 @@ config = [{
'description': 'Enable <a href="http://newznab.com/" target="_blank">NewzNab</a> such as <a href="https://nzb.su" target="_blank">NZB.su</a>, \
<a href="https://nzbs.org" target="_blank">NZBs.org</a>, <a href="http://dognzb.cr/" target="_blank">DOGnzb.cr</a>, \
<a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a>, <a href="https://nzbgeek.info/" target="_blank">NZBGeek</a>, \
<a href="https://smackdownonyou.com" target="_blank">SmackDown</a>, <a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>',
<a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
@@ -213,30 +236,30 @@ config = [{
},
{
'name': 'use',
'default': '0,0,0,0,0,0'
'default': '0,0,0,0,0'
},
{
'name': 'host',
'default': 'api.nzb.su,dognzb.cr,nzbs.org,https://index.nzbgeek.info, https://smackdownonyou.com, https://www.nzbfinder.ws',
'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://api.nzbgeek.info,https://www.nzbfinder.ws',
'description': 'The hostname of your newznab provider',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'default': '0,0,0,0,0,0',
'default': '0,0,0,0,0',
'description': 'Starting score for each release found via this provider.',
},
{
'name': 'custom_tag',
'advanced': True,
'label': 'Custom tag',
'default': ',,,,,',
'default': ',,,,',
'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org',
},
{
'name': 'api_key',
'default': ',,,,,',
'default': ',,,,',
'label': 'Api Key',
'description': 'Can be found on your profile page',
'type': 'combined',

View File

@@ -80,6 +80,7 @@ config = [{
'name': 'NZBClub',
'description': 'Free provider, less accurate. See <a href="https://www.nzbclub.com/">NZBClub</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACEUlEQVQ4y3VSMWgUQRR9/8/s7OzeJSdnTsVGghLEYBNQjBpQiRBFhIB2EcHG1kbs0murhZAmVocExEZQ0c7CxkLINYcJJpoYj9wZcnu72fF21uJSXMzuhyne58/j/fcf4b+KokgBIOSU53lxP5b9oNVqDT36dH+5UjoiKvIwPFEEgWBshGZ3E7/NOupL9fMjx0e+ZhKsrq+c/FPZKJi0w4FsQXMBDEJsd7BNW9h2tuyP9vfTALIJkMIu1hYRtINM+dpzcWc0sbkreK4fUEogyraAmKGF3+7vcT/wtR9QwkCabSAzQQuvk0uglAo5YaQ5DASGYjfMXcHVOqKu6NmR7iehlKAdHWUqWPv1c3i+9uwVdRlEBGaGEAJCCrDo9ShhvF6qPq8tL57bp+DbRn2sHtUuCY9YphLMu5921VhrwYJ5tbt0tt6sjQP4vEfB2Ikz7/ytwbeR6ljHkXCUA6UcOLtPOg4MYhtH8ZcLw5er+xQMDAwEURRNl96X596Y6oxFwsw9fmtTOAr2Ik19nL365FZpsLSdnQPPM8aYewc+lDcX4rkHqbQMAGTJXulOLzycmr1bKBTi3DOGYagajcahiaOT89fbM0/dxEsUu3aidfPljWO3HzebzYNBELi5Z5RSJlrrHd/3w8lT114MrVTWOn875fHRiYVisRhorWMpZXdvNnLKGCOstb0AMlulVJI19w/+nceU4D0aCwAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -1,125 +0,0 @@
import re
import time
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from dateutil.parser import parse
log = CPLog(__name__)
class Base(NZBProvider, RSS):
urls = {
'download': 'https://www.nzbindex.com/download/',
'search': 'https://www.nzbindex.com/rss/?%s',
}
http_time_between_calls = 1 # Seconds
def _search(self, media, quality, results):
nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media, quality))
for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])
title = self.getTextElement(nzb, "title")
match = fireEvent('matcher.parse', title, parser='usenet', single = True)
if not match.chains:
log.info('Unable to parse release with title "%s"', title)
continue
# TODO should we consider other lower-weight chains here?
info = fireEvent('matcher.flatten_info', match.chains[0].info, single = True)
release_name = fireEvent('matcher.construct_from_raw', info.get('release_name'), single = True)
file_name = info.get('detail', {}).get('file_name')
file_name = file_name[0] if file_name else None
title = release_name or file_name
# Strip extension from parsed title (if one exists)
ext_pos = title.rfind('.')
# Assume extension if smaller than 4 characters
# TODO this should probably be done a better way
if len(title[ext_pos + 1:]) <= 4:
title = title[:ext_pos]
if not title:
log.info('Unable to find release name from match')
continue
try:
description = self.getTextElement(nzb, "description")
except:
description = ''
def extra_check(item):
if '#c20000' in item['description'].lower():
log.info('Wrong: Seems to be passworded: %s', item['name'])
return False
return True
results.append({
'id': nzbindex_id,
'name': title,
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'],
'detail_url': enclosure['url'].replace('/download/', '/release/'),
'description': description,
'get_more_info': self.getMoreInfo,
'extra_check': extra_check,
})
def getMoreInfo(self, item):
try:
if '/nfo/' in item['description'].lower():
nfo_url = re.search('href=\"(?P<nfo>.+)\" ', item['description']).group('nfo')
full_description = self.getCache('nzbindex.%s' % item['id'], url = nfo_url, cache_timeout = 25920000)
html = BeautifulSoup(full_description)
item['description'] = toUnicode(html.find('pre', attrs = {'id': 'nfo0'}).text)
except:
pass
config = [{
'name': 'nzbindex',
'groups': [
{
'tab': 'searcher',
'list': 'nzb_providers',
'name': 'nzbindex',
'description': 'Free provider, less accurate. See <a href="https://www.nzbindex.com/">NZBIndex</a>',
'wizard': True,
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': True,
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]

View File

@@ -1,13 +1,9 @@
from urlparse import urlparse, parse_qs
import time
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from dateutil.parser import parse
log = CPLog(__name__)
@@ -16,27 +12,19 @@ log = CPLog(__name__)
class Base(NZBProvider, RSS):
urls = {
'search': 'https://rss.omgwtfnzbs.org/rss-search.php?%s',
'detail_url': 'https://omgwtfnzbs.org/details.php?id=%s',
'search': 'https://api.omgwtfnzbs.org/json/?%s',
}
http_time_between_calls = 1 # Seconds
cat_ids = [
([15], ['dvdrip']),
([15], ['dvdrip', 'scr', 'r5', 'tc', 'ts', 'cam']),
([15, 16], ['brrip']),
([16], ['720p', '1080p', 'bd50']),
([17], ['dvdr']),
]
cat_backup_id = 'movie'
def search(self, movie, quality):
if quality['identifier'] in fireEvent('quality.pre_releases', single = True):
return []
return super(Base, self).search(movie, quality)
def _searchOnTitle(self, title, movie, quality, results):
q = '%s %s' % (title, movie['info']['year'])
@@ -47,22 +35,20 @@ class Base(NZBProvider, RSS):
'api': self.conf('api_key', default = ''),
})
nzbs = self.getRSSData(self.urls['search'] % params)
nzbs = self.getJsonData(self.urls['search'] % params)
for nzb in nzbs:
if isinstance(nzbs, list):
for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0]
results.append({
'id': nzb_id,
'name': toUnicode(self.getTextElement(nzb, 'title')),
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'],
'detail_url': self.urls['detail_url'] % nzb_id,
'description': self.getTextElement(nzb, 'description')
})
results.append({
'id': nzb.get('nzbid'),
'name': toUnicode(nzb.get('release')),
'age': self.calculateAge(tryInt(nzb.get('usenetage'))),
'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024,
'url': nzb.get('getnzb'),
'detail_url': nzb.get('details'),
'description': nzb.get('weblink')
})
config = [{
@@ -74,6 +60,7 @@ config = [{
'name': 'OMGWTFNZBs',
'description': 'See <a href="http://omgwtfnzbs.org/">OMGWTFNZBs</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQEAIAAADAAbR1AAADbElEQVR4AZ2UW0ybZRiAy/OvdHaLYvB0YTRIFi7GkM44zRLmIfNixkWdiRMyYoxRE8/TC7MYvXCGEBmr3mxLwVMwY0wYA7e6Wso4lB6h/U9taSlMGIfBXLYlJMyo0S///2dJI5lxN8/F2/f9nu9737e/jYmXr6KTbN9BGG9HE/NotQ76UWziNzrXFiETk/5ARUNH+7+0kW7fSgTl0VKGOLZzidOkmuuIo7q2oTArNLPIzhdIkqXkerFOm2CaD/5bcKrjIL2c3fkhPxOq93Kcb91v46fV9TQKF4TgV/TbUsQtzfCaK6jMOd5DJrguSIIhexmqqVxN0FXbRR8/ND/LYTTj6J7nl2gnL47OkDW4KJhnQHCa6JpKVNJGA3OC58nwBJoZ//ebbIyKpBxjrr0o1q1FMRkrKXZnHWF85VvxMrJxibwhGyd0f5bLnKzqJs1k0Sfo+EU8hdAUvkbcwKEgs2D0OiV4jmmD1zb+Tp6er0JMMvDxPo5xev9zTBF683NS+N56n1YiB95B5crr93KRuKhKI0tb0Kw2mgLLqTjLEWO8424i9IvURaYeOckwf3+/yCC9e3bQQ/MuD+Monk0k+XFXMUfx7z5EEP+XlXi5tLlMxH8zLppw7idJrugcus30kC86gc7UrQqjLIukM8zWHOACeU+TiMxXN6ExVOkgz4lvPEzice1GIVhxhG4CrZvpl6TH55giKWqXGLy9hZh5aUtgDSew/msSyCKpl+DDNfxJc8NBIsxUxUnz14O/oONu+IIIvso9TLBQ1SY5rUhuSzUhAqJ2mRXBLDOCeUtgUZXsaObT8BffhUJPqWgiV+3zKKzYH0ClvTRLhD77HIqVkyh5jThnivehoG+qJctIRSPn6bxvO4FCgTl9c1DmbpjLajbQFE8aW5SU3rg+zOPGUjTUF9NFpLEbH2c/KmGYlY69/GQJVtGMSUcEp9eCbB1nctbxHTLRdTUkGDf+B02uGWRG3OvpJ/zSMwzif+oxVBID3cQKBavLCiPmB2PM2UuSCUPgrX4VDb97AwEG67bh4+KTOlncvu3M31BwA5rLHbCfEjwkNDky9e/SSbSxnD46Pg0RJtpXRvhmBSZHpRjWtKwFybjuQeXaKxto4WjLZZZvVmC17pZLJFkwxm5++PS2Mrwc7nyIMYZe/IzoP5d6QgEybqTXAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -61,7 +61,7 @@ class Base(TorrentProvider):
'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(entry.find('size').get_text()),
'size': tryInt(entry.find('size').get_text()) / 1048576,
'seeders': tryInt(entry.find('seeders').get_text()),
'leechers': tryInt(entry.find('leechers').get_text()),
'score': torrentscore
@@ -78,8 +78,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'Awesome-HD',
'description': 'See <a href="https://awesome-hd.net">AHD</a>',
'description': '<a href="https://awesome-hd.net">AHD</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC+UlEQVR4AV1SO0y6dxQ9H4g8CoIoohZ5NA0aR2UgkYpNB5uocTSaLlrDblMH09Gt8d90r3YpJkanxjA4GGkbO7RNxSABq8jDGnkpD+UD5NV7Bxvbk9wvv+/3uPece66A/yEWi42FQqHVfD7/cbPZtIEglUpjOp3uZHR0dBvAn3gDIRqNgjE4OKj0+Xzf3NzcfD4wMCCjf5TLZbTbbajVatzf3+Pu7q5uNpt35ufnvwBQAScQRREEldfr9RWLxan+/n5YrVa+jFarhVfQQyQSCU4EhULhX15engEgSrjC0dHRVqlUmjQYDBgaGgKtuTqz4mTgIoVCASaTCX19fajVapOHh4dbFJBks9mxcDi8qtFoJEajkfVyJWi1WkxMTMDhcIAT8x6D7/Dd6+vr1fHx8TGp2+3+iqo5+YCzBwIBToK5ubl/mQwPDyMSibAs2Gw2UHNRrValz8/PDUk8Hv9EqVRCr9fj4uICTNflcqFer+Pg4AB7e3uoVCq8x9Rxfn6O7u5uqFQq8FspZXxHTekggByA3W4Hr9PpNDeRL3I1cMhkMrBrnZ2dyGQyvNYIs7OzVbJNPjIyAraLwYdcjR8wXl5eIJfLwRIFQQDLYkm3t7c1CdGPPT4+cpOImp4PODMeaK+n10As2jBbrHifHOjS6qAguVFimkqlwAMmIQnHV1dX4NDQhVwuhyZTV6pgIktzDzkkk0lEwhEEzs7ASQr5Ai4vL1nuccfCwsLO/v6+p9FoyJhF6ekJro/cPCzIZLNQa7rQoK77/SdgWWpKkCaJ5EB9aWnpe6nH40nRMBnJV4f5gw+FX3/5GX/8/htXRZdOzzqhJWn6nl6YbTZqqhrhULD16fT0d8FgcFtYW1vD5uamfGVl5cd4IjldKhZACdkJvKfWUANrxEaJV4hiGVaL1b+7653hXzwRZQr2X76xsfG1xWIRaZzbNPv/CdrjEL9cX/+WXFBSgEPgzxuwG3Yans9OT0+naBZMIJDNfzudzp8WFxd/APAX3uAf9WOTxOPLdosAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -44,7 +44,8 @@ class TorrentProvider(YarrProvider):
prop_name = 'proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
if last_check > time.time() - 1209600:
if last_check > time.time() - 86400:
continue
data = ''

View File

@@ -13,11 +13,11 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'http://www.bit-hdtv.com/',
'login': 'http://www.bit-hdtv.com/takelogin.php',
'login_check': 'http://www.bit-hdtv.com/messages.php',
'detail': 'http://www.bit-hdtv.com/details.php?id=%s',
'search': 'http://www.bit-hdtv.com/torrents.php?',
'test': 'https://www.bit-hdtv.com/',
'login': 'https://www.bit-hdtv.com/takelogin.php',
'login_check': 'https://www.bit-hdtv.com/messages.php',
'detail': 'https://www.bit-hdtv.com/details.php?id=%s',
'search': 'https://www.bit-hdtv.com/torrents.php?',
}
# Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
@@ -25,7 +25,7 @@ class Base(TorrentProvider):
def _search(self, media, quality, results):
query = self.buildUrl(media)
query = self.buildUrl(media, quality)
url = "%s&%s" % (self.urls['search'], query)
@@ -93,8 +93,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'BiT-HDTV',
'description': 'See <a href="http://bit-hdtv.com">BiT-HDTV</a>',
'description': '<a href="https://bit-hdtv.com">BiT-HDTV</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -1,7 +1,6 @@
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
from bs4 import BeautifulSoup, SoupStrainer
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -16,25 +15,23 @@ class Base(TorrentProvider):
'test': 'https://www.bitsoup.me/',
'login': 'https://www.bitsoup.me/takelogin.php',
'login_check': 'https://www.bitsoup.me/my.php',
'search': 'https://www.bitsoup.me/browse.php?',
'search': 'https://www.bitsoup.me/browse.php?%s',
'baseurl': 'https://www.bitsoup.me/%s',
}
http_time_between_calls = 1 # Seconds
only_tables_tags = SoupStrainer('table')
torrent_name_cell = 1
torrent_download_cell = 2
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s" %s' % (simplifyString(title), movie['info']['year'])
arguments = tryUrlencode({
'search': q,
})
url = "%s&%s" % (self.urls['search'], arguments)
url = self.urls['search'] % self.buildUrl(movie, quality)
url = self.urls['search'] % self.buildUrl(title, movie, quality)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags)
try:
result_table = html.find('table', attrs = {'class': 'koptekst'})
@@ -46,8 +43,8 @@ class Base(TorrentProvider):
all_cells = result.find_all('td')
torrent = all_cells[1].find('a')
download = all_cells[3].find('a')
torrent = all_cells[self.torrent_name_cell].find('a')
download = all_cells[self.torrent_download_cell].find('a')
torrent_id = torrent['href']
torrent_id = torrent_id.replace('details.php?id=', '')
@@ -55,9 +52,9 @@ class Base(TorrentProvider):
torrent_name = torrent.getText()
torrent_size = self.parseSize(all_cells[7].getText())
torrent_seeders = tryInt(all_cells[9].getText())
torrent_leechers = tryInt(all_cells[10].getText())
torrent_size = self.parseSize(all_cells[8].getText())
torrent_seeders = tryInt(all_cells[10].getText())
torrent_leechers = tryInt(all_cells[11].getText())
torrent_url = self.urls['baseurl'] % download['href']
torrent_detail_url = self.urls['baseurl'] % torrent['href']
@@ -94,8 +91,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'Bitsoup',
'description': 'See <a href="https://bitsoup.me">Bitsoup</a>',
'description': '<a href="https://bitsoup.me">Bitsoup</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAB8ElEQVR4AbWSS2sTURiGz3euk0mswaE37HhNhIrajQheFgF3rgR/lAt/gOBCXNZlo6AbqfUWRVCxi04wqUnTRibpJLaJzdzOOZ6WUumyC5/VHOb9eN/FA91uFx0FjI4IPfgiGLTWH73tn348GKmN7ijD0d2b41fO5qJEaX24AWNIUrVQCTTJ3Llx6vbV6Vtzk7Gi9+ebi996guFDDYAQAVj4FExP5qdOZB49W62t/zH3hECcwsPnbWeMXz6Xi2K1f0ApeK3hMCHHbP5gvvoriBgFAAQJEAxhjJ4u+YWTNsVI6b1JgtPWZkoIefKy4fcii2OTw2BABs7wj3bYDlLL4rvjGWOdTser1j5Xf7c3Q/MbHQYApxItvnm31mhQQ71eX2vUB76/vsWB2hg0QuogrMwLIG8P3InM2/eVGXeDViqVwWB79vRU2lgJYmdHcgXCTAXQFJTN5HguvDCR2Hxsxe8EvT54nlcul5vNpqDIEgwRQanAhAAABgRIyiQcjpIkkTOuWyqVoN/vSylX67XXH74uV1vHRUyxxFqbLBCSmBpiXSq6xcL5QrGYzWZ3XQIAwdlOJB+/aL764ucdmncYs0WsCI7kvTnn+qyDMEnTVCn1Tz5KsBFg6fvWcmsUAcnYNC/g2hnromvvqbHvxv+39S+MX+bWkFXwAgAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -0,0 +1,130 @@
import re
import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
    """Searcher for the HDAccess private tracker, using its JSON search API."""

    urls = {
        'test': 'https://hdaccess.net/',
        'detail': 'https://hdaccess.net/details.php?id=%s',
        'search': 'https://hdaccess.net/searchapi.php?apikey=%s&username=%s&imdbid=%s&internal=%s',
        'download': 'https://hdaccess.net/grab.php?torrent=%s&apikey=%s',
    }

    http_time_between_calls = 1  # Seconds

    def _search(self, movie, quality, results):
        """Query the site API by IMDB id and append parsed releases to `results`.

        Adds `extra_score` to every hit, a bonus for internal/freeleech
        releases when preferred, and zeroes the score of dead torrents.
        """
        search_url = self.urls['search'] % (
            self.conf('apikey'), self.conf('username'),
            getIdentifier(movie), self.conf('internal_only'))
        data = self.getJsonData(search_url)

        if not data:
            return

        try:
            for _, entry in data.iteritems():
                # The API reports an empty result set through this counter.
                if tryInt(entry['total_results']) == 0:
                    return

                score = self.conf('extra_score')
                seeders = tryInt(entry['seeders'])
                desc = '/ %s / %s / %s / %s seeders' % (
                    entry['releasegroup'], entry['resolution'],
                    entry['encoding'], seeders)

                if tryInt(entry['freeleech']) > 0 and self.conf('prefer_internal'):
                    desc += '/ Internal'
                    score += 200

                # Dead torrents should never win the release comparison.
                if seeders == 0:
                    score = 0

                release_title = '%s (%s) %s' % (
                    entry['release_name'], tryInt(entry['year']), desc)

                results.append({
                    'id': tryInt(entry['torrentid']),
                    # Strip characters the downstream renamer cannot handle.
                    'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', release_title),
                    'url': self.urls['download'] % (entry['torrentid'], self.conf('apikey')),
                    'detail_url': self.urls['detail'] % entry['torrentid'],
                    'size': tryInt(entry['size']),
                    'seeders': seeders,
                    'leechers': tryInt(entry['leechers']),
                    'age': tryInt(entry['age']),
                    'score': score
                })
        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
# CouchPotato settings definition for the HDAccess provider.
# Rendered in the Searcher tab under the torrent-providers list; each entry
# in 'options' becomes one form field. Fixes the user-facing typo in the
# apikey description ("can be find" -> "can be found").
config = [{
    'name': 'hdaccess',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'HDAccess',
            'wizard': True,  # shown in the first-run wizard
            'description': '<a href="https://hdaccess.net">HDAccess</a>',
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAADuUlEQVQ4yz3T209bdQAH8O/vnNNzWno5FIpAKZdSLi23gWMDtumWuSXOyzJj9M1kyIOPS1xiYuKe9GUPezZZnGIiMTqTxS1bdIuYkG2MWKBAKYVszOgKFkrbA+059HfO+fli/PwPHzI+Pg5CCEAI2VcUlEsl1tHdU7P5bGOkWChEaaUCwvHpmkD93POn6bwgCMQGAMYYYwyCruuQnE7SPzjIstvb8l+bm5fXkokJSmlQEkUQAIpSRH5vd0tyum7I/sA1Z5VH2ctmiGWZjHw4McE1NAZtQ9fD25kXt1VN7es7dNjuGRjiJFeVpWo6slsZPhF/Ys/PPeIs2056ff7zIOS5rpU5/viJEwwEnu3Mi18dojjw0aWP6amz57h9RSE/35zinq2nuGjvIQwOj7K2SKeZWkk0auXSSZ+/ZopSy+CbW1pQKpWu6Jr2/qVPPqWRjm6HWi6Tm999g3RyGbndLCqGgVBrO3F7fHykK0YX47NNtGLYlBq/c+H2iD+3k704dHQUDcFmQVXLyP6zhfTqCl45fQYjx17FemoJunoAk1bQFGoVhkdPwNC0ix2dMT+3llodM02rKdo7gN3dHAEhuH/vNgDg3Pl3cPaNt2GZJpYX5lBbFwClBukfGobL5WrayW6NccVCISY4HIQxYts2Q3J5CXOPHuLlo6NoCoXQ2hbG0JFRpJYWcVDIQ5ZlyL5qW5b9hNlWjKsYBgzDgKppMCoGHty7A0orOHbyNNweL+obGnDm9TdhWSYS8Vn4a2shOZ0QJRGSKIHjeGGtWNhjqqpyG+k04k8eozPai9ZwByavf4kfpyZxZGwMfYOHsbwQx34hB5dL4syKweRq/xpXHwzNapqWSSYWMDszzYqFPEaOn4KiKJiZfoCZ6d8Am+GtC++iXCpjaf4P9vefT8HzfKarp3eWRKMxCILwuWXSz977YIK2RTotDoGH1+OG1+tDlbsKkuiAJEngeWBjNUUnv7rucIiOLyzTvMKJTgnVtbVXLctK3L31g+NAUajL5bEptaDpOnTdgGkzVHl9drms0ju3fnJIkphoaQtfbQiFwAcCAY5wnCE5Xff3i8XX4o9nGksH+8zl9hAGZlWMCivkc9z0L3fZ929+LTCGZKi55YJTFHfye3sc6e/vB88LpK6+iWlqSS4WcpcNXZtwOp3B6mo/REmCSSkEgd+qq3vpRkt75Fp9Y1BZWZwnhq4zEovF/u/MATAti4U7umvyu9kR27aikihC9vvTnV2xufVUMu/2uIksy/9tZvgX49fLmAMx3bsAAAAASUVORK5CYII=',
            'options': [
                # Master enable toggle for this provider.
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                    'description': 'Enter your site username.',
                },
                {
                    'name': 'apikey',
                    'default': '',
                    'label': 'API Key',
                    'description': 'Enter your site api key. This can be found on <a href="https://hdaccess.net/usercp.php?action=security">Profile Security</a>',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 0,
                    'description': 'Will not be (re)moved until this seed ratio is met. HDAccess minimum is 1:1.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 0,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met. HDAccess minimum is 48 hours.',
                },
                # Advanced options are hidden unless advanced settings are shown.
                {
                    'name': 'prefer_internal',
                    'advanced': True,
                    'type': 'bool',
                    'default': 1,
                    'description': 'Favors internal releases over non-internal releases.',
                },
                {
                    'name': 'internal_only',
                    'advanced': True,
                    'label': 'Internal Only',
                    'type': 'bool',
                    'default': False,
                    'description': 'Only download releases marked as HDAccess internal',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]

View File

@@ -29,6 +29,9 @@ class Base(TorrentProvider):
}
post_data.update(params)
if self.conf('internal_only'):
post_data.update({'origin': [1]})
try:
result = self.getJsonData(self.urls['api'], data = json.dumps(post_data))
@@ -71,7 +74,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'HDBits',
'description': 'See <a href="http://hdbits.org">HDBits</a>',
'wizard': True,
'description': '<a href="http://hdbits.org">HDBits</a>',
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABi0lEQVR4AZWSzUsbQRjGdyabTcvSNPTSHlpQQeMHJApC8CJRvHgQQU969+LJP8G7f4N3DwpeFRQvRr0EKaUl0ATSpkigUNFsMl/r9NmZLCEHA/nNO5PfvMPDm0DI6fV3ZxiolEICe1oZCBVCCmBPKwOh2ErKBHGE4KYEXBpSLkUlqO4LcM7f+6nVhRnOhSkOz/hexk+tL+YL0yPF2YmN4tynD++4gTLGkNNac9YFLoREBR1+cnF3dFY6v/m6PD+FaXiNJtgA4xYbABxiGrz6+6HWaI5/+Qh37YS0/3Znc8UxwNGBIIBX22z+/ZdJ+4wzyjpR4PEpODg8tgUXBv2iWUzSpa12B0IR6n6lvt8Aek2lZHb084+fdRNgrwY8z81PjhVy2d2ttUrtV/lbBa+JXGEpDMPnoF2tN1QYRqVUtf6nFbThb7wk7le395elcqhASLb39okDiHY00VCtCTEHwSiH4AI0lkOiT1dwMeSfT3SRxiQWNO7Zwj1egkoVIQFMKvSiC3bcjXq9Jf8DcDIRT3hh10kAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
@@ -108,6 +113,14 @@ config = [{
'default': 0,
'description': 'Starting score for each release found via this provider.',
},
{
'name': 'internal_only',
'advanced': True,
'label': 'Internal Only',
'type': 'bool',
'default': False,
'description': 'Only download releases marked as HDBits internal'
}
],
},
],

View File

@@ -3,7 +3,7 @@ import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -15,7 +15,7 @@ class Base(TorrentProvider):
urls = {
'download': 'https://www.ilovetorrents.me/%s',
'detail': 'https//www.ilovetorrents.me/%s',
'detail': 'https://www.ilovetorrents.me/%s',
'search': 'https://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s',
'test': 'https://www.ilovetorrents.me/',
'login': 'https://www.ilovetorrents.me/takelogin.php',
@@ -47,17 +47,24 @@ class Base(TorrentProvider):
data = self.getHTMLData(search_url)
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'class': 'koptekst'})
results_table = None
data_split = splitString(data, '<table')
soup = None
for x in data_split:
soup = BeautifulSoup(x)
results_table = soup.find('table', attrs = {'class': 'koptekst'})
if results_table:
break
if not results_table:
return
try:
pagelinks = soup.findAll(href = re.compile('page'))
pageNumbers = [int(re.search('page=(?P<pageNumber>.+'')', i['href']).group('pageNumber')) for i in pagelinks]
total_pages = max(pageNumbers)
page_numbers = [int(re.search('page=(?P<page_number>.+'')', i['href']).group('page_number')) for i in pagelinks]
total_pages = max(page_numbers)
except:
pass
@@ -139,6 +146,7 @@ config = [{
'name': 'ILoveTorrents',
'description': 'Where the Love of Torrents is Born',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACPUlEQVR4AYWM0U9SbxjH3+v266I/oNvWZTfd2J1d0ZqbZEFwWrUImOKs4YwtumFKZvvlJJADR2TCQQlMPKg5NmpREgaekAPnBATKgmK1LqQlx6awHnZWF1Tr2Xfvvs+7z+dB0mlO7StpAh+M4S/2jbo3w8+xvJvlnSneEt+10zwer5ujNUOoChjALWFw5XOwdCAk/P57cGvPl+Oht0W7VJHN5NC1uW1BON4hGjXbwpVWMZhsy9v7sEIXAsDNYBXgdkEoIKyWD2CF8ut/aOXTZc/fBSgLWw1BgA4BDHOV0GkT90cBQpXahU5TFomsb38XhJC5/Tbh1P8c6rJlBeGfAeyMhUFwNVcs9lxV9Ot0dwmyd+mrNvRtbJ2fSPC6Z3Vsvub2z3sDFACAAYzk0+kUyxEkyfN7PopqNBro55A+P6yPKIrL5zF1HwjdeBJJCObIsZO79bo3sHhWhglo5WMV3mazuVPb4fLvSL8/FAkB1hK6rXQPwYhMyROK8VK5LAiH/jsMt0HQjxiN4/ePdoilllcqDyt3Mkg8mRBNbIhMb8RERkowQA/p76g0/UDDdCoNmDminM0qSK5vlpE5kugCHhNPxntwWmJPYTMZtYcFR6ABHQsVRlYLukVORaaULvqKI46keFSCv77kSPS6kxrPptLNDHgz16fWBtyxe6v5h08LUy+KI8ushqTPWWIX8Sg6b45IrGtyW6zXFb/hpQf9m3oqfWuB0fpSw0uZ4WB69En69uOk2rmO2V52PXj+A/mI4ESKpb2HAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -14,11 +14,11 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://www.iptorrents.com/',
'base_url': 'https://www.iptorrents.com',
'login': 'https://www.iptorrents.com/torrents/',
'login_check': 'https://www.iptorrents.com/inbox.php',
'search': 'https://www.iptorrents.com/torrents/?%s%%s&q=%s&qf=ti&p=%%d',
'test': 'https://iptorrents.eu/',
'base_url': 'https://iptorrents.eu',
'login': 'https://iptorrents.eu/torrents/',
'login_check': 'https://iptorrents.eu/inbox.php',
'search': 'https://iptorrents.eu/torrents/?%s%%s&q=%s&qf=ti&p=%%d',
}
http_time_between_calls = 1 # Seconds
@@ -120,8 +120,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'IPTorrents',
'description': 'See <a href="http://www.iptorrents.com">IPTorrents</a>',
'description': '<a href="https://iptorrents.eu">IPTorrents</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -32,8 +32,11 @@ class Base(TorrentMagnetProvider):
proxy_list = [
'https://kickass.to',
'http://kickass.pw',
'http://www.kickassunblock.info',
'http://www.kickassproxy.info',
'http://kickassto.come.in',
'http://katproxy.ws',
'http://kickass.bitproxy.eu',
'http://katph.eu',
'http://kickassto.come.in',
]
def _search(self, media, quality, results):
@@ -65,12 +68,13 @@ class Base(TorrentMagnetProvider):
if column_name:
if column_name == 'name':
link = td.find('div', {'class': 'torrentname'}).find_all('a')[1]
new['id'] = temp.get('id')[-8:]
link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
new['id'] = temp.get('id')[-7:]
new['name'] = link.text
new['url'] = td.find('a', 'imagnet')['href']
new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
new['score'] = 20 if td.find('a', 'iverif') else 0
new['verified'] = True if td.find('a', 'iverify') else False
new['score'] = 100 if new['verified'] else 0
elif column_name is 'size':
new['size'] = self.parseSize(td.text)
elif column_name is 'age':
@@ -82,6 +86,10 @@ class Base(TorrentMagnetProvider):
nr += 1
# Only store verified torrents
if self.conf('only_verified') and not new['verified']:
continue
results.append(new)
except:
log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())
@@ -123,8 +131,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'KickAssTorrents',
'description': 'See <a href="https://kat.ph/">KickAssTorrents</a>',
'description': '<a href="https://kat.ph/">KickAssTorrents</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACD0lEQVR42pXK20uTcRjA8d/fsJsuap0orBuFlm3hir3JJvQOVmuwllN20Lb2isI2nVHKjBqrCWYaNnNuBrkSWxglhDVJOkBdSWUOq5FgoiOrMdRJ2xPPxW+8OUf1ge/FcyCUSVe2qedK5U/OxNTTXRNXEQ52Glb4O6dNEfK1auJkvRY7+/zxnQbA/D596laXcY3OWOiaIX2393SGznUmxkUo/YkDgqHemuzobQ7+NV+reo5Q1mqp68GABdY3+/EloO+JeN4tEqiFU8f3CwhyWo9E7wfMgI0ELTDx0AvjIxcgvZoC9P7NMN7yMmrFeoKa68rfDfmrARsNN0Ihr55cx59ctZWSiwS5bLKpwW4dYJH+M/B6/CYszE0BFZ+egG+Ln+HRoBN/cpl1pV6COIMkOnBVA/w+fXgGKJVM4LxhumMleoL06hJ3wKcCfl+/TAKKx17gnFePRwkqxR4BQSpFkbCrrQJueI7mWpyfATQ9OQY43+uv/+PutBycJ3y2qn2x7jY50GJvnwLKZjOwspyE5I8F4N+1yr1uwqcs3ym63Hwo29EiAyzUWQVr6WVAS4lZCPutQG/2GtES2YiW3d3XflYKtL72kzAcdEDHeSa3czeIMyyz/TApRKvcFfE0isHbJMnrHCf6xTLb1ORvWNlWo91cvHrJUQo0o6ZoRi7dIiT/g2WEDi27Iyov21xMCvgNfXvtwIACfHwAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
@@ -151,6 +160,13 @@ config = [{
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'only_verified',
'advanced': True,
'type': 'bool',
'default': False,
'description': 'Only search for verified releases.'
},
{
'name': 'extra_score',
'advanced': True,

View File

@@ -64,6 +64,10 @@ class Base(TorrentProvider):
torrentdesc += ' HQ'
if self.conf('prefer_golden'):
torrentscore += 5000
if 'FreeleechType' in torrent:
torrentdesc += ' Freeleech'
if self.conf('prefer_freeleech'):
torrentscore += 7000
if 'Scene' in torrent and torrent['Scene']:
torrentdesc += ' Scene'
if self.conf('prefer_scene'):
@@ -187,8 +191,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'PassThePopcorn',
'description': 'See <a href="https://passthepopcorn.me">PassThePopcorn.me</a>',
'description': '<a href="https://passthepopcorn.me">PassThePopcorn.me</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAARklEQVQoz2NgIAP8BwMiGWRpIN1JNWn/t6T9f532+W8GkNt7vzz9UkfarZVpb68BuWlbnqW1nU7L2DMx7eCoBlpqGOppCQB83zIgIg+wWQAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',
@@ -222,6 +227,14 @@ config = [{
'default': 1,
'description': 'Favors Golden Popcorn-releases over all other releases.'
},
{
'name': 'prefer_freeleech',
'advanced': True,
'type': 'bool',
'label': 'Prefer Freeleech',
'default': 1,
'description': 'Favors torrents marked as freeleech over all other releases.'
},
{
'name': 'prefer_scene',
'advanced': True,

View File

@@ -1,136 +0,0 @@
from urlparse import parse_qs
import re
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
import six
log = CPLog(__name__)
class Base(TorrentMagnetProvider):
    """Provider for PublicHD, a public tracker that only carries HD releases."""

    urls = {
        'test': 'https://publichd.se',
        'detail': 'https://publichd.se/index.php?page=torrent-details&id=%s',
        'search': 'https://publichd.se/index.php',
    }

    http_time_between_calls = 0

    def search(self, movie, quality):
        """Skip this provider entirely for non-HD qualities."""
        if quality.get('hd', False):
            return super(Base, self).search(movie, quality)
        return []

    def _search(self, media, quality, results):
        """Scrape the search results table and append parsed releases to `results`."""
        request_params = tryUrlencode({
            'page': 'torrents',
            'search': self.buildUrl(media),
            'active': 1,
        })

        page = self.getHTMLData('%s?%s' % (self.urls['search'], request_params))
        if not page:
            return

        try:
            soup = BeautifulSoup(page)
            table = soup.find('table', attrs = {'id': 'bgtorrlist2'})
            rows = table.find_all('tr')

            # Skip the two header rows and the trailing pagination row.
            for row in rows[2:len(rows) - 1]:
                detail_link = row.find(href = re.compile('torrent-details'))
                magnet_link = row.find(href = re.compile('magnet:'))

                if not (detail_link and magnet_link):
                    continue

                parsed_href = parse_qs(detail_link['href'])
                torrent_id = parsed_href['id'][0]
                cells = row.find_all('td')

                results.append({
                    'id': torrent_id,
                    'name': six.text_type(detail_link.string),
                    'url': magnet_link['href'],
                    'detail_url': self.urls['detail'] % torrent_id,
                    'size': self.parseSize(cells[7].string),
                    'seeders': tryInt(cells[4].string),
                    'leechers': tryInt(cells[5].string),
                    'get_more_info': self.getMoreInfo
                })
        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getMoreInfo(self, item):
        """Fill item['description'] from the detail page, caching the result."""
        cache_key = 'publichd.%s' % item['id']
        description = self.getCache(cache_key)

        if not description:
            try:
                detail_page = self.urlopen(item['detail_url'])
                nfo = BeautifulSoup(detail_page).find('div', attrs = {'id': 'torrmain'})
                if nfo:
                    description = toUnicode(nfo.text)
                else:
                    description = ''
            except:
                log.error('Failed getting more info for %s', item['name'])
                description = ''

            # Cache for ~300 days; NFO text effectively never changes.
            self.setCache(cache_key, description, timeout = 25920000)

        item['description'] = description
        return item
# CouchPotato settings definition for the PublicHD provider.
# Rendered in the Searcher tab under the torrent-providers list;
# each entry in 'options' becomes one form field.
config = [{
    'name': 'publichd',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'PublicHD',
            'description': 'Public Torrent site with only HD content. See <a href="https://publichd.se/">PublicHD</a>',
            'wizard': True,  # shown in the first-run wizard
            'options': [
                # Master enable toggle for this provider.
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': True,
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                # Hidden unless advanced settings are shown.
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]

View File

@@ -24,9 +24,9 @@ class Base(TorrentProvider):
http_time_between_calls = 1 # Seconds
def _search(self, media, quality, results):
def _searchOnTitle(self, title, media, quality, results):
url = self.buildUrl(media, quality)
url = self.buildUrl(title, media, quality)
data = self.getHTMLData(url)
if data:
@@ -42,6 +42,7 @@ class Base(TorrentProvider):
link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
torrent_id = link['href'].replace('details?id=', '')
@@ -51,7 +52,7 @@ class Base(TorrentProvider):
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
'seeders': tryInt(result.find('td', attrs = {'class': 'ttr_seeders'}).find('a').string),
'seeders': tryInt(seeders.string) if seeders else 0,
'leechers': tryInt(leechers.string) if leechers else 0,
'get_more_info': self.getMoreInfo,
})
@@ -89,8 +90,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'SceneAccess',
'description': 'See <a href="https://sceneaccess.eu/">SceneAccess</a>',
'description': '<a href="https://sceneaccess.eu/">SceneAccess</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -24,15 +24,18 @@ class Base(TorrentMagnetProvider):
http_time_between_calls = 0
proxy_list = [
'https://tpb.ipredator.se',
'https://dieroschtibay.org',
'https://thebay.al',
'https://thepiratebay.se',
'http://pirateproxy.ca',
'http://tpb.al',
'http://www.tpb.gr',
'http://bayproxy.me',
'http://proxybay.eu',
'http://www.getpirate.com',
'http://piratebay.io',
'http://thepiratebay.se.net',
'http://thebootlegbay.com',
'http://tpb.ninja.so',
'http://proxybay.fr',
'http://pirateproxy.in',
'http://piratebay.skey.sk',
'http://pirateproxy.be',
'http://bayproxy.li',
'http://proxybay.pw',
]
def _search(self, media, quality, results):
@@ -65,7 +68,7 @@ class Base(TorrentMagnetProvider):
pass
entries = results_table.find_all('tr')
for result in entries[2:]:
for result in entries[1:]:
link = result.find(href = re.compile('torrent\/\d+\/'))
download = result.find(href = re.compile('magnet:'))
@@ -109,7 +112,11 @@ class Base(TorrentMagnetProvider):
full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'class': 'nfo'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
description = ''
try:
description = toUnicode(nfo_pre.text)
except:
pass
item['description'] = description
return item
@@ -122,8 +129,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'ThePirateBay',
'description': 'The world\'s largest bittorrent tracker. See <a href="http://fucktimkuik.org/">ThePirateBay</a>',
'description': 'The world\'s largest bittorrent tracker. <a href="http://fucktimkuik.org/">ThePirateBay</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAA3UlEQVQY02P4DwT/YADIZvj//7qnozMYODmtAAusZoCDELDAegYGViZhAWZmRoYoqIDupfhNN1M3dTBEggXWMZg9jZRXV77YxhAOFpjDwMAPMoCXmcHsF1SAQZ6bQY2VgUEbKHClcAYzg3mINEO8jSCD478/DPsZmvqWblu1bOmStes3Pp0ezVDF4Gif0Hfx9///74/ObRZ2YNiZ47C8XIRBxFJR0jbSSUud4f9zAQWn8NTuziAt2zy5xIMM/z8LFX0E+fD/x0MRDCeA1v7Z++Y/FDzyvAtyBxIA+h8A8ZKLeT+lJroAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -1,7 +1,7 @@
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -56,11 +56,12 @@ class Base(TorrentProvider):
full_id = link['href'].replace('details.php?id=', '')
torrent_id = full_id[:6]
name = toUnicode(link.get('title', link.contents[0]).encode('ISO-8859-1')).strip()
results.append({
'id': torrent_id,
'name': link.contents[0],
'url': self.urls['download'] % (torrent_id, link.contents[0]),
'name': name,
'url': self.urls['download'] % (torrent_id, name),
'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
'seeders': tryInt(cells[8].find('span').contents[0]),
@@ -90,8 +91,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentBytes',
'description': 'See <a href="http://torrentbytes.net">TorrentBytes</a>',
'description': '<a href="http://torrentbytes.net">TorrentBytes</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAeFBMVEUAAAAAAEQAA1QAEmEAKnQALHYAMoEAOokAQpIASYsASZgAS5UATZwATosATpgAVJ0AWZwAYZ4AZKAAaZ8Ab7IAcbMAfccAgcQAgcsAhM4AiscAjMkAmt0AoOIApecAp/EAqvQAs+kAt+wA3P8A4f8A//8VAAAfDbiaAl08AAAAjUlEQVQYGQXBO04DQRAFwHqz7Z8sECIl5f73ISRD5GBs7UxTlWfg9vYXnvJRQJqOL88D6BAwJtMMumHUVCl60aa6H93IrIv0b+157f1lpk+fm87lMWrZH0vncKbXdRUQrRmrh9C6Iwkq6rg4PXZcyXmbizzeV/g+rDra0rGve8jPKLSOJNi2AQAwAGjwD7ApPkEHdtPQAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -1,3 +1,4 @@
import re
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -8,19 +9,19 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'http://www.td.af/',
'login': 'http://www.td.af/torrents/',
'login_check': 'http://www.torrentday.com/userdetails.php',
'detail': 'http://www.td.af/details.php?id=%s',
'search': 'http://www.td.af/V3/API/API.php',
'download': 'http://www.td.af/download.php/%s/%s',
'test': 'https://torrentday.eu/',
'login': 'https://torrentday.eu/torrents/',
'login_check': 'https://torrentday.eu/userdetails.php',
'detail': 'https://torrentday.eu/details.php?id=%s',
'search': 'https://torrentday.eu/V3/API/API.php',
'download': 'https://torrentday.eu/download.php/%s/%s',
}
http_time_between_calls = 1 # Seconds
def _search(self, media, quality, results):
def _searchOnTitle(self, title, media, quality, results):
query = self.buildUrl(media)
query = '"%s" %s' % (title, media['info']['year'])
data = {
'/browse.php?': None,
@@ -55,6 +56,10 @@ class Base(TorrentProvider):
}
def loginSuccess(self, output):
often = re.search('You tried too often, please wait .*</div>', output)
if often:
raise Exception(often.group(0)[:-6].strip())
return 'Password not correct' not in output
def loginCheckSuccess(self, output):
@@ -68,8 +73,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentDay',
'description': 'See <a href="http://www.td.af/">TorrentDay</a>',
'description': '<a href="https://torrentday.eu/">TorrentDay</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -13,20 +13,20 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'http://www.torrentleech.org/',
'login': 'http://www.torrentleech.org/user/account/login/',
'login_check': 'http://torrentleech.org/user/messages',
'detail': 'http://www.torrentleech.org/torrent/%s',
'search': 'http://www.torrentleech.org/torrents/browse/index/query/%s/categories/%d',
'download': 'http://www.torrentleech.org%s',
'test': 'https://www.torrentleech.org/',
'login': 'https://www.torrentleech.org/user/account/login/',
'login_check': 'https://torrentleech.org/user/messages',
'detail': 'https://www.torrentleech.org/torrent/%s',
'search': 'https://www.torrentleech.org/torrents/browse/index/query/%s/categories/%s',
'download': 'https://www.torrentleech.org%s',
}
http_time_between_calls = 1 # Seconds
cat_backup_id = None
def _search(self, media, quality, results):
def _searchOnTitle(self, title, media, quality, results):
url = self.urls['search'] % self.buildUrl(media, quality)
url = self.urls['search'] % self.buildUrl(title, media, quality)
data = self.getHTMLData(url)
@@ -80,8 +80,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentLeech',
'description': 'See <a href="http://torrentleech.org">TorrentLeech</a>',
'description': '<a href="http://torrentleech.org">TorrentLeech</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACHUlEQVR4AZVSO48SYRSdGTCBEMKzILLAWiybkKAGMZRUUJEoDZX7B9zsbuQPYEEjNLTQkYgJDwsoSaxspEBsCITXjjNAIKi8AkzceXgmbHQ1NJ5iMufmO9/9zrmXlCSJ+B8o75J8Pp/NZj0eTzweBy0Wi4PBYD6f12o1r9ebTCZx+22HcrnMsuxms7m6urTZ7LPZDMVYLBZ8ZV3yo8aq9Pq0wzCMTqe77dDv9y8uLyAWBH6xWOyL0K/56fcb+rrPgPZ6PZfLRe1fsl6vCUmGKIqoqNXqdDr9Dbjps9znUV0uTqdTjuPkDoVCIfcuJ4gizjMMm8u9vW+1nr04czqdK56c37CbKY9j2+1WEARZ0Gq1RFHAz2q1qlQqXxoN69HRcDjUarW8ZD6QUigUOnY8uKYH8N1sNkul9yiGw+F6vS4Rxn8EsodEIqHRaOSnq9T7ajQazWQycEIR1AEBYDabSZJyHDucJyegwWBQr9ebTCaKvHd4cCQANUU9evwQ1Ofz4YvUKUI43GE8HouSiFiNRhOowWBIpVLyHITJkuW3PwgAEf3pgIwxF5r+OplMEsk3CPT5szCMnY7EwUdhwUh/CXiej0Qi3idPz89fdrpdbsfBzH7S3Q9K5pP4c0sAKpVKoVAQGO1ut+t0OoFAQHkH2Da/3/+but3uarWK0ZMQoNdyucRutdttmqZxMTzY7XaYxsrgtUjEZrNhkSwWyy/0NCatZumrNQAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -134,6 +134,7 @@ config = [{
'order': 10,
'description': 'CouchPotato torrent provider. Checkout <a href="https://github.com/RuudBurger/CouchPotatoServer/wiki/CouchPotato-Torrent-Provider">the wiki page about this provider</a> for more info.',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABSElEQVR4AZ2Nz0oCURTGv8t1YMpqUxt9ARFxoQ/gQtppgvUKcu/sxB5iBJkogspaBC6iVUplEC6kv+oiiKDNhAtt16roP0HQgdsMLgaxfvy4nHP4Pi48qE2g4v91JOqT1CH/UnA7w7icUlLawyEdj+ZI/7h6YluWbRiddHonHh9M70aj7VTKzuXuikUMci/EO/ACnAI15599oAk8AR/AgxBQNCzreD7bmpl+FOIVuAHqQDUcJo+AK+CZFKLt95/MpSmMt0TiW9POxse6UvYZ6zB2wFgjFiNpOGesR0rZ0PVPXf8KhUCl22CwClz4eN8weoZBb9c0bdPsOWvHx/cYu9Y0CoNoZTJrwAbn5DrnZc6XOV+igVbnsgo0IxEomlJuA1vUIYGyq3PZBChwmExCUSmVZgMBDIUCK4UCFIv5vHIhm/XUDeAf/ADbcpd5+aXSWQAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -13,12 +13,12 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://torrentshack.net/',
'login': 'https://torrentshack.net/login.php',
'login_check': 'https://torrentshack.net/inbox.php',
'detail': 'https://torrentshack.net/torrent/%s',
'search': 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
'download': 'https://torrentshack.net/%s',
'test': 'https://theshack.us.to/',
'login': 'https://theshack.us.to/login.php',
'login_check': 'https://theshack.us.to/inbox.php',
'detail': 'https://theshack.us.to/torrent/%s',
'search': 'https://theshack.us.to/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
'download': 'https://theshack.us.to/%s',
}
http_time_between_calls = 1 # Seconds
@@ -42,15 +42,17 @@ class Base(TorrentProvider):
link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent
url = result.find('td', attrs = {'class': 'torrent_td'}).find('a')
size = result.find('td', attrs = {'class': 'size'}).contents[0].strip('\n ')
tds = result.find_all('td')
results.append({
'id': link['href'].replace('torrents.php?torrentid=', ''),
'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['download'] % link['href'],
'size': self.parseSize(result.find_all('td')[4].string),
'seeders': tryInt(result.find_all('td')[6].string),
'leechers': tryInt(result.find_all('td')[7].string),
'size': self.parseSize(size),
'seeders': tryInt(tds[len(tds)-2].string),
'leechers': tryInt(tds[len(tds)-1].string),
})
except:
@@ -80,7 +82,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentShack',
'description': 'See <a href="https://www.torrentshack.net/">TorrentShack</a>',
'description': '<a href="http://torrentshack.eu/">TorrentShack</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -22,12 +22,12 @@ class Base(TorrentMagnetProvider, RSS):
http_time_between_calls = 0
def _search(self, media, quality, results):
def _searchOnTitle(self, title, media, quality, results):
search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search']
# Create search parameters
search_params = self.buildUrl(media)
search_params = self.buildUrl(title, media, quality)
smin = quality.get('size_min')
smax = quality.get('size_max')
@@ -80,11 +80,12 @@ config = [{
'name': 'Torrentz',
'description': 'Torrentz is a free, fast and powerful meta-search engine. <a href="https://torrentz.eu/">Torrentz</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAQklEQVQ4y2NgAALjtJn/ycEMlGiGG0IVAxiwAKzOxaKGARcgxgC8YNSAwWoAzuRMjgsIugqfAUR5CZcBRIcHsWEAADSA96Ig020yAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False
'default': True
},
{
'name': 'verified_only',

View File

@@ -11,19 +11,16 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': '%s/api',
'search': '%s/api/list.json?keywords=%s&quality=%s',
'detail': '%s/api/movie.json?id=%s'
'test': '%s/api/v2',
'search': '%s/api/v2/list_movies.json?limit=50&query_term=%s'
}
http_time_between_calls = 1 # seconds
proxy_list = [
'http://yify.unlocktorrent.com',
'http://yify-torrents.com.come.in',
'http://yts.re',
'http://yts.im'
'http://yify-torrents.im',
'https://yts.re',
'https://yts.wf',
'https://yts.im',
]
def search(self, movie, quality):
@@ -35,30 +32,35 @@ class Base(TorrentProvider):
def _search(self, movie, quality, results):
search_url = self.urls['search'] % (self.getDomain(), getIdentifier(movie), quality['identifier'])
domain = self.getDomain()
if not domain:
return
search_url = self.urls['search'] % (domain, getIdentifier(movie))
data = self.getJsonData(search_url)
data = data.get('data')
if data and data.get('MovieList'):
if isinstance(data, dict) and data.get('movies'):
try:
for result in data.get('MovieList'):
for result in data.get('movies'):
try:
title = result['TorrentUrl'].split('/')[-1][:-8].replace('_', '.').strip('._')
title = title.replace('.-.', '-')
title = title.replace('..', '.')
except:
continue
for release in result.get('torrents', []):
results.append({
'id': result['MovieID'],
'name': title,
'url': result['TorrentMagnetUrl'],
'detail_url': self.urls['detail'] % (self.getDomain(), result['MovieID']),
'size': self.parseSize(result['Size']),
'seeders': tryInt(result['TorrentSeeds']),
'leechers': tryInt(result['TorrentPeers'])
})
if release['quality'] and release['quality'] not in result['title_long']:
title = result['title_long'] + ' BRRip ' + release['quality']
else:
title = result['title_long'] + ' BRRip'
results.append({
'id': release['hash'],
'name': title,
'url': release['url'],
'detail_url': result['url'],
'size': self.parseSize(release['size']),
'seeders': tryInt(release['seeds']),
'leechers': tryInt(release['peers']),
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
@@ -77,6 +79,7 @@ config = [{
'name': 'Yify',
'description': 'Free provider, less accurate. Small HD movies, encoded by <a href="https://yify-torrents.com/">Yify</a>.',
'wizard': False,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACL0lEQVR4AS1SPW/UQBAd23fxne/Ld2dvzvHuzPocEBAKokCBqGiQ6IgACYmvUKRBFEQgKKGg4BAlUoggggYUEQpSHOI7CIEoQs/fYcbLaU/efTvvvZlnA1qydoxU5kcxX0CkgmQZtPy0hCUjvK+WgEByOZ5dns1O5bzna8fRVkgsxH8B0YouIvBhdD5T11NiVOoKrsttyUcpRW0InUrFnwe9HzuP2uaQZYhF2LQ76TTXw2RVMTK8mYYbjfh+zNquMVCrqn93aArLSixPxnafdGDLaz1tjY5rmNa8z5BczEQOxQfCl1GyoqoWxYRN1bkh7ELw3q/vhP6HIL4TG9KumpjgvwuyM7OsjSj98E/vszMfZ7xvPtMaWxGO5crwIumKCR5HxDtJ0AWKGG204RfUd/3smJYqwem/Q7BTS1ZGfM4LNpVwuKAz6cMeROst0S2EwNE7GjTehO2H3dxqIpdkydat15G3F8SXBi4GlpBNlSz012L/k2+W0CLLk/jbcf13rf41yJeMQ8QWUZiHCfCA9ad+81nEKPtoS9mJOf9v0NmMJHgUT6xayheK9EIK7JJeU/AF4scDF7Y5SPlJrRcxJ+um4ibNEdObxLiIwJim+eT2AL5D9CIcnZ5zvSJi9eIlNHVVtZ831dk5svPgvjPWTq+ktWkd/kD0qtm71x+sDQe3kt6DXnM7Ct+GajmTxKlkAokWljyAKSm5oWa2w+BH4P2UuVub7eTyiGOQYapY/wEztHduSDYz5gAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -1,6 +1,6 @@
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import mergeDicts
from couchpotato.core.helpers.variable import mergeDicts, getImdb
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@@ -35,12 +35,21 @@ class Search(Plugin):
elif isinstance(types, (list, tuple, set)):
types = list(types)
imdb_identifier = getImdb(q)
if not types:
result = fireEvent('info.search', q = q, merge = True)
if imdb_identifier:
result = fireEvent('movie.info', identifier = imdb_identifier, merge = True)
result = {result['type']: [result]}
else:
result = fireEvent('info.search', q = q, merge = True)
else:
result = {}
for media_type in types:
result[media_type] = fireEvent('%s.search' % media_type)
if imdb_identifier:
result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier)
else:
result[media_type] = fireEvent('%s.search' % media_type, q = q)
return mergeDicts({
'success': True,

View File

@@ -1,278 +0,0 @@
.search_form {
display: inline-block;
vertical-align: middle;
position: absolute;
right: 105px;
top: 0;
text-align: right;
height: 100%;
transition: all .4s cubic-bezier(0.9,0,0.1,1);
position: absolute;
z-index: 20;
border: 0 solid transparent;
border-bottom-width: 4px;
}
.search_form:hover {
border-color: #047792;
}
@media all and (max-width: 480px) {
.search_form {
right: 44px;
}
}
.search_form.focused,
.search_form.shown {
border-color: #04bce6;
}
.search_form .input {
height: 100%;
overflow: hidden;
width: 45px;
transition: all .4s cubic-bezier(0.9,0,0.1,1);
}
.search_form.focused .input,
.search_form.shown .input {
width: 380px;
background: #4e5969;
}
.search_form .input input {
border-radius: 0;
display: block;
border: 0;
background: none;
color: #FFF;
font-size: 25px;
height: 100%;
width: 100%;
opacity: 0;
padding: 0 40px 0 10px;
transition: all .4s ease-in-out .2s;
}
.search_form.focused .input input,
.search_form.shown .input input {
opacity: 1;
}
.search_form input::-ms-clear {
width : 0;
height: 0;
}
@media all and (max-width: 480px) {
.search_form .input input {
font-size: 15px;
}
.search_form.focused .input,
.search_form.shown .input {
width: 277px;
}
}
.search_form .input a {
position: absolute;
top: 0;
right: 0;
width: 44px;
height: 100%;
cursor: pointer;
vertical-align: middle;
text-align: center;
line-height: 66px;
font-size: 15px;
color: #FFF;
}
.search_form .input a:after {
content: "\e03e";
}
.search_form.shown.filled .input a:after {
content: "\e04e";
}
@media all and (max-width: 480px) {
.search_form .input a {
line-height: 44px;
}
}
.search_form .results_container {
text-align: left;
position: absolute;
background: #5c697b;
margin: 4px 0 0;
width: 470px;
min-height: 50px;
box-shadow: 0 20px 20px -10px rgba(0,0,0,0.55);
display: none;
}
@media all and (max-width: 480px) {
.search_form .results_container {
width: 320px;
}
}
.search_form.focused.filled .results_container,
.search_form.shown.filled .results_container {
display: block;
}
.search_form .results {
max-height: 570px;
overflow-x: hidden;
}
.media_result {
overflow: hidden;
height: 50px;
position: relative;
}
.media_result .options {
position: absolute;
height: 100%;
top: 0;
left: 30px;
right: 0;
padding: 13px;
border: 1px solid transparent;
border-width: 1px 0;
border-radius: 0;
box-shadow: inset 0 1px 8px rgba(0,0,0,0.25);
}
.media_result .options > .in_library_wanted {
margin-top: -7px;
}
.media_result .options > div {
border: 0;
}
.media_result .options .thumbnail {
vertical-align: middle;
}
.media_result .options select {
vertical-align: middle;
display: inline-block;
margin-right: 10px;
}
.media_result .options select[name=title] { width: 170px; }
.media_result .options select[name=profile] { width: 90px; }
.media_result .options select[name=category] { width: 80px; }
@media all and (max-width: 480px) {
.media_result .options select[name=title] { width: 90px; }
.media_result .options select[name=profile] { width: 50px; }
.media_result .options select[name=category] { width: 50px; }
}
.media_result .options .button {
vertical-align: middle;
display: inline-block;
}
.media_result .options .message {
height: 100%;
font-size: 20px;
color: #fff;
line-height: 20px;
}
.media_result .data {
position: absolute;
height: 100%;
top: 0;
left: 30px;
right: 0;
background: #5c697b;
cursor: pointer;
border-top: 1px solid rgba(255,255,255, 0.08);
transition: all .4s cubic-bezier(0.9,0,0.1,1);
}
.media_result .data.open {
left: 100% !important;
}
.media_result:last-child .data { border-bottom: 0; }
.media_result .in_wanted, .media_result .in_library {
position: absolute;
bottom: 2px;
left: 14px;
font-size: 11px;
}
.media_result .thumbnail {
width: 34px;
min-height: 100%;
display: block;
margin: 0;
vertical-align: top;
}
.media_result .info {
position: absolute;
top: 20%;
left: 15px;
right: 7px;
vertical-align: middle;
}
.media_result .info h2 {
margin: 0;
font-weight: normal;
font-size: 20px;
padding: 0;
}
.search_form .info h2 {
position: absolute;
width: 100%;
}
.media_result .info h2 .title {
display: block;
margin: 0;
text-overflow: ellipsis;
overflow: hidden;
white-space: nowrap;
}
.search_form .info h2 .title {
position: absolute;
width: 88%;
}
.media_result .info h2 .year {
padding: 0 5px;
text-align: center;
position: absolute;
width: 12%;
right: 0;
}
@media all and (max-width: 480px) {
.search_form .info h2 .year {
font-size: 12px;
margin-top: 7px;
}
}
.search_form .mask,
.media_result .mask {
position: absolute;
height: 100%;
width: 100%;
left: 0;
top: 0;
}

View File

@@ -1,4 +1,4 @@
Block.Search = new Class({
var BlockSearch = new Class({
Extends: BlockBase,
@@ -9,42 +9,46 @@ Block.Search = new Class({
var focus_timer = 0;
self.el = new Element('div.search_form').adopt(
new Element('div.input').adopt(
self.input = new Element('input', {
'placeholder': 'Search & add a new media',
new Element('a.icon-search', {
'events': {
'click': self.clear.bind(self),
'touchend': self.clear.bind(self)
}
}),
new Element('div.wrapper').adopt(
self.result_container = new Element('div.results_container', {
'tween': {
'duration': 200
},
'events': {
'keyup': self.keyup.bind(self),
'focus': function(){
if(focus_timer) clearTimeout(focus_timer);
self.el.addClass('focused');
if(this.get('value'))
self.hideResults(false)
},
'blur': function(){
focus_timer = (function(){
self.el.removeClass('focused')
}).delay(100);
'mousewheel': function(e){
(e).stopPropagation();
}
}
}),
new Element('a.icon2', {
'events': {
'click': self.clear.bind(self),
'touchend': self.clear.bind(self)
}
})
),
self.result_container = new Element('div.results_container', {
'tween': {
'duration': 200
},
'events': {
'mousewheel': function(e){
(e).stopPropagation();
}
}
}).adopt(
self.results = new Element('div.results')
}).grab(
self.results = new Element('div.results')
),
new Element('div.input').grab(
self.input = new Element('input', {
'placeholder': 'Search & add a new media',
'events': {
'input': self.keyup.bind(self),
'paste': self.keyup.bind(self),
'change': self.keyup.bind(self),
'keyup': self.keyup.bind(self),
'focus': function(){
if(focus_timer) clearTimeout(focus_timer);
if(this.get('value'))
self.hideResults(false);
},
'blur': function(){
focus_timer = (function(){
self.el.removeClass('focused');
}).delay(100);
}
}
})
)
)
);
@@ -64,11 +68,12 @@ Block.Search = new Class({
self.last_q = '';
self.input.set('value', '');
self.el.addClass('focused');
self.input.focus();
self.media = {};
self.results.empty();
self.el.removeClass('filled')
self.el.removeClass('filled');
}
},
@@ -102,7 +107,7 @@ Block.Search = new Class({
self.api_request.cancel();
if(self.autocomplete_timer) clearTimeout(self.autocomplete_timer);
self.autocomplete_timer = self.autocomplete.delay(300, self)
self.autocomplete_timer = self.autocomplete.delay(300, self);
}
},
@@ -112,10 +117,10 @@ Block.Search = new Class({
if(!self.q()){
self.hideResults(true);
return
return;
}
self.list()
self.list();
},
list: function(){
@@ -136,7 +141,7 @@ Block.Search = new Class({
'q': q
},
'onComplete': self.fill.bind(self, q)
})
});
}
else
self.fill(q, cache);
@@ -155,30 +160,25 @@ Block.Search = new Class({
Object.each(json, function(media){
if(typeOf(media) == 'array'){
Object.each(media, function(m){
Object.each(media, function(me){
var m = new Block.Search[m.type.capitalize() + 'Item'](m);
var m = new window['BlockSearch' + me.type.capitalize() + 'Item'](me);
$(m).inject(self.results);
self.media[m.imdb || 'r-'+Math.floor(Math.random()*10000)] = m;
if(q == m.imdb)
m.showOptions()
m.showOptions();
});
}
});
// Calculate result heights
var w = window.getSize(),
rc = self.result_container.getCoordinates();
self.results.setStyle('max-height', (w.y - rc.top - 50) + 'px');
self.mask.fade('out')
self.mask.fade('out');
},
loading: function(bool){
this.el[bool ? 'addClass' : 'removeClass']('loading')
this.el[bool ? 'addClass' : 'removeClass']('loading');
},
q: function(){

View File

@@ -0,0 +1,242 @@
@import "couchpotato/static/style/mixins";
.search_form {
display: inline-block;
z-index: 200;
width: 44px;
position: relative;
.icon-search {
position: absolute;
z-index: 2;
top: 50%;
left: 0;
height: 100%;
cursor: pointer;
text-align: center;
color: #FFF;
font-size: 20px;
@include translateY(-50%);
}
.wrapper {
position: absolute;
left: 44px;
bottom: 0;
background: $primary_color;
border-radius: $border_radius 0 0 $border_radius;
display: none;
box-shadow: 0 0 15px 2px rgba(0,0,0,.15);
&:before {
@include transform(rotate(45deg));
content: '';
display: block;
position: absolute;
height: 10px;
width: 10px;
background: $primary_color;
left: -6px;
bottom: 16px;
z-index: 1;
}
}
.input {
background: $background_color;
border-radius: $border_radius 0 0 $border_radius;
position: relative;
left: 4px;
height: 44px;
overflow: hidden;
width: 100%;
input {
position: absolute;
top: 0;
left: 0;
height: 100%;
width: 100%;
z-index: 1;
&::-ms-clear {
width : 0;
height: 0;
}
}
}
&.focused,
&.shown {
border-color: #04bce6;
.wrapper {
display: block;
width: 380px;
}
.input {
input {
opacity: 1;
}
}
}
.results_container {
min-height: 50px;
text-align: left;
position: relative;
left: 4px;
display: none;
background: $background_color;
border-radius: $border_radius 0 0 0;
overflow: hidden;
.results {
max-height: 280px;
overflow-x: hidden;
.media_result {
overflow: hidden;
height: 50px;
position: relative;
.options {
position: absolute;
height: 100%;
top: 0;
left: 30px;
right: 0;
padding: 10px;
background: rgba(0,0,0,.3);
> .in_library_wanted {
margin-top: -7px;
}
> div {
border: 0;
@include flexbox();
}
.thumbnail {
vertical-align: middle;
}
select {
vertical-align: middle;
display: inline-block;
margin-right: 10px;
min-width: 70px;
@include flex(1 auto);
}
.button {
@include flex(1 auto);
vertical-align: middle;
display: inline-block;
}
.message {
height: 100%;
font-size: 20px;
color: #fff;
line-height: 20px;
}
}
.thumbnail {
width: 30px;
min-height: 100%;
display: block;
margin: 0;
vertical-align: top;
}
.data {
position: absolute;
height: 100%;
top: 0;
left: 30px;
right: 0;
cursor: pointer;
border-top: 1px solid rgba(255,255,255, 0.08);
transition: all .4s cubic-bezier(0.9,0,0.1,1);
@include translateX(0%);
background: $background_color;
&.open {
@include translateX(100%);
}
.in_wanted,
.in_library {
position: absolute;
bottom: 2px;
left: 14px;
font-size: 11px;
}
.info {
position: absolute;
top: 20%;
left: 15px;
right: 7px;
vertical-align: middle;
h2 {
margin: 0;
font-weight: 300;
font-size: 1.25em;
padding: 0;
position: absolute;
width: 100%;
@include flexbox();
.title {
display: inline-block;
margin: 0;
text-overflow: ellipsis;
overflow: hidden;
white-space: nowrap;
@include flex(1 auto);
}
.year {
opacity: .4;
padding: 0 5px;
width: auto;
display: none;
}
}
}
}
&:hover .info h2 .year {
display: inline-block;
}
&:last-child .data {
border-bottom: 0;
}
}
}
}
&.focused.filled,
&.shown.filled {
.results_container {
display: block;
}
.input {
border-radius: 0 0 0 $border_radius;
}
}
}

View File

@@ -73,4 +73,24 @@ config = [{
],
},
],
}, {
'name': 'torrent',
'groups': [
{
'tab': 'searcher',
'name': 'searcher',
'wizard': True,
'options': [
{
'name': 'minimum_seeders',
'advanced': True,
'label': 'Minimum seeders',
'description': 'Ignore torrents with seeders below this number',
'default': 1,
'type': 'int',
'unit': 'seeders'
},
],
},
],
}]

View File

@@ -87,31 +87,23 @@ class Searcher(SearcherBase):
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None):
if not preferred_quality: preferred_quality = {}
name = nzb['name']
size = nzb.get('size', 0)
nzb_words = re.split('\W+', simplifyString(name))
qualities = fireEvent('quality.all', single = True)
found = {}
for quality in qualities:
# Main in words
if quality['identifier'] in nzb_words:
found[quality['identifier']] = True
# Alt in words
if list(set(nzb_words) & set(quality['alternative'])):
found[quality['identifier']] = True
# Try guessing via quality tags
guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
guess = fireEvent('quality.guess', files = [nzb.get('name')], size = nzb.get('size', None), single = True)
if guess:
found[guess['identifier']] = True
# Hack for older movies that don't contain quality tag
name = nzb['name']
size = nzb.get('size', 0)
year_name = fireEvent('scanner.name_year', name, single = True)
if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
if size > 3000: # Assume dvdr
if size > 20000: # Assume bd50
log.info('Quality was missing in name, assuming it\'s a BR-Disk based on the size: %s', size)
found['bd50'] = True
elif size > 3000: # Assume dvdr
log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size)
found['dvdr'] = True
else: # Assume dvdrip
@@ -123,7 +115,10 @@ class Searcher(SearcherBase):
if found.get(allowed):
del found[allowed]
return not (found.get(preferred_quality['identifier']) and len(found) == 1)
if found.get(preferred_quality['identifier']) and len(found) == 1:
return False
return found
def correct3D(self, nzb, preferred_quality = None):
if not preferred_quality: preferred_quality = {}
@@ -134,7 +129,11 @@ class Searcher(SearcherBase):
# Try guessing via quality tags
guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
return threed == guess.get('is_3d')
if guess:
return threed == guess.get('is_3d')
# If no quality guess, assume not 3d
else:
return threed == False
def correctYear(self, haystack, year, year_range):
@@ -179,6 +178,25 @@ class Searcher(SearcherBase):
return False
def containsWords(self, rel_name, rel_words, conf, media):
# Make sure it has required words
words = splitString(self.conf('%s_words' % conf, section = 'searcher').lower())
try: words = removeDuplicate(words + splitString(media['category'][conf].lower()))
except: pass
req_match = 0
for req_set in words:
if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
if re.search(req_set[1:-1], rel_name):
log.debug('Regex match: %s', req_set[1:-1])
req_match += 1
else:
req = splitString(req_set, '&')
req_match += len(list(set(rel_words) & set(req))) == len(req)
return words, req_match > 0
def correctWords(self, rel_name, media):
media_title = fireEvent('searcher.get_search_title', media, single = True)
media_words = re.split('\W+', simplifyString(media_title))
@@ -186,31 +204,13 @@ class Searcher(SearcherBase):
rel_name = simplifyString(rel_name)
rel_words = re.split('\W+', rel_name)
# Make sure it has required words
required_words = splitString(self.conf('required_words', section = 'searcher').lower())
try: required_words = removeDuplicate(required_words + splitString(media['category']['required'].lower()))
except: pass
req_match = 0
for req_set in required_words:
req = splitString(req_set, '&')
req_match += len(list(set(rel_words) & set(req))) == len(req)
if len(required_words) > 0 and req_match == 0:
required_words, contains_required = self.containsWords(rel_name, rel_words, 'required', media)
if len(required_words) > 0 and not contains_required:
log.info2('Wrong: Required word missing: %s', rel_name)
return False
# Ignore releases
ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
try: ignored_words = removeDuplicate(ignored_words + splitString(media['category']['ignored'].lower()))
except: pass
ignored_match = 0
for ignored_set in ignored_words:
ignored = splitString(ignored_set, '&')
ignored_match += len(list(set(rel_words) & set(ignored))) == len(ignored)
if len(ignored_words) > 0 and ignored_match:
ignored_words, contains_ignored = self.containsWords(rel_name, rel_words, 'ignored', media)
if len(ignored_words) > 0 and contains_ignored:
log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
return False

80
couchpotato/core/media/movie/_base/main.py Normal file → Executable file
View File

@@ -1,7 +1,7 @@
import os
import traceback
import time
from CodernityDB.database import RecordNotFound
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
@@ -27,6 +27,10 @@ class MovieBase(MovieTypeBase):
addApiView('movie.add', self.addView, docs = {
'desc': 'Add new movie to the wanted list',
'return': {'type': 'object', 'example': """{
'success': True,
'movie': object
}"""},
'params': {
'identifier': {'desc': 'IMDB id of the movie your want to add.'},
'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
@@ -45,7 +49,7 @@ class MovieBase(MovieTypeBase):
})
addEvent('movie.add', self.add)
addEvent('movie.update_info', self.updateInfo)
addEvent('movie.update', self.update)
addEvent('movie.update_release_dates', self.updateReleaseDate)
def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None):
@@ -61,7 +65,7 @@ class MovieBase(MovieTypeBase):
return False
elif not params.get('info'):
try:
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), single = True)
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True)
if not is_movie:
msg = 'Can\'t add movie, seems to be a TV show.'
log.error(msg)
@@ -90,7 +94,7 @@ class MovieBase(MovieTypeBase):
# Default profile and category
default_profile = {}
if not params.get('profile_id'):
if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False):
default_profile = fireEvent('profile.default', single = True)
cat_id = params.get('category_id')
@@ -105,7 +109,7 @@ class MovieBase(MovieTypeBase):
'imdb': params.get('identifier')
},
'status': status if status else 'active',
'profile_id': params.get('profile_id', default_profile.get('_id')),
'profile_id': params.get('profile_id') or default_profile.get('_id'),
'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
}
@@ -117,8 +121,17 @@ class MovieBase(MovieTypeBase):
media['info'] = info
new = False
previous_profile = None
try:
m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc']
try:
db.get('id', m.get('profile_id'))
previous_profile = m.get('profile_id')
except RecordNotFound:
pass
except:
log.error('Failed getting previous profile: %s', traceback.format_exc())
except:
new = True
m = db.insert(media)
@@ -139,16 +152,16 @@ class MovieBase(MovieTypeBase):
# Clean snatched history
for release in fireEvent('release.for_media', m['_id'], single = True):
if release.get('status') in ['downloaded', 'snatched', 'done']:
if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
if params.get('ignore_previous', False):
release['status'] = 'ignored'
db.update(release)
fireEvent('release.update_status', release['_id'], status = 'ignored')
else:
fireEvent('release.delete', release['_id'], single = True)
m['profile_id'] = params.get('profile_id', default_profile.get('id'))
m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile
m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None)
m['last_edit'] = int(time.time())
m['tags'] = []
do_search = True
db.update(m)
@@ -161,7 +174,7 @@ class MovieBase(MovieTypeBase):
# Trigger update info
if added and update_after:
# Do full update to get images etc
fireEventAsync('movie.update_info', m['_id'], default_title = params.get('title'), on_complete = onComplete)
fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete)
# Remove releases
for rel in fireEvent('release.for_media', m['_id'], single = True):
@@ -169,6 +182,9 @@ class MovieBase(MovieTypeBase):
db.delete(rel)
movie_dict = fireEvent('media.get', m['_id'], single = True)
if not movie_dict:
log.debug('Failed adding media, can\'t find it anymore')
return False
if do_search and search_after:
onComplete = self.createOnComplete(m['_id'])
@@ -225,7 +241,7 @@ class MovieBase(MovieTypeBase):
db.update(m)
fireEvent('media.restatus', m['_id'])
fireEvent('media.restatus', m['_id'], single = True)
m = db.get('id', media_id)
@@ -245,7 +261,7 @@ class MovieBase(MovieTypeBase):
'success': False,
}
def updateInfo(self, media_id = None, identifier = None, default_title = None, extended = False):
def update(self, media_id = None, identifier = None, default_title = None, extended = False):
"""
Update movie information inside media['doc']['info']
@@ -258,6 +274,10 @@ class MovieBase(MovieTypeBase):
if self.shuttingDown():
return
lock_key = 'media.get.%s' % media_id if media_id else identifier
self.acquireLock(lock_key)
media = {}
try:
db = get_db()
@@ -301,42 +321,16 @@ class MovieBase(MovieTypeBase):
media['title'] = def_title
# Files
images = info.get('images', [])
media['files'] = media.get('files', {})
for image_type in ['poster']:
image_urls = info.get('images', [])
# Remove non-existing files
file_type = 'image_%s' % image_type
existing_files = list(set(media['files'].get(file_type, [])))
for ef in media['files'].get(file_type, []):
if not os.path.isfile(ef):
existing_files.remove(ef)
# Replace new files list
media['files'][file_type] = existing_files
if len(existing_files) == 0:
del media['files'][file_type]
# Loop over type
for image in images.get(image_type, []):
if not isinstance(image, (str, unicode)):
continue
if file_type not in media['files'] or len(media['files'].get(file_type, [])) == 0:
file_path = fireEvent('file.download', url = image, single = True)
if file_path:
media['files'][file_type] = [file_path]
break
else:
break
self.getPoster(media, image_urls)
db.update(media)
return media
except:
log.error('Failed update media: %s', traceback.format_exc())
return {}
self.releaseLock(lock_key)
return media
def updateReleaseDate(self, media_id):
"""
@@ -352,7 +346,7 @@ class MovieBase(MovieTypeBase):
media = db.get('id', media_id)
if not media.get('info'):
media = self.updateInfo(media_id)
media = self.update(media_id)
dates = media.get('info', {}).get('release_date')
else:
dates = media.get('info').get('release_date')

View File

@@ -0,0 +1,52 @@
var MovieDetails = new Class({
Extends: BlockBase,
sections: null,
initialize: function(parent, options){
var self = this;
self.sections = {};
self.el = new Element('div',{
'class': 'page active movie_details level_' + (options.level || 0)
}).adopt(
self.overlay = new Element('div.overlay', {
'events': {
'click': self.close.bind(self)
}
}).grab(
new Element('a.close.icon-left-arrow')
),
self.content = new Element('div.content').grab(
new Element('h1', {
'text': parent.getTitle() + (parent.get('year') ? ' (' + parent.get('year') + ')' : '')
})
)
);
self.addSection('description', new Element('div', {
'text': parent.get('plot')
}));
},
addSection: function(name, section_el){
var self = this;
name = name.toLowerCase();
self.content.grab(
self.sections[name] = new Element('div', {
'class': 'section section_' + name
}).grab(section_el)
);
},
close: function(){
var self = this;
self.el.dispose();
}
});

View File

@@ -45,15 +45,16 @@ var MovieList = new Class({
}) : null
);
if($(window).getSize().x <= 480 && !self.options.force_view)
self.changeView('list');
else
self.changeView(self.getSavedView() || self.options.view || 'details');
self.changeView(self.getSavedView() || self.options.view || 'thumb');
// Create the alphabet nav
if(self.options.navigation)
self.createNavigation();
self.getMovies();
App.on('movie.added', self.movieAdded.bind(self));
App.on('movie.deleted', self.movieDeleted.bind(self))
App.on('movie.deleted', self.movieDeleted.bind(self));
},
movieDeleted: function(notification){
@@ -67,7 +68,7 @@ var MovieList = new Class({
self.setCounter(self.counter_count-1);
self.total_movies--;
}
})
});
}
self.checkIfEmpty();
@@ -89,15 +90,11 @@ var MovieList = new Class({
create: function(){
var self = this;
// Create the alphabet nav
if(self.options.navigation)
self.createNavigation();
if(self.options.load_more)
self.scrollspy = new ScrollSpy({
min: function(){
var c = self.load_more.getCoordinates();
return c.top - window.document.getSize().y - 300
return c.top - window.document.getSize().y - 300;
},
onEnter: self.loadMore.bind(self)
});
@@ -138,7 +135,7 @@ var MovieList = new Class({
self.empty_message = null;
}
if(self.total_movies && count == 0 && !self.empty_message){
if(self.total_movies && count === 0 && !self.empty_message){
var message = (self.filter.search ? 'for "'+self.filter.search+'"' : '') +
(self.filter.starts_with ? ' in <strong>'+self.filter.starts_with+'</strong>' : '');
@@ -230,30 +227,33 @@ var MovieList = new Class({
),
new Element('div.menus').adopt(
self.navigation_counter = new Element('span.counter[title=Total]'),
self.filter_menu = new Block.Menu(self, {
'class': 'filter'
self.filter_menu = new BlockMenu(self, {
'class': 'filter',
'button_class': 'icon-filter'
}),
self.navigation_actions = new Element('ul.actions', {
self.navigation_actions = new Element('div.actions', {
'events': {
'click:relay(li)': function(e, el){
'click': function(e, el){
(e).stop();
var new_view = self.current_view == 'list' ? 'thumb' : 'list';
var a = 'active';
self.navigation_actions.getElements('.'+a).removeClass(a);
self.changeView(el.get('data-view'));
this.addClass(a);
self.changeView(new_view);
self.navigation_actions.getElement('[data-view='+new_view+']')
.addClass(a);
el.inject(el.getParent(), 'top');
el.getSiblings().hide();
setTimeout(function(){
el.getSiblings().setStyle('display', null);
}, 100)
}
}
}),
self.navigation_menu = new Block.Menu(self, {
'class': 'extra'
self.navigation_menu = new BlockMenu(self, {
'class': 'extra',
'button_class': 'icon-dots'
})
)
).inject(self.el, 'top');
);
// Mass edit
self.mass_edit_select_class = new Form.Check(self.mass_edit_select);
@@ -261,7 +261,7 @@ var MovieList = new Class({
new Element('option', {
'value': profile.get('_id'),
'text': profile.get('label')
}).inject(self.mass_edit_quality)
}).inject(self.mass_edit_quality);
});
self.filter_menu.addLink(
@@ -273,7 +273,7 @@ var MovieList = new Class({
'change': self.search.bind(self)
}
})
).addClass('search');
).addClass('search icon-search');
var available_chars;
self.filter_menu.addEvent('open', function(){
@@ -289,8 +289,8 @@ var MovieList = new Class({
available_chars = json.chars;
available_chars.each(function(c){
self.letters[c.capitalize()].addClass('available')
})
self.letters[c.capitalize()].addClass('available');
});
}
});
@@ -301,23 +301,23 @@ var MovieList = new Class({
'events': {
'click:relay(li.available)': function(e, el){
self.activateLetter(el.get('data-letter'));
self.getMovies(true)
self.getMovies(true);
}
}
})
);
// Actions
['mass_edit', 'details', 'list'].each(function(view){
['thumb', 'list'].each(function(view){
var current = self.current_view == view;
new Element('li', {
'class': 'icon2 ' + view + (current ? ' active ' : ''),
new Element('a', {
'class': 'icon-' + view + (current ? ' active ' : ''),
'data-view': view
}).inject(self.navigation_actions, current ? 'top' : 'bottom');
});
// All
self.letters['all'] = new Element('li.letter_all.available.active', {
self.letters.all = new Element('li.letter_all.available.active', {
'text': 'ALL'
}).inject(self.navigation_alpha);
@@ -346,7 +346,7 @@ var MovieList = new Class({
var selected = 0,
movies = self.movies.length;
self.movies.each(function(movie){
selected += movie.isSelected() ? 1 : 0
selected += movie.isSelected() ? 1 : 0;
});
var indeterminate = selected > 0 && selected < movies,
@@ -373,6 +373,7 @@ var MovieList = new Class({
(e).preventDefault();
this.set('text', 'Deleting..');
Api.request('media.delete', {
'method': 'post',
'data': {
'id': ids.join(','),
'delete_from': self.options.identifier
@@ -413,6 +414,7 @@ var MovieList = new Class({
var ids = self.getSelectedMovies();
Api.request('movie.edit', {
'method': 'post',
'data': {
'id': ids.join(','),
'profile_id': self.mass_edit_quality.get('value')
@@ -426,6 +428,7 @@ var MovieList = new Class({
var ids = self.getSelectedMovies();
Api.request('media.refresh', {
'method': 'post',
'data': {
'id': ids.join(',')
}
@@ -438,10 +441,10 @@ var MovieList = new Class({
var ids = [];
self.movies.each(function(movie){
if (movie.isSelected())
ids.include(movie.get('_id'))
ids.include(movie.get('_id'));
});
return ids
return ids;
},
massEditToggleAll: function(){
@@ -450,10 +453,10 @@ var MovieList = new Class({
var select = self.mass_edit_select.get('checked');
self.movies.each(function(movie){
movie.select(select)
movie.select(select);
});
self.calculateSelected()
self.calculateSelected();
},
reset: function(){
@@ -490,12 +493,12 @@ var MovieList = new Class({
.addClass(new_view+'_list');
self.current_view = new_view;
Cookie.write(self.options.identifier+'_view2', new_view, {duration: 1000});
Cookie.write(self.options.identifier+'_view3', new_view, {duration: 1000});
},
getSavedView: function(){
var self = this;
return Cookie.read(self.options.identifier+'_view2');
return Cookie.read(self.options.identifier+'_view3');
},
search: function(){
@@ -534,7 +537,7 @@ var MovieList = new Class({
self.load_more.set('text', 'loading...');
}
if(self.movies.length == 0 && self.options.loader){
if(self.movies.length === 0 && self.options.loader){
self.loader_first = new Element('div.loading').adopt(
new Element('div.message', {'text': self.options.title ? 'Loading \'' + self.options.title + '\'' : 'Loading...'})
@@ -587,7 +590,7 @@ var MovieList = new Class({
loadMore: function(){
var self = this;
if(self.offset >= self.options.limit)
self.getMovies()
self.getMovies();
},
store: function(movies){
@@ -600,7 +603,7 @@ var MovieList = new Class({
checkIfEmpty: function(){
var self = this;
var is_empty = self.movies.length == 0 && (self.total_movies == 0 || self.total_movies === undefined);
var is_empty = self.movies.length === 0 && (self.total_movies === 0 || self.total_movies === undefined);
if(self.title)
self.title[is_empty ? 'hide' : 'show']();

View File

@@ -1,7 +1,8 @@
Page.Manage = new Class({
var MoviesManage = new Class({
Extends: PageBase,
order: 20,
name: 'manage',
title: 'Do stuff to your existing movies!',
@@ -125,12 +126,12 @@ Page.Manage = new Class({
(folder_progress.eta > 0 ? ', ' + new Date ().increment('second', folder_progress.eta).timeDiffInWords().replace('from now', 'to go') : '')
}),
new Element('span.percentage', {'text': folder_progress.total ? Math.round(((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100) + '%' : '0%'})
).inject(self.progress_container)
).inject(self.progress_container);
});
}
}
})
});
}, 1000);
},
@@ -140,10 +141,10 @@ Page.Manage = new Class({
for (folder in progress_object) {
if (progress_object.hasOwnProperty(folder)) {
temp_array.push(folder)
temp_array.push(folder);
}
}
return temp_array.stableSort()
return temp_array.stableSort();
}
});

View File

@@ -2,7 +2,10 @@ var MovieAction = new Class({
Implements: [Options],
class_name: 'action icon2',
class_name: 'action',
label: 'UNKNOWN',
button: null,
details: null,
initialize: function(movie, options){
var self = this;
@@ -11,20 +14,33 @@ var MovieAction = new Class({
self.movie = movie;
self.create();
if(self.el)
self.el.addClass(self.class_name)
if(self.button)
self.button.addClass(self.class_name);
},
create: function(){},
getButton: function(){
return this.button || null;
},
getDetails: function(){
return this.details || null;
},
getLabel: function(){
return this.label;
},
disable: function(){
if(this.el)
this.el.addClass('disable')
this.el.addClass('disable');
},
enable: function(){
if(this.el)
this.el.removeClass('disable')
this.el.removeClass('disable');
},
getTitle: function(){
@@ -37,7 +53,7 @@ var MovieAction = new Class({
try {
return self.movie.original_title ? self.movie.original_title : self.movie.titles[0];
}
catch(e){
catch(e2){
return 'Unknown';
}
}
@@ -46,10 +62,10 @@ var MovieAction = new Class({
get: function(key){
var self = this;
try {
return self.movie.get(key)
return self.movie.get(key);
}
catch(e){
return self.movie[key]
return self.movie[key];
}
},
@@ -63,7 +79,7 @@ var MovieAction = new Class({
},
toElement: function(){
return this.el || null
return this.el || null;
}
});
@@ -78,9 +94,10 @@ MA.IMDB = new Class({
create: function(){
var self = this;
self.id = self.movie.get('imdb') || self.movie.get('identifier');
self.id = self.movie.getIdentifier ? self.movie.getIdentifier() : self.get('imdb');
self.el = new Element('a.imdb', {
self.button = new Element('a.imdb', {
'text': 'IMDB',
'title': 'Go to the IMDB page of ' + self.getTitle(),
'href': 'http://www.imdb.com/title/'+self.id+'/',
'target': '_blank'
@@ -94,47 +111,34 @@ MA.IMDB = new Class({
MA.Release = new Class({
Extends: MovieAction,
label: 'Releases',
create: function(){
var self = this;
self.el = new Element('a.releases.download', {
'title': 'Show the releases that are available for ' + self.getTitle(),
'events': {
'click': self.show.bind(self)
}
});
if(!self.movie.data.releases || self.movie.data.releases.length == 0)
self.el.hide();
else
self.showHelper();
App.on('movie.searcher.ended', function(notification){
if(self.movie.data._id != notification.data._id) return;
self.releases = null;
if(self.options_container){
self.options_container.destroy();
self.options_container = null;
// Releases are currently displayed
if(self.options_container.isDisplayed()){
self.options_container.destroy();
self.getDetails();
}
else {
self.options_container.destroy();
self.options_container = null;
}
}
});
},
show: function(e){
var self = this;
if(e)
(e).preventDefault();
self.createReleases();
},
createReleases: function(){
getDetails: function(refresh){
var self = this;
if(!self.options_container){
if(!self.options_container || refresh){
self.options_container = new Element('div.options').grab(
self.release_container = new Element('div.releases.table')
);
@@ -155,14 +159,14 @@ MA.Release = new Class({
var quality = Quality.getQuality(release.quality) || {},
info = release.info || {},
provider = self.get(release, 'provider') + (info['provider_extra'] ? self.get(release, 'provider_extra') : '');
provider = self.get(release, 'provider') + (info.provider_extra ? self.get(release, 'provider_extra') : '');
var release_name = self.get(release, 'name');
if(release.files && release.files.length > 0){
try {
var movie_file = release.files.filter(function(file){
var type = File.Type.get(file.type_id);
return type && type.identifier == 'movie'
return type && type.identifier == 'movie';
}).pick();
release_name = movie_file.path.split(Api.getOption('path_sep')).getLast();
}
@@ -170,19 +174,19 @@ MA.Release = new Class({
}
// Create release
release['el'] = new Element('div', {
release.el = new Element('div', {
'class': 'item '+release.status,
'id': 'release_'+release._id
}).adopt(
new Element('span.name', {'text': release_name, 'title': release_name}),
new Element('span.status', {'text': release.status, 'class': 'release_status '+release.status}),
new Element('span.status', {'text': release.status, 'class': 'status '+release.status}),
new Element('span.quality', {'text': quality.label + (release.is_3d ? ' 3D' : '') || 'n/a'}),
new Element('span.size', {'text': info['size'] ? Math.floor(self.get(release, 'size')) : 'n/a'}),
new Element('span.size', {'text': info.size ? Math.floor(self.get(release, 'size')) : 'n/a'}),
new Element('span.age', {'text': self.get(release, 'age')}),
new Element('span.score', {'text': self.get(release, 'score')}),
new Element('span.provider', { 'text': provider, 'title': provider }),
info['detail_url'] ? new Element('a.info.icon2', {
'href': info['detail_url'],
info.detail_url ? new Element('a.info.icon2', {
'href': info.detail_url,
'target': '_blank'
}) : new Element('a'),
new Element('a.download.icon2', {
@@ -252,7 +256,7 @@ MA.Release = new Class({
self.trynext_container.adopt(
new Element('span.or', {
'text': 'This movie is snatched, if anything went wrong, download'
'text': 'If anything went wrong, download'
}),
lr ? new Element('a.button.orange', {
'text': 'the same release again',
@@ -276,7 +280,7 @@ MA.Release = new Class({
new Element('span.or', {
'text': 'or pick one below'
})] : null
)
);
}
self.last_release = null;
@@ -284,9 +288,7 @@ MA.Release = new Class({
}
// Show it
self.options_container.inject(self.movie, 'top');
self.movie.slide('in', self.options_container);
return self.options_container;
},
@@ -302,7 +304,7 @@ MA.Release = new Class({
self.movie.data.releases.each(function(release){
if(has_available && has_snatched) return;
if(['snatched', 'downloaded', 'seeding'].contains(release.status))
if(['snatched', 'downloaded', 'seeding', 'done'].contains(release.status))
has_snatched = true;
if(['available'].contains(release.status))
@@ -335,13 +337,13 @@ MA.Release = new Class({
'click': self.markMovieDone.bind(self)
}
})
)
);
}
},
get: function(release, type){
return (release.info && release.info[type] !== undefined) ? release.info[type] : 'n/a'
return (release.info && release.info[type] !== undefined) ? release.info[type] : 'n/a';
},
download: function(release){
@@ -379,7 +381,7 @@ MA.Release = new Class({
'data': {
'id': release._id
}
})
});
},
@@ -396,7 +398,7 @@ MA.Release = new Class({
movie.set('tween', {
'duration': 300,
'onComplete': function(){
self.movie.destroy()
self.movie.destroy();
}
});
movie.tween('height', 0);
@@ -422,49 +424,35 @@ MA.Trailer = new Class({
Extends: MovieAction,
id: null,
label: 'Trailer',
create: function(){
getDetails: function(){
var self = this;
self.el = new Element('a.trailer', {
'title': 'Watch the trailer of ' + self.getTitle(),
'events': {
'click': self.watch.bind(self)
}
});
if(!self.player_container){
var id = 'trailer-'+randomString();
self.player_container = new Element('div.icon-play[id='+id+']', {
'events': {
'click': function(e){
self.watch(id);
}
}
});
self.container = new Element('div.trailer_container')
.grab(self.player_container);
}
return self.player_container;
},
watch: function(offset){
watch: function(){
var self = this;
var data_url = 'https://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18';
var url = data_url.substitute({
var data_url = 'https://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18',
url = data_url.substitute({
'title': encodeURI(self.getTitle()),
'year': self.get('year'),
'offset': offset || 1
}),
size = $(self.movie).getSize(),
height = self.options.height || (size.x/16)*9,
id = 'trailer-'+randomString();
self.player_container = new Element('div[id='+id+']');
self.container = new Element('div.hide.trailer_container')
.adopt(self.player_container)
.inject($(self.movie), 'top');
self.container.setStyle('height', 0);
self.container.removeClass('hide');
self.close_button = new Element('a.hide.hide_trailer', {
'text': 'Hide trailer',
'events': {
'click': self.stop.bind(self)
}
}).inject(self.movie);
self.container.setStyle('height', height);
$(self.movie).setStyle('height', height);
'year': self.get('year')
});
new Request.JSONP({
'url': url,
@@ -484,8 +472,6 @@ MA.Trailer = new Class({
}
});
self.close_button.removeClass('hide');
var quality_set = false;
var change_quality = function(state){
if(!quality_set && (state.data == 1 || state.data || 2)){
@@ -501,7 +487,9 @@ MA.Trailer = new Class({
self.player.addEventListener('onStateChange', change_quality);
}
}).send()
}).send();
return self.container;
},
@@ -516,7 +504,7 @@ MA.Trailer = new Class({
setTimeout(function(){
self.container.destroy();
self.close_button.destroy();
}, 1800)
}, 1800);
}
@@ -529,7 +517,8 @@ MA.Edit = new Class({
create: function(){
var self = this;
self.el = new Element('a.edit', {
self.button = new Element('a.edit', {
'text': 'Edit',
'title': 'Change movie information, like title and quality.',
'events': {
'click': self.editMovie.bind(self)
@@ -578,7 +567,7 @@ MA.Edit = new Class({
// Fill categories
var categories = CategoryList.getAll();
if(categories.length == 0)
if(categories.length === 0)
self.category_select.hide();
else {
self.category_select.show();
@@ -652,7 +641,8 @@ MA.Refresh = new Class({
create: function(){
var self = this;
self.el = new Element('a.refresh', {
self.button = new Element('a.refresh', {
'text': 'Refresh',
'title': 'Refresh the movie info and do a forced search',
'events': {
'click': self.doRefresh.bind(self)
@@ -663,7 +653,7 @@ MA.Refresh = new Class({
doRefresh: function(e){
var self = this;
(e).preventDefault();
(e).stop();
Api.request('media.refresh', {
'data': {
@@ -679,17 +669,18 @@ MA.Readd = new Class({
Extends: MovieAction,
create: function(){
var self = this;
var self = this,
movie_done = self.movie.data.status == 'done',
snatched;
var movie_done = self.movie.data.status == 'done';
if(self.movie.data.releases && !movie_done)
var snatched = self.movie.data.releases.filter(function(release){
return release.status && (release.status == 'snatched' || release.status == 'downloaded' || release.status == 'done');
snatched = self.movie.data.releases.filter(function(release){
return release.status && (release.status == 'snatched' || release.status == 'seeding' || release.status == 'downloaded' || release.status == 'done');
}).length;
if(movie_done || snatched && snatched > 0)
self.el = new Element('a.readd', {
'title': 'Readd the movie and mark all previous snatched/downloaded as ignored',
'title': 'Re-add the movie and mark all previous snatched/downloaded as ignored',
'events': {
'click': self.doReadd.bind(self)
}
@@ -703,7 +694,7 @@ MA.Readd = new Class({
Api.request('movie.add', {
'data': {
'identifier': self.movie.get('identifier'),
'identifier': self.movie.getIdentifier(),
'ignore_previous': 1
}
});
@@ -785,7 +776,7 @@ MA.Delete = new Class({
movie.set('tween', {
'duration': 300,
'onComplete': function(){
self.movie.destroy()
self.movie.destroy();
}
});
movie.tween('height', 0);
@@ -840,7 +831,7 @@ MA.Files = new Class({
new Element('div.file.item').adopt(
new Element('span.name', {'text': file}),
new Element('span.type', {'text': type})
).inject(rel)
).inject(rel);
});
});
});

File diff suppressed because it is too large Load Diff

View File

@@ -2,22 +2,51 @@ var Movie = new Class({
Extends: BlockBase,
action: {},
actions: [],
details: null,
initialize: function(list, options, data){
var self = this;
self.data = data;
self.view = options.view || 'details';
self.list = list;
self.el = new Element('div.movie');
self.el = new Element('a.movie', {
'events': {
'click': function(e){
(e).stop();
self.openDetails();
}
}
});
self.profile = Quality.getProfile(data.profile_id) || {};
self.category = CategoryList.getCategory(data.category_id) || {};
self.parent(self, options);
self.addEvents();
if(data.identifiers.imdb == 'tt1228705')
self.openDetails();
},
openDetails: function(){
var self = this;
if(!self.details){
self.details = new MovieDetails(self, {
'level': 3
});
// Add action items
self.actions.each(function(action, nr){
var details = action.getDetails();
if(details)
self.details.addSection(action.getLabel(), details);
});
}
App.getPageContainer().grab(self.details);
},
addEvents: function(){
@@ -30,7 +59,6 @@ var Movie = new Class({
if(self.data._id != notification.data._id) return;
self.busy(false);
self.removeView();
self.update.delay(2000, self, notification);
};
App.on('movie.update', self.global_events['movie.update']);
@@ -47,20 +75,28 @@ var Movie = new Class({
// Remove spinner
self.global_events['movie.searcher.ended'] = function(notification){
if(notification.data && self.data._id == notification.data._id)
self.busy(false)
self.busy(false);
};
App.on('movie.searcher.ended', self.global_events['movie.searcher.ended']);
// Reload when releases have updated
self.global_events['release.update_status'] = function(notification){
var data = notification.data;
if(data && self.data._id == data.movie_id){
if(data && self.data._id == data.media_id){
if(!self.data.releases)
self.data.releases = [];
self.data.releases.push({'quality': data.quality, 'status': data.status});
self.updateReleases();
var updated = false;
self.data.releases.each(function(release){
if(release._id == data._id){
release.status = data.status;
updated = true;
}
});
if(updated)
self.updateReleases();
}
};
@@ -94,12 +130,12 @@ var Movie = new Class({
if(self.mask)
self.mask.destroy();
if(self.spinner)
self.spinner.el.destroy();
self.spinner.destroy();
self.spinner = null;
self.mask = null;
}, timeout || 400);
}
}, timeout || 1000)
}, timeout || 1000);
}
else if(!self.spinner) {
self.createMask();
@@ -122,7 +158,6 @@ var Movie = new Class({
self.data = notification.data;
self.el.empty();
self.removeView();
self.profile = Quality.getProfile(self.data.profile_id) || {};
self.category = CategoryList.getCategory(self.data.category_id) || {};
@@ -136,15 +171,30 @@ var Movie = new Class({
self.el.addClass('status_'+self.get('status'));
var eta = null,
eta_date = null,
now = Math.round(+new Date()/1000);
if(self.data.info.release_date)
[self.data.info.release_date.dvd, self.data.info.release_date.theater].each(function(timestamp){
if (timestamp > 0 && (eta === null || Math.abs(timestamp - now) < Math.abs(eta - now)))
eta = timestamp;
});
if(eta){
eta_date = new Date(eta * 1000);
eta_date = eta_date.toLocaleString('en-us', { month: "long" }) + ' ' + eta_date.getFullYear();
}
self.el.adopt(
self.select_checkbox = new Element('input[type=checkbox].inlay', {
'events': {
'change': function(){
self.fireEvent('select')
self.fireEvent('select');
}
}
}),
self.thumbnail = (self.data.files && self.data.files.image_poster) ? new Element('img', {
self.thumbnail = (self.data.files && self.data.files.image_poster && self.data.files.image_poster.length > 0) ? new Element('img', {
'class': 'type_image poster',
'src': Api.createUrl('file.cache') + self.data.files.image_poster[0].split(Api.getOption('path_sep')).pop()
}): null,
@@ -158,27 +208,33 @@ var Movie = new Class({
'text': self.data.info.year || 'n/a'
})
),
self.description = new Element('div.description', {
'text': self.data.info.plot
}),
self.eta = eta_date && (now+8035200 > eta) ? new Element('div.eta', {
'text': eta_date,
'title': 'ETA'
}) : null,
self.quality = new Element('div.quality', {
'events': {
'click': function(e){
var releases = self.el.getElement('.actions .releases');
if(releases.isVisible())
releases.fireEvent('click', [e])
releases.fireEvent('click', [e]);
}
}
})
),
self.actions = new Element('div.actions')
self.actions_el = new Element('div.actions', {
'events': {
'click': function(e){
(e).stopPropagation();
}
}
})
)
);
if(!self.thumbnail)
self.el.addClass('no_thumbnail');
//self.changeView(self.view);
self.select_checkbox_class = new Form.Check(self.select_checkbox);
// Add profile
@@ -186,9 +242,9 @@ var Movie = new Class({
self.profile.getTypes().each(function(type){
var q = self.addQuality(type.get('quality'), type.get('3d'));
if((type.finish == true || type.get('finish')) && !q.hasClass('finish')){
if((type.finish === true || type.get('finish')) && !q.hasClass('finish')){
q.addClass('finish');
q.set('title', q.get('title') + ' Will finish searching for this movie if this quality is found.')
q.set('title', q.get('title') + ' Will finish searching for this movie if this quality is found.');
}
});
@@ -196,17 +252,20 @@ var Movie = new Class({
// Add releases
self.updateReleases();
Object.each(self.options.actions, function(action, key){
self.action[key.toLowerCase()] = action = new self.options.actions[key](self);
if(action.el)
self.actions.adopt(action)
self.options.actions.each(function(action){
var action = new action(self),
button = action.getButton();
if(button)
self.actions_el.grab(button);
self.actions.push(action);
});
},
updateReleases: function(){
var self = this;
if(!self.data.releases || self.data.releases.length == 0) return;
if(!self.data.releases || self.data.releases.length === 0) return;
self.data.releases.each(function(release){
@@ -218,7 +277,7 @@ var Movie = new Class({
if (q && !q.hasClass(status)){
q.addClass(status);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status)
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status);
}
});
@@ -244,65 +303,37 @@ var Movie = new Class({
else if(self.data.info.titles.length > 0)
return self.getUnprefixedTitle(self.data.info.titles[0]);
return 'Unknown movie'
return 'Unknown movie';
},
getUnprefixedTitle: function(t){
if(t.substr(0, 4).toLowerCase() == 'the ')
t = t.substr(4) + ', The';
else if(t.substr(0, 3).toLowerCase() == 'an ')
t = t.substr(3) + ', An';
else if(t.substr(0, 2).toLowerCase() == 'a ')
t = t.substr(2) + ', A';
return t;
},
slide: function(direction, el){
getIdentifier: function(){
var self = this;
if(direction == 'in'){
self.temp_view = self.view;
self.changeView('details');
self.el.addEvent('outerClick', function(){
self.removeView();
self.slide('out')
});
el.show();
self.data_container.addClass('hide_right');
try {
return self.get('identifiers').imdb;
}
else {
self.el.removeEvents('outerClick');
catch (e){ }
setTimeout(function(){
if(self.el)
self.el.getElements('> :not(.data):not(.poster):not(.movie_container)').hide();
}, 600);
self.data_container.removeClass('hide_right');
}
},
changeView: function(new_view){
var self = this;
if(self.el)
self.el
.removeClass(self.view+'_view')
.addClass(new_view+'_view');
self.view = new_view;
},
removeView: function(){
var self = this;
self.el.removeClass(self.view+'_view')
return self.get('imdb');
},
get: function(attr){
return this.data[attr] || this.data.info[attr]
return this.data[attr] || this.data.info[attr];
},
select: function(bool){
var self = this;
self.select_checkbox_class[bool ? 'check' : 'uncheck']()
self.select_checkbox_class[bool ? 'check' : 'uncheck']();
},
isSelected: function(){

View File

@@ -0,0 +1,367 @@
@import "couchpotato/static/style/mixins";
.page.movies {
z-index: 21; // Sets navigation above
bottom: auto;
}
.page.movies_wanted, .page.movies_manage {
top: $header_height;
padding: 0;
}
.list_list {
font-weight: 300;
.poster {
display: none;
}
.movie {
display: block;
border-top: 1px solid $theme_off;
position: relative;
cursor: pointer;
&:last-child {
border-bottom: none;
}
&:hover {
background: rgba(0,0,0,.1);
}
.data {
padding: $padding/2 $padding;
.info {
@include flexbox();
flex-flow: row nowrap;
.title {
@include flex(1 auto);
.year {
display: inline-block;
margin-left: 10px;
opacity: .5;
}
}
.quality span {
float: left;
color: #FFF;
font-size: .7em;
padding: 2px 4px;
background: rgba(0,0,0,.2);
border-radius: 1px;
margin: 2px 0 0 2px;
}
}
}
}
}
.thumb_list {
font-size: 12px;
padding: 0 $padding;
.movie {
@include span(6);
float: left;
margin-bottom: $padding;
position: relative;
&:nth-child(4n+4){
@include span(last);
}
&:nth-child(4n+5){
clear: both;
}
.poster {
border-radius: $border_radius;
overflow: hidden;
width: 100%;
float: left;
}
.data {
clear: both;
.info {
height: 44px;
.title {
@include flexbox();
padding: 3px 0;
span {
@include flex(1 auto);
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.year {
display: inline-block;
margin-left: 5px;
opacity: .5;
}
}
.quality {
white-space: nowrap;
overflow: hidden;
span {
color: #FFF;
font-size: .8em;
padding: 2px 4px;
background: rgba(0,0,0,.2);
border-radius: 1px;
margin-right: 2px;
}
}
}
}
.actions {
position: absolute;
top: $padding / 2;
right: $padding / 2;
display: none;
a {
display: block;
background: $background_color;
padding: $padding / 3;
width: auto;
margin-bottom: 1px;
clear: both;
float: right;
}
}
&:hover .actions {
display: block;
}
.mask {
bottom: 44px;
border-radius: $border_radius;
}
}
}
.check {
position: absolute;
top: 0;
left: $padding;
display: none;
}
.eta {
display: none;
}
.page.movie_details {
$gab-width: $header_width/3;
.overlay {
position: fixed;
top: 0;
bottom: 0;
right: 0;
left: $header_width;
background: rgba(0,0,0,.6);
border-radius: 3px 0 0 3px;
.close {
display: inline-block;
text-align: center;
font-size: 60px;
line-height: $header_height;
color: #FFF;
width: $gab-width;
cursor: pointer;
height: 100%;
}
}
.content {
position: fixed;
top: 0;
bottom: 0;
right: 0;
left: $header_width + $gab-width;
background: $background_color;
z-index: 200;
border-radius: 3px 0 0 3px;
h1 {
margin: 0;
padding: 0 $padding;
font-size: 24px;
line-height: $header_height;
color: rgba(0,0,0,.5);
font-weight: 300;
}
.section {
padding: $padding $padding;
border-top: 1px solid rgba(0,0,0,.1);
}
}
.releases {
.buttons {
margin-bottom: $padding/2;
}
.item span {
overflow: hidden;
white-space: nowrap;
text-overflow: ellipsis;
text-align: center;
}
.item .name {
@include flex(1 auto);
text-align: left;
}
.status { min-width: 70px; max-width: 70px; }
.quality { min-width: 60px; max-width: 60px; }
.size { min-width: 40px; max-width: 40px; }
.age { min-width: 40px; max-width: 40px; }
.score { min-width: 45px; max-width: 45px; }
.provider { min-width: 110px; max-width: 110px; }
}
}
.alph_nav {
.mass_edit_form {
display: none;
}
.menus {
margin-right: $padding;
.button {
padding: 0 $padding/2;
line-height: $header_height;
color: rgba(0, 0, 0, 0.5);
}
.counter, .more_menu, .actions {
float: left;
}
.counter {
line-height: $header_height;
}
.actions {
a {
display: none;
}
.active {
display: inline-block;
}
}
.filter {
.wrapper {
width: 320px;
}
.button {
margin-top: -2px;
}
.search {
position: relative;
&:before {
position: absolute;
height: 100%;
line-height: 38px;
padding-left: $padding/2;
font-size: 16px;
opacity: .5;
}
input {
width: 100%;
padding: $padding/2 $padding/2 $padding/2 $padding*1.5;
background: $background_color;
border: none;
border-bottom: 1px solid $theme_off;
}
}
.numbers {
padding: $padding/2;
li {
float: left;
width: 10%;
height: 30px;
line-height: 30px;
text-align: center;
color: rgba(0,0,0,.2);
cursor: default;
&.active {
background: $theme_off;
}
&.available {
color: rgba(0,0,0,1);
cursor: pointer;
&:hover {
background: $theme_off;
}
}
}
}
}
.more_menu {
&.show .button {
color: rgba(0, 0, 0, 1);
}
.wrapper {
top: $header_height - 10px;
padding-top: 4px;
border-radius: $border_radius $border_radius 0 0;
&:before {
top: 0;
left: auto;
right: 22px;
}
ul {
border-radius: $border_radius $border_radius 0 0;
}
}
}
}
}

View File

@@ -0,0 +1,49 @@
// Parent page for the movie overview sub pages (Wanted & Manage).
// Hosts a shared block navigation and routes actions to the active sub page.
Page.Movies = new Class({

	Extends: PageBase,

	name: 'movies',
	sub_pages: ['Wanted', 'Manage'],
	default_page: 'Wanted',
	current_page: null,

	initialize: function(parent, options){
		var self = this;
		self.parent(parent, options);

		// Shared navigation block, injected above the page content
		self.navigation = new BlockNavigation();
		$(self.navigation).inject(self.el, 'top');
	},

	defaultAction: function(action, params){
		var self = this,
			previous = self.current_page;

		// Hide the page that is currently showing and detach its list navigation
		if(previous){
			previous.hide();
			if(previous.list && previous.list.navigation)
				previous.list.navigation.dispose();
		}

		// Work out which sub page the route points at
		var route = new Route();
		route.parse(action);

		var page_name = route.getPage() != 'index' ? route.getPage().capitalize() : self.default_page;

		// NOTE(review): assumes sub_pages entries have been replaced by
		// {name, class} objects before this runs — confirm against the loader.
		var target = self.sub_pages.filter(function(sub){
			return sub.name == page_name;
		}).pick()['class'];

		target.open(route.getAction() || 'index', params);
		target.show();

		if(target.list && target.list.navigation)
			target.list.navigation.inject(self.navigation);

		self.current_page = target;
		self.navigation.activate(page_name.toLowerCase());
	}

});

View File

@@ -1,4 +1,4 @@
Block.Search.MovieItem = new Class({
var BlockSearchMovieItem = new Class({
Implements: [Options, Events],
@@ -31,9 +31,11 @@ Block.Search.MovieItem = new Class({
}
}).adopt(
self.info_container = new Element('div.info').adopt(
new Element('h2').adopt(
new Element('h2', {
'title': self.getTitle()
}).adopt(
self.title = new Element('span.title', {
'text': info.titles && info.titles.length > 0 ? info.titles[0] : 'Unknown'
'text': self.getTitle()
}),
self.year = info.year ? new Element('span.year', {
'text': info.year
@@ -48,7 +50,7 @@ Block.Search.MovieItem = new Class({
self.alternativeTitle({
'title': title
});
})
});
},
alternativeTitle: function(alternative){
@@ -68,7 +70,7 @@ Block.Search.MovieItem = new Class({
},
get: function(key){
return this.info[key]
return this.info[key];
},
showOptions: function(){
@@ -77,7 +79,7 @@ Block.Search.MovieItem = new Class({
self.createOptions();
self.data_container.addClass('open');
self.el.addEvent('outerClick', self.closeOptions.bind(self))
self.el.addEvent('outerClick', self.closeOptions.bind(self));
},
@@ -85,7 +87,7 @@ Block.Search.MovieItem = new Class({
var self = this;
self.data_container.removeClass('open');
self.el.removeEvents('outerClick')
self.el.removeEvents('outerClick');
},
add: function(e){
@@ -132,10 +134,11 @@ Block.Search.MovieItem = new Class({
if(!self.options_el.hasClass('set')){
var in_library;
if(info.in_library){
var in_library = [];
in_library = [];
(info.in_library.releases || []).each(function(release){
in_library.include(release.quality)
in_library.include(release.quality);
});
}
@@ -171,14 +174,14 @@ Block.Search.MovieItem = new Class({
Array.each(self.alternative_titles, function(alt){
new Element('option', {
'text': alt.title
}).inject(self.title_select)
}).inject(self.title_select);
});
// Fill categories
var categories = CategoryList.getAll();
if(categories.length == 0)
if(categories.length === 0)
self.category_select.hide();
else {
self.category_select.show();
@@ -199,12 +202,12 @@ Block.Search.MovieItem = new Class({
new Element('option', {
'value': profile.get('_id'),
'text': profile.get('label')
}).inject(self.profile_select)
}).inject(self.profile_select);
});
self.options_el.addClass('set');
if(categories.length == 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
if(categories.length === 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
!(self.info.in_wanted && self.info.in_wanted.profile_id || in_library))
self.add();
@@ -218,12 +221,12 @@ Block.Search.MovieItem = new Class({
self.mask = new Element('div.mask').inject(self.el).fade('hide');
createSpinner(self.mask);
self.mask.fade('in')
self.mask.fade('in');
},
toElement: function(){
return this.el
return this.el;
}
});

View File

@@ -1,7 +1,8 @@
Page.Wanted = new Class({
var MoviesWanted = new Class({
Extends: PageBase,
order: 10,
name: 'wanted',
title: 'Gimmy gimmy gimmy!',
folder_browser: null,
@@ -9,7 +10,7 @@ Page.Wanted = new Class({
indexAction: function(){
var self = this;
if(!self.wanted){
if(!self.list){
self.manual_search = new Element('a', {
'title': 'Force a search for the full wanted list',
@@ -19,7 +20,6 @@ Page.Wanted = new Class({
}
});
self.scan_folder = new Element('a', {
'title': 'Scan a folder and rename all movies in it',
'text': 'Manual folder scan',
@@ -29,7 +29,7 @@ Page.Wanted = new Class({
});
// Wanted movies
self.wanted = new MovieList({
self.list = new MovieList({
'identifier': 'wanted',
'status': 'active',
'actions': [MA.IMDB, MA.Trailer, MA.Release, MA.Edit, MA.Refresh, MA.Readd, MA.Delete],
@@ -37,7 +37,7 @@ Page.Wanted = new Class({
'menu': [self.manual_search, self.scan_folder],
'on_empty_element': App.createUserscriptButtons().addClass('empty_wanted')
});
$(self.wanted).inject(self.el);
$(self.list).inject(self.el);
// Check if search is in progress
self.startProgressInterval.delay(4000, self);
@@ -90,7 +90,7 @@ Page.Wanted = new Class({
};
if(!self.folder_browser){
self.folder_browser = new Option['Directory']("Scan", "folder", "", options);
self.folder_browser = new Option.Directory("Scan", "folder", "", options);
self.folder_browser.save = function() {
var folder = self.folder_browser.getValue();

View File

@@ -22,11 +22,18 @@ config = [{
'description': 'Maximum number of items displayed from each chart.',
},
{
'name': 'update_interval',
'default': 12,
'type': 'int',
'name': 'hide_wanted',
'default': False,
'type': 'bool',
'advanced': True,
'description': '(hours)',
'description': 'Hide the chart movies that are already in your wanted list.',
},
{
'name': 'hide_library',
'default': False,
'type': 'bool',
'advanced': True,
'description': 'Hide the chart movies that are already in your library.',
},
],
},

View File

@@ -1,6 +1,5 @@
import time
from couchpotato import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent,fireEvent
@@ -13,13 +12,14 @@ log = CPLog(__name__)
class Charts(Plugin):
update_in_progress = False
update_interval = 72 # hours
def __init__(self):
addApiView('charts.view', self.automationView)
addEvent('app.load', self.setCrons)
def setCrons(self):
fireEvent('schedule.interval', 'charts.update_cache', self.updateViewCache, hours = self.conf('update_interval', default = 12))
fireEvent('schedule.interval', 'charts.update_cache', self.updateViewCache, hours = self.update_interval)
def automationView(self, force_update = False, **kwargs):
@@ -36,7 +36,6 @@ class Charts(Plugin):
'charts': charts
}
def updateViewCache(self):
if self.update_in_progress:
@@ -46,10 +45,14 @@ class Charts(Plugin):
if catched_charts:
return catched_charts
charts = []
try:
self.update_in_progress = True
charts = fireEvent('automation.get_chart_list', merge = True)
self.setCache('charts_cached', charts, timeout = 7200 * tryInt(self.conf('update_interval', default = 12)))
for chart in charts:
chart['hide_wanted'] = self.conf('hide_wanted')
chart['hide_library'] = self.conf('hide_library')
self.setCache('charts_cached', charts, timeout = self.update_interval * 3600)
except:
log.error('Failed refreshing charts')

View File

@@ -2,6 +2,8 @@ var Charts = new Class({
Implements: [Options, Events],
shown_once: false,
initialize: function(options){
var self = this;
self.setOptions(options);
@@ -22,9 +24,11 @@ var Charts = new Class({
'events': {
'click': function(e) {
e.preventDefault();
self.el.getChildren('div.chart').destroy();
self.el.getElements('.chart').destroy();
self.el_refreshing_text.show();
self.el_refresh_link.hide();
self.api_request = Api.request('charts.view', {
'data': { 'force_update': 1 },
'onComplete': self.fill.bind(self)
@@ -38,15 +42,12 @@ var Charts = new Class({
)
);
if( Cookie.read('suggestions_charts_menu_selected') === 'charts')
self.el.show();
if( Cookie.read('suggestions_charts_menu_selected') === 'charts'){
self.show();
}
else
self.el.hide();
self.api_request = Api.request('charts.view', {
'onComplete': self.fill.bind(self)
});
self.fireEvent.delay(0, self, 'created');
},
@@ -58,7 +59,7 @@ var Charts = new Class({
self.el_refreshing_text.hide();
self.el_refresh_link.show();
if(!json || json.count == 0){
if(!json || json.count === 0){
self.el_no_charts_enabled.show();
self.el_refresh_link.show();
self.el_refreshing_text.hide();
@@ -72,7 +73,7 @@ var Charts = new Class({
Object.each(json.charts, function(chart){
var c = new Element('div.chart').grab(
var c = new Element('div.chart.tiny_scroll').grab(
new Element('h3').grab( new Element('a', {
'text': chart.name,
'href': chart.url
@@ -83,17 +84,16 @@ var Charts = new Class({
Object.each(chart.list, function(movie){
var m = new Block.Search.MovieItem(movie, {
var m = new BlockSearchMovieItem(movie, {
'onAdded': function(){
self.afterAdded(m, movie)
self.afterAdded(m, movie);
}
});
var in_database_class = movie.in_wanted ? 'chart_in_wanted' : (movie.in_library ? 'chart_in_library' : ''),
var in_database_class = (chart.hide_wanted && movie.in_wanted) ? 'hidden' : (movie.in_wanted ? 'chart_in_wanted' : ((chart.hide_library && movie.in_library) ? 'hidden': (movie.in_library ? 'chart_in_library' : ''))),
in_database_title = movie.in_wanted ? 'Movie in wanted list' : (movie.in_library ? 'Movie in library' : '');
m.el
.addClass(in_database_class)
m.el.addClass(in_database_class)
.grab(
new Element('div.chart_number', {
'text': it++,
@@ -135,7 +135,7 @@ var Charts = new Class({
'text': plot,
'events': {
'click': function(){
this.toggleClass('full')
this.toggleClass('full');
}
}
}) : null
@@ -155,6 +155,24 @@ var Charts = new Class({
},
show: function(){
var self = this;
self.el.show();
if(!self.shown_once){
self.api_request = Api.request('charts.view', {
'onComplete': self.fill.bind(self)
});
self.shown_once = true;
}
},
hide: function(){
this.el.hide();
},
afterAdded: function(m){
$(m).getElement('div.chart_number')

View File

@@ -3,15 +3,21 @@
margin-bottom: 30px;
}
.charts > h2 {
height: 40px;
}
.charts > h2 {
height: 40px;
}
.charts .chart {
display: inline-block;
width: 50%;
vertical-align: top;
}
.charts .chart {
display: inline-block;
width: 50%;
vertical-align: top;
max-height: 510px;
scrollbar-base-color: #4e5969;
}
.charts .chart .media_result.hidden {
display: none;
}
.charts .refresh {
clear:both;
@@ -25,30 +31,30 @@
text-align:center;
}
.charts .refresh a {
text-align: center;
padding: 0;
display: none;
width: 30px;
height: 30px;
position: absolute;
right: 10px;
top: -40px;
opacity: .7;
}
.charts .refresh a {
text-align: center;
padding: 0;
display: none;
width: 30px;
height: 30px;
position: absolute;
right: 10px;
top: -40px;
opacity: .7;
}
.charts .refresh a:hover {
opacity: 1;
}
.charts .refresh a:hover {
opacity: 1;
}
.charts p.no_charts_enabled {
padding: 0.7em 1em;
display: none;
}
.charts p.no_charts_enabled {
padding: 0.7em 1em;
display: none;
}
.charts .chart h3 a {
color: #fff;
}
.charts .chart h3 a {
color: #fff;
}
.charts .chart .media_result {
@@ -137,7 +143,6 @@
padding: 0 3px 10px 0;
}
.charts .chart .media_result .data:before {
bottom: 0;
content: '';
display: block;
height: 10px;
@@ -259,3 +264,11 @@
height: 40px;
}
@media all and (max-width: 480px) {
.toggle_menu h2 {
font-size: 16px;
text-align: center;
height: 30px;
}
}

View File

@@ -1,4 +1,5 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.library.base import LibraryBase
@@ -17,7 +18,9 @@ class MovieLibraryPlugin(LibraryBase):
if media.get('type') != 'movie':
return
default_title = getTitle(media)
titles = media['info'].get('titles', [])
titles.insert(0, default_title)
# Add year identifier to titles
if include_year:

View File

@@ -1,9 +1,13 @@
import traceback
from bs4 import BeautifulSoup
from couchpotato import fireEvent
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'Bluray'
@@ -33,27 +37,49 @@ class Bluray(Automation, RSS):
try:
# Stop if the release year is before the minimal year
page_year = soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].h3.get_text().split(', ')[1]
if tryInt(page_year) < self.getMinimal('year'):
brk = False
h3s = soup.body.find_all('h3')
for h3 in h3s:
if h3.parent.name != 'a':
try:
page_year = tryInt(h3.get_text()[-4:])
if page_year > 0 and page_year < self.getMinimal('year'):
brk = True
except:
log.error('Failed determining page year: %s', traceback.format_exc())
brk = True
break
if brk:
break
for table in soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].find_all('table')[1:20]:
name = table.h3.get_text().lower().split('blu-ray')[0].strip()
year = table.small.get_text().split('|')[1].strip()
for h3 in h3s:
try:
if h3.parent.name == 'a':
name = h3.get_text().lower().split('blu-ray')[0].strip()
if not name.find('/') == -1: # make sure it is not a double movie release
continue
if not name.find('/') == -1: # make sure it is not a double movie release
continue
if tryInt(year) < self.getMinimal('year'):
continue
if not h3.parent.parent.small: # ignore non-movie tables
continue
imdb = self.search(name, year)
year = h3.parent.parent.small.get_text().split('|')[1].strip()
if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
if tryInt(year) < self.getMinimal('year'):
continue
imdb = self.search(name, year)
if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
except:
log.debug('Error parsing movie html: %s', traceback.format_exc())
break
except:
log.debug('Error loading page: %s', page)
log.debug('Error loading page %s: %s', (page, traceback.format_exc()))
break
self.conf('backlog', value = False)
@@ -82,6 +108,7 @@ class Bluray(Automation, RSS):
def getChartList(self):
# Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id)
movie_list = {'name': 'Blu-ray.com - New Releases', 'url': self.display_url, 'order': self.chart_order, 'list': []}
movie_ids = []
max_items = int(self.conf('max_items', section='charts', default=5))
rss_movies = self.getRSSData(self.rss_url)
@@ -95,6 +122,15 @@ class Bluray(Automation, RSS):
movie = self.search(name, year)
if movie:
if movie.get('imdb') in movie_ids:
continue
is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True)
if not is_movie:
continue
movie_ids.append(movie.get('imdb'))
movie_list['list'].append( movie )
if len(movie_list['list']) >= max_items:
break
@@ -123,7 +159,7 @@ config = [{
{
'name': 'backlog',
'advanced': True,
'description': 'Parses the history until the minimum movie year is reached. (Will be disabled once it has completed)',
'description': ('Parses the history until the minimum movie year is reached. (Takes a while)', 'Will be disabled once it has completed'),
'default': False,
'type': 'bool',
},

View File

@@ -0,0 +1,89 @@
import re
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'CrowdAI'
class CrowdAI(Automation, RSS):
    """Automation provider: queue movies from newznab RSS feeds once they
    pass a configurable number of grabs."""

    # Seconds between automation runs
    interval = 1800

    def getIMDBids(self):
        """Return IMDB ids of feed movies whose grab count exceeds the
        configured threshold and that pass the minimal-movie filter.

        Feed items with an unexpected description format are skipped
        instead of crashing the whole run.
        """
        movies = []

        # Map each configured feed url to its enabled flag (combined setting)
        urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))

        for url in urls:
            if not urls[url]:
                continue

            rss_movies = self.getRSSData(url)

            for movie in rss_movies:
                description = self.getTextElement(movie, 'description')

                # Grab count is carried in a newznab attribute element
                grabs = 0
                for item in movie:
                    if item.attrib.get('name') == 'grabs':
                        grabs = item.attrib.get('value')
                        break

                # tryInt instead of int: the attribute value is provider
                # supplied and may not be numeric
                if tryInt(grabs) > tryInt(self.conf('number_grabs')):

                    # Guard: re.match returns None when the description
                    # doesn't have the expected "Title: ... (year)" markup
                    title_match = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description)
                    year_match = re.match(r'.*Year: (\d{4}).*', description)
                    if not title_match or not year_match:
                        log.debug('Unexpected description format, skipping: %s', description)
                        continue

                    title = title_match.group(1)
                    year = year_match.group(1)
                    log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))

                    imdb = self.search(title, year)

                    if imdb and self.isMinimalMovie(imdb):
                        movies.append(imdb['imdb'])

        return movies
# Settings definition, rendered under Settings > Automation.
config = [{
    'name': 'crowdai',
    'groups': [
        {
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'crowdai_automation',
            'label': 'CrowdAI',
            # Fixed garbled wording: "copy the copy paste" -> "copy and paste"
            'description': 'Imports from any newznab powered NZB provider\'s RSS feed depending on the number of grabs per movie. Go to your newznab site and find the RSS section. Then copy and paste the link under "Movies > x264 feed" here.',
            'options': [
                {
                    # Master on/off switch for this provider
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
                {
                    # Combined with automation_urls below; '1' enables the feed
                    'name': 'automation_urls_use',
                    'label': 'Use',
                    'default': '1',
                },
                {
                    'name': 'automation_urls',
                    'label': 'url',
                    'type': 'combined',
                    'combine': ['automation_urls_use', 'automation_urls'],
                    'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100',
                },
                {
                    # Minimum grab count before a movie is queued
                    'name': 'number_grabs',
                    'default': '500',
                    'label': 'Grab threshold',
                    'description': 'Number of grabs required',
                },
            ],
        },
    ],
}]

View File

@@ -3,6 +3,7 @@ import re
from bs4 import BeautifulSoup
from couchpotato import fireEvent
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt
from couchpotato.core.logger import CPLog
@@ -28,6 +29,39 @@ class IMDBBase(Automation, RSS):
def getInfo(self, imdb_id):
return fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)
def getFromURL(self, url):
    """Scrape an IMDB list page and return all IMDB ids found on it.

    Tries the old "list compact" markup first; falls back to parsing the
    "main" container with BeautifulSoup for newer layouts. Returns an empty
    list when the page doesn't have the expected structure.
    """
    log.debug('Getting IMDBs from: %s', url)
    html = self.getHTMLData(url)

    try:
        # Old markup: the list sits between "list compact" and the pager div
        split = splitString(html, split_on = "<div class=\"list compact\">")[1]
        html = splitString(split, split_on = "<div class=\"pages\">")[0]
    except:
        try:
            # Newer markup: look inside the "main" container for one of the
            # known list container classes
            split = splitString(html, split_on = "<div id=\"main\">")
            if len(split) < 2:
                log.error('Failed parsing IMDB page "%s", unexpected html.', url)
                return []

            html = BeautifulSoup(split[1])
            for x in ['list compact', 'lister', 'list detail sub-list']:
                html2 = html.find('div', attrs = {
                    'class': x
                })

                if html2:
                    # Re-join the matched container into a plain html string.
                    # Use a distinct comprehension variable: reusing `x` here
                    # clobbered the loop variable under Python 2.
                    html = html2.contents
                    html = ''.join([str(part) for part in html])
                    break
        except:
            log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))

    html = ss(html)
    imdbs = getImdb(html, multiple = True) if html else []

    return imdbs
class IMDBWatchlist(IMDBBase):
@@ -65,16 +99,7 @@ class IMDBWatchlist(IMDBBase):
try:
w_url = '%s&start=%s' % (watchlist_url, start)
log.debug('Started IMDB watchlists: %s', w_url)
html = self.getHTMLData(w_url)
try:
split = splitString(html, split_on="<div class=\"list compact\">")[1]
html = splitString(split, split_on="<div class=\"pages\">")[0]
except:
pass
imdbs = getImdb(html, multiple = True) if html else []
imdbs = self.getFromURL(w_url)
for imdb in imdbs:
if imdb not in movies:
@@ -85,13 +110,14 @@ class IMDBWatchlist(IMDBBase):
log.debug('Found %s movies on %s', (len(imdbs), w_url))
if len(imdbs) < 250:
if len(imdbs) < 225:
break
start += 250
start = len(movies)
except:
log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))
break
return movies
@@ -100,95 +126,88 @@ class IMDBAutomation(IMDBBase):
enabled_option = 'automation_providers_enabled'
chart_urls = {
'theater': 'http://www.imdb.com/movies-in-theaters/',
'top250': 'http://www.imdb.com/chart/top',
'boxoffice': 'http://www.imdb.com/chart/',
charts = {
'theater': {
'order': 1,
'name': 'IMDB - Movies in Theaters',
'url': 'http://www.imdb.com/movies-in-theaters/',
},
'boxoffice': {
'order': 2,
'name': 'IMDB - Box Office',
'url': 'http://www.imdb.com/boxoffice/',
},
'rentals': {
'order': 3,
'name': 'IMDB - Top DVD rentals',
'url': 'http://www.imdb.com/boxoffice/rentals',
'type': 'json',
},
'top250': {
'order': 4,
'name': 'IMDB - Top 250 Movies',
'url': 'http://www.imdb.com/chart/top',
},
}
chart_names = {
'theater': 'IMDB - Movies in Theaters',
'top250': 'IMDB - Top 250 Movies',
'boxoffice': 'IMDB - Box Office',
}
chart_order = {
'theater': 2,
'top250': 4,
'boxoffice': 3,
}
first_table = ['boxoffice']
def getIMDBids(self):
movies = []
for url in self.chart_urls:
if self.conf('automation_charts_%s' % url):
data = self.getHTMLData(self.chart_urls[url])
if data:
html = BeautifulSoup(data)
for name in self.charts:
chart = self.charts[name]
url = chart.get('url')
try:
result_div = html.find('div', attrs = {'id': 'main'})
if self.conf('automation_charts_%s' % name):
imdb_ids = self.getFromURL(url)
try:
if url in self.first_table:
table = result_div.find('table')
result_div = table if table else result_div
except:
pass
try:
for imdb_id in imdb_ids:
info = self.getInfo(imdb_id)
if info and self.isMinimalMovie(info):
movies.append(imdb_id)
imdb_ids = getImdb(str(result_div), multiple = True)
if self.shuttingDown():
break
for imdb_id in imdb_ids:
info = self.getInfo(imdb_id)
if info and self.isMinimalMovie(info):
movies.append(imdb_id)
if self.shuttingDown():
break
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
return movies
def getChartList(self):
# Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id)
movie_lists = []
max_items = int(self.conf('max_items', section='charts', default=5))
max_items = int(self.conf('max_items', section = 'charts', default=5))
for url in self.chart_urls:
if self.conf('chart_display_%s' % url):
movie_list = {'name': self.chart_names[url], 'url': self.chart_urls[url], 'order': self.chart_order[url], 'list': []}
data = self.getHTMLData(self.chart_urls[url])
if data:
html = BeautifulSoup(data)
for name in self.charts:
chart = self.charts[name].copy()
url = chart.get('url')
try:
result_div = html.find('div', attrs = {'id': 'main'})
if self.conf('chart_display_%s' % name):
try:
if url in self.first_table:
table = result_div.find('table')
result_div = table if table else result_div
except:
pass
chart['list'] = []
imdb_ids = getImdb(str(result_div), multiple = True)
imdb_ids = self.getFromURL(url)
for imdb_id in imdb_ids[0:max_items]:
info = self.getInfo(imdb_id)
movie_list['list'].append(info)
try:
for imdb_id in imdb_ids[0:max_items]:
if self.shuttingDown():
break
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
is_movie = fireEvent('movie.is_movie', identifier = imdb_id, single = True)
if not is_movie:
continue
if movie_list['list']:
movie_lists.append(movie_list)
info = self.getInfo(imdb_id)
chart['list'].append(info)
if self.shuttingDown():
break
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
if chart['list']:
movie_lists.append(chart)
return movie_lists
@@ -240,12 +259,19 @@ config = [{
'description': 'New Movies <a href="http://www.imdb.com/movies-in-theaters/">In-Theaters</a> chart',
'default': True,
},
{
'name': 'automation_charts_rentals',
'type': 'bool',
'label': 'DVD Rentals',
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals">rentals</a> chart',
'default': True,
},
{
'name': 'automation_charts_top250',
'type': 'bool',
'label': 'TOP 250',
'description': 'IMDB <a href="http://www.imdb.com/chart/top/">TOP 250</a> chart',
'default': True,
'default': False,
},
{
'name': 'automation_charts_boxoffice',
@@ -282,6 +308,13 @@ config = [{
'description': 'IMDB <a href="http://www.imdb.com/chart/top/">TOP 250</a> chart',
'default': False,
},
{
'name': 'chart_display_rentals',
'type': 'bool',
'label': 'DVD Rentals',
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals">rentals</a> chart',
'default': True,
},
{
'name': 'chart_display_boxoffice',
'type': 'bool',

View File

@@ -48,11 +48,12 @@ class Letterboxd(Automation):
soup = BeautifulSoup(self.getHTMLData(self.url % username))
for movie in soup.find_all('a', attrs = {'class': 'frame'}):
match = removeEmpty(self.pattern.split(movie['title']))
for movie in soup.find_all('li', attrs = {'class': 'poster-container'}):
img = movie.find('img', movie)
title = img.get('alt')
movies.append({
'title': match[0],
'year': match[1]
'title': title
})
return movies

View File

@@ -21,11 +21,15 @@ class Moviemeter(Automation, RSS):
for movie in rss_movies:
name_year = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
imdb = self.search(name_year.get('name'), name_year.get('year'))
title = self.getTextElement(movie, 'title')
name_year = fireEvent('scanner.name_year', title, single = True)
if name_year.get('name') and name_year.get('year'):
imdb = self.search(name_year.get('name'), name_year.get('year'))
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
else:
log.error('Failed getting name and year from: %s', title)
return movies

View File

@@ -0,0 +1,47 @@
from couchpotato import fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'PopularMovies'
class PopularMovies(Automation):
    """Automation provider that imports Steven Lu's popular-movies JSON list."""

    # Seconds between automation runs
    interval = 1800
    url = 'https://s3.amazonaws.com/popular-movies/movies.json'

    def getIMDBids(self):
        """Return IMDB ids from the feed that pass the minimal-movie filter.

        Robust against a failed download (getJsonData returning nothing) and
        a payload without a 'movies' key — both yield an empty result instead
        of raising.
        """
        movies = []

        retrieved_movies = self.getJsonData(self.url)

        # Guard: download may fail (falsy) or 'movies' may be missing/None
        for movie in (retrieved_movies or {}).get('movies') or []:
            imdb_id = movie.get('imdb_id')
            info = fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)
            if self.isMinimalMovie(info):
                movies.append(imdb_id)

        return movies
# Settings definition, rendered under Settings > Automation.
config = [{
    'name': 'popularmovies',
    'groups': [
        {
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'popularmovies_automation',
            'label': 'Popular Movies',
            'description': 'Imports the <a href="http://movies.stevenlu.com/">top titles of movies that have been in theaters</a>. Script provided by <a href="https://github.com/sjlu/popular-movies">Steven Lu</a>',
            'options': [
                {
                    # Master on/off switch for this provider
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]

View File

@@ -39,15 +39,14 @@ class Rottentomatoes(Automation, RSS):
if result:
log.info2('Something smells...')
rating = tryInt(self.getTextElement(movie, rating_tag))
name = result.group(0)
print rating, tryInt(self.conf('tomatometer_percent'))
if rating < tryInt(self.conf('tomatometer_percent')):
log.info2('%s seems to be rotten...', name)
else:
log.info2('Found %s fresh enough movies, enqueuing: %s', (rating, name))
log.info2('Found %s with fresh rating %s', (name, rating))
year = datetime.datetime.now().strftime("%Y")
imdb = self.search(name, year)

View File

@@ -26,7 +26,14 @@ class MovieResultModifier(Plugin):
'backdrop': [],
'poster_original': [],
'backdrop_original': [],
'actors': {}
'actors': {},
'landscape': [],
'logo': [],
'clear_art': [],
'disc_art': [],
'banner': [],
'extra_thumbs': [],
'extra_fanart': []
},
'runtime': 0,
'plot': '',

View File

@@ -2,7 +2,7 @@ import base64
import time
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
from couchpotato.environment import Env
@@ -29,7 +29,7 @@ class CouchPotatoApi(MovieProvider):
api_version = 1
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 1)
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info.release_date', self.getReleaseDate)
addEvent('info.search', self.search, priority = 1)
@@ -66,15 +66,18 @@ class CouchPotatoApi(MovieProvider):
if not name:
return
name_enc = base64.b64encode(name)
name_enc = base64.b64encode(ss(name))
return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders())
def isMovie(self, identifier = None):
def isMovie(self, identifier = None, adding = False):
if not identifier:
return
data = self.getJsonData(self.urls['is_movie'] % identifier, headers = self.getRequestHeaders())
url = self.urls['is_movie'] % identifier
url += '?adding=1' if adding else ''
data = self.getJsonData(url, headers = self.getRequestHeaders())
if data:
return data.get('is_movie', True)

View File

@@ -0,0 +1,133 @@
import traceback
from couchpotato import tryInt
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
from requests import HTTPError
log = CPLog(__name__)
autoload = 'FanartTV'
class FanartTV(MovieProvider):
    """Movie artwork provider backed by the fanart.tv v3 web service.

    Hooks into the 'movie.info' event and, for a given movie identifier,
    returns extra artwork (landscape thumbs, logos, clear/disc art, banners
    and extra fanart) to be merged into the movie's image data.
    """

    urls = {
        'api': 'http://webservice.fanart.tv/v3/movies/%s?api_key=b28b14e9be662e027cfbc7c3dd600405'
    }

    # One backdrop is used as the main one; up to this many extra fanarts are kept.
    MAX_EXTRAFANART = 20

    http_time_between_calls = 0

    def __init__(self):
        addEvent('movie.info', self.getArt, priority = 1)

    def getArt(self, identifier = None, extended = True, **kwargs):
        """Fetch artwork for `identifier` (presumably an IMDB id — the API
        accepts it directly; TODO confirm against callers).

        Returns {'images': {...}} on success, {} when there is no identifier,
        extended info was not requested, or the lookup failed hard.
        """
        if not identifier or not extended:
            return {}

        images = {}

        try:
            url = self.urls['api'] % identifier
            fanart_data = self.getJsonData(url, show_error = False)

            if fanart_data:
                log.debug('Found images for %s', fanart_data.get('name'))
                images = self._parseMovie(fanart_data)
        except HTTPError as e:
            # Missing art on fanart.tv is common and not an error worth shouting about.
            log.debug('Failed getting extra art for %s: %s',
                      (identifier, e))
        except:
            log.error('Failed getting extra art for %s: %s',
                      (identifier, traceback.format_exc()))
            return {}

        return {
            'images': images
        }

    def _parseMovie(self, movie):
        """Map a fanart.tv movie payload onto CouchPotato's image categories."""
        images = {
            'landscape': self._getMultImages(movie.get('moviethumb', []), 1),
            'logo': [],
            'disc_art': self._getMultImages(self._trimDiscs(movie.get('moviedisc', [])), 1),
            'clear_art': self._getMultImages(movie.get('hdmovieart', []), 1),
            'banner': self._getMultImages(movie.get('moviebanner', []), 1),
            'extra_fanart': [],
        }

        # Fall back from HD to SD variants when the HD key yielded nothing.
        if len(images['clear_art']) == 0:
            images['clear_art'] = self._getMultImages(movie.get('movieart', []), 1)

        images['logo'] = self._getMultImages(movie.get('hdmovielogo', []), 1)
        if len(images['logo']) == 0:
            images['logo'] = self._getMultImages(movie.get('movielogo', []), 1)

        # First background becomes the main backdrop, the rest extra fanart.
        fanarts = self._getMultImages(movie.get('moviebackground', []), self.MAX_EXTRAFANART + 1)
        if fanarts:
            images['backdrop_original'] = [fanarts[0]]
            images['extra_fanart'] = fanarts[1:]

        return images

    def _trimDiscs(self, disc_images):
        """Return only the bluray disc images from `disc_images`.

        If no bluray images exist, the original list is returned unchanged.
        """
        trimmed = [disc for disc in disc_images if disc.get('disc_type') == 'bluray']
        return trimmed if trimmed else disc_images

    def _getImage(self, images):
        """Return the url of the most-liked image, or None for an empty list."""
        if not images:
            return None
        # max() returns the first maximal element, matching a strict '>' scan.
        best = max(images, key = lambda image: tryInt(image.get('likes')))
        return best.get('url') or best.get('href')

    def _getMultImages(self, images, n):
        """Return the urls of the `n` best-liked English images.

        If n < 0, all English images are returned. Images without a usable
        url are skipped but still count toward the `n` considered.
        """
        pool = [image for image in images if image.get('lang') == 'en']

        # Stable descending sort is equivalent to repeatedly extracting the
        # first strict maximum, but O(n log n) instead of O(n^2).
        ranked = sorted(pool, key = lambda image: tryInt(image.get('likes')), reverse = True)
        if n >= 0:
            ranked = ranked[:n]

        image_urls = []
        for image in ranked:
            url = image.get('url') or image.get('href')
            if url:
                image_urls.append(url)

        return image_urls

    def isDisabled(self):
        # NOTE(review): checks the configured api_key even though self.urls
        # embeds a hardcoded key — confirm which one the provider should use.
        if self.conf('api_key') == '':
            log.error('No API key provided.')
            return True
        return False

View File

@@ -2,6 +2,7 @@ import json
import re
import traceback
from couchpotato import Env
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString
@@ -17,8 +18,8 @@ autoload = 'OMDBAPI'
class OMDBAPI(MovieProvider):
urls = {
'search': 'http://www.omdbapi.com/?%s',
'info': 'http://www.omdbapi.com/?i=%s',
'search': 'http://www.omdbapi.com/?type=movie&%s',
'info': 'http://www.omdbapi.com/?type=movie&i=%s',
}
http_time_between_calls = 0
@@ -38,7 +39,8 @@ class OMDBAPI(MovieProvider):
}
cache_key = 'omdbapi.cache.%s' % q
cached = self.getCache(cache_key, self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}), timeout = 3)
url = self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')})
cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
if cached:
result = self.parseMovie(cached)
@@ -56,7 +58,7 @@ class OMDBAPI(MovieProvider):
return {}
cache_key = 'omdbapi.cache.%s' % identifier
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3)
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
if cached:
result = self.parseMovie(cached)

View File

@@ -1,11 +1,10 @@
import traceback
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, ss, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
import tmdb3
log = CPLog(__name__)
@@ -14,52 +13,65 @@ autoload = 'TheMovieDb'
class TheMovieDb(MovieProvider):
http_time_between_calls = .35
configuration = {
'images': {
'secure_base_url': 'https://image.tmdb.org/t/p/',
},
}
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('info.search', self.search, priority = 3)
addEvent('movie.search', self.search, priority = 3)
addEvent('movie.info', self.getInfo, priority = 3)
addEvent('movie.info_by_tmdb', self.getInfo)
addEvent('app.load', self.config)
# Configure TMDB settings
tmdb3.set_key(self.conf('api_key'))
tmdb3.set_cache('null')
def config(self):
configuration = self.request('configuration')
if configuration:
self.configuration = configuration
def search(self, q, limit = 12):
def search(self, q, limit = 3):
""" Find movie by name """
if self.isDisabled():
return False
search_string = simplifyString(q)
cache_key = 'tmdb.cache.%s.%s' % (search_string, limit)
results = self.getCache(cache_key)
log.debug('Searching for movie: %s', q)
if not results:
log.debug('Searching for movie: %s', q)
raw = None
try:
name_year = fireEvent('scanner.name_year', q, single = True)
raw = self.request('search/movie', {
'query': name_year.get('name', q),
'year': name_year.get('year'),
'search_type': 'ngram' if limit > 1 else 'phrase'
}, return_key = 'results')
except:
log.error('Failed searching TMDB for "%s": %s', (q, traceback.format_exc()))
raw = None
results = []
if raw:
try:
raw = tmdb3.searchMovie(search_string)
except:
log.error('Failed searching TMDB for "%s": %s', (search_string, traceback.format_exc()))
nr = 0
results = []
if raw:
try:
nr = 0
for movie in raw:
parsed_movie = self.parseMovie(movie, extended = False)
if parsed_movie:
results.append(parsed_movie)
for movie in raw:
results.append(self.parseMovie(movie, extended = False))
nr += 1
if nr == limit:
break
nr += 1
if nr == limit:
break
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
self.setCache(cache_key, results)
return results
except SyntaxError as e:
log.error('Failed to parse XML response: %s', e)
return False
return results
except SyntaxError as e:
log.error('Failed to parse XML response: %s', e)
return False
return results
@@ -68,97 +80,91 @@ class TheMovieDb(MovieProvider):
if not identifier:
return {}
cache_key = 'tmdb.cache.%s%s' % (identifier, '.ex' if extended else '')
result = self.getCache(cache_key)
result = self.parseMovie({
'id': identifier
}, extended = extended)
if not result:
try:
log.debug('Getting info: %s', cache_key)
# noinspection PyArgumentList
movie = tmdb3.Movie(identifier)
try: exists = movie.title is not None
except: exists = False
if exists:
result = self.parseMovie(movie, extended = extended)
self.setCache(cache_key, result)
else:
result = {}
except:
log.error('Failed getting info for %s: %s', (identifier, traceback.format_exc()))
return result
return result or {}
def parseMovie(self, movie, extended = True):
cache_key = 'tmdb.cache.%s%s' % (movie.id, '.ex' if extended else '')
movie_data = self.getCache(cache_key)
# Do request, append other items
movie = self.request('movie/%s' % movie.get('id'), {
'append_to_response': 'alternative_titles' + (',images,casts' if extended else '')
})
if not movie:
return
if not movie_data:
# Images
poster = self.getImage(movie, type = 'poster', size = 'w154')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original') if extended else []
# Images
poster = self.getImage(movie, type = 'poster', size = 'poster')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {},
'extra_thumbs': extra_thumbs
}
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {}
}
# Genres
try:
genres = [genre.get('name') for genre in movie.get('genres', [])]
except:
genres = []
# Genres
try:
genres = [genre.name for genre in movie.genres]
except:
genres = []
# 1900 is the same as None
year = str(movie.get('release_date') or '')[:4]
if not movie.get('release_date') or year == '1900' or year.lower() == 'none':
year = None
# 1900 is the same as None
year = str(movie.releasedate or '')[:4]
if not movie.releasedate or year == '1900' or year.lower() == 'none':
year = None
# Gather actors data
actors = {}
if extended:
# Gather actors data
actors = {}
if extended:
for cast_item in movie.cast:
try:
actors[toUnicode(cast_item.name)] = toUnicode(cast_item.character)
images['actors'][toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original')
except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
# Full data
cast = movie.get('casts', {}).get('cast', [])
movie_data = {
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.id,
'titles': [toUnicode(movie.title)],
'original_title': movie.originaltitle,
'images': images,
'imdb': movie.imdb,
'runtime': movie.runtime,
'released': str(movie.releasedate),
'year': tryInt(year, None),
'plot': movie.overview,
'genres': genres,
'collection': getattr(movie.collection, 'name', None),
'actor_roles': actors
}
for cast_item in cast:
try:
actors[toUnicode(cast_item.get('name'))] = toUnicode(cast_item.get('character'))
images['actors'][toUnicode(cast_item.get('name'))] = self.getImage(cast_item, type = 'profile', size = 'original')
except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
movie_data = dict((k, v) for k, v in movie_data.items() if v)
movie_data = {
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.get('id'),
'titles': [toUnicode(movie.get('title'))],
'original_title': movie.get('original_title'),
'images': images,
'imdb': movie.get('imdb_id'),
'runtime': movie.get('runtime'),
'released': str(movie.get('release_date')),
'year': tryInt(year, None),
'plot': movie.get('overview'),
'genres': genres,
'collection': getattr(movie.get('belongs_to_collection'), 'name', None),
'actor_roles': actors
}
# Add alternative names
if extended:
movie_data['titles'].append(movie.originaltitle)
for alt in movie.alternate_titles:
alt_name = alt.title
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
movie_data['titles'].append(alt_name)
movie_data = dict((k, v) for k, v in movie_data.items() if v)
# Cache movie parsed
self.setCache(cache_key, movie_data)
# Add alternative names
if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']:
movie_data['titles'].append(movie_data['original_title'])
# Add alternative titles
alternate_titles = movie.get('alternative_titles', {}).get('titles', [])
for alt in alternate_titles:
alt_name = alt.get('title')
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
movie_data['titles'].append(alt_name)
return movie_data
@@ -166,12 +172,41 @@ class TheMovieDb(MovieProvider):
image_url = ''
try:
image_url = getattr(movie, type).geturl(size = size)
path = movie.get('%s_path' % type)
image_url = '%s%s%s' % (self.configuration['images']['secure_base_url'], size, path)
except:
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_url
def getMultImages(self, movie, type = 'backdrops', size = 'original'):
image_urls = []
try:
for image in movie.get('images', {}).get(type, [])[1:5]:
image_urls.append(self.getImage(image, 'file', size))
except:
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_urls
def request(self, call = '', params = {}, return_key = None):
params = dict((k, v) for k, v in params.items() if v)
params = tryUrlencode(params)
try:
url = 'http://api.themoviedb.org/3/%s?api_key=%s%s' % (call, self.conf('api_key'), '&%s' % params if params else '')
data = self.getJsonData(url, show_error = False)
except:
log.debug('Movie not found: %s, %s', (call, params))
data = None
if data and return_key and return_key in data:
data = data.get(return_key)
return data
def isDisabled(self):
if self.conf('api_key') == '':
log.error('No API key provided.')

Some files were not shown because too many files have changed in this diff Show More