Compare commits

..

178 Commits

Author SHA1 Message Date
Ruud
d233e4d22e Merge branch 'refs/heads/develop' into desktop 2013-01-26 13:54:56 +01:00
Ruud
a60e9dc4c3 Encode nzbname before sending it to NZBGet. fix #1321 2013-01-25 21:29:25 +01:00
Ruud
b168c1364d Blackhole error on manual download. fix #1351 2013-01-25 21:08:19 +01:00
Ruud
23893dbcb9 Merge branch 'refs/heads/develop' into desktop 2013-01-25 20:13:58 +01:00
Ruud
14fffda3ff Don't add signal handlers when daemonized. fix #1346 2013-01-25 15:26:06 +01:00
Ruud
51364a3c25 Transmission params not set properly. fix #1344 2013-01-25 13:12:10 +01:00
Ruud
c6642ffeb7 Allow 1080p in brrip releases. fixes #1339 2013-01-25 12:57:41 +01:00
Ruud
9fe9ccf0ad Open browser didn't work. 2013-01-24 23:51:11 +01:00
Ruud
cb92b00534 Manage setting instead of getting folders. fix #1307 2013-01-24 23:33:48 +01:00
Ruud
7d3780133f Missing download function. fix #1337 2013-01-24 23:02:36 +01:00
Ruud
f23b9d7cb9 Use host from config again. fix #1329 2013-01-24 22:56:33 +01:00
Ruud
506871b506 One up 2013-01-23 23:10:55 +01:00
Ruud
6115917660 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2013-01-23 22:57:07 +01:00
Ruud
44b78f8d2f Version up 2013-01-23 22:56:23 +01:00
Ruud
21df8819d3 Merge branch 'refs/heads/develop' into desktop 2013-01-23 22:55:09 +01:00
Ruud
cad9bfae9f Transmission: Don't use ratio when not filled in. 2013-01-23 22:54:02 +01:00
Ruud
749075b4cb Setting cleanup 2013-01-23 22:50:31 +01:00
Ruud
0456a1e820 Typo 2013-01-23 22:29:02 +01:00
ikkemaniac
35a9739ec5 Improve debugging for email notifications 2013-01-23 22:26:09 +01:00
ikkemaniac
2a451c255e Rename var for naming consistency 2013-01-23 22:26:09 +01:00
Ruud
7c38ad1c00 Remove non-int backup folders. closes #1298 2013-01-23 22:24:09 +01:00
Ruud
647159e549 Fix and cleanup wizard. fix #1324 2013-01-23 22:16:04 +01:00
Ruud
7cc55c21b6 Shutdown cleanly on quit process 2013-01-22 23:43:15 +01:00
Ruud
89c38f5aa4 Use host from config again. fix #1278 2013-01-22 23:24:09 +01:00
Ruud
5f428649c3 Writing larger files fails on Windows. fix #1281 2013-01-22 23:20:55 +01:00
Ruud
8ed2a99830 Don't try to parse None on IMDB watchlist 2013-01-22 22:55:06 +01:00
Ruud
1a89d551dc Contribute update 2013-01-22 22:41:17 +01:00
Ruud
9d633910f6 NZBsRus fixes 2013-01-22 22:35:44 +01:00
Ruud
54ea22e9b6 Only remove available status releases when refreshing 2013-01-22 22:02:23 +01:00
Ruud
fb3f3e11f6 Merge branch 'refs/heads/develop' into desktop 2013-01-22 21:40:40 +01:00
Ruud
f84b23eecc Only fire enabled downloader failed event 2013-01-22 21:35:11 +01:00
Ruud
6ea045ddd3 Remove failed wrong parameters 2013-01-21 22:37:58 +01:00
Ruud
f8b4e75b74 If data is empty, assume correct type for downloaders 2013-01-20 21:11:36 +01:00
Ruud
faaf351662 Group providers together 2013-01-20 20:50:50 +01:00
Ruud
f41fc794c1 Don't search full disk when no manage folders are filled. fix #1304 2013-01-19 01:30:56 +01:00
Ruud
0f789b5b40 NZBsRus rss different item path. fix #1301 2013-01-19 01:17:57 +01:00
Ruud
d2496d768d Don't reorder based on omdb 2013-01-19 01:04:24 +01:00
Ruud
b93488f025 Use default timeout for CP calls 2013-01-19 00:56:03 +01:00
Ruud
d4de68ef86 Add page nr after 2013-01-19 00:51:20 +01:00
Ruud
61a0bb8ec6 Don't use quality identifier in title searches 2013-01-19 00:45:08 +01:00
Ruud
fe52ac7203 Use default title as email subject 2013-01-16 19:45:39 +01:00
Cybertinus
4447b7611e Added the e-mail notifier 2013-01-15 21:15:17 +01:00
Ruud
178c8942c3 Merge branch 'refs/heads/develop' into desktop 2013-01-14 19:54:22 +01:00
Ruud
4fe9f9e42f Log subtitle search 2013-01-13 10:08:59 +01:00
Ruud
71b22345bc Make sure downloaders and providers match
Remove releases not found anymore on new searches
2013-01-12 16:07:11 +01:00
Ruud
a0dc5c075a Remove print 2013-01-12 16:05:54 +01:00
Ruud
a264c75f8c Remove releases that aren't found in latest search 2013-01-11 22:31:19 +01:00
Ruud
fcc8a71eae NZBget version check 2013-01-11 21:45:25 +01:00
Ruud
cdd681ad48 XBMC icon image 2013-01-11 21:02:22 +01:00
Ruud
36e5c49147 TorrentDay: decoding error. fix #1260 2013-01-11 20:50:38 +01:00
Ruud
300f4738a0 Randomize wanted search. fix #1261 2013-01-11 20:12:12 +01:00
Ruud
9447833653 RT cleanup 2013-01-10 07:42:15 +01:00
Ruud
df53d0c578 Rename RT folder 2013-01-10 07:29:48 +01:00
Kris Kater
17eaba3e2a Added rottentomatoes automation 2013-01-10 07:28:39 +01:00
Ruud
0f389f18cb NZBGet: Don't use priority when set to normal. 2013-01-09 23:41:47 +01:00
Prinz23
28ce083f48 nzbget priority support 2013-01-09 23:34:03 +01:00
Ruud
cfaffe2bcb SSL support 2013-01-09 23:29:54 +01:00
Ruud
432852cf5d Enable added combined list by default 2013-01-09 21:20:27 +01:00
Ruud
3c728608e9 FTDWorld use simple login
Add size
2013-01-09 20:42:33 +01:00
Ruud
8892ace3c2 OMGWTF proper link in settings 2013-01-09 20:26:02 +01:00
Ruud
87574a1810 Allow spotweb url without rewriterule. fix #1248 2013-01-08 20:28:59 +01:00
Ruud
14e0219e62 Urlencode spotweb id. fix #1213 2013-01-07 23:11:45 +01:00
Ruud
51e747049d One up 2013-01-07 23:10:42 +01:00
Ruud
0582f7d694 Urlencode spotweb id. fix #1213 2013-01-07 23:10:06 +01:00
Ruud
fa7cac7538 Merge branch 'refs/heads/develop' into desktop 2013-01-07 22:41:55 +01:00
Ruud
ec857a9b3d FTDWorld: Check for login success 2013-01-07 22:31:42 +01:00
Ruud
4d32b0b16d Use FTDWorld temp api. closes #1243 2013-01-07 22:21:44 +01:00
Ruud
ca08287cff Ignore Growl timeout. fixes #1240 2013-01-07 20:54:21 +01:00
Ruud
36fee69843 XBMC notifier for Frodo & Eden 2013-01-06 23:06:38 +01:00
ikkemaniac
c5cae5ab9b add XBMC v11 Eden notifications support
This is my approach on working with Eden, maybe a little late since Frodo is almost released, but better late then never.

- First detect the JSON-RPC version XBMC is running (once per boot of CouchPotatoServer on the first notification, except for sending test message then the JSON version is always checked).
- Set a variable indicating whether or not to use JSON (or normal http).
- If JSON should be used, proceed as before this commit.
- If normal-http should be used, use 'notifyXBMCnoJSON' func
- 'notifyXBMCnoJSON' just opens a specific XBMC api url, unfortunately importing urllib for this was necessary to escape the message strings.

TODO: support multiple XBMC hosts, right now the last host in the hosts array will set the 'useJSONnotifications' var.

Conflicts:

	couchpotato/core/notifications/xbmc/main.py

Conflicts:
	couchpotato/core/notifications/xbmc/main.py
2013-01-06 22:52:59 +01:00
ikkemaniac
9bd5688fb9 Remove services that are not required for couchpotato to run 2013-01-06 22:35:35 +01:00
ikkemaniac
1993c2b6cb Redo FreeBSD init script completely.
Use rc.subr functions and proper rc.conf variables.
2013-01-06 22:35:35 +01:00
ikkemaniac
acc8ed2092 Acutally use config_file variable 2013-01-06 22:35:35 +01:00
ikkemaniac
7b4924dd7a Don't influence the PATH variable in FreeBSD rc script
Don't prepend the PATH variable, it's ugly, unwanted and unnecessary. Call binaries with their full path.
2013-01-06 22:35:35 +01:00
ikkemaniac
3a2861f72a fix FreeBSD init script
-add actual start command
-fix verify_couchpotato_pid function, 'ps' command failed if PID var was empty
-fix verify_couchpotato_pid usage, acutally use the return of verify_couchpotato_pid in the 'stop' routine
2013-01-06 22:35:35 +01:00
Ruud
4779265b43 Change xbmc description 2013-01-06 11:52:05 +01:00
ikkemaniac
f8a46ebe6d clearly state XBMC version dependency for notifications 2013-01-06 11:33:53 +01:00
ikkemaniac
383ec7e6f5 check for XBMC JSON-RPC version and improve logging info 2013-01-06 11:33:53 +01:00
Ruud
dd9118292d Newznab log error 2013-01-06 11:20:13 +01:00
Ruud
4d0f8eb4ac Default add top25 to itunes automation 2013-01-04 20:26:36 +01:00
Ruud
637b21cc68 iTunes automation cleanup 2013-01-04 20:20:52 +01:00
Joseph Gardner
da429f0cb8 Adding itunes automation provider 2013-01-04 20:15:30 +01:00
Ruud
41c2845328 Toasty cleanup 2013-01-04 20:14:25 +01:00
Travis La Marr
c2453bb070 Added Windows Phone SuperToasty Notifier 2013-01-04 19:05:58 +01:00
Ruud
a3a2c8da8e Typo 2013-01-04 19:03:36 +01:00
Ruud
a1d4bab793 NZBVortex: Delete failed option 2013-01-02 14:11:40 +01:00
Ruud
d314a9b5b3 Also check status on manual 2013-01-02 14:11:11 +01:00
Ruud
9a60f6001a Check snatched on startup 2013-01-02 14:10:41 +01:00
Ruud
96a39dbf60 Link to downloaders 2013-01-02 13:52:59 +01:00
Ruud
015675750c Properly use imdb_results 2013-01-02 13:43:24 +01:00
Ruud
bf4dc62f54 NZBVortex support. closes #1204 2013-01-02 13:31:14 +01:00
Ruud
c2382ade05 Use provider for downloader 2013-01-02 13:29:44 +01:00
Ruud
2f65545086 Extend opener with multipart 2013-01-02 13:29:28 +01:00
Ruud
3aea2cd968 Simpler CP tag regex 2013-01-02 13:29:08 +01:00
Ruud
f30cb9185c Add nzbgeek to the defaults of newznab 2013-01-02 10:40:08 +01:00
Ruud
615468e8e6 Make nzbget password a password field. closes #1205 2012-12-31 21:31:11 +01:00
Ruud
0cbee01024 Don't use unicode when not needed in urlopen 2012-12-31 13:10:11 +01:00
Ruud
c29cb39797 Automation cleanup 2012-12-30 21:43:13 +01:00
Kris Kater
580ff38136 Added moviemeter.nl automation 2012-12-30 20:51:47 +01:00
Sander Boele
6b8bca5491 added path to the freebsd init script 2012-12-30 20:51:24 +01:00
Ruud
e92b5d95ca IOLoop cleanup 2012-12-30 18:40:38 +01:00
Ruud
611a32d110 Add randomstring to each internal api 2012-12-30 18:39:16 +01:00
Ruud
74e4b015a9 Module update: Tornade 2012-12-30 18:38:52 +01:00
Ruud
1e0267cdb5 Change OMGWTF to .org 2012-12-30 11:21:23 +01:00
Ruud
041a206fb4 Rename to OMDBapi 2012-12-29 23:45:21 +01:00
Ruud
12a4d6a995 Send proper user-agent with nzbx.co 2012-12-29 21:23:15 +01:00
Ruud
b14a6c1e63 nzbx description 2012-12-29 20:22:42 +01:00
spion06
7fa08ef9b6 Update init/freebsd to not use perl
When using couchpotato on slimmed down versions of freebsd (freenas for example) sometimes perl is not available. Since the previous parsing of the INI required a "key = value" format it is pretty simple to use awk for this.
2012-12-29 19:24:12 +01:00
Ruud
9a314cfbc4 One up 2012-12-29 00:03:45 +01:00
Ruud
5941d0bf77 Add version to update url 2012-12-29 00:03:36 +01:00
Ruud
d326c1c25c Merge branch 'refs/heads/master' into desktop
Conflicts:
	version.py
2012-12-28 23:31:08 +01:00
Ruud
7e6234298d Merge branch 'refs/heads/develop' 2012-12-28 23:25:40 +01:00
Ruud
5cf4b8b4d3 Binsearch provider 2012-12-28 23:01:35 +01:00
Ruud
6e56072250 Don't migrate when in development 2012-12-28 17:44:02 +01:00
Ruud
917c5552a4 Simplified providers 2012-12-27 19:53:12 +01:00
Ruud
73c5b90232 TorrentDay support. closes #1161 2012-12-25 18:38:37 +01:00
Ruud
fd53ba0637 SceneAccess: Don't add quality to query 2012-12-25 17:43:47 +01:00
Ruud
0ef3906b3d Cleanup 2012-12-25 17:15:09 +01:00
Ruud
5ab0d7a97b Cleanup torrent providers 2012-12-25 14:56:27 +01:00
Ruud
dbbbbb2f84 Module update: dateutil 2012-12-25 11:38:49 +01:00
Ruud
1bfe948a45 Newznab didn't return results 2012-12-24 19:21:43 +01:00
Ruud
0d2dcff7f0 NZB Provider cleanup 2012-12-23 02:48:36 +01:00
Ruud
d4da206f93 Merge branch 'refs/heads/develop' 2012-12-22 16:33:47 +01:00
Ruud
439cda8b63 Newznab age wrong. fix #1171 2012-12-22 16:33:28 +01:00
Ruud
bbe8362b08 Show updating screen instantly. closes #1167 2012-12-21 23:54:41 +01:00
Ruud
985a168724 Merge branch 'refs/heads/develop' 2012-12-21 23:18:00 +01:00
Ruud
5e6aea97f7 Score providers 2012-12-21 23:17:50 +01:00
Ruud
6c7c4c7aba Use same api call for all qualities. closes #1164 2012-12-21 23:17:42 +01:00
Ruud
e2f59f5ff4 Merge branch 'develop' of github.com:RuudBurger/CouchPotatoServer into develop 2012-12-21 22:15:18 +01:00
Ruud
b225980ce7 Use pubDate and enclosure length for newznab 2012-12-21 22:14:37 +01:00
Ruud
b8e86b378f NZBx provider 2012-12-20 15:45:04 +01:00
Ruud
031a186d71 NZBx fixes 2012-12-20 15:19:40 +01:00
Ruud
3c04eed218 Added nzbx option 2012-12-20 15:18:46 +01:00
Ruud
17e01689d9 Remove torrage.ws. fix #1157 2012-12-19 16:13:40 +01:00
Ruud
173c6194ed Merge branch 'refs/heads/develop' 2012-12-19 11:12:26 +01:00
Ruud
95c2e992b0 Use trailer naming from settings. closes #936 2012-12-19 11:11:12 +01:00
Ruud
4bffb299af Catch urlerrors. closes #1154 2012-12-19 08:01:35 +01:00
Ruud
a2c4119508 Change PublicHD to .se TLD 2012-12-18 23:36:13 +01:00
Ruud
4e9472f8ee Encode path properly before using it in walk. close #978 2012-12-18 23:18:54 +01:00
Ruud
f7911fe9f3 Remove release on new scan 2012-12-18 14:14:06 +01:00
Ruud
8ffa6a8392 Quality by id 2012-12-18 13:54:13 +01:00
Ruud
382d49f895 Delete release if it has no files 2012-12-17 22:41:56 +01:00
Ruud
570b79a67e Use height with margin to check quality. fix #582 2012-12-17 22:27:07 +01:00
Ruud
e7aafc406f Check if identifier exists before adding release. fix #1048 2012-12-17 21:10:30 +01:00
Ruud
2dcc1e096e Make path safe first 2012-12-17 21:10:04 +01:00
Ruud
9f0746a668 Encoding issues. fix #974 2012-12-17 20:50:58 +01:00
Ruud
d9c437bd7f Fix some torrentleech stuff. closes #1149 2012-12-17 19:55:27 +01:00
Ruud
7079647f87 Also try and find movie name between [] 2012-12-17 18:49:37 +01:00
Ruud
65570ba479 Improve name searching. closes #1137 2012-12-17 18:22:12 +01:00
Ruud
a57ba9026d Year match only 1900-2099 2012-12-17 18:21:15 +01:00
Ruud
63246256ee Don't remove stuff from python cache 2012-12-17 17:10:53 +01:00
Ruud
1ac0dc3bbf Don't show Environment vars when developing 2012-12-17 16:40:15 +01:00
Ruud
bcd23ad10c Merge branch 'refs/heads/develop' 2012-12-17 15:13:00 +01:00
Ruud
342d31b48a Remove ignored words which are part of title. close #1123 2012-12-17 14:08:44 +01:00
Ruud
ea7904ed9a Typo on seeders check. fix #1142 2012-12-17 13:56:11 +01:00
Ruud
ca37c2f018 Merge branch 'develop-renamer' of https://github.com/clinton-hall/CouchPotatoServer into develop 2012-12-17 13:18:23 +01:00
Ruud
5aa2146614 OMGWTFNZBs support. closes #1130
ZOMG BBQ SAUCAGES NOMNOMNOM
2012-12-17 13:07:01 +01:00
Ruud
0fd49a2c67 FTDWorld returned wrong backup category 2012-12-17 13:03:33 +01:00
Ruud
b680d84cba Don't use handler when in desktop build 2012-12-17 12:00:42 +01:00
Ruud
898e6f487d Merge branch 'refs/heads/develop' 2012-12-16 23:52:06 +01:00
clinton-hall
bb7b4cbbed Added try: except for two common errors
Does not fix the errors, but prevents the renamer being stuck as "in progress"
Allows next instance to run.
2012-12-13 19:45:13 -08:00
Ruud
6618c3927c Merge branch 'refs/heads/develop' 2012-12-11 23:15:06 +01:00
Ruud
4b58b40226 Merge branch 'refs/heads/develop' 2012-12-01 11:48:54 +01:00
Ruud
3ecc826629 Merge branch 'refs/heads/develop'
Conflicts:
	version.py
2012-11-11 22:06:48 +01:00
Ruud
32fe3796e4 Merge branch 'refs/heads/develop' 2012-10-26 22:22:47 +02:00
Ruud
359d1aaafa Merge branch 'refs/heads/develop' 2012-10-26 14:54:12 +02:00
Ruud
fb5d336351 Merge branch 'refs/heads/develop' 2012-10-26 14:36:04 +02:00
Ruud
eb30dff986 Merge branch 'refs/heads/develop' 2012-10-13 00:00:44 +02:00
Ruud
9312336962 Merge branch 'refs/heads/develop' 2012-09-24 09:36:59 +02:00
Ruud
ade4338ea6 Merge branch 'refs/heads/develop' 2012-09-16 21:32:16 +02:00
Ruud
55b20324c0 Merge branch 'refs/heads/develop' 2012-09-16 12:36:48 +02:00
Ruud
c0fb28301d Merge branch 'refs/heads/develop'
Conflicts:
	version.py
2012-09-16 10:46:39 +02:00
Ruud
f9c2503f81 Merge branch 'refs/heads/develop' 2012-09-14 13:15:35 +02:00
Ruud
5b4cdf05b1 Merge branch 'refs/heads/develop' 2012-09-14 13:06:56 +02:00
Ruud
6f25a6bdfd Merge branch 'refs/heads/develop' 2012-09-03 10:32:09 +02:00
Ruud
23427e95f7 Merge branch 'refs/heads/develop' 2012-08-26 23:09:51 +02:00
Ruud
90a09e573b Merge branch 'refs/heads/develop'
Conflicts:
	couchpotato/core/_base/updater/main.py
2012-08-05 16:15:53 +02:00
Ruud
e1d7440b9d Wrong branch in master 2012-07-15 00:23:44 +02:00
156 changed files with 3321 additions and 1617 deletions

View File

@@ -62,7 +62,6 @@ class Loader(object):
self.log.logger.addHandler(hdlr)
def addSignals(self):
signal.signal(signal.SIGINT, self.onExit)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
@@ -74,7 +73,7 @@ class Loader(object):
def onExit(self, signal, frame):
from couchpotato.core.event import fireEvent
fireEvent('app.crappy_shutdown', single = True)
fireEvent('app.shutdown', single = True)
def run(self):

View File

@@ -1,5 +1,6 @@
from esky.util import appdir_from_executable #@UnresolvedImport
from threading import Thread
from version import VERSION
from wx.lib.softwareupdate import SoftwareUpdate
import os
import sys
@@ -165,7 +166,7 @@ class CouchPotatoApp(wx.App, SoftwareUpdate):
def OnInit(self):
# Updater
base_url = 'http://couchpota.to/updates/'
base_url = 'http://couchpota.to/updates/%s/' % VERSION
self.InitUpdates(base_url, base_url + 'changelog.html',
icon = wx.Icon('icon.png'))

View File

@@ -5,9 +5,10 @@
* Search through the existing (and closed) issues first. See if you can get your answer there.
* Double check the result manually, because it could be an external issue.
* Post logs! Without seeing what is going on, I can't reproduce the error.
* What are you settings for the specific problem
* What providers are you using. (While your logs include these, scanning through hundred of lines of log isn't my hobby)
* Give me a short step by step of how to reproduce
* What is the movie + quality you are searching for.
* What are you settings for the specific problem.
* What providers are you using. (While your logs include these, scanning through hundred of lines of log isn't my hobby).
* Give me a short step by step of how to reproduce.
* What hardware / OS are you using and what are the limits? NAS can be slow and maybe have a different python installed then when you use CP on OSX or Windows for example.
* I will mark issues with the "can't reproduce" tag. Don't go asking me "why closed" if it clearly says the issue in the tag ;)

View File

@@ -1,6 +1,5 @@
from flask.blueprints import Blueprint
from flask.helpers import url_for
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, asynchronous
from werkzeug.utils import redirect
@@ -11,7 +10,11 @@ api_nonblock = {}
class NonBlockHandler(RequestHandler):
stoppers = []
def __init__(self, application, request, **kwargs):
cls = NonBlockHandler
cls.stoppers = []
super(NonBlockHandler, self).__init__(application, request, **kwargs)
@asynchronous
def get(self, route):

View File

@@ -23,20 +23,22 @@ config = [{
'default': '',
'type': 'password',
},
{
'name': 'host',
'advanced': True,
'default': '0.0.0.0',
'hidden': True,
'label': 'IP',
'description': 'Host that I should listen to. "0.0.0.0" listens to all ips.',
},
{
'name': 'port',
'default': 5050,
'type': 'int',
'description': 'The port I should listen to.',
},
{
'name': 'ssl_cert',
'description': 'Path to SSL server.crt',
'advanced': True,
},
{
'name': 'ssl_key',
'description': 'Path to SSL server.key',
'advanced': True,
},
{
'name': 'launch_browser',
'default': True,

View File

@@ -53,7 +53,8 @@ class Core(Plugin):
addEvent('setting.save.core.api_key', self.checkApikey)
# Make sure we can close-down with ctrl+c properly
self.signalHandler()
if not Env.get('desktop'):
self.signalHandler()
def md5Password(self, value):
return md5(value.encode(Env.get('encoding'))) if value else ''
@@ -151,7 +152,7 @@ class Core(Plugin):
def createBaseUrl(self):
host = Env.setting('host')
if host == '0.0.0.0':
if host == '0.0.0.0' or host == '':
host = 'localhost'
port = Env.setting('port')
@@ -175,8 +176,10 @@ class Core(Plugin):
})
def signalHandler(self):
if Env.get('daemonized'): return
def signal_handler(signal, frame):
fireEvent('app.do_shutdown')
fireEvent('app.shutdown')
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)

View File

@@ -2,7 +2,6 @@ from apscheduler.scheduler import Scheduler as Sched
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
import logging
log = CPLog(__name__)

View File

@@ -90,17 +90,18 @@ var UpdaterBase = new Class({
doUpdate: function(){
var self = this;
App.blockPage('Please wait while CouchPotato is being updated with more awesome stuff.', 'Updating');
Api.request('updater.update', {
'onComplete': function(json){
if(json.success){
if(json.success)
self.updating();
}
else
App.unBlockPage()
}
});
},
updating: function(){
App.blockPage('Please wait while CouchPotato is being updated with more awesome stuff.', 'Updating');
App.checkAvailable.delay(500, App, [1000, function(){
window.location.reload();
}]);

View File

@@ -0,0 +1,13 @@
config = {
'name': 'download_providers',
'groups': [
{
'label': 'Downloaders',
'description': 'You can select different downloaders for each type (usenet / torrent)',
'type': 'list',
'name': 'download_providers',
'tab': 'downloaders',
'options': [],
},
],
}

View File

@@ -1,20 +1,20 @@
from base64 import b32decode, b16encode
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.providers.base import Provider
import random
import re
log = CPLog(__name__)
class Downloader(Plugin):
class Downloader(Provider):
type = []
http_time_between_calls = 0
torrent_sources = [
'http://torrage.com/torrent/%s.torrent',
'http://torrage.ws/torrent/%s.torrent',
'http://torcache.net/torrent/%s.torrent',
]
@@ -33,18 +33,44 @@ class Downloader(Plugin):
]
def __init__(self):
addEvent('download', self.download)
addEvent('download.status', self.getAllDownloadStatus)
addEvent('download.remove_failed', self.removeFailed)
addEvent('download', self._download)
addEvent('download.enabled', self._isEnabled)
addEvent('download.enabled_types', self.getEnabledDownloadType)
addEvent('download.status', self._getAllDownloadStatus)
addEvent('download.remove_failed', self._removeFailed)
def download(self, data = {}, movie = {}, manual = False, filedata = None):
pass
def getEnabledDownloadType(self):
for download_type in self.type:
if self.isEnabled(manual = True, data = {'type': download_type}):
return self.type
return []
def _download(self, data = {}, movie = {}, manual = False, filedata = None):
if self.isDisabled(manual, data):
return
return self.download(data = data, movie = movie, filedata = filedata)
def _getAllDownloadStatus(self):
if self.isDisabled(manual = True, data = {}):
return
return self.getAllDownloadStatus()
def getAllDownloadStatus(self):
return
def _removeFailed(self, item):
if self.isDisabled(manual = True, data = {}):
return
if self.conf('delete_failed', default = True):
return self.removeFailed(item)
return False
def removeFailed(self, name = {}, nzo_id = {}):
return False
def removeFailed(self, item):
return
def isCorrectType(self, item_type):
is_correct = item_type in self.type
@@ -77,9 +103,16 @@ class Downloader(Plugin):
log.error('Failed converting magnet url to torrent: %s', (torrent_hash))
return False
def isDisabled(self, manual):
return not self.isEnabled(manual)
def isDisabled(self, manual, data):
return not self.isEnabled(manual, data)
def isEnabled(self, manual):
def _isEnabled(self, manual, data = {}):
if not self.isEnabled(manual, data):
return
return True
def isEnabled(self, manual, data = {}):
d_manual = self.conf('manual', default = False)
return super(Downloader, self).isEnabled() and ((d_manual and manual) or (d_manual is False))
return super(Downloader, self).isEnabled() and \
((d_manual and manual) or (d_manual is False)) and \
(not data or self.isCorrectType(data.get('type')))

View File

@@ -10,6 +10,7 @@ config = [{
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'blackhole',
'label': 'Black hole',
'description': 'Download the NZB/Torrent to a specific folder.',

View File

@@ -10,11 +10,7 @@ class Blackhole(Downloader):
type = ['nzb', 'torrent', 'torrent_magnet']
def download(self, data = {}, movie = {}, manual = False, filedata = None):
if self.isDisabled(manual) or \
(not self.isCorrectType(data.get('type')) or \
(not self.conf('use_for') in ['both', 'torrent' if 'torrent' in data.get('type') else data.get('type')])):
return
def download(self, data = {}, movie = {}, filedata = None):
directory = self.conf('directory')
if not directory or not os.path.isdir(directory):
@@ -52,4 +48,23 @@ class Blackhole(Downloader):
except:
log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc()))
return False
return False
def getEnabledDownloadType(self):
if self.conf('use_for') == 'both':
return super(Blackhole, self).getEnabledDownloadType()
elif self.conf('use_for') == 'torrent':
return ['torrent', 'torrent_magnet']
else:
return ['nzb']
def isEnabled(self, manual, data = {}):
for_type = ['both']
if data and 'torrent' in data.get('type'):
for_type.append('torrent')
elif data:
for_type.append(data.get('type'))
return super(Blackhole, self).isEnabled(manual, data) and \
((self.conf('use_for') in for_type))

View File

@@ -8,9 +8,10 @@ config = [{
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'nzbget',
'label': 'NZBGet',
'description': 'Send NZBs to your NZBGet installation.',
'description': 'Use <a href="http://nzbget.sourceforge.net/Main_Page" target="_blank">NZBGet</a> to download NZBs.',
'options': [
{
'name': 'enabled',
@@ -25,6 +26,7 @@ config = [{
},
{
'name': 'password',
'type': 'password',
'description': 'Default NZBGet password is <i>tegbzn6789</i>',
},
{
@@ -32,6 +34,13 @@ config = [{
'default': 'Movies',
'description': 'The category CP places the nzb in. Like <strong>movies</strong> or <strong>couchpotato</strong>',
},
{
'name': 'priority',
'default': '0',
'type': 'dropdown',
'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100)],
'description': 'Only change this if you are using NZBget 9.0 or higher',
},
{
'name': 'manual',
'default': 0,

View File

@@ -1,7 +1,9 @@
from base64 import standard_b64encode
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from inspect import isfunction
import re
import socket
import traceback
import xmlrpclib
@@ -14,10 +16,7 @@ class NZBGet(Downloader):
url = 'http://nzbget:%(password)s@%(host)s/xmlrpc'
def download(self, data = {}, movie = {}, manual = False, filedata = None):
if self.isDisabled(manual) or not self.isCorrectType(data.get('type')):
return
def download(self, data = {}, movie = {}, filedata = None):
if not filedata:
log.error('Unable to get NZB file: %s', traceback.format_exc())
@@ -26,7 +25,7 @@ class NZBGet(Downloader):
log.info('Sending "%s" to NZBGet.', data.get('name'))
url = self.url % {'host': self.conf('host'), 'password': self.conf('password')}
nzb_name = '%s.nzb' % self.createNzbName(data, movie)
nzb_name = ss('%s.nzb' % self.createNzbName(data, movie))
rpc = xmlrpclib.ServerProxy(url)
try:
@@ -44,7 +43,12 @@ class NZBGet(Downloader):
log.error('Protocol Error: %s', e)
return False
if rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip())):
if re.search(r"^0", rpc.version()):
xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip()))
else:
xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip()))
if xml_response:
log.info('NZB sent successfully to NZBGet')
return True
else:

View File

@@ -0,0 +1,47 @@
from .main import NZBVortex
def start():
return NZBVortex()
config = [{
'name': 'nzbvortex',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'nzbvortex',
'label': 'NZBVortex',
'description': 'Use <a href="http://www.nzbvortex.com/landing/" target="_blank">NZBVortex</a> to download NZBs.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'nzb',
},
{
'name': 'host',
'default': 'https://localhost:4321',
},
{
'name': 'api_key',
'label': 'Api Key',
},
{
'name': 'manual',
'default': False,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'delete_failed',
'default': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],
}]

View File

@@ -0,0 +1,170 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from urllib2 import URLError
from uuid import uuid4
import hashlib
import httplib
import json
import socket
import ssl
import sys
import traceback
import urllib2
log = CPLog(__name__)
class NZBVortex(Downloader):
type = ['nzb']
api_level = None
session_id = None
def download(self, data = {}, movie = {}, filedata = None):
# Send the nzb
try:
nzb_filename = self.createFileName(data, filedata, movie)
self.call('nzb/add', params = {'file': (ss(nzb_filename), filedata)}, multipart = True)
return True
except:
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
return False
def getAllDownloadStatus(self):
raw_statuses = self.call('nzb')
statuses = []
for item in raw_statuses.get('nzbs', []):
# Check status
status = 'busy'
if item['state'] == 20:
status = 'completed'
elif item['state'] in [21, 22, 24]:
status = 'failed'
statuses.append({
'id': item['id'],
'name': item['uiTitle'],
'status': status,
'original_status': item['state'],
'timeleft':-1,
})
return statuses
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
try:
self.call('nzb/%s/cancel' % item['id'])
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False
return True
def login(self):
nonce = self.call('auth/nonce', auth = False).get('authNonce')
cnonce = uuid4().hex
hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest())
params = {
'nonce': nonce,
'cnonce': cnonce,
'hash': hashed
}
login_data = self.call('auth/login', parameters = params, auth = False)
# Save for later
if login_data.get('loginResult') == 'successful':
self.session_id = login_data.get('sessionID')
return True
log.error('Login failed, please check you api-key')
return False
def call(self, call, parameters = {}, repeat = False, auth = True, *args, **kwargs):
# Login first
if not self.session_id and auth:
self.login()
# Always add session id to request
if self.session_id:
parameters['sessionid'] = self.session_id
params = tryUrlencode(parameters)
url = cleanHost(self.conf('host')) + 'api/' + call
url_opener = urllib2.build_opener(HTTPSHandler())
try:
data = self.urlopen('%s?%s' % (url, params), opener = url_opener, *args, **kwargs)
if data:
return json.loads(data)
except URLError, e:
if hasattr(e, 'code') and e.code == 403:
# Try login and do again
if not repeat:
self.login()
return self.call(call, parameters = parameters, repeat = True, *args, **kwargs)
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return {}
def getApiLevel(self):
if not self.api_level:
url = cleanHost(self.conf('host')) + 'api/app/apilevel'
url_opener = urllib2.build_opener(HTTPSHandler())
try:
data = self.urlopen(url, opener = url_opener, show_error = False)
self.api_level = float(json.loads(data).get('apilevel'))
except URLError, e:
if hasattr(e, 'code') and e.code == 403:
log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher')
else:
log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1))
return self.api_level
def isEnabled(self, manual, data):
return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()
class HTTPSConnection(httplib.HTTPSConnection):
    """HTTPSConnection whose sockets are wrapped with ssl_version pinned to TLSv1."""

    def __init__(self, *args, **kwargs):
        # Pure delegation; kept only for explicitness.
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)

    def connect(self):
        # Open the raw TCP connection first; TLS is layered on at the end.
        sock = socket.create_connection((self.host, self.port), self.timeout)

        # Proxy CONNECT tunnel handling. Before Python 2.6.7 the _tunnel_host
        # attribute may not exist at all, hence the hasattr() guard; on newer
        # versions it always exists and is simply truth-tested.
        if sys.version_info < (2, 6, 7):
            if hasattr(self, '_tunnel_host'):
                self.sock = sock
                self._tunnel()
        else:
            if self._tunnel_host:
                self.sock = sock
                self._tunnel()

        # Pin TLSv1 explicitly rather than relying on the interpreter's default
        # ssl_version. NOTE(review): presumably required by the NZBVortex
        # server's handshake — confirm; pinning one protocol version is fragile.
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version = ssl.PROTOCOL_TLSv1)
class HTTPSHandler(urllib2.HTTPSHandler):
    """urllib2 handler that routes https:// opens through the custom HTTPSConnection."""

    def https_open(self, req):
        # Substitute our TLS-pinning connection class for the default one.
        return self.do_open(HTTPSConnection, req)

View File

@@ -9,9 +9,10 @@ config = [{
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'pneumatic',
'label': 'Pneumatic',
'description': 'Download the .strm file to a specific folder.',
'description': 'Use <a href="http://forum.xbmc.org/showthread.php?tid=97657" target="_blank">Pneumatic</a> to download .strm files.',
'options': [
{
'name': 'enabled',

View File

@@ -11,9 +11,7 @@ class Pneumatic(Downloader):
type = ['nzb']
strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s'
def download(self, data = {}, movie = {}, manual = False, filedata = None):
if self.isDisabled(manual) or (not self.isCorrectType(data.get('type'))):
return
def download(self, data = {}, movie = {}, filedata = None):
directory = self.conf('directory')
if not directory or not os.path.isdir(directory):

View File

@@ -8,9 +8,10 @@ config = [{
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'sabnzbd',
'label': 'Sabnzbd',
'description': 'Send NZBs to your Sabnzbd installation.',
'description': 'Use <a href="http://sabnzbd.org/" target="_blank">SABnzbd</a> to download NZBs.',
'wizard': True,
'options': [
{

View File

@@ -1,5 +1,5 @@
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog
from urllib2 import URLError
@@ -12,10 +12,7 @@ class Sabnzbd(Downloader):
type = ['nzb']
def download(self, data = {}, movie = {}, manual = False, filedata = None):
if self.isDisabled(manual) or not self.isCorrectType(data.get('type')):
return
def download(self, data = {}, movie = {}, filedata = None):
log.info('Sending "%s" to SABnzbd.', data.get('name'))
@@ -41,7 +38,7 @@ class Sabnzbd(Downloader):
try:
if params.get('mode') is 'addfile':
sab = self.urlopen(url, timeout = 60, params = {'nzbfile': (nzb_filename, filedata)}, multipart = True, show_error = False)
sab = self.urlopen(url, timeout = 60, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True, show_error = False)
else:
sab = self.urlopen(url, timeout = 60, show_error = False)
except URLError:
@@ -65,8 +62,6 @@ class Sabnzbd(Downloader):
return False
def getAllDownloadStatus(self):
if self.isDisabled(manual = False):
return False
log.debug('Checking SABnzbd download status.')
@@ -122,9 +117,6 @@ class Sabnzbd(Downloader):
def removeFailed(self, item):
if not self.conf('delete_failed', default = True):
return False
log.info('%s failed downloading, deleting...', item['name'])
try:

View File

@@ -8,9 +8,10 @@ config = [{
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'synology',
'label': 'Synology',
'description': 'Send torrents to Synology\'s Download Station.',
'description': 'Use <a href="http://www.synology.com/dsm/home_home_applications_download_station.php" target="_blank">Synology Download Station</a> to download.',
'wizard': True,
'options': [
{

View File

@@ -14,10 +14,7 @@ class Synology(Downloader):
type = ['torrent_magnet']
log = CPLog(__name__)
def download(self, data, movie, manual = False, filedata = None):
if self.isDisabled(manual) or not self.isCorrectType(data.get('type')):
return
def download(self, data, movie, filedata = None):
log.error('Sending "%s" (%s) to Synology.', (data.get('name'), data.get('type')))

View File

@@ -8,9 +8,10 @@ config = [{
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'transmission',
'label': 'Transmission',
'description': 'Send torrents to Transmission.',
'description': 'Use <a href="http://www.transmissionbt.com/" target="_blank">Transmission</a> to download torrents.',
'wizard': True,
'options': [
{

View File

@@ -16,10 +16,7 @@ class Transmission(Downloader):
type = ['torrent', 'torrent_magnet']
log = CPLog(__name__)
def download(self, data, movie, manual = False, filedata = None):
if self.isDisabled(manual) or not self.isCorrectType(data.get('type')):
return
def download(self, data, movie, filedata = None):
log.debug('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('type')))
@@ -41,10 +38,12 @@ class Transmission(Downloader):
'download-dir': folder_path
}
torrent_params = {
'seedRatioLimit': self.conf('ratio'),
'seedRatioMode': (0 if self.conf('ratio') else 1)
}
torrent_params = {}
if self.conf('ratio'):
torrent_params = {
'seedRatioLimit': self.conf('ratio'),
'seedRatioMode': self.conf('ratio')
}
if not filedata and data.get('type') == 'torrent':
log.error('Failed sending torrent, no data')
@@ -60,7 +59,8 @@ class Transmission(Downloader):
remote_torrent = trpc.add_torrent_file(b64encode(filedata), arguments = params)
# Change settings of added torrents
trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
if torrent_params:
trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
return True
except Exception, err:

View File

@@ -8,9 +8,10 @@ config = [{
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'utorrent',
'label': 'uTorrent',
'description': 'Send torrents to uTorrent.',
'description': 'Use <a href="http://www.utorrent.com/" target="_blank">uTorrent</a> to download torrents.',
'wizard': True,
'options': [
{

View File

@@ -1,6 +1,6 @@
from bencode import bencode, bdecode
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.helpers.encoding import isInt, ss
from couchpotato.core.logger import CPLog
from hashlib import sha1
from multipartpost import MultipartPostHandler
@@ -20,10 +20,7 @@ class uTorrent(Downloader):
type = ['torrent', 'torrent_magnet']
utorrent_api = None
def download(self, data, movie, manual = False, filedata = None):
if self.isDisabled(manual) or not self.isCorrectType(data.get('type')):
return
def download(self, data, movie, filedata = None):
log.debug('Sending "%s" (%s) to uTorrent.', (data.get('name'), data.get('type')))
@@ -125,7 +122,7 @@ class uTorrentAPI(object):
def add_torrent_file(self, filename, filedata):
action = "action=add-file"
return self._request(action, {"torrent_file": (filename, filedata)})
return self._request(action, {"torrent_file": (ss(filename), filedata)})
def set_torrent(self, hash, params):
action = "action=setprops&hash=%s" % hash

View File

@@ -12,7 +12,7 @@ def runHandler(name, handler, *args, **kwargs):
return handler(*args, **kwargs)
except:
from couchpotato.environment import Env
log.error('Error in event "%s", that wasn\'t caught: %s%s', (name, traceback.format_exc(), Env.all()))
log.error('Error in event "%s", that wasn\'t caught: %s%s', (name, traceback.format_exc(), Env.all() if not Env.get('dev') else ''))
def addEvent(name, handler, priority = 100):
@@ -105,17 +105,18 @@ def fireEvent(name, *args, **kwargs):
# Merge
if options['merge'] and len(results) > 0:
# Dict
if type(results[0]) == dict:
if isinstance(results[0], dict):
merged = {}
for result in results:
merged = mergeDicts(merged, result)
results = merged
# Lists
elif type(results[0]) == list:
elif isinstance(results[0], list):
merged = []
for result in results:
merged += result
if result not in merged:
merged += result
results = merged

View File

@@ -68,7 +68,7 @@ def tryUrlencode(s):
return new[1:]
else:
for letter in toUnicode(s):
for letter in ss(s):
try:
new += quote_plus(letter)
except:

View File

@@ -1,3 +1,4 @@
from couchpotato.core.helpers.encoding import simplifyString, toSafeString
from couchpotato.core.logger import CPLog
import hashlib
import os.path
@@ -153,6 +154,16 @@ def getTitle(library_dict):
log.error('Could not get title for library item: %s', library_dict)
return None
def possibleTitles(raw_title):
titles = []
titles.append(toSafeString(raw_title).lower())
titles.append(raw_title.lower())
titles.append(simplifyString(raw_title))
return list(set(titles))
def randomString(size = 8, chars = string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))

View File

@@ -67,6 +67,18 @@ class Loader(object):
def addFromDir(self, plugin_type, priority, module, dir_name):
# Load dir module
try:
m = __import__(module)
splitted = module.split('.')
for sub in splitted[1:]:
m = getattr(m, sub)
if hasattr(m, 'config'):
fireEvent('settings.options', splitted[-1] + '_config', getattr(m, 'config'))
except:
raise
for cur_file in glob.glob(os.path.join(dir_name, '*')):
name = os.path.basename(cur_file)
if os.path.isdir(os.path.join(dir_name, name)):

View File

@@ -0,0 +1,13 @@
config = {
'name': 'notification_providers',
'groups': [
{
'label': 'Notifications',
'description': 'Notify when movies are done or snatched',
'type': 'list',
'name': 'notification_providers',
'tab': 'notifications',
'options': [],
},
],
}

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'boxcar',
'options': [
{

View File

@@ -0,0 +1,56 @@
from .main import Email
def start():
return Email()
config = [{
'name': 'email',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'email',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'from',
'label': 'Send e-mail from',
},
{
'name': 'to',
'label': 'Send e-mail to',
},
{
'name': 'smtp_server',
'label': 'SMTP server',
},
{
'name': 'ssl',
'label': 'Enable SSL',
'default': 0,
'type': 'bool',
},
{
'name': 'smtp_user',
'label': 'SMTP user',
},
{
'name': 'smtp_pass',
'label': 'SMTP password',
'type': 'password',
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]

View File

@@ -0,0 +1,54 @@
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from email.mime.text import MIMEText
import smtplib
import traceback
log = CPLog(__name__)
class Email(Notification):
def notify(self, message = '', data = {}, listener = None):
if self.isDisabled(): return
# Extract all the settings from settings
from_address = self.conf('from')
to_address = self.conf('to')
ssl = self.conf('ssl')
smtp_server = self.conf('smtp_server')
smtp_user = self.conf('smtp_user')
smtp_pass = self.conf('smtp_pass')
# Make the basic message
message = MIMEText(toUnicode(message))
message['Subject'] = self.default_title
message['From'] = from_address
message['To'] = to_address
try:
# Open the SMTP connection, via SSL if requested
log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled"))
mailserver = smtplib.SMTP_SSL(smtp_server) if ssl == 1 else smtplib.SMTP(smtp_server)
# Check to see if a login attempt should be attempted
if len(smtp_user) > 0:
log.debug("Logging on to SMTP server using username \'%s\'%s", (smtp_user, " and a password" if len(smtp_pass) > 0 else ""))
mailserver.login(smtp_user, smtp_pass)
# Send the e-mail
log.debug("Sending the email")
mailserver.sendmail(from_address, to_address, message.as_string())
# Close the SMTP connection
mailserver.quit()
log.info('Email notification sent')
return True
except:
log.error('E-mail failed: %s', traceback.format_exc())
return False
return False

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'growl',
'description': 'Version 1.4+',
'options': [

View File

@@ -37,8 +37,11 @@ class Growl(Notification):
)
self.growl.register()
self.registered = True
except:
log.error('Failed register of growl: %s', traceback.format_exc())
except Exception, e:
if 'timed out' in str(e):
self.registered = True
else:
log.error('Failed register of growl: %s', traceback.format_exc())
def notify(self, message = '', data = {}, listener = None):
if self.isDisabled(): return

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'nmj',
'label': 'NMJ',
'options': [

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'notifo',
'description': 'Keep in mind that Notifo service will end soon.',
'options': [

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'notifymyandroid',
'label': 'Notify My Android',
'options': [

View File

@@ -8,8 +8,9 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'notifymywp',
'label': 'Notify My Windows Phone',
'label': 'Windows Phone',
'options': [
{
'name': 'enabled',

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'plex',
'options': [
{

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'prowl',
'options': [
{

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'pushover',
'options': [
{

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'synoindex',
'description': 'Automaticly adds index to Synology Media Server.',
'options': [

View File

@@ -0,0 +1,33 @@
from .main import Toasty
def start():
return Toasty()
config = [{
'name': 'toasty',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'toasty',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'api_key',
'label': 'Device ID',
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]

View File

@@ -0,0 +1,30 @@
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import traceback
log = CPLog(__name__)
class Toasty(Notification):
urls = {
'api': 'http://api.supertoasty.com/notify/%s?%s'
}
def notify(self, message = '', data = {}, listener = None):
if self.isDisabled(): return
data = {
'title': self.default_title,
'text': toUnicode(message),
'sender': toUnicode("CouchPotato"),
'image': 'https://raw.github.com/RuudBurger/CouchPotatoServer/master/couchpotato/static/images/homescreen.png',
}
try:
self.urlopen(self.urls['api'] % (self.conf('api_key'), tryUrlencode(data)), show_error = False)
return True
except:
log.error('Toasty failed: %s', traceback.format_exc())
return False

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'twitter',
'options': [
{

View File

@@ -8,8 +8,10 @@ config = [{
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'xbmc',
'label': 'XBMC',
'description': 'v11 (Eden) and v12 (Frodo)',
'options': [
{
'name': 'enabled',

View File

@@ -4,6 +4,7 @@ from couchpotato.core.notifications.base import Notification
from flask.helpers import json
import base64
import traceback
import urllib
log = CPLog(__name__)
@@ -11,27 +12,148 @@ log = CPLog(__name__)
class XBMC(Notification):
listen_to = ['renamer.after']
use_json_notifications = {}
couch_logo_url = 'https://raw.github.com/RuudBurger/CouchPotatoServer/master/couchpotato/static/images/xbmc-notify.png'
def notify(self, message = '', data = {}, listener = None):
if self.isDisabled(): return
hosts = splitString(self.conf('host'))
successful = 0
for host in hosts:
response = self.request(host, [
('GUI.ShowNotification', {"title":"CouchPotato", "message":message}),
('VideoLibrary.Scan', {}),
])
if self.use_json_notifications.get(host) is None:
self.getXBMCJSONversion(host, message = message)
if self.use_json_notifications.get(host):
response = self.request(host, [
('GUI.ShowNotification', {'title': self.default_title, 'message': message, 'image': self.couch_logo_url}),
('VideoLibrary.Scan', {}),
])
else:
response = self.notifyXBMCnoJSON(host, {'title':self.default_title, 'message':message})
response += self.request(host, [('VideoLibrary.Scan', {})])
try:
for result in response:
if result['result'] == "OK":
if (result.get('result') and result['result'] == 'OK'):
successful += 1
elif (result.get('error')):
log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))
except:
log.error('Failed parsing results: %s', traceback.format_exc())
return successful == len(hosts) * 2
def getXBMCJSONversion(self, host, message = ''):
success = False
# XBMC JSON-RPC version request
response = self.request(host, [
('JSONRPC.Version', {})
])
for result in response:
if (result.get('result') and type(result['result']['version']).__name__ == 'int'):
# only v2 and v4 return an int object
# v6 (as of XBMC v12(Frodo)) is required to send notifications
xbmc_rpc_version = str(result['result']['version'])
log.debug('XBMC JSON-RPC Version: %s ; Notifications by JSON-RPC only supported for v6 [as of XBMC v12(Frodo)]', xbmc_rpc_version)
# disable JSON use
self.use_json_notifications[host] = False
# send the text message
resp = self.notifyXBMCnoJSON(host, {'title':self.default_title, 'message':message})
for result in resp:
if (result.get('result') and result['result'] == 'OK'):
log.debug('Message delivered successfully!')
success = True
break
elif (result.get('error')):
log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))
break
elif (result.get('result') and type(result['result']['version']).__name__ == 'dict'):
# XBMC JSON-RPC v6 returns an array object containing
# major, minor and patch number
xbmc_rpc_version = str(result['result']['version']['major'])
xbmc_rpc_version += '.' + str(result['result']['version']['minor'])
xbmc_rpc_version += '.' + str(result['result']['version']['patch'])
log.debug('XBMC JSON-RPC Version: %s', xbmc_rpc_version)
# ok, XBMC version is supported
self.use_json_notifications[host] = True
# send the text message
resp = self.request(host, [('GUI.ShowNotification', {'title':self.default_title, 'message':message, 'image':self.couch_logo_url})])
for result in resp:
if (result.get('result') and result['result'] == 'OK'):
log.debug('Message delivered successfully!')
success = True
break
elif (result.get('error')):
log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))
break
# error getting version info (we do have contact with XBMC though)
elif (result.get('error')):
log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))
log.debug('Use JSON notifications: %s ', self.use_json_notifications)
return success
def notifyXBMCnoJSON(self, host, data):
server = 'http://%s/xbmcCmds/' % host
# Notification(title, message [, timeout , image])
cmd = "xbmcHttp?command=ExecBuiltIn(Notification(%s,%s,'',%s))" % (urllib.quote(data['title']), urllib.quote(data['message']), urllib.quote(self.couch_logo_url))
server += cmd
# I have no idea what to set to, just tried text/plain and seems to be working :)
headers = {
'Content-Type': 'text/plain',
}
# authentication support
if self.conf('password'):
base64string = base64.encodestring('%s:%s' % (self.conf('username'), self.conf('password'))).replace('\n', '')
headers['Authorization'] = 'Basic %s' % base64string
try:
log.debug('Sending non-JSON-type request to %s: %s', (host, data))
# response will either be 'OK':
# <html>
# <li>OK
# </html>
#
# or 'Error':
# <html>
# <li>Error:<message>
# </html>
#
response = self.urlopen(server, headers = headers)
if 'OK' in response:
log.debug('Returned from non-JSON-type request %s: %s', (host, response))
# manually fake expected response array
return [{'result': 'OK'}]
else:
log.error('Returned from non-JSON-type request %s: %s', (host, response))
# manually fake expected response array
return [{'result': 'Error'}]
except:
log.error('Failed sending non-JSON-type request to XBMC: %s', traceback.format_exc())
return [{'result': 'Error'}]
def request(self, host, requests):
server = 'http://%s/jsonrpc' % host

View File

@@ -1,9 +1,8 @@
from StringIO import StringIO
from couchpotato import addView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import tryUrlencode, simplifyString, ss, \
toSafeString
from couchpotato.core.helpers.variable import getExt
from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString
from couchpotato.core.helpers.variable import getExt, md5
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from flask.templating import render_template_string
@@ -79,7 +78,7 @@ class Plugin(object):
self.makeDir(os.path.dirname(path))
try:
f = open(path, 'w' if not binary else 'wb')
f = open(path, 'w+' if not binary else 'w+b')
f.write(content)
f.close()
os.chmod(path, Env.getPermission('file'))
@@ -99,6 +98,7 @@ class Plugin(object):
# http request
def urlopen(self, url, timeout = 30, params = None, headers = None, opener = None, multipart = False, show_error = True):
url = ss(url)
if not headers: headers = {}
if not params: params = {}
@@ -130,8 +130,11 @@ class Plugin(object):
log.info('Opening multipart url: %s, params: %s', (url, [x for x in params.iterkeys()] if isinstance(params, dict) else 'with data'))
request = urllib2.Request(url, params, headers)
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler)
if opener:
opener.add_handler(MultipartPostHandler())
else:
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler)
response = opener.open(request, timeout = timeout)
else:
@@ -222,7 +225,7 @@ class Plugin(object):
def getCache(self, cache_key, url = None, **kwargs):
cache_key = simplifyString(cache_key)
cache_key = md5(ss(cache_key))
cache = Env.get('cache').get(cache_key)
if cache:
if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
@@ -242,9 +245,11 @@ class Plugin(object):
self.setCache(cache_key, data, timeout = cache_timeout)
return data
except:
if not kwargs.get('show_error'):
if not kwargs.get('show_error', True):
raise
return ''
def setCache(self, cache_key, value, timeout = 300):
log.debug('Setting cache %s', cache_key)
Env.get('cache').set(cache_key, value, timeout)

View File

@@ -66,10 +66,12 @@ class FileManager(Plugin):
time.sleep(3)
log.debug('Cleaning up unused files')
python_cache = Env.get('cache')._path
try:
db = get_session()
for root, dirs, walk_files in os.walk(Env.get('cache_dir')):
for filename in walk_files:
if root == python_cache: continue
file_path = os.path.join(root, filename)
f = db.query(File).filter(File.path == toUnicode(file_path)).first()
if not f:

View File

@@ -115,12 +115,34 @@ class Manage(Plugin):
if done_movie['library']['identifier'] not in added_identifiers:
fireEvent('movie.delete', movie_id = done_movie['id'], delete_from = 'all')
else:
for release in done_movie.get('releases', []):
for release_file in release.get('files', []):
# Remove release not available anymore
if not os.path.isfile(ss(release_file['path'])):
fireEvent('release.clean', release['id'])
break
if len(release.get('files', [])) == 0:
fireEvent('release.delete', release['id'])
else:
for release_file in release.get('files', []):
# Remove release not available anymore
if not os.path.isfile(ss(release_file['path'])):
fireEvent('release.clean', release['id'])
break
# Check if there are duplicate releases (different quality) use the last one, delete the rest
if len(done_movie.get('releases', [])) > 1:
used_files = {}
for release in done_movie.get('releases', []):
for release_file in release.get('files', []):
already_used = used_files.get(release_file['path'])
if already_used:
if already_used < release['id']:
fireEvent('release.delete', release['id'], single = True) # delete this one
else:
fireEvent('release.delete', already_used, single = True) # delete previous one
break
else:
used_files[release_file['path']] = release.get('id')
del used_files
Env.prop('manage.last_update', time.time())
except:
@@ -153,7 +175,7 @@ class Manage(Plugin):
'to_go': total_found,
}
if group['library']:
if group['library'] and group['library'].get('identifier'):
identifier = group['library'].get('identifier')
added_identifiers.append(identifier)
@@ -176,9 +198,12 @@ class Manage(Plugin):
def directories(self):
try:
return splitString(self.conf('library', default = ''), '::')
if self.conf('library', default = '').strip():
return splitString(self.conf('library', default = ''), '::')
except:
return []
pass
return []
def scanFilesToLibrary(self, folder = None, files = None):
@@ -187,5 +212,5 @@ class Manage(Plugin):
groups = fireEvent('scanner.scan', folder = folder, files = files, single = True)
for group in groups.itervalues():
if group['library']:
if group['library'] and group['library'].get('identifier'):
fireEvent('release.add', group = group)

View File

@@ -7,6 +7,7 @@ from couchpotato.core.helpers.variable import mergeDicts, md5, getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Quality, Profile, ProfileType
from sqlalchemy.sql.expression import or_
import os.path
import re
import time
@@ -18,9 +19,9 @@ class QualityPlugin(Plugin):
qualities = [
{'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]},
{'identifier': '1080p', 'hd': True, 'size': (5000, 20000), 'label': '1080P', 'width': 1920, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts']},
{'identifier': '720p', 'hd': True, 'size': (3500, 10000), 'label': '720P', 'width': 1280, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p'], 'ext':['avi']},
{'identifier': '1080p', 'hd': True, 'size': (5000, 20000), 'label': '1080P', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts']},
{'identifier': '720p', 'hd': True, 'size': (3500, 10000), 'label': '720P', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':['avi']},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': [], 'allow': [], 'ext':['iso', 'img'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': ['dvdrip'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip'], 'allow': ['dvdr', 'dvd'], 'ext':['avi', 'mpg', 'mpeg']},
@@ -76,7 +77,7 @@ class QualityPlugin(Plugin):
db = get_session()
quality_dict = {}
quality = db.query(Quality).filter_by(identifier = identifier).first()
quality = db.query(Quality).filter(or_(Quality.identifier == identifier, Quality.id == identifier)).first()
if quality:
quality_dict = dict(self.getQuality(quality.identifier), **quality.to_dict())
@@ -198,9 +199,14 @@ class QualityPlugin(Plugin):
for quality in self.all():
# Last check on resolution only
if quality.get('width', 480) == extra.get('resolution_width', 0):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width', 480), extra.get('resolution_width', 0)))
# Check width resolution, range 20
if (quality.get('width', 720) - 20) <= extra.get('resolution_width', 0) <= (quality.get('width', 720) + 20):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width', 720), extra.get('resolution_width', 0)))
return self.setCache(hash, quality)
# Check height resolution, range 20
if (quality.get('height', 480) - 20) <= extra.get('resolution_height', 0) <= (quality.get('height', 480) + 20):
log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height', 480), extra.get('resolution_height', 0)))
return self.setCache(hash, quality)
if 480 <= extra.get('resolution_width', 0) <= 720:

View File

@@ -133,6 +133,9 @@ class Release(Plugin):
db.delete(release_file)
db.commit()
if len(rel.files) == 0:
self.delete(id)
return True
return False

View File

@@ -133,13 +133,6 @@ config = [{
'type': 'choice',
'options': rename_options
},
{
'name': 'trailer_name',
'label': 'Trailer naming',
'default': '<filename>-trailer.<ext>',
'type': 'choice',
'options': rename_options
},
],
},
],

View File

@@ -33,6 +33,7 @@ class Renamer(Plugin):
addEvent('renamer.check_snatched', self.checkSnatched)
addEvent('app.load', self.scan)
addEvent('app.load', self.checkSnatched)
if self.conf('run_every') > 0:
fireEvent('schedule.interval', 'renamer.check_snatched', self.checkSnatched, minutes = self.conf('run_every'))
@@ -313,7 +314,10 @@ class Renamer(Plugin):
elif release.status_id is snatched_status.get('id'):
if release.quality.id is group['meta_data']['quality']['id']:
log.debug('Marking release as downloaded')
release.status_id = downloaded_status.get('id')
try:
release.status_id = downloaded_status.get('id')
except Exception, e:
log.error('Failed marking release as finished: %s %s', (e, traceback.format_exc()))
db.commit()
# Remove leftover files
@@ -337,6 +341,7 @@ class Renamer(Plugin):
log.info('Removing "%s"', src)
try:
src = ss(src)
if os.path.isfile(src):
os.remove(src)
@@ -350,7 +355,10 @@ class Renamer(Plugin):
# Delete leftover folder from older releases
for delete_folder in delete_folders:
self.deleteEmptyFolder(delete_folder, show_error = False)
try:
self.deleteEmptyFolder(delete_folder, show_error = False)
except Exception, e:
log.error('Failed to delete folder: %s %s', (e, traceback.format_exc()))
# Rename all files marked
group['renamed_files'] = []
@@ -491,6 +499,7 @@ class Renamer(Plugin):
return string.replace(' ', ' ').replace(' .', '.')
def deleteEmptyFolder(self, folder, show_error = True):
folder = ss(folder)
loge = log.error if show_error else log.debug
for root, dirs, files in os.walk(folder):

View File

@@ -89,7 +89,7 @@ class Scanner(Plugin):
'()([ab])(\.....?)$' #*a.mkv
]
cp_imdb = '(\.cp\((?P<id>tt[0-9{7}]+)\))'
cp_imdb = '(.cp.(?P<id>tt[0-9{7}]+).)'
def __init__(self):
@@ -341,7 +341,7 @@ class Scanner(Plugin):
group['files']['movie'] = self.getMediaFiles(group['unsorted_files'])
if len(group['files']['movie']) == 0:
log.error('Couldn\t find any movie files for %s', identifier)
log.error('Couldn\'t find any movie files for %s', identifier)
continue
log.debug('Getting metadata for %s', identifier)
@@ -421,7 +421,7 @@ class Scanner(Plugin):
if not data['quality']:
data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True)
data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 else 'SD'
data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD'
filename = re.sub('(.cp\(tt[0-9{7}]+\))', '', files[0])
data['group'] = self.getGroup(filename[len(folder):])
@@ -775,7 +775,7 @@ class Scanner(Plugin):
return None
def findYear(self, text):
matches = re.search('(?P<year>[12]{1}[0-9]{3})', text)
matches = re.search('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
if matches:
return matches.group('year')

View File

@@ -27,9 +27,9 @@ class Score(Plugin):
score += sizeScore(nzb['size'])
# Torrents only
if nzb.get('seeds'):
if nzb.get('seeders'):
try:
score += nzb.get('seeds') / 5
score += nzb.get('seeders') / 5
score += nzb.get('leechers') / 10
except:
pass

View File

@@ -116,7 +116,7 @@ def sizeScore(size):
def providerScore(provider):
if provider in ['NZBMatrix', 'Nzbs', 'Newzbin']:
if provider in ['OMGWTFNZBs', 'PassThePopcorn', 'SceneAccess', 'TorrentLeech']:
return 20
if provider in ['Newznab']:

View File

@@ -3,7 +3,8 @@ from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.request import jsonified, getParam
from couchpotato.core.helpers.variable import md5, getTitle, splitString
from couchpotato.core.helpers.variable import md5, getTitle, splitString, \
possibleTitles
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie, Release, ReleaseInfo
@@ -11,6 +12,7 @@ from couchpotato.environment import Env
from inspect import ismethod, isfunction
from sqlalchemy.exc import InterfaceError
import datetime
import random
import re
import time
import traceback
@@ -82,37 +84,51 @@ class Searcher(Plugin):
movies = db.query(Movie).filter(
Movie.status.has(identifier = 'active')
).all()
random.shuffle(movies)
self.in_progress = {
'total': len(movies),
'to_go': len(movies),
}
for movie in movies:
movie_dict = movie.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
})
try:
search_types = self.getSearchTypes()
try:
self.single(movie_dict)
except IndexError:
log.error('Forcing library update for %s, if you see this often, please report: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
fireEvent('library.update', movie_dict['library']['identifier'], force = True)
except:
log.error('Search failed for %s: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
for movie in movies:
movie_dict = movie.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
})
self.in_progress['to_go'] -= 1
try:
self.single(movie_dict, search_types)
except IndexError:
log.error('Forcing library update for %s, if you see this often, please report: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
fireEvent('library.update', movie_dict['library']['identifier'], force = True)
except:
log.error('Search failed for %s: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
# Break if CP wants to shut down
if self.shuttingDown():
break
self.in_progress['to_go'] -= 1
# Break if CP wants to shut down
if self.shuttingDown():
break
except SearchSetupError:
pass
self.in_progress = False
def single(self, movie):
def single(self, movie, search_types = None):
# Find out search type
try:
if not search_types:
search_types = self.getSearchTypes()
except SearchSetupError:
return
done_status = fireEvent('status.get', 'done', single = True)
@@ -127,6 +143,8 @@ class Searcher(Plugin):
available_status = fireEvent('status.get', 'available', single = True)
ignored_status = fireEvent('status.get', 'ignored', single = True)
found_releases = []
default_title = getTitle(movie['library'])
if not default_title:
log.error('No proper info found for movie, removing it from library to cause it from having more issues.')
@@ -135,6 +153,7 @@ class Searcher(Plugin):
fireEvent('notify.frontend', type = 'searcher.started.%s' % movie['id'], data = True, message = 'Searching for "%s"' % default_title)
ret = False
for quality_type in movie['profile']['types']:
if not self.couldBeReleased(quality_type['quality']['identifier'], release_dates, pre_releases):
@@ -154,7 +173,11 @@ class Searcher(Plugin):
log.info('Search for %s in %s', (default_title, quality_type['quality']['label']))
quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)
results = fireEvent('yarr.search', movie, quality, merge = True)
results = []
for search_type in search_types:
type_results = fireEvent('%s.search' % search_type, movie, quality, merge = True)
if type_results:
results += type_results
sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
if len(sorted_results) == 0:
@@ -171,10 +194,13 @@ class Searcher(Plugin):
# Add them to this movie releases list
for nzb in sorted_results:
rls = db.query(Release).filter_by(identifier = md5(nzb['url'])).first()
nzb_identifier = md5(nzb['url'])
found_releases.append(nzb_identifier)
rls = db.query(Release).filter_by(identifier = nzb_identifier).first()
if not rls:
rls = Release(
identifier = md5(nzb['url']),
identifier = nzb_identifier,
movie_id = movie.get('id'),
quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id')
@@ -222,6 +248,12 @@ class Searcher(Plugin):
break
elif downloaded != 'try_next':
break
# Remove releases that aren't found anymore
for release in movie.get('releases', []):
if release.get('status_id') == available_status.get('id') and release.get('identifier') not in found_releases:
fireEvent('release.delete', release.get('id'), single = True)
else:
log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title))
fireEvent('movie.restatus', movie['id'])
@@ -237,67 +269,93 @@ class Searcher(Plugin):
def download(self, data, movie, manual = False):
snatched_status = fireEvent('status.get', 'snatched', single = True)
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
# Download movie to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
if downloader_enabled:
successful = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True)
snatched_status = fireEvent('status.get', 'snatched', single = True)
if successful:
# Download movie to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
try:
# Mark release as snatched
db = get_session()
rls = db.query(Release).filter_by(identifier = md5(data['url'])).first()
if rls:
rls.status_id = snatched_status.get('id')
db.commit()
successful = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True)
log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict())
if successful:
# If renamer isn't used, mark movie done
if not Env.setting('enabled', 'renamer'):
active_status = fireEvent('status.get', 'active', single = True)
done_status = fireEvent('status.get', 'done', single = True)
try:
if movie['status_id'] == active_status.get('id'):
for profile_type in movie['profile']['types']:
if rls and profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking movie as finished: %s', log_movie)
try:
# Mark release as snatched
db = get_session()
rls = db.query(Release).filter_by(identifier = md5(data['url'])).first()
if rls:
rls.status_id = snatched_status.get('id')
db.commit()
# Mark release done
rls.status_id = done_status.get('id')
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict())
# Mark movie done
mvie = db.query(Movie).filter_by(id = movie['id']).first()
mvie.status_id = done_status.get('id')
db.commit()
except:
log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc())
# If renamer isn't used, mark movie done
if not Env.setting('enabled', 'renamer'):
active_status = fireEvent('status.get', 'active', single = True)
done_status = fireEvent('status.get', 'done', single = True)
try:
if movie['status_id'] == active_status.get('id'):
for profile_type in movie['profile']['types']:
if rls and profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking movie as finished: %s', log_movie)
except:
log.error('Failed marking movie finished: %s', traceback.format_exc())
# Mark release done
rls.status_id = done_status.get('id')
db.commit()
return True
# Mark movie done
mvie = db.query(Movie).filter_by(id = movie['id']).first()
mvie.status_id = done_status.get('id')
db.commit()
except:
log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc())
except:
log.error('Failed marking movie finished: %s', traceback.format_exc())
return True
log.info('Tried to download, but none of the "%s" downloaders are enabled', (data.get('type', '')))
log.info('Tried to download, but none of the downloaders are enabled')
return False
def getSearchTypes(self):
    """Return the list of search types (downloader types) to query.

    A search type is only usable when both a downloader and at least one
    provider for that type are enabled.

    Raises:
        NoProviders: downloaders are enabled but no provider matches any of them.
        NoDownloaders: no downloader is enabled at all.
    """
    download_types = fireEvent('download.enabled_types', merge = True)
    provider_types = fireEvent('provider.enabled_types', merge = True)

    # Downloaders without a single matching provider can never get results.
    if download_types and len(list(set(provider_types) & set(download_types))) == 0:
        log.error('There aren\'t any providers enabled for your downloader (%s). Check your settings.', ','.join(download_types))
        raise NoProviders

    # Providers whose type has no enabled downloader are useless; just log them.
    for useless_provider in list(set(provider_types) - set(download_types)):
        log.debug('Provider for "%s" enabled, but no downloader.', useless_provider)

    search_types = download_types

    if len(search_types) == 0:
        log.error('There aren\'t any downloaders enabled. Please pick one in settings.')
        raise NoDownloaders

    return search_types
def correctMovie(self, nzb = {}, movie = {}, quality = {}, **kwargs):
imdb_results = kwargs.get('imdb_results', False)
retention = Env.setting('retention', section = 'nzb')
if nzb.get('seeds') is None and 0 < retention < nzb.get('age', 0):
if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
return False
@@ -317,16 +375,16 @@ class Searcher(Plugin):
return False
ignored_words = splitString(self.conf('ignored_words').lower())
blacklisted = list(set(nzb_words) & set(ignored_words))
blacklisted = list(set(nzb_words) & set(ignored_words) - set(movie_words))
if self.conf('ignored_words') and blacklisted:
log.info2("Wrong: '%s' blacklisted words: %s" % (nzb['name'], ", ".join(blacklisted)))
return False
pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic']
for p_tag in pron_tags:
if p_tag in nzb_words and p_tag not in movie_words:
log.info('Wrong: %s, probably pr0n', (nzb['name']))
return False
pron_words = list(set(nzb_words) & set(pron_tags) - set(movie_words))
if pron_words:
log.info('Wrong: %s, probably pr0n', (nzb['name']))
return False
#qualities = fireEvent('quality.all', single = True)
preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)
@@ -362,20 +420,21 @@ class Searcher(Plugin):
return True
# Check if nzb contains imdb link
if self.checkIMDB([nzb['description']], movie['library']['identifier']):
if self.checkIMDB([nzb.get('description', '')], movie['library']['identifier']):
return True
for movie_title in movie['library']['titles']:
movie_words = re.split('\W+', simplifyString(movie_title['title']))
for raw_title in movie['library']['titles']:
for movie_title in possibleTitles(raw_title['title']):
movie_words = re.split('\W+', simplifyString(movie_title))
if self.correctName(nzb['name'], movie_title['title']):
# if no IMDB link, at least check year range 1
if len(movie_words) > 2 and self.correctYear([nzb['name']], movie['library']['year'], 1):
return True
if self.correctName(nzb['name'], movie_title):
# if no IMDB link, at least check year range 1
if len(movie_words) > 2 and self.correctYear([nzb['name']], movie['library']['year'], 1):
return True
# if no IMDB link, at least check year
if len(movie_words) <= 2 and self.correctYear([nzb['name']], movie['library']['year'], 0):
return True
# if no IMDB link, at least check year
if len(movie_words) <= 2 and self.correctYear([nzb['name']], movie['library']['year'], 0):
return True
log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'" % (nzb['name'], movie_name, movie['library']['year']))
return False
@@ -444,12 +503,16 @@ class Searcher(Plugin):
def correctName(self, check_name, movie_name):
check_names = [check_name]
try:
check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
except:
pass
for check_name in check_names:
# Match names between "
try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
except: pass
# Match longest name between []
try: check_names.append(max(check_name.split('['), key = len))
except: pass
for check_name in list(set(check_names)):
check_movie = fireEvent('scanner.name_year', check_name, single = True)
try:
@@ -530,3 +593,12 @@ class Searcher(Plugin):
except:
log.error('Failed searching for next release: %s', traceback.format_exc())
return False
class SearchSetupError(Exception):
    """Base class for errors raised while preparing a search run."""
    pass


class NoDownloaders(SearchSetupError):
    """Raised when no downloader plugin is enabled."""
    pass


class NoProviders(SearchSetupError):
    """Raised when no provider matches any enabled downloader type."""
    pass

View File

@@ -49,6 +49,7 @@ class Subtitle(Plugin):
available_languages = sum(group['subtitle_language'].itervalues(), [])
downloaded = []
files = [toUnicode(x) for x in group['files']['movie']]
log.debug('Searching for subtitles for: %s', files)
for lang in self.getLanguages():
if lang not in available_languages:
@@ -57,6 +58,7 @@ class Subtitle(Plugin):
downloaded.extend(download[subtitle])
for d_sub in downloaded:
log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files))
group['files']['subtitle'].add(d_sub.path)
group['subtitle_language'][d_sub.path] = [d_sub.language.alpha2]

View File

@@ -24,6 +24,13 @@ config = [{
'type': 'dropdown',
'values': [('1080P', '1080p'), ('720P', '720p'), ('480P', '480p')],
},
{
'name': 'name',
'label': 'Naming',
'default': '<filename>-trailer',
'advanced': True,
'description': 'Use <filename> to use above settings.'
},
],
},
],

View File

@@ -19,10 +19,11 @@ class Trailer(Plugin):
trailers = fireEvent('trailer.search', group = group, merge = True)
if not trailers or trailers == []:
log.info('No trailers found for: %s', getTitle(group['library']))
return
return False
for trailer in trailers.get(self.conf('quality'), []):
destination = '%s-trailer.%s' % (self.getRootName(group), getExt(trailer))
filename = self.conf('name').replace('<filename>', group['filename']) + ('.%s' % getExt(trailer))
destination = os.path.join(group['destination_dir'], filename)
if not os.path.isfile(destination):
fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True)
else:
@@ -33,5 +34,5 @@ class Trailer(Plugin):
# Download first and break
break
def getRootName(self, data = {}):
return os.path.join(data['destination_dir'], data['filename'])
return True

View File

@@ -1,8 +1,13 @@
.page.wizard .uniForm {
width: 80%;
margin: 0 auto 30px;
}
.page.wizard h1 {
padding: 10px 30px;
margin: 0;
display: block;
font-size: 40px;
font-size: 30px;
margin-top: 80px;
}

View File

@@ -41,8 +41,7 @@ Page.Wizard = new Class({
},
'providers': {
'title': 'Are you registered at any of these sites?',
'description': 'CP uses these sites to search for movies. A few free are enabled by default, but it\'s always better to have a few more. Check settings for the full list of available providers.',
'include': ['nzb_providers', 'torrent_providers']
'description': 'CP uses these sites to search for movies. A few free are enabled by default, but it\'s always better to have a few more. Check settings for the full list of available providers.'
},
'renamer': {
'title': 'Move & rename the movies after downloading?',
@@ -213,8 +212,6 @@ Page.Wizard = new Class({
// Hide retention
self.el.getElement('.tab_searcher').hide();
self.el.getElement('.t_searcher').hide();
self.el.getElement('.t_nzb_providers').hide();
self.el.getElement('.t_torrent_providers').hide();
// Add pointer
new Element('.tab_wrapper').wraps(tabs).adopt(

View File

@@ -0,0 +1,21 @@
# Settings layout for the two automation-provider lists shown on the
# "automation" tab: user watchlists and fully automated sources.  Both
# lists start empty; individual providers register themselves into them.
config = {
    'name': 'automation_providers',
    'groups': [
        {
            'label': 'Watchlists',
            'description': 'Check watchlists for new movies',
            'type': 'list',
            'name': 'watchlist_providers',
            'tab': 'automation',
            'options': [],
        },
        {
            'label': 'Automated',
            'description': 'Uses minimal requirements',
            'type': 'list',
            'name': 'automation_providers',
            'tab': 'automation',
            'options': [],
        },
    ],
}

View File

@@ -1,13 +1,13 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.providers.base import Provider
from couchpotato.environment import Env
import time
log = CPLog(__name__)
class Automation(Plugin):
class Automation(Provider):
enabled_option = 'automation_enabled'
@@ -19,6 +19,9 @@ class Automation(Plugin):
def _getMovies(self):
if self.isDisabled():
return
if not self.canCheck():
log.debug('Just checked, skipping %s', self.getName())
return []

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'bluray_automation',
'label': 'Blu-ray.com',
'description': 'Imports movies from blu-ray.com. (uses minimal requirements)',

View File

@@ -1,8 +1,7 @@
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import md5, tryInt
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@@ -14,32 +13,24 @@ class Bluray(Automation, RSS):
def getIMDBids(self):
if self.isDisabled():
return
movies = []
cache_key = 'bluray.%s' % md5(self.rss_url)
rss_data = self.getCache(cache_key, self.rss_url)
data = XMLTree.fromstring(rss_data)
rss_movies = self.getRSSData(self.rss_url)
if data is not None:
rss_movies = self.getElements(data, 'channel/item')
for movie in rss_movies:
name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()
for movie in rss_movies:
name = self.getTextElement(movie, "title").lower().split("blu-ray")[0].strip("(").rstrip()
year = self.getTextElement(movie, "description").split("|")[1].strip("(").strip()
if not name.find('/') == -1: # make sure it is not a double movie release
continue
if not name.find("/") == -1: # make sure it is not a double movie release
continue
if tryInt(year) < self.getMinimal('year'):
continue
if tryInt(year) < self.getMinimal('year'):
continue
imdb = self.search(name, year)
imdb = self.search(name, year)
if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies

View File

@@ -8,7 +8,4 @@ class CP(Automation):
def getMovies(self):
if self.isDisabled():
return
return []

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'automation',
'list': 'watchlist_providers',
'name': 'imdb_automation',
'label': 'IMDB',
'description': 'From any <strong>public</strong> IMDB watchlists. Url should be the RSS link.',

View File

@@ -1,5 +1,5 @@
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import md5, getImdb, splitString, tryInt
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
import traceback
@@ -13,9 +13,6 @@ class IMDB(Automation, RSS):
def getIMDBids(self):
if self.isDisabled():
return
movies = []
enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
@@ -29,9 +26,8 @@ class IMDB(Automation, RSS):
continue
try:
cache_key = 'imdb.rss.%s' % md5(url)
rss_data = self.getCache(cache_key, url)
imdbs = getImdb(rss_data, multiple = True)
rss_data = self.getHTMLData(url)
imdbs = getImdb(rss_data, multiple = True) if rss_data else []
for imdb in imdbs:
movies.append(imdb)

View File

@@ -0,0 +1,36 @@
from .main import ITunes
def start():
return ITunes()
# Settings for the iTunes automation provider: an enabler toggle plus a
# combined url/use pair so multiple feeds can be toggled individually.
config = [{
    'name': 'itunes',
    'groups': [
        {
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'itunes_automation',
            'label': 'iTunes',
            'description': 'From any <a href="http://itunes.apple.com/rss">iTunes</a> Store feed. Url should be the RSS link. (uses minimal requirements)',
            'options': [
                {
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
                {
                    'name': 'automation_urls_use',
                    'label': 'Use',
                    'default': ',',
                },
                {
                    'name': 'automation_urls',
                    'label': 'url',
                    'type': 'combined',
                    'combine': ['automation_urls_use', 'automation_urls'],
                    'default': 'https://itunes.apple.com/rss/topmovies/limit=25/xml,',
                },
            ],
        },
    ],
}]

View File

@@ -0,0 +1,63 @@
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import md5, splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
from xml.etree.ElementTree import QName
import datetime
import traceback
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
class ITunes(Automation, RSS):
    """Automation provider that reads movie names from iTunes Store Atom feeds."""

    interval = 1800  # seconds between feed checks

    def getIMDBids(self):
        """Return IMDB ids for feed movies that pass the minimal requirements.

        Iterates the configured feed urls, skipping those the user disabled,
        and resolves each entry's name/release-year to an IMDB id via search.
        """
        if self.isDisabled():
            return

        movies = []
        enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        urls = splitString(self.conf('automation_urls'))

        # Atom namespace plus Apple's "im" extension namespace used in the feed.
        namespace = 'http://www.w3.org/2005/Atom'
        namespaceIM = 'http://itunes.apple.com/rss'

        index = -1
        for url in urls:
            index += 1

            # Each url has a matching 0/1 flag in automation_urls_use.
            if not enablers[index]:
                continue

            try:
                cache_key = 'itunes.rss.%s' % md5(url)
                rss_data = self.getCache(cache_key, url)
                data = XMLTree.fromstring(rss_data)

                if data is not None:
                    entry_tag = str(QName(namespace, 'entry'))
                    rss_movies = self.getElements(data, entry_tag)

                    for movie in rss_movies:
                        name_tag = str(QName(namespaceIM, 'name'))
                        name = self.getTextElement(movie, name_tag)

                        # Feed release dates use a fixed -07:00 offset; only the year is kept.
                        releaseDate_tag = str(QName(namespaceIM, 'releaseDate'))
                        releaseDateText = self.getTextElement(movie, releaseDate_tag)
                        year = datetime.datetime.strptime(releaseDateText, '%Y-%m-%dT00:00:00-07:00').strftime("%Y")

                        imdb = self.search(name, year)

                        if imdb and self.isMinimalMovie(imdb):
                            movies.append(imdb['imdb'])
            except:
                log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc()))

        return movies

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'kinepolis_automation',
'label': 'Kinepolis',
'description': 'Imports movies from the current top 10 of kinepolis. (uses minimal requirements)',

View File

@@ -1,9 +1,7 @@
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import md5
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
import datetime
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@@ -15,25 +13,17 @@ class Kinepolis(Automation, RSS):
def getIMDBids(self):
if self.isDisabled():
return
movies = []
cache_key = 'kinepolis.%s' % md5(self.rss_url)
rss_data = self.getCache(cache_key, self.rss_url)
data = XMLTree.fromstring(rss_data)
rss_movies = self.getRSSData(self.rss_url)
if data is not None:
rss_movies = self.getElements(data, 'channel/item')
for movie in rss_movies:
name = self.getTextElement(movie, 'title')
year = datetime.datetime.now().strftime('%Y')
for movie in rss_movies:
name = self.getTextElement(movie, "title")
year = datetime.datetime.now().strftime("%Y")
imdb = self.search(name, year)
imdb = self.search(name, year)
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies

View File

@@ -0,0 +1,24 @@
from .main import Moviemeter
def start():
return Moviemeter()
# Settings for the Moviemeter automation provider; a single on/off
# enabler is its only option.
config = [{
    'name': 'moviemeter',
    'groups': [
        {
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'moviemeter_automation',
            'label': 'Moviemeter',
            'description': 'Imports movies from the current top 10 of moviemeter.nl. (uses minimal requirements)',
            'options': [
                {
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]

View File

@@ -0,0 +1,28 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
log = CPLog(__name__)
class Moviemeter(Automation, RSS):
    """Automation provider using the moviemeter.nl cinema RSS feed."""

    interval = 1800  # seconds between feed checks
    rss_url = 'http://www.moviemeter.nl/rss/cinema'

    def getIMDBids(self):
        """Return IMDB ids for feed movies that pass the minimal requirements."""
        movies = []
        rss_movies = self.getRSSData(self.rss_url)

        for movie in rss_movies:
            # Feed titles contain "name (year)"; the scanner splits them apart.
            name_year = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
            imdb = self.search(name_year.get('name'), name_year.get('year'))

            if imdb and self.isMinimalMovie(imdb):
                movies.append(imdb['imdb'])

        return movies

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'automation',
'list': 'watchlist_providers',
'name': 'moviesio',
'label': 'Movies.IO',
'description': 'Imports movies from <a href="http://movies.io">Movies.io</a> RSS watchlists',

View File

@@ -1,11 +1,8 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import md5
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
from xml.etree.ElementTree import ParseError
import traceback
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@@ -16,39 +13,27 @@ class MoviesIO(Automation, RSS):
def getIMDBids(self):
if self.isDisabled():
return
movies = []
enablers = self.conf('automation_urls_use').split(',')
enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
index = -1
for rss_url in self.conf('automation_urls').split(','):
for rss_url in splitString(self.conf('automation_urls')):
index += 1
if not enablers[index]:
continue
try:
cache_key = 'imdb.rss.%s' % md5(rss_url)
rss_movies = self.getRSSData(rss_url, headers = {'Referer': ''})
rss_data = self.getCache(cache_key, rss_url, headers = {'Referer': ''})
data = XMLTree.fromstring(rss_data)
rss_movies = self.getElements(data, 'channel/item')
for movie in rss_movies:
for movie in rss_movies:
nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True)
nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, "title"), single = True)
imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True)
if not imdb:
continue
if not imdb:
continue
movies.append(imdb)
except ParseError:
log.debug('Failed loading Movies.io watchlist, probably empty: %s', (rss_url))
except:
log.error('Failed loading Movies.io watchlist: %s %s', (rss_url, traceback.format_exc()))
movies.append(imdb)
return movies

View File

@@ -0,0 +1,29 @@
from .main import Rottentomatoes
def start():
return Rottentomatoes()
# Settings for the Rottentomatoes automation provider: enabler toggle and
# the minimum tomatometer percentage a movie must reach to be added.
config = [{
    'name': 'rottentomatoes',
    'groups': [
        {
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'rottentomatoes_automation',
            'label': 'Rottentomatoes',
            'description': 'Imports movies from the rottentomatoes "in theaters"-feed.',
            'options': [
                {
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
                {
                    'name': 'tomatometer_percent',
                    'default': '80',
                    'label': 'Tomatometer'
                }
            ],
        },
    ],
}]

View File

@@ -0,0 +1,48 @@
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
from xml.etree.ElementTree import QName
import datetime
import re
log = CPLog(__name__)
class Rottentomatoes(Automation, RSS):
    """Automation provider using the Rottentomatoes "in theaters" RSS feed."""

    interval = 1800  # seconds between feed checks
    urls = {
        'namespace': 'http://www.rottentomatoes.com/xmlns/rtmovie/',
        'theater': 'http://www.rottentomatoes.com/syndication/rss/in_theaters.xml',
    }

    def getIMDBids(self):
        """Return IMDB ids for theater movies whose rating meets the configured minimum."""
        movies = []
        rss_movies = self.getRSSData(self.urls['theater'])
        rating_tag = str(QName(self.urls['namespace'], 'tomatometer_percent'))

        for movie in rss_movies:
            # Feed titles look like "NN% Movie Name"; grab everything after "% ".
            value = self.getTextElement(movie, "title")
            result = re.search('(?<=%\s).*', value)

            if result:
                log.info2('Something smells...')
                rating = tryInt(self.getTextElement(movie, rating_tag))
                name = result.group(0)

                if rating < tryInt(self.conf('tomatometer_percent')):
                    log.info2('%s seems to be rotten...' % name)
                else:
                    log.info2('Found %s fresh enough movies, enqueuing: %s' % (rating, name))
                    # Feed has no release year; assume the current year for in-theater titles.
                    year = datetime.datetime.now().strftime("%Y")
                    imdb = self.search(name, year)

                    if imdb:
                        movies.append(imdb['imdb'])

        return movies

View File

@@ -8,6 +8,7 @@ config = [{
'groups': [
{
'tab': 'automation',
'list': 'watchlist_providers',
'name': 'trakt_automation',
'label': 'Trakt',
'description': 'import movies from your own watchlist',

View File

@@ -1,9 +1,8 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.variable import md5, sha1
from couchpotato.core.helpers.variable import sha1
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
import base64
import json
log = CPLog(__name__)
@@ -25,9 +24,6 @@ class Trakt(Automation):
def getIMDBids(self):
if self.isDisabled():
return
movies = []
for movie in self.getWatchlist():
movies.append(movie.get('imdb_id'))
@@ -38,22 +34,11 @@ class Trakt(Automation):
method = (self.urls['watchlist'] % self.conf('automation_api_key')) + self.conf('automation_username')
return self.call(method)
def call(self, method_url):
try:
if self.conf('automation_password'):
headers = {
'Authorization': 'Basic %s' % base64.encodestring('%s:%s' % (self.conf('automation_username'), self.conf('automation_password')))[:-1]
}
else:
headers = {}
headers = {}
if self.conf('automation_password'):
headers['Authorization'] = 'Basic %s' % base64.encodestring('%s:%s' % (self.conf('automation_username'), self.conf('automation_password')))[:-1]
cache_key = 'trakt.%s' % md5(method_url)
json_string = self.getCache(cache_key, self.urls['base'] + method_url, headers = headers)
if json_string:
return json.loads(json_string)
except:
log.error('Failed to get data from trakt, check your login.')
return []
data = self.getJsonData(self.urls['base'] + method_url, headers = headers)
return data if data else []

View File

@@ -1,14 +1,17 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.variable import tryFloat
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
possibleTitles, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from urlparse import urlparse
import cookielib
import json
import re
import time
import traceback
import urllib2
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@@ -41,6 +44,34 @@ class Provider(Plugin):
return self.is_available.get(host, False)
def getJsonData(self, url, **kwargs):
data = self.getCache(md5(url), url, **kwargs)
if data:
try:
return json.loads(data)
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return []
def getRSSData(self, url, item_path = 'channel/item', **kwargs):
data = self.getCache(md5(url), url, **kwargs)
if data:
try:
data = XMLTree.fromstring(data)
return self.getElements(data, item_path)
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return []
def getHTMLData(self, url, **kwargs):
return self.getCache(md5(url), url, **kwargs)
class YarrProvider(Provider):
@@ -53,12 +84,16 @@ class YarrProvider(Provider):
login_opener = None
def __init__(self):
addEvent('provider.enabled_types', self.getEnabledProviderType)
addEvent('provider.belongs_to', self.belongsTo)
addEvent('%s.search' % self.type, self.search)
addEvent('yarr.search', self.search)
addEvent('%s.search' % self.type, self.search)
addEvent('nzb.feed', self.feed)
def getEnabledProviderType(self):
if self.isEnabled():
return self.type
else:
return []
def login(self):
@@ -68,15 +103,20 @@ class YarrProvider(Provider):
urllib2.install_opener(opener)
log.info2('Logging into %s', self.urls['login'])
f = opener.open(self.urls['login'], self.getLoginParams())
f.read()
output = f.read()
f.close()
self.login_opener = opener
return True
if self.loginSuccess(output):
self.login_opener = opener
return True
except:
log.error('Failed to login %s: %s', (self.getName(), traceback.format_exc()))
return False
def loginSuccess(self, output):
return True
def loginDownload(self, url = '', nzb_id = ''):
try:
if not self.login_opener and not self.login():
@@ -96,11 +136,29 @@ class YarrProvider(Provider):
return 'try_next'
def feed(self):
return []
def search(self, movie, quality):
return []
if self.isDisabled():
return []
# Login if needed
if self.urls.get('login') and (not self.login_opener and not self.login()):
log.error('Failed to login to: %s', self.getName())
return []
# Create result container
imdb_results = hasattr(self, '_search')
results = ResultList(self, movie, quality, imdb_results = imdb_results)
# Do search based on imdb id
if imdb_results:
self._search(movie, quality, results)
# Search possible titles
else:
for title in possibleTitles(getTitle(movie['library'])):
self._searchOnTitle(title, movie, quality, results)
return results
def belongsTo(self, url, provider = None, host = None):
try:
@@ -148,10 +206,65 @@ class YarrProvider(Provider):
return [self.cat_backup_id]
def found(self, new):
if not new.get('provider_extra'):
new['provider_extra'] = ''
else:
new['provider_extra'] = ', %s' % new['provider_extra']
log.info('Found: score(%(score)s) on %(provider)s%(provider_extra)s: %(name)s', new)
class ResultList(list):
result_ids = None
provider = None
movie = None
quality = None
def __init__(self, provider, movie, quality, **kwargs):
self.result_ids = []
self.provider = provider
self.movie = movie
self.quality = quality
self.kwargs = kwargs
super(ResultList, self).__init__()
def extend(self, results):
for r in results:
self.append(r)
def append(self, result):
new_result = self.fillResult(result)
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new_result, movie = self.movie, quality = self.quality,
imdb_results = self.kwargs.get('imdb_results', False), single = True)
if is_correct_movie and new_result['id'] not in self.result_ids:
new_result['score'] += fireEvent('score.calculate', new_result, self.movie, single = True)
self.found(new_result)
self.result_ids.append(result['id'])
super(ResultList, self).append(new_result)
def fillResult(self, result):
defaults = {
'id': 0,
'type': self.provider.type,
'provider': self.provider.getName(),
'download': self.provider.download,
'url': '',
'name': '',
'age': 0,
'size': 0,
'description': '',
'score': 0
}
return mergeDicts(defaults, result)
def found(self, new_result):
if not new_result.get('provider_extra'):
new_result['provider_extra'] = ''
else:
new_result['provider_extra'] = ', %s' % new_result['provider_extra']
log.info('Found: score(%(score)s) on %(provider)s%(provider_extra)s: %(name)s', new_result)

View File

@@ -30,11 +30,6 @@ class MovieResultModifier(Plugin):
temp[imdb] = self.getLibraryTags(imdb)
order.append(imdb)
if item.get('via_imdb'):
if order.count(imdb):
order.remove(imdb)
order.insert(0, imdb)
# Merge dicts
temp[imdb] = mergeDicts(temp[imdb], item)

View File

@@ -33,7 +33,7 @@ class CouchPotatoApi(MovieProvider):
def search(self, q, limit = 12):
cache_key = 'cpapi.cache.%s' % q
cached = self.getCache(cache_key, self.urls['search'] % tryUrlencode(q), timeout = 3, headers = self.getRequestHeaders())
cached = self.getCache(cache_key, self.urls['search'] % tryUrlencode(q), headers = self.getRequestHeaders())
if cached:
try:
@@ -50,7 +50,7 @@ class CouchPotatoApi(MovieProvider):
return
cache_key = 'cpapi.cache.info.%s' % identifier
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3, headers = self.getRequestHeaders())
cached = self.getCache(cache_key, self.urls['info'] % identifier, headers = self.getRequestHeaders())
if cached:
try:

View File

@@ -1,6 +0,0 @@
from .main import IMDBAPI
def start():
return IMDBAPI()
config = []

View File

@@ -0,0 +1,6 @@
from .main import OMDBAPI
def start():
return OMDBAPI()
config = []

View File

@@ -10,11 +10,11 @@ import traceback
log = CPLog(__name__)
class IMDBAPI(MovieProvider):
class OMDBAPI(MovieProvider):
urls = {
'search': 'http://www.imdbapi.com/?%s',
'info': 'http://www.imdbapi.com/?i=%s',
'search': 'http://www.omdbapi.com/?%s',
'info': 'http://www.omdbapi.com/?i=%s',
}
http_time_between_calls = 0
@@ -32,7 +32,7 @@ class IMDBAPI(MovieProvider):
'name': q
}
cache_key = 'imdbapi.cache.%s' % q
cache_key = 'omdbapi.cache.%s' % q
cached = self.getCache(cache_key, self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}), timeout = 3)
if cached:
@@ -50,7 +50,7 @@ class IMDBAPI(MovieProvider):
if not identifier:
return {}
cache_key = 'imdbapi.cache.%s' % identifier
cache_key = 'omdbapi.cache.%s' % identifier
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3)
if cached:

View File

@@ -3,6 +3,7 @@ from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.movie.base import MovieProvider
from libs.themoviedb import tmdb
import traceback
log = CPLog(__name__)
@@ -61,7 +62,12 @@ class TheMovieDb(MovieProvider):
if not results:
log.debug('Searching for movie: %s', q)
raw = tmdb.search(search_string)
raw = None
try:
raw = tmdb.search(search_string)
except:
log.error('Failed searching TMDB for "%s": %s', (search_string, traceback.format_exc()))
results = []
if raw:

View File

@@ -0,0 +1,15 @@
config = {
'name': 'nzb_providers',
'groups': [
{
'label': 'Usenet',
'description': 'Providers searching usenet for new releases',
'wizard': True,
'type': 'list',
'name': 'nzb_providers',
'tab': 'searcher',
'subtab': 'providers',
'options': [],
},
],
}

View File

@@ -0,0 +1,24 @@
from .main import BinSearch
def start():
return BinSearch()
config = [{
'name': 'binsearch',
'groups': [
{
'tab': 'searcher',
'subtab': 'providers',
'list': 'nzb_providers',
'name': 'binsearch',
'description': 'Free provider, less accurate. See <a href="https://www.binsearch.info/">BinSearch</a>',
'wizard': True,
'options': [
{
'name': 'enabled',
'type': 'enabler',
},
],
},
],
}]

View File

@@ -0,0 +1,98 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
import re
import traceback
log = CPLog(__name__)
class BinSearch(NZBProvider):
urls = {
'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s',
'detail': 'https://www.binsearch.info%s',
'search': 'https://www.binsearch.info/index.php?%s',
}
http_time_between_calls = 4 # Seconds
def _search(self, movie, quality, results):
arguments = tryUrlencode({
'q': movie['library']['identifier'],
'm': 'n',
'max': 400,
'adv_age': Env.setting('retention', 'nzb'),
'adv_sort': 'date',
'adv_col': 'on',
'adv_nfo': 'on',
'minsize': quality.get('size_min'),
'maxsize': quality.get('size_max'),
})
data = self.getHTMLData(self.urls['search'] % arguments)
if data:
try:
html = BeautifulSoup(data)
main_table = html.find('table', attrs = {'id':'r2'})
if not main_table:
return
items = main_table.find_all('tr')
for row in items:
title = row.find('span', attrs = {'class':'s'})
if not title: continue
nzb_id = row.find('input', attrs = {'type':'checkbox'})['name']
info = row.find('span', attrs = {'class':'d'})
size_match = re.search('size:.(?P<size>[0-9\.]+.[GMB]+)', info.text)
def extra_check(item):
parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
total = tryInt(parts.group('total'))
parts = tryInt(parts.group('parts'))
if (total / parts) < 0.95 or ((total / parts) >= 0.95 and not 'par2' in info.text.lower()):
log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
return False
if 'requires password' in info.text.lower():
log.info2('Wrong: \'%s\', passworded', (item['name']))
return False
return True
results.append({
'id': nzb_id,
'name': title.text,
'age': tryInt(re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]),
'size': self.parseSize(size_match.group('size')),
'url': self.urls['download'] % nzb_id,
'detail_url': self.urls['detail'] % info.find('a')['href'],
'extra_check': extra_check
})
except:
log.error('Failed to parse HTML response from BinSearch: %s', traceback.format_exc())
def download(self, url = '', nzb_id = ''):
params = {'action': 'nzb'}
params[nzb_id] = 'on'
try:
return self.urlopen(url, params = params, show_error = False)
except:
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
return 'try_next'

View File

@@ -8,9 +8,11 @@ config = [{
'groups': [
{
'tab': 'searcher',
'subtab': 'nzb_providers',
'subtab': 'providers',
'list': 'nzb_providers',
'name': 'FTDWorld',
'description': 'Free provider, less accurate. See <a href="http://ftdworld.net">FTDWorld</a>',
'wizard': True,
'options': [
{
'name': 'enabled',

View File

@@ -1,14 +1,11 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode, \
simplifyString
from couchpotato.core.helpers.variable import tryInt, getTitle
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
import re
import time
import json
import traceback
log = CPLog(__name__)
@@ -16,30 +13,26 @@ log = CPLog(__name__)
class FTDWorld(NZBProvider):
urls = {
'search': 'http://ftdworld.net/category.php?%s',
'search': 'http://ftdworld.net/api/index.php?%s',
'detail': 'http://ftdworld.net/spotinfo.php?id=%s',
'download': 'http://ftdworld.net/cgi-bin/nzbdown.pl?fileID=%s',
'login': 'http://ftdworld.net/index.php',
'login': 'http://ftdworld.net/api/login.php',
}
http_time_between_calls = 1 #seconds
http_time_between_calls = 3 #seconds
cat_ids = [
([4, 11], ['dvdr']),
([1], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
([10, 13, 14], ['bd50', '720p', '1080p']),
([7, 10, 13, 14], ['bd50', '720p', '1080p']),
]
cat_backup_id = [1]
cat_backup_id = 1
def search(self, movie, quality):
def _searchOnTitle(self, title, movie, quality, results):
results = []
if self.isDisabled():
return results
q = '"%s" %s' % (title, movie['library']['year'])
q = '%s %s' % (simplifyString(getTitle(movie['library'])), movie['library']['year'])
params = {
params = tryUrlencode({
'ctitle': q,
'customQuery': 'usr',
'cage': Env.setting('retention', 'nzb'),
@@ -47,57 +40,32 @@ class FTDWorld(NZBProvider):
'csizemax': quality.get('size_max'),
'ccategory': 14,
'ctype': ','.join([str(x) for x in self.getCatId(quality['identifier'])]),
}
})
cache_key = 'ftdworld.%s.%s' % (movie['library']['identifier'], q)
data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params), opener = self.login_opener)
data = self.getJsonData(self.urls['search'] % params, opener = self.login_opener)
if data:
try:
html = BeautifulSoup(data)
main_table = html.find('table', attrs = {'id':'ftdresult'})
if data.get('numRes') == 0:
return
if not main_table:
return results
for item in data.get('data'):
items = main_table.find_all('tr', attrs = {'class': re.compile('tcontent')})
for item in items:
tds = item.find_all('td')
nzb_id = tryInt(item.attrs['data-spot'])
up = item.find('img', attrs = {'src': re.compile('up.png')})
down = item.find('img', attrs = {'src': re.compile('down.png')})
new = {
nzb_id = tryInt(item.get('id'))
results.append({
'id': nzb_id,
'type': 'nzb',
'provider': self.getName(),
'name': toUnicode(item.find('a', attrs = {'href': re.compile('./spotinfo')}).text.strip()),
'age': self.calculateAge(int(time.mktime(parse(tds[2].text).timetuple()))),
'size': 0,
'name': toUnicode(item.get('Title')),
'age': self.calculateAge(tryInt(item.get('Created'))),
'size': item.get('Size', 0),
'url': self.urls['download'] % nzb_id,
'download': self.loginDownload,
'detail_url': self.urls['detail'] % nzb_id,
'description': '',
'score': (tryInt(up.attrs['title'].split(' ')[0]) * 3) - (tryInt(down.attrs['title'].split(' ')[0]) * 3),
}
'score': (tryInt(item.get('webPlus', 0)) - tryInt(item.get('webMin', 0))) * 3,
})
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
if is_correct_movie:
new['score'] += fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
return results
except SyntaxError:
log.error('Failed to parse XML response from NZBClub')
return results
except:
log.error('Failed to parse HTML response from FTDWorld: %s', traceback.format_exc())
def getLoginParams(self):
return tryUrlencode({
@@ -105,3 +73,9 @@ class FTDWorld(NZBProvider):
'passlogin': self.conf('password'),
'submit': 'Log In',
})
def loginSuccess(self, output):
try:
return json.loads(output).get('goodToGo', False)
except:
return False

View File

@@ -8,12 +8,13 @@ config = [{
'groups': [
{
'tab': 'searcher',
'subtab': 'nzb_providers',
'subtab': 'providers',
'list': 'nzb_providers',
'name': 'newznab',
'order': 10,
'description': 'Enable <a href="http://newznab.com/" target="_blank">NewzNab providers</a> such as <a href="https://nzb.su" target="_blank">NZB.su</a>, \
<a href="https://nzbs.org" target="_blank">NZBs.org</a>, <a href="http://dognzb.cr/" target="_blank">DOGnzb.cr</a>, \
<a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a>',
<a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a> or <a href="https://nzbgeek.info/" target="_blank">NZBGeek</a>',
'wizard': True,
'options': [
{
@@ -22,16 +23,16 @@ config = [{
},
{
'name': 'use',
'default': '0,0,0'
'default': '0,0,0,0'
},
{
'name': 'host',
'default': 'nzb.su,dognzb.cr,nzbs.org',
'default': 'nzb.su,dognzb.cr,nzbs.org,https://index.nzbgeek.info',
'description': 'The hostname of your newznab provider',
},
{
'name': 'api_key',
'default': ',,',
'default': ',,,',
'label': 'Api Key',
'description': 'Can be found on your profile page',
'type': 'combined',

View File

@@ -1,8 +1,8 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import cleanHost, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import ResultList
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
@@ -10,7 +10,6 @@ from urllib2 import HTTPError
from urlparse import urlparse
import time
import traceback
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@@ -25,140 +24,59 @@ class Newznab(NZBProvider, RSS):
limits_reached = {}
cat_ids = [
([2010], ['dvdr']),
([2030], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
([2040], ['720p', '1080p']),
([2050], ['bd50']),
]
cat_backup_id = 2000
http_time_between_calls = 1 # Seconds
def feed(self):
hosts = self.getHosts()
results = []
for host in hosts:
result = self.singleFeed(host)
if result:
results.extend(result)
return results
def singleFeed(self, host):
results = []
if self.isDisabled(host):
return results
arguments = tryUrlencode({
't': self.cat_backup_id,
'r': host['api_key'],
'i': 58,
})
url = "%s?%s" % (cleanHost(host['host']) + 'rss', arguments)
cache_key = 'newznab.%s.feed.%s' % (host['host'], arguments)
results = self.createItems(url, cache_key, host, for_feed = True)
return results
def search(self, movie, quality):
hosts = self.getHosts()
results = []
for host in hosts:
result = self.singleSearch(host, movie, quality)
results = ResultList(self, movie, quality, imdb_results = True)
if result:
results.extend(result)
for host in hosts:
if self.isDisabled(host):
continue
self._searchOnHost(host, movie, quality, results)
return results
def singleSearch(self, host, movie, quality):
def _searchOnHost(self, host, movie, quality, results):
results = []
if self.isDisabled(host):
return results
cat_id = self.getCatId(quality['identifier'])
arguments = tryUrlencode({
'imdbid': movie['library']['identifier'].replace('tt', ''),
'cat': cat_id[0],
'apikey': host['api_key'],
'extended': 1
})
url = "%s&%s" % (self.getUrl(host['host'], self.urls['search']), arguments)
url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments)
cache_key = 'newznab.%s.%s.%s' % (host['host'], movie['library']['identifier'], cat_id[0])
nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
results = self.createItems(url, cache_key, host, movie = movie, quality = quality)
for nzb in nzbs:
return results
date = None
for item in nzb:
if item.attrib.get('name') == 'usenetdate':
date = item.attrib.get('value')
break
def createItems(self, url, cache_key, host, movie = None, quality = None, for_feed = False):
results = []
if not date:
date = self.getTextElement(nzb, 'pubDate')
data = self.getCache(cache_key, url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
if data:
try:
try:
data = XMLTree.fromstring(data)
nzbs = self.getElements(data, 'channel/item')
except Exception, e:
log.debug('%s, %s', (self.getName(), e))
return results
nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
name = self.getTextElement(nzb, 'title')
results = []
for nzb in nzbs:
if not name:
continue
date = ''
size = 0
for item in nzb:
if item.attrib.get('name') == 'size':
size = item.attrib.get('value')
elif item.attrib.get('name') == 'usenetdate':
date = item.attrib.get('value')
if date is '': log.debug('Date not parsed properly or not available for %s: %s', (host['host'], self.getTextElement(nzb, "title")))
if size is 0: log.debug('Size not parsed properly or not available for %s: %s', (host['host'], self.getTextElement(nzb, "title")))
id = self.getTextElement(nzb, "guid").split('/')[-1:].pop()
new = {
'id': id,
'provider': self.getName(),
'provider_extra': host['host'],
'type': 'nzb',
'name': self.getTextElement(nzb, "title"),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(size) / 1024 / 1024,
'url': (self.getUrl(host['host'], self.urls['download']) % id) + self.getApiExt(host),
'download': self.download,
'detail_url': '%sdetails/%s' % (cleanHost(host['host']), id),
'content': self.getTextElement(nzb, "description"),
}
if not for_feed:
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = True, single = True)
if is_correct_movie:
new['score'] = fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
else:
results.append(new)
return results
except SyntaxError:
log.error('Failed to parse XML response from Newznab: %s', host)
return results
results.append({
'id': nzb_id,
'provider_extra': host['host'],
'name': self.getTextElement(nzb, 'title'),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
'url': (self.getUrl(host['host'], self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)),
'content': self.getTextElement(nzb, 'description'),
})
def getHosts(self):
@@ -186,12 +104,23 @@ class Newznab(NZBProvider, RSS):
return result
def getUrl(self, host, type):
if '?page=newznabapi' in host:
return cleanHost(host)[:-1] + '&t=' + type
return cleanHost(host) + 'api?t=' + type
def isDisabled(self, host):
def isDisabled(self, host = None):
return not self.isEnabled(host)
def isEnabled(self, host):
def isEnabled(self, host = None):
# Return true if at least one is enabled and no host is given
if host is None:
for host in self.getHosts():
if self.isEnabled(host):
return True
return False
return NZBProvider.isEnabled(self) and host['host'] and host['api_key'] and int(host['use'])
def getApiExt(self, host):
@@ -218,6 +147,6 @@ class Newznab(NZBProvider, RSS):
self.limits_reached[host] = time.time()
return 'try_next'
log.error('Failed download from %s', (host, traceback.format_exc()))
log.error('Failed download from %s: %s', (host, traceback.format_exc()))
return 'try_next'

View File

@@ -8,9 +8,11 @@ config = [{
'groups': [
{
'tab': 'searcher',
'subtab': 'nzb_providers',
'subtab': 'providers',
'list': 'nzb_providers',
'name': 'NZBClub',
'description': 'Free provider, less accurate. See <a href="https://www.nzbclub.com/">NZBClub</a>',
'wizard': True,
'options': [
{
'name': 'enabled',

View File

@@ -1,15 +1,11 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode, \
simplifyString
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, getTitle
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
import time
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@@ -22,80 +18,48 @@ class NZBClub(NZBProvider, RSS):
http_time_between_calls = 4 #seconds
def search(self, movie, quality):
def _searchOnTitle(self, title, movie, quality, results):
results = []
if self.isDisabled():
return results
q = '"%s %s"' % (title, movie['library']['year'])
q = '"%s %s" %s' % (simplifyString(getTitle(movie['library'])), movie['library']['year'], quality.get('identifier'))
params = {
params = tryUrlencode({
'q': q,
'ig': '1',
'ig': 1,
'rpp': 200,
'st': 1,
'st': 5,
'sp': 1,
'ns': 1,
}
})
cache_key = 'nzbclub.%s.%s.%s' % (movie['library']['identifier'], quality.get('identifier'), q)
data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params))
if data:
try:
try:
data = XMLTree.fromstring(data)
nzbs = self.getElements(data, 'channel/item')
except Exception, e:
log.debug('%s, %s', (self.getName(), e))
return results
nzbs = self.getRSSData(self.urls['search'] % params)
for nzb in nzbs:
for nzb in nzbs:
nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
enclosure = self.getElement(nzb, "enclosure").attrib
size = enclosure['length']
date = self.getTextElement(nzb, "pubDate")
nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
enclosure = self.getElement(nzb, "enclosure").attrib
size = enclosure['length']
date = self.getTextElement(nzb, "pubDate")
def extra_check(item):
full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)
def extra_check(item):
full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)
for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
if ignored in full_description:
log.info('Wrong: Seems to be passworded or corrupted files: %s', new['name'])
return False
for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
if ignored in full_description:
log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
return False
return True
return True
new = {
'id': nzbclub_id,
'type': 'nzb',
'provider': self.getName(),
'name': toUnicode(self.getTextElement(nzb, "title")),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': tryInt(size) / 1024 / 1024,
'url': enclosure['url'].replace(' ', '_'),
'download': self.download,
'detail_url': self.getTextElement(nzb, "link"),
'description': '',
'get_more_info': self.getMoreInfo,
'extra_check': extra_check
}
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
if is_correct_movie:
new['score'] = fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
return results
except SyntaxError:
log.error('Failed to parse XML response from NZBClub')
return results
results.append({
'id': nzbclub_id,
'name': toUnicode(self.getTextElement(nzb, "title")),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': tryInt(size) / 1024 / 1024,
'url': enclosure['url'].replace(' ', '_'),
'detail_url': self.getTextElement(nzb, "link"),
'get_more_info': self.getMoreInfo,
'extra_check': extra_check
})
def getMoreInfo(self, item):
full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)

Some files were not shown because too many files have changed in this diff Show More