Merge branch 'refs/heads/develop' into redesign

This commit is contained in:
Ruud
2014-07-06 22:38:51 +02:00
229 changed files with 7700 additions and 8002 deletions

View File

@@ -19,7 +19,12 @@ base_path = dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(base_path, 'libs'))
from couchpotato.environment import Env
from couchpotato.core.helpers.variable import getDataDir
from couchpotato.core.helpers.variable import getDataDir, removePyc
# Remove pyc files before dynamic load (stale .pyc files would otherwise be seen as regular .py modules)
removePyc(base_path)
class Loader(object):
@@ -50,7 +55,7 @@ class Loader(object):
# Create logging dir
self.log_dir = os.path.join(self.data_dir, 'logs');
if not os.path.isdir(self.log_dir):
os.mkdir(self.log_dir)
os.makedirs(self.log_dir)
# Logging
from couchpotato.core.logger import CPLog
@@ -67,10 +72,11 @@ class Loader(object):
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
from couchpotato.core.event import addEvent
addEvent('app.after_shutdown', self.afterShutdown)
addEvent('app.do_shutdown', self.setRestart)
def afterShutdown(self, restart):
def setRestart(self, restart):
self.do_restart = restart
return True
def onExit(self, signal, frame):
from couchpotato.core.event import fireEvent
@@ -98,7 +104,6 @@ class Loader(object):
# Release log files and shutdown logger
logging.shutdown()
time.sleep(3)
args = [sys.executable] + [os.path.join(base_path, os.path.basename(__file__))] + sys.argv[1:]
subprocess.Popen(args)

View File

@@ -17,9 +17,9 @@ Windows, see [the CP forum](http://couchpota.to/forum/showthread.php?tid=14) for
* Open up `Git Bash` (or CMD) and go to the folder you want to install CP in, e.g. Program Files.
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`.
* You can now start CP via `CouchPotatoServer\CouchPotato.py` to start
* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
OSx:
OS X:
* If you're on Leopard (10.5) install Python 2.6+: [Python 2.6.5](http://www.python.org/download/releases/2.6.5/)
* Install [GIT](http://git-scm.com/)
@@ -27,19 +27,20 @@ OSx:
* Go to your App folder `cd /Applications`
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
* Then do `python CouchPotatoServer/CouchPotato.py`
* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
Linux (ubuntu / debian):
Linux (Ubuntu / Debian):
* Install [GIT](http://git-scm.com/) with `apt-get install git-core`
* `cd` to the folder of your choosing.
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
* Then do `python CouchPotatoServer/CouchPotato.py` to start
* To run on boot copy the init script. `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
* Change the paths inside the init script. `sudo nano /etc/init.d/couchpotato`
* Make it executable. `sudo chmod +x /etc/init.d/couchpotato`
* Add it to defaults. `sudo update-rc.d couchpotato defaults`
* Open your browser and go to: `http://localhost:5050/`
* To run on boot copy the init script `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
* Copy the default paths file `sudo cp CouchPotatoServer/init/ubuntu.default /etc/default/couchpotato`
* Change the paths inside the default file `sudo nano /etc/default/couchpotato`
* Make it executable `sudo chmod +x /etc/init.d/couchpotato`
* Add it to defaults `sudo update-rc.d couchpotato defaults`
* Open your browser and go to `http://localhost:5050/`
FreeBSD:

View File

@@ -22,11 +22,11 @@ Before you submit an issue, please go through the following checklist:
* What providers are you using? (While your logs include these, scanning through hundreds of lines of logs isn't our hobby)
* Post the logs from the *config* directory, please do not copy paste the UI. Use pastebin to store these logs!
* Give a short step by step of how to reproduce the error.
* What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed then when you use CP on OSX or Windows.
* What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed than when you use CP on OS X or Windows.
* Your issue might be marked with the "can't reproduce" tag. Don't ask why your issue was closed if it says so in the tag.
* If you're running on a NAS (QNAP, Austor etc..) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!!
The more relevant information you can provide, the more likely it is the issue will be resolved rather than closed.
* If you're running on a NAS (QNAP, Austor, Synology etc.) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!
The more relevant information you provide, the more likely that your issue will be resolved.
## Pull Requests
Pull requests are intended for contributing code or documentation to the project. Before you submit a pull request, consider the following:

View File

@@ -1,3 +1,7 @@
import os
import time
import traceback
from couchpotato.api import api_docs, api_docs_missing, api
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import md5, tryInt
@@ -5,9 +9,6 @@ from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from tornado import template
from tornado.web import RequestHandler, authenticated
import os
import time
import traceback
log = CPLog(__name__)
@@ -45,7 +46,7 @@ class WebHandler(BaseHandler):
self.write({'success': False, 'error': 'Failed returning results'})
def addView(route, func, static = False):
def addView(route, func):
views[route] = func

View File

@@ -7,9 +7,7 @@ import urllib
from couchpotato.core.helpers.request import getParams
from couchpotato.core.logger import CPLog
from tornado.gen import coroutine
from tornado.web import RequestHandler, asynchronous
import tornado
log = CPLog(__name__)
@@ -28,10 +26,18 @@ def run_async(func):
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def run_handler(route, kwargs, callback = None):
    """Run the api handler for *route* on a worker thread.

    On success ``callback(result, route)`` receives the handler's return
    value; when the handler (or the callback itself) raises, the error is
    logged and the callback gets a generic failure payload instead.
    """
    try:
        callback(api[route](**kwargs), route)
    except:
        log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
        callback({'success': False, 'error': 'Failed returning results'}, route)
# NonBlock API handler
class NonBlockHandler(RequestHandler):
@@ -78,13 +84,18 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
# Blocking API handler
class ApiHandler(RequestHandler):
@coroutine
@asynchronous
def get(self, route, *args, **kwargs):
route = route.strip('/')
if not api.get(route):
self.write('API call doesn\'t seem to exist')
self.finish()
return
# Create lock if it doesn't exist
if route in api_locks and not api_locks.get(route):
api_locks[route] = threading.Lock()
api_locks[route].acquire()
try:
@@ -102,36 +113,43 @@ class ApiHandler(RequestHandler):
except: pass
# Add async callback handler
@run_async
def run_handler(callback):
try:
res = api[route](**kwargs)
callback(res)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
callback({'success': False, 'error': 'Failed returning results'})
result = yield tornado.gen.Task(run_handler)
# Check JSONP callback
jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.set_header("Content-Type", "text/javascript")
elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1])
else:
self.write(result)
run_handler(route, kwargs, callback = self.taskFinished)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
try:
self.write({'success': False, 'error': 'Failed returning results'})
self.finish()
except:
log.error('Failed write error "%s": %s', (route, traceback.format_exc()))
api_locks[route].release()
api_locks[route].release()
post = get
def taskFinished(self, result, route):
# Callback for the async api handler: writes the response to the client
# (JSONP wrapper, redirect tuple, or plain result) and always releases the
# per-route lock afterwards so the next queued request for this route can run.
if not self.request.connection.stream.closed():
try:
# Check JSONP callback
jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback:
# Wrap the JSON result in the requested callback function for JSONP clients
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.set_header("Content-Type", "text/javascript")
self.finish()
elif isinstance(result, tuple) and result[0] == 'redirect':
# ('redirect', url) tuples trigger an HTTP redirect instead of a body write
self.redirect(result[1])
else:
self.write(result)
self.finish()
except:
# Writing can fail when the client already disconnected; log and best-effort finish
log.debug('Failed doing request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
# Release the lock unconditionally, even when the stream was already closed
api_locks[route].release()
def addApiView(route, func, static = False, docs = None, **kwargs):

View File

@@ -8,7 +8,7 @@ import webbrowser
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import cleanHost, md5
from couchpotato.core.helpers.variable import cleanHost, md5, isSubFolder
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
@@ -71,13 +71,14 @@ class Core(Plugin):
return value if value and len(value) > 3 else uuid4().hex
def checkDataDir(self):
if Env.get('app_dir') in Env.get('data_dir'):
if isSubFolder(Env.get('data_dir'), Env.get('app_dir')):
log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.')
return True
def cleanUpFolders(self):
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False)
only_clean = ['couchpotato', 'libs', 'init']
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False, only_clean = only_clean)
def available(self, **kwargs):
return {
@@ -117,7 +118,7 @@ class Core(Plugin):
self.shutdown_started = True
fireEvent('app.do_shutdown')
fireEvent('app.do_shutdown', restart = restart)
log.debug('Every plugin got shutdown event')
loop = True
@@ -142,9 +143,11 @@ class Core(Plugin):
log.debug('Safe to shutdown/restart')
loop = IOLoop.current()
try:
if not IOLoop.current()._closing:
IOLoop.current().stop()
if not loop._closing:
loop.stop()
except RuntimeError:
pass
except:

View File

@@ -25,6 +25,7 @@ class DownloaderBase(Provider):
status_support = True
torrent_sources = [
'https://zoink.it/torrent/%s.torrent',
'http://torrage.com/torrent/%s.torrent',
'https://torcache.net/torrent/%s.torrent',
]
@@ -72,6 +73,9 @@ class DownloaderBase(Provider):
return
return self.download(data = data, media = media, filedata = filedata)
def download(self, *args, **kwargs):
return False
def _getAllDownloadStatus(self, download_ids):
if self.isDisabled(manual = True, data = {}):
return

View File

@@ -40,15 +40,16 @@ var DownloadersBase = new Class({
button.set('text', button_name);
var message;
if(json.success){
var message = new Element('span.success', {
message = new Element('span.success', {
'text': 'Connection successful'
}).inject(button, 'after')
}
else {
var msg_text = 'Connection failed. Check logs for details.';
if(json.hasOwnProperty('msg')) msg_text = json.msg;
var message = new Element('span.failed', {
message = new Element('span.failed', {
'text': msg_text
}).inject(button, 'after')
}

View File

@@ -33,9 +33,9 @@ class Scheduler(Plugin):
except:
pass
def doShutdown(self):
def doShutdown(self, *args, **kwargs):
self.stop()
return super(Scheduler, self).doShutdown()
return super(Scheduler, self).doShutdown(*args, **kwargs)
def stop(self):
if self.started:

View File

@@ -10,13 +10,13 @@ from threading import RLock
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import ss, sp
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import removePyc
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from dateutil.parser import parse
from git.repository import LocalRepository
from scandir import scandir
import version
from six.moves import filter
@@ -142,9 +142,11 @@ class Updater(Plugin):
'success': success
}
def doShutdown(self):
self.updater.deletePyc(show_logs = False)
return super(Updater, self).doShutdown()
def doShutdown(self, *args, **kwargs):
# On shutdown, clean up stale .pyc files from the app dir — skipped for
# dev checkouts and desktop builds where the files are managed differently.
if not Env.get('dev') and not Env.get('desktop'):
removePyc(Env.get('app_dir'), show_logs = False)
return super(Updater, self).doShutdown(*args, **kwargs)
class BaseUpdater(Plugin):
@@ -180,30 +182,6 @@ class BaseUpdater(Plugin):
def check(self):
pass
def deletePyc(self, only_excess = True, show_logs = True):
for root, dirs, files in scandir.walk(Env.get('app_dir')):
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
for excess_pyc_file in excess_pyc_files:
full_path = os.path.join(root, excess_pyc_file)
if show_logs: log.debug('Removing old PYC file: %s', full_path)
try:
os.remove(full_path)
except:
log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
class GitUpdater(BaseUpdater):
@@ -327,13 +305,13 @@ class SourceUpdater(BaseUpdater):
data_dir = Env.get('data_dir')
# Get list of files we want to overwrite
self.deletePyc()
removePyc(app_dir)
existing_files = []
for root, subfiles, filenames in scandir.walk(app_dir):
for root, subfiles, filenames in os.walk(app_dir):
for filename in filenames:
existing_files.append(os.path.join(root, filename))
for root, subfiles, filenames in scandir.walk(path):
for root, subfiles, filenames in os.walk(path):
for filename in filenames:
fromfile = os.path.join(root, filename)
tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, ''))

View File

@@ -3,10 +3,11 @@ import os
import time
import traceback
from CodernityDB.index import IndexException, IndexNotFoundException, IndexConflict
from couchpotato import CPLog
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.encoding import toUnicode, sp
from couchpotato.core.helpers.variable import getImdb, tryInt
@@ -15,18 +16,22 @@ log = CPLog(__name__)
class Database(object):
indexes = []
indexes = None
db = None
def __init__(self):
self.indexes = {}
addApiView('database.list_documents', self.listDocuments)
addApiView('database.reindex', self.reindex)
addApiView('database.compact', self.compact)
addApiView('database.document.update', self.updateDocument)
addApiView('database.document.delete', self.deleteDocument)
addEvent('database.setup.after', self.startup_compact)
addEvent('database.setup_index', self.setupIndex)
addEvent('app.migrate', self.migrate)
addEvent('app.after_shutdown', self.close)
@@ -43,26 +48,45 @@ class Database(object):
def setupIndex(self, index_name, klass):
self.indexes.append(index_name)
self.indexes[index_name] = klass
db = self.getDB()
# Category index
index_instance = klass(db.path, index_name)
try:
db.add_index(index_instance)
db.reindex_index(index_name)
except:
previous = db.indexes_names[index_name]
previous_version = previous._version
current_version = klass._version
# Only edit index if versions are different
if previous_version < current_version:
log.debug('Index "%s" already exists, updating and reindexing', index_name)
db.destroy_index(previous)
# Make sure store and bucket don't exist
exists = []
for x in ['buck', 'stor']:
full_path = os.path.join(db.path, '%s_%s' % (index_name, x))
if os.path.exists(full_path):
exists.append(full_path)
if index_name not in db.indexes_names:
# Remove existing buckets if index isn't there
for x in exists:
os.unlink(x)
# Add index (will restore buckets)
db.add_index(index_instance)
db.reindex_index(index_name)
else:
# Previous info
previous = db.indexes_names[index_name]
previous_version = previous._version
current_version = klass._version
# Only edit index if versions are different
if previous_version < current_version:
log.debug('Index "%s" already exists, updating and reindexing', index_name)
db.destroy_index(previous)
db.add_index(index_instance)
db.reindex_index(index_name)
except:
log.error('Failed adding index %s: %s', (index_name, traceback.format_exc()))
def deleteDocument(self, **kwargs):
@@ -136,20 +160,80 @@ class Database(object):
'success': success
}
def compact(self, **kwargs):
def compact(self, try_repair = True, **kwargs):
success = False
db = self.getDB()
# Removing left over compact files
db_path = sp(db.path)
for f in os.listdir(sp(db.path)):
for x in ['_compact_buck', '_compact_stor']:
if f[-len(x):] == x:
os.unlink(os.path.join(db_path, f))
success = True
try:
db = self.getDB()
start = time.time()
size = float(db.get_db_details().get('size', 0))
log.debug('Compacting database, current size: %sMB', round(size/1048576, 2))
db.compact()
new_size = float(db.get_db_details().get('size', 0))
log.debug('Done compacting database in %ss, new size: %sMB, saved: %sMB', (round(time.time()-start, 2), round(new_size/1048576, 2), round((size-new_size)/1048576, 2)))
success = True
except (IndexException, AttributeError):
if try_repair:
log.error('Something wrong with indexes, trying repair')
# Remove all indexes
old_indexes = self.indexes.keys()
for index_name in old_indexes:
try:
db.destroy_index(index_name)
except IndexNotFoundException:
pass
except:
log.error('Failed removing old index %s', index_name)
# Add them again
for index_name in self.indexes:
klass = self.indexes[index_name]
# Category index
index_instance = klass(db.path, index_name)
try:
db.add_index(index_instance)
db.reindex_index(index_name)
except IndexConflict:
pass
except:
log.error('Failed adding index %s', index_name)
raise
self.compact(try_repair = False)
else:
log.error('Failed compact: %s', traceback.format_exc())
except:
log.error('Failed compact: %s', traceback.format_exc())
success = False
return {
'success': success
}
# Compact on start
def startup_compact(self):
# Compact the database at startup when it has grown past 25MB and no
# compact ran within the last 7 days (tracked via the 'last_db_compact' prop).
from couchpotato import Env
db = self.getDB()
size = db.get_db_details().get('size')
prop_name = 'last_db_compact'
last_check = int(Env.prop(prop_name, default = 0))
if size > 26214400 and last_check < time.time()-604800: # 25MB / 7 days
self.compact()
# Timestamp is updated unconditionally after a compact attempt
Env.prop(prop_name, value = int(time.time()))
def migrate(self):
from couchpotato import Env
@@ -289,13 +373,16 @@ class Database(object):
for profile_type in types:
p_type = types[profile_type]
if types[profile_type]['profile_id'] == p['id']:
new_profile['finish'].append(p_type['finish'])
new_profile['wait_for'].append(p_type['wait_for'])
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
if p_type['quality_id']:
new_profile['finish'].append(p_type['finish'])
new_profile['wait_for'].append(p_type['wait_for'])
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
new_profile.update(db.insert(new_profile))
profile_link[x] = new_profile.get('_id')
if len(new_profile['qualities']) > 0:
new_profile.update(db.insert(new_profile))
profile_link[x] = new_profile.get('_id')
else:
log.error('Corrupt profile list for "%s", using default.', p.get('label'))
# Qualities
log.info('Importing quality sizes')
@@ -369,10 +456,10 @@ class Database(object):
m = medias[x]
status = statuses.get(m['status_id']).get('identifier')
l = libraries[m['library_id']]
l = libraries.get(m['library_id'])
# Only migrate wanted movies, Skip if no identifier present
if not getImdb(l.get('identifier')): continue
if not l or not getImdb(l.get('identifier')): continue
profile_id = profile_link.get(m['profile_id'])
category_id = category_link.get(m['category_id'])

View File

@@ -5,14 +5,12 @@ from urlparse import urlparse
import os
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import cleanHost, splitString
from couchpotato.core.logger import CPLog
from bencode import bencode, bdecode
from rtorrent import RTorrent
from scandir import scandir
log = CPLog(__name__)
@@ -238,7 +236,7 @@ class rTorrent(DownloaderBase):
if torrent.is_multi_file() and torrent.directory.endswith(torrent.name):
# Remove empty directories bottom up
try:
for path, _, _ in scandir.walk(sp(torrent.directory), topdown = False):
for path, _, _ in os.walk(sp(torrent.directory), topdown = False):
os.rmdir(path)
except OSError:
log.info('Directory "%s" contains extra files, unable to remove', torrent.directory)

View File

@@ -90,6 +90,7 @@ class SynologyRPC(object):
self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port)
self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port)
self.sid = None
self.username = username
self.password = password
self.destination = destination

View File

@@ -174,8 +174,8 @@ class TransmissionRPC(object):
self.session = {}
if username and password:
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager))
password_manager.add_password(realm = 'Transmission', uri = self.url, user = username, passwd = password)
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager))
opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
urllib2.install_opener(opener)
elif username or password:

View File

@@ -168,7 +168,7 @@ class uTorrent(DownloaderBase):
status = 'busy'
if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
status = 'seeding'
elif (torrent[1] & self.status_flags['ERROR']):
elif torrent[1] & self.status_flags['ERROR']:
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'
@@ -229,7 +229,6 @@ class uTorrentAPI(object):
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager))
self.opener.add_handler(urllib2.HTTPDigestAuthHandler(password_manager))
elif username or password:
log.debug('User or password missing, not using authentication.')
self.token = self.get_token()

View File

@@ -1,4 +1,5 @@
import collections
import ctypes
import hashlib
import os
import platform
@@ -6,8 +7,9 @@ import random
import re
import string
import sys
import traceback
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp
from couchpotato.core.logger import CPLog
import six
from six.moves import map, zip, filter
@@ -290,9 +292,14 @@ def dictIsSubset(a, b):
return all([k in b and b[k] == v for k, v in a.items()])
# Returns True if sub_folder is the same as or inside base_folder
def isSubFolder(sub_folder, base_folder):
# Returns True if sub_folder is the same as or inside base_folder
return base_folder and sub_folder and ss(os.path.normpath(base_folder).rstrip(os.path.sep) + os.path.sep) in ss(os.path.normpath(sub_folder).rstrip(os.path.sep) + os.path.sep)
if base_folder and sub_folder:
base = sp(os.path.realpath(base_folder)) + os.path.sep
subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
return os.path.commonprefix([subfolder, base]) == base
return False
# From SABNZBD
@@ -307,3 +314,69 @@ def scanForPassword(name):
if m:
return m.group(1).strip('. '), m.group(2).strip()
# Matches an underscore followed by one lowercase letter (e.g. the '_b' in 'a_b').
under_pat = re.compile(r'_([a-z])')

def underscoreToCamel(name):
    """Convert an underscore_separated *name* to camelCase ('foo_bar' -> 'fooBar')."""
    def _capitalize(match):
        return match.group(1).upper()
    return under_pat.sub(_capitalize, name)
def removePyc(folder, only_excess = True, show_logs = True):
    """Delete stale .pyc files under *folder*, then prune empty directories.

    With ``only_excess`` True only .pyc files lacking a matching .py source
    are removed; otherwise every .pyc file goes. Failures are logged, never
    raised.
    """
    folder = sp(folder)

    for root, dirs, files in os.walk(folder):

        py_sources = set([name for name in files if name.endswith('.py')])
        compiled = [name for name in files if name.endswith('.pyc')]
        if only_excess:
            # Keep .pyc files whose source still exists (strip trailing 'c' to compare)
            compiled = [name for name in compiled if name[:-1] not in py_sources]

        for pyc_name in compiled:
            full_path = os.path.join(root, pyc_name)
            if show_logs: log.debug('Removing old PYC file: %s', full_path)
            try:
                os.remove(full_path)
            except:
                log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))

        # Remove directories that are (now) completely empty
        for dir_name in dirs:
            full_path = os.path.join(root, dir_name)
            if len(os.listdir(full_path)) == 0:
                try:
                    os.rmdir(full_path)
                except:
                    log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
def getFreeSpace(directories):
    """Return disk space information for one or more directories.

    :param directories: a single path, or a tuple/list of paths.
    :return: ``[total, free]`` when a single path was given (``None`` when it
        is not a directory), otherwise a dict mapping each path to its
        ``[total, free]`` (or ``None``).

    NOTE(review): the Windows branch reports raw bytes while the POSIX branch
    reports megabytes -- confirm which unit callers actually expect.
    """
    single = not isinstance(directories, (tuple, list))
    if single:
        directories = [directories]

    free_space = {}
    for folder in directories:

        size = None
        if os.path.isdir(folder):
            if os.name == 'nt':
                _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
                    ctypes.c_ulonglong()
                if sys.version_info >= (3,) or isinstance(folder, unicode):
                    fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable
                else:
                    fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable
                ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
                if ret == 0:
                    raise ctypes.WinError()
                # Bug fix: this used to `return [total.value, free.value]` here,
                # aborting the loop and ignoring the single/dict contract that
                # the POSIX branch below honours.
                size = [total.value, free.value]
            else:
                s = os.statvfs(folder)
                size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]

        if single: return size
        free_space[folder] = size

    return free_space

View File

@@ -25,6 +25,12 @@ class CPLog(object):
self.Env = Env
self.is_develop = Env.get('dev')
from couchpotato.core.event import addEvent
addEvent('app.after_shutdown', self.close)
def close(self, *args, **kwargs):
# Flush and close all logging handlers; registered on the 'app.after_shutdown' event.
logging.shutdown()
def info(self, msg, replace_tuple = ()):
self.logger.info(self.addContext(msg, replace_tuple))

View File

@@ -1,7 +1,7 @@
import os
import traceback
from couchpotato import get_db, CPLog
from couchpotato import CPLog
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.plugins.base import Plugin
@@ -28,7 +28,7 @@ class MediaBase(Plugin):
media = fireEvent('media.get', media_id, single = True)
event_name = '%s.searcher.single' % media.get('type')
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id))
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True)
except:
log.error('Failed creating onComplete: %s', traceback.format_exc())

View File

@@ -99,7 +99,7 @@ from couchpotato.core.helpers.encoding import simplifyString"""
class TitleIndex(TreeBasedIndex):
_version = 3
_version = 4
custom_header = """from CodernityDB.tree_index import TreeBasedIndex
from string import ascii_letters
@@ -128,7 +128,7 @@ from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""
title = title[len(prefix):]
break
return str(nr_prefix + title).ljust(32, '_')[:32]
return str(nr_prefix + title).ljust(32, ' ')[:32]
class StartsWithIndex(TreeBasedIndex):
@@ -176,3 +176,24 @@ class MediaChildrenIndex(TreeBasedIndex):
if data.get('_t') == 'media' and data.get('parent_id'):
return data.get('parent_id'), None
class MediaTagIndex(MultiTreeBasedIndex):
# CodernityDB multi-key index over media tags: one index entry per tag on a
# media document, keyed by the md5 hexdigest of the tag (32 chars, matching
# the '32s' key_format below).
_version = 2
custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex"""
def __init__(self, *args, **kwargs):
# Keys are fixed-width 32-byte strings (md5 hexdigests)
kwargs['key_format'] = '32s'
super(MediaTagIndex, self).__init__(*args, **kwargs)
def make_key_value(self, data):
# Only index media documents that actually carry tags; returns the list of
# hashed tag keys and no stored value.
if data.get('_t') == 'media' and data.get('tags') and len(data.get('tags', [])) > 0:
tags = set()
for tag in data.get('tags', []):
tags.add(self.make_key(tag))
return list(tags), None
def make_key(self, key):
# Hash the tag so every key has the fixed 32-char width the index expects
return md5(key).hexdigest()

View File

@@ -1,6 +1,10 @@
from datetime import timedelta
from operator import itemgetter
import time
import traceback
from string import ascii_lowercase
from CodernityDB.database import RecordNotFound
from couchpotato import tryInt, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
@@ -8,7 +12,7 @@ from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, getImdb, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex, MediaTagIndex
log = CPLog(__name__)
@@ -20,6 +24,7 @@ class MediaPlugin(MediaBase):
'media': MediaIndex,
'media_search_title': TitleSearchIndex,
'media_status': MediaStatusIndex,
'media_tag': MediaTagIndex,
'media_by_type': MediaTypeIndex,
'media_title': TitleIndex,
'media_startswith': StartsWithIndex,
@@ -80,6 +85,8 @@ class MediaPlugin(MediaBase):
addEvent('media.list', self.list)
addEvent('media.delete', self.delete)
addEvent('media.restatus', self.restatus)
addEvent('media.tag', self.tag)
addEvent('media.untag', self.unTag)
def refresh(self, id = '', **kwargs):
handlers = []
@@ -119,25 +126,30 @@ class MediaPlugin(MediaBase):
def get(self, media_id):
db = get_db()
try:
db = get_db()
imdb_id = getImdb(str(media_id))
imdb_id = getImdb(str(media_id))
media = None
if imdb_id:
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
else:
media = db.get('id', media_id)
if imdb_id:
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
else:
media = db.get('id', media_id)
if media:
if media:
# Attach category
try: media['category'] = db.get('id', media.get('category_id'))
except: pass
# Attach category
try: media['category'] = db.get('id', media.get('category_id'))
except: pass
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
return media
return media
except RecordNotFound:
log.error('Media with id "%s" not found', media_id)
except:
raise
def getView(self, id = None, **kwargs):
@@ -155,8 +167,15 @@ class MediaPlugin(MediaBase):
status = list(status if isinstance(status, (list, tuple)) else [status])
for s in status:
for ms in db.get_many('media_status', s, with_doc = with_doc):
yield ms['doc'] if with_doc else ms
for ms in db.get_many('media_status', s):
if with_doc:
try:
doc = db.get('id', ms['_id'])
yield doc
except RecordNotFound:
log.debug('Record not found, skipping: %s', ms['_id'])
else:
yield ms
def withIdentifiers(self, identifiers, with_doc = False):
@@ -171,7 +190,7 @@ class MediaPlugin(MediaBase):
log.debug('No media found with identifiers: %s', identifiers)
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, starts_with = None, search = None):
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None):
db = get_db()
@@ -182,6 +201,8 @@ class MediaPlugin(MediaBase):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
if with_tags and not isinstance(with_tags, (list, tuple)):
with_tags = [with_tags]
# query media ids
if types:
@@ -208,11 +229,17 @@ class MediaPlugin(MediaBase):
# Add search filters
if starts_with:
filter_by['starts_with'] = set()
starts_with = toUnicode(starts_with.lower())[0]
starts_with = starts_with if starts_with in ascii_lowercase else '#'
filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]
# Add tag filter
if with_tags:
filter_by['with_tags'] = set()
for tag in with_tags:
for x in db.get_many('media_tag', tag):
filter_by['with_tags'].add(x['_id'])
# Filter with search query
if search:
filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]
@@ -265,6 +292,7 @@ class MediaPlugin(MediaBase):
release_status = splitString(kwargs.get('release_status')),
status_or = kwargs.get('status_or') is not None,
limit_offset = kwargs.get('limit_offset'),
with_tags = splitString(kwargs.get('with_tags')),
starts_with = kwargs.get('starts_with'),
search = kwargs.get('search')
)
@@ -383,16 +411,18 @@ class MediaPlugin(MediaBase):
total_deleted += 1
new_media_status = 'done'
elif delete_from == 'manage':
if release.get('status') == 'done':
if release.get('status') == 'done' or media.get('status') == 'done':
db.delete(release)
total_deleted += 1
if (total_releases == total_deleted and media['status'] != 'active') or (delete_from == 'wanted' and media['status'] == 'active'):
if (total_releases == total_deleted and media['status'] != 'active') or (total_releases == 0 and not new_media_status) or (not new_media_status and delete_from == 'late'):
db.delete(media)
deleted = True
elif new_media_status:
media['status'] = new_media_status
db.update(media)
fireEvent('media.untag', media['_id'], 'recent', single = True)
else:
fireEvent('media.restatus', media.get('_id'), single = True)
@@ -432,24 +462,72 @@ class MediaPlugin(MediaBase):
if not m['profile_id']:
m['status'] = 'done'
else:
move_to_wanted = True
m['status'] = 'active'
profile = db.get('id', m['profile_id'])
media_releases = fireEvent('release.for_media', m['_id'], single = True)
try:
profile = db.get('id', m['profile_id'])
media_releases = fireEvent('release.for_media', m['_id'], single = True)
done_releases = [release for release in media_releases if release.get('status') == 'done']
for q_identifier in profile['qualities']:
index = profile['qualities'].index(q_identifier)
if done_releases:
# Only look at latest added release
release = sorted(done_releases, key = itemgetter('last_edit'), reverse = True)[0]
for release in media_releases:
if q_identifier == release['quality'] and (release.get('status') == 'done' and profile['finish'][index]):
move_to_wanted = False
# Check if we are finished with the media
if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, timedelta(seconds = time.time() - release['last_edit']).days, single = True):
m['status'] = 'done'
elif previous_status == 'done':
m['status'] = 'done'
m['status'] = 'active' if move_to_wanted else 'done'
except RecordNotFound:
log.debug('Failed restatus, keeping previous: %s', traceback.format_exc())
m['status'] = previous_status
# Only update when status has changed
if previous_status != m['status']:
db.update(m)
return True
# Tag media as recent
self.tag(media_id, 'recent')
return m['status']
except:
log.error('Failed restatus: %s', traceback.format_exc())
def tag(self, media_id, tag):
try:
db = get_db()
m = db.get('id', media_id)
tags = m.get('tags') or []
if tag not in tags:
tags.append(tag)
m['tags'] = tags
db.update(m)
return True
except:
log.error('Failed tagging: %s', traceback.format_exc())
return False
def unTag(self, media_id, tag):
try:
db = get_db()
m = db.get('id', media_id)
tags = m.get('tags') or []
if tag in tags:
new_tags = list(set(tags))
new_tags.remove(tag)
m['tags'] = new_tags
db.update(m)
return True
except:
log.error('Failed untagging: %s', traceback.format_exc())
return False

View File

@@ -129,6 +129,9 @@ class YarrProvider(Provider):
else:
return []
def buildUrl(self, *args, **kwargs):
pass
def login(self):
# Check if we are still logged in every hour
@@ -181,7 +184,7 @@ class YarrProvider(Provider):
try:
return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False)
except:
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc()))
return 'try_next'

View File

@@ -100,6 +100,7 @@ config = [{
'name': 'binsearch',
'description': 'Free provider, less accurate. See <a href="https://www.binsearch.info/">BinSearch</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAATklEQVQY02NwQAMMWAXOnz+PKvD//3/CAvM//z+fgiwAAs+RBab4PP//vwbFjPlAffgEChzOo2r5fBuIfRAC5w8D+QUofkkp8MHjOWQAAM3Sbogztg2wAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -1,9 +1,7 @@
from urllib2 import HTTPError
from urlparse import urlparse
import time
import traceback
import re
import urllib2
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.rss import RSS
@@ -13,6 +11,7 @@ from couchpotato.core.media._base.providers.base import ResultList
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
from requests import HTTPError
log = CPLog(__name__)
@@ -92,7 +91,7 @@ class Base(NZBProvider, RSS):
# Extract a password from the description
password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$', description, flags = re.I).group(1)
if password:
name = name + ' {{%s}}' % password.strip()
name += ' {{%s}}' % password.strip()
except:
log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))
@@ -184,16 +183,7 @@ class Base(NZBProvider, RSS):
return 'try_next'
try:
# Get final redirected url
log.debug('Checking %s for redirects.', url)
req = urllib2.Request(url)
req.add_header('User-Agent', self.user_agent)
res = urllib2.urlopen(req)
finalurl = res.geturl()
if finalurl != url:
log.debug('Redirect url used: %s', finalurl)
data = self.urlopen(finalurl, show_error = False)
data = self.urlopen(url, show_error = False)
self.limits_reached[host] = False
return data
except HTTPError as e:
@@ -230,8 +220,9 @@ config = [{
'description': 'Enable <a href="http://newznab.com/" target="_blank">NewzNab</a> such as <a href="https://nzb.su" target="_blank">NZB.su</a>, \
<a href="https://nzbs.org" target="_blank">NZBs.org</a>, <a href="http://dognzb.cr/" target="_blank">DOGnzb.cr</a>, \
<a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a>, <a href="https://nzbgeek.info/" target="_blank">NZBGeek</a>, \
<a href="https://smackdownonyou.com" target="_blank">SmackDown</a>, <a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>',
<a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
@@ -240,30 +231,30 @@ config = [{
},
{
'name': 'use',
'default': '0,0,0,0,0,0'
'default': '0,0,0,0,0'
},
{
'name': 'host',
'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://index.nzbgeek.info, https://smackdownonyou.com, https://www.nzbfinder.ws',
'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://index.nzbgeek.info,https://www.nzbfinder.ws',
'description': 'The hostname of your newznab provider',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'default': '0,0,0,0,0,0',
'default': '0,0,0,0,0',
'description': 'Starting score for each release found via this provider.',
},
{
'name': 'custom_tag',
'advanced': True,
'label': 'Custom tag',
'default': ',,,,,',
'default': ',,,,',
'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org',
},
{
'name': 'api_key',
'default': ',,,,,',
'default': ',,,,',
'label': 'Api Key',
'description': 'Can be found on your profile page',
'type': 'combined',

View File

@@ -80,6 +80,7 @@ config = [{
'name': 'NZBClub',
'description': 'Free provider, less accurate. See <a href="https://www.nzbclub.com/">NZBClub</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACEUlEQVQ4y3VSMWgUQRR9/8/s7OzeJSdnTsVGghLEYBNQjBpQiRBFhIB2EcHG1kbs0murhZAmVocExEZQ0c7CxkLINYcJJpoYj9wZcnu72fF21uJSXMzuhyne58/j/fcf4b+KokgBIOSU53lxP5b9oNVqDT36dH+5UjoiKvIwPFEEgWBshGZ3E7/NOupL9fMjx0e+ZhKsrq+c/FPZKJi0w4FsQXMBDEJsd7BNW9h2tuyP9vfTALIJkMIu1hYRtINM+dpzcWc0sbkreK4fUEogyraAmKGF3+7vcT/wtR9QwkCabSAzQQuvk0uglAo5YaQ5DASGYjfMXcHVOqKu6NmR7iehlKAdHWUqWPv1c3i+9uwVdRlEBGaGEAJCCrDo9ShhvF6qPq8tL57bp+DbRn2sHtUuCY9YphLMu5921VhrwYJ5tbt0tt6sjQP4vEfB2Ikz7/ytwbeR6ljHkXCUA6UcOLtPOg4MYhtH8ZcLw5er+xQMDAwEURRNl96X596Y6oxFwsw9fmtTOAr2Ik19nL365FZpsLSdnQPPM8aYewc+lDcX4rkHqbQMAGTJXulOLzycmr1bKBTi3DOGYagajcahiaOT89fbM0/dxEsUu3aidfPljWO3HzebzYNBELi5Z5RSJlrrHd/3w8lT114MrVTWOn875fHRiYVisRhorWMpZXdvNnLKGCOstb0AMlulVJI19w/+nceU4D0aCwAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -105,6 +105,7 @@ config = [{
'name': 'nzbindex',
'description': 'Free provider, less accurate. See <a href="https://www.nzbindex.com/">NZBIndex</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAo0lEQVR42t2SQQ2AMBAEcUCwUAv94QMLfHliAQtYqIVawEItYAG6yZFMLkUANNlk79Kbbtp2P1j9uKxVV9VWFeStl+Wh3fWK9hNwEoADZkJtMD49AqS5AUjWGx6A+m+ARICGrM5W+wSTB0gETKzdHZwCEZAJ8PGZQN4AiQAmkR9s06EBAugJiBoAAPFfAQcBgZcIHzwA6TYP4JsXeSg3P9L31w3eksbH3zMb/wAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -74,6 +74,7 @@ config = [{
'name': 'OMGWTFNZBs',
'description': 'See <a href="http://omgwtfnzbs.org/">OMGWTFNZBs</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQEAIAAADAAbR1AAADbElEQVR4AZ2UW0ybZRiAy/OvdHaLYvB0YTRIFi7GkM44zRLmIfNixkWdiRMyYoxRE8/TC7MYvXCGEBmr3mxLwVMwY0wYA7e6Wso4lB6h/U9taSlMGIfBXLYlJMyo0S///2dJI5lxN8/F2/f9nu9737e/jYmXr6KTbN9BGG9HE/NotQ76UWziNzrXFiETk/5ARUNH+7+0kW7fSgTl0VKGOLZzidOkmuuIo7q2oTArNLPIzhdIkqXkerFOm2CaD/5bcKrjIL2c3fkhPxOq93Kcb91v46fV9TQKF4TgV/TbUsQtzfCaK6jMOd5DJrguSIIhexmqqVxN0FXbRR8/ND/LYTTj6J7nl2gnL47OkDW4KJhnQHCa6JpKVNJGA3OC58nwBJoZ//ebbIyKpBxjrr0o1q1FMRkrKXZnHWF85VvxMrJxibwhGyd0f5bLnKzqJs1k0Sfo+EU8hdAUvkbcwKEgs2D0OiV4jmmD1zb+Tp6er0JMMvDxPo5xev9zTBF683NS+N56n1YiB95B5crr93KRuKhKI0tb0Kw2mgLLqTjLEWO8424i9IvURaYeOckwf3+/yCC9e3bQQ/MuD+Monk0k+XFXMUfx7z5EEP+XlXi5tLlMxH8zLppw7idJrugcus30kC86gc7UrQqjLIukM8zWHOACeU+TiMxXN6ExVOkgz4lvPEzice1GIVhxhG4CrZvpl6TH55giKWqXGLy9hZh5aUtgDSew/msSyCKpl+DDNfxJc8NBIsxUxUnz14O/oONu+IIIvso9TLBQ1SY5rUhuSzUhAqJ2mRXBLDOCeUtgUZXsaObT8BffhUJPqWgiV+3zKKzYH0ClvTRLhD77HIqVkyh5jThnivehoG+qJctIRSPn6bxvO4FCgTl9c1DmbpjLajbQFE8aW5SU3rg+zOPGUjTUF9NFpLEbH2c/KmGYlY69/GQJVtGMSUcEp9eCbB1nctbxHTLRdTUkGDf+B02uGWRG3OvpJ/zSMwzif+oxVBID3cQKBavLCiPmB2PM2UuSCUPgrX4VDb97AwEG67bh4+KTOlncvu3M31BwA5rLHbCfEjwkNDky9e/SSbSxnD46Pg0RJtpXRvhmBSZHpRjWtKwFybjuQeXaKxto4WjLZZZvVmC17pZLJFkwxm5++PS2Mrwc7nyIMYZe/IzoP5d6QgEybqTXAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -78,8 +78,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'Awesome-HD',
'description': 'See <a href="https://awesome-hd.net">AHD</a>',
'description': '<a href="https://awesome-hd.net">AHD</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC+UlEQVR4AV1SO0y6dxQ9H4g8CoIoohZ5NA0aR2UgkYpNB5uocTSaLlrDblMH09Gt8d90r3YpJkanxjA4GGkbO7RNxSABq8jDGnkpD+UD5NV7Bxvbk9wvv+/3uPece66A/yEWi42FQqHVfD7/cbPZtIEglUpjOp3uZHR0dBvAn3gDIRqNgjE4OKj0+Xzf3NzcfD4wMCCjf5TLZbTbbajVatzf3+Pu7q5uNpt35ufnvwBQAScQRREEldfr9RWLxan+/n5YrVa+jFarhVfQQyQSCU4EhULhX15engEgSrjC0dHRVqlUmjQYDBgaGgKtuTqz4mTgIoVCASaTCX19fajVapOHh4dbFJBks9mxcDi8qtFoJEajkfVyJWi1WkxMTMDhcIAT8x6D7/Dd6+vr1fHx8TGp2+3+iqo5+YCzBwIBToK5ubl/mQwPDyMSibAs2Gw2UHNRrValz8/PDUk8Hv9EqVRCr9fj4uICTNflcqFer+Pg4AB7e3uoVCq8x9Rxfn6O7u5uqFQq8FspZXxHTekggByA3W4Hr9PpNDeRL3I1cMhkMrBrnZ2dyGQyvNYIs7OzVbJNPjIyAraLwYdcjR8wXl5eIJfLwRIFQQDLYkm3t7c1CdGPPT4+cpOImp4PODMeaK+n10As2jBbrHifHOjS6qAguVFimkqlwAMmIQnHV1dX4NDQhVwuhyZTV6pgIktzDzkkk0lEwhEEzs7ASQr5Ai4vL1nuccfCwsLO/v6+p9FoyJhF6ekJro/cPCzIZLNQa7rQoK77/SdgWWpKkCaJ5EB9aWnpe6nH40nRMBnJV4f5gw+FX3/5GX/8/htXRZdOzzqhJWn6nl6YbTZqqhrhULD16fT0d8FgcFtYW1vD5uamfGVl5cd4IjldKhZACdkJvKfWUANrxEaJV4hiGVaL1b+7653hXzwRZQr2X76xsfG1xWIRaZzbNPv/CdrjEL9cX/+WXFBSgEPgzxuwG3Yans9OT0+naBZMIJDNfzudzp8WFxd/APAX3uAf9WOTxOPLdosAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -93,8 +93,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'BiT-HDTV',
'description': 'See <a href="http://bit-hdtv.com">BiT-HDTV</a>',
'description': '<a href="http://bit-hdtv.com">BiT-HDTV</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -1,6 +1,6 @@
import traceback
from bs4 import BeautifulSoup
from bs4 import BeautifulSoup, SoupStrainer
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -20,6 +20,7 @@ class Base(TorrentProvider):
}
http_time_between_calls = 1 # Seconds
only_tables_tags = SoupStrainer('table')
def _searchOnTitle(self, title, movie, quality, results):
@@ -27,7 +28,7 @@ class Base(TorrentProvider):
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags)
try:
result_table = html.find('table', attrs = {'class': 'koptekst'})
@@ -87,8 +88,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'Bitsoup',
'description': 'See <a href="https://bitsoup.me">Bitsoup</a>',
'description': '<a href="https://bitsoup.me">Bitsoup</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAB8ElEQVR4AbWSS2sTURiGz3euk0mswaE37HhNhIrajQheFgF3rgR/lAt/gOBCXNZlo6AbqfUWRVCxi04wqUnTRibpJLaJzdzOOZ6WUumyC5/VHOb9eN/FA91uFx0FjI4IPfgiGLTWH73tn348GKmN7ijD0d2b41fO5qJEaX24AWNIUrVQCTTJ3Llx6vbV6Vtzk7Gi9+ebi996guFDDYAQAVj4FExP5qdOZB49W62t/zH3hECcwsPnbWeMXz6Xi2K1f0ApeK3hMCHHbP5gvvoriBgFAAQJEAxhjJ4u+YWTNsVI6b1JgtPWZkoIefKy4fcii2OTw2BABs7wj3bYDlLL4rvjGWOdTser1j5Xf7c3Q/MbHQYApxItvnm31mhQQ71eX2vUB76/vsWB2hg0QuogrMwLIG8P3InM2/eVGXeDViqVwWB79vRU2lgJYmdHcgXCTAXQFJTN5HguvDCR2Hxsxe8EvT54nlcul5vNpqDIEgwRQanAhAAABgRIyiQcjpIkkTOuWyqVoN/vSylX67XXH74uV1vHRUyxxFqbLBCSmBpiXSq6xcL5QrGYzWZ3XQIAwdlOJB+/aL764ucdmncYs0WsCI7kvTnn+qyDMEnTVCn1Tz5KsBFg6fvWcmsUAcnYNC/g2hnromvvqbHvxv+39S+MX+bWkFXwAgAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -71,7 +71,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'HDBits',
'description': 'See <a href="http://hdbits.org">HDBits</a>',
'wizard': True,
'description': '<a href="http://hdbits.org">HDBits</a>',
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABi0lEQVR4AZWSzUsbQRjGdyabTcvSNPTSHlpQQeMHJApC8CJRvHgQQU969+LJP8G7f4N3DwpeFRQvRr0EKaUl0ATSpkigUNFsMl/r9NmZLCEHA/nNO5PfvMPDm0DI6fV3ZxiolEICe1oZCBVCCmBPKwOh2ErKBHGE4KYEXBpSLkUlqO4LcM7f+6nVhRnOhSkOz/hexk+tL+YL0yPF2YmN4tynD++4gTLGkNNac9YFLoREBR1+cnF3dFY6v/m6PD+FaXiNJtgA4xYbABxiGrz6+6HWaI5/+Qh37YS0/3Znc8UxwNGBIIBX22z+/ZdJ+4wzyjpR4PEpODg8tgUXBv2iWUzSpa12B0IR6n6lvt8Aek2lZHb084+fdRNgrwY8z81PjhVy2d2ttUrtV/lbBa+JXGEpDMPnoF2tN1QYRqVUtf6nFbThb7wk7le395elcqhASLb39okDiHY00VCtCTEHwSiH4AI0lkOiT1dwMeSfT3SRxiQWNO7Zwj1egkoVIQFMKvSiC3bcjXq9Jf8DcDIRT3hh10kAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -3,7 +3,7 @@ import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -15,7 +15,7 @@ class Base(TorrentProvider):
urls = {
'download': 'https://www.ilovetorrents.me/%s',
'detail': 'https//www.ilovetorrents.me/%s',
'detail': 'https://www.ilovetorrents.me/%s',
'search': 'https://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s',
'test': 'https://www.ilovetorrents.me/',
'login': 'https://www.ilovetorrents.me/takelogin.php',
@@ -47,17 +47,24 @@ class Base(TorrentProvider):
data = self.getHTMLData(search_url)
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'class': 'koptekst'})
results_table = None
data_split = splitString(data, '<table')
soup = None
for x in data_split:
soup = BeautifulSoup(x)
results_table = soup.find('table', attrs = {'class': 'koptekst'})
if results_table:
break
if not results_table:
return
try:
pagelinks = soup.findAll(href = re.compile('page'))
pageNumbers = [int(re.search('page=(?P<pageNumber>.+'')', i['href']).group('pageNumber')) for i in pagelinks]
total_pages = max(pageNumbers)
page_numbers = [int(re.search('page=(?P<page_number>.+'')', i['href']).group('page_number')) for i in pagelinks]
total_pages = max(page_numbers)
except:
pass
@@ -139,6 +146,7 @@ config = [{
'name': 'ILoveTorrents',
'description': 'Where the Love of Torrents is Born',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACPUlEQVR4AYWM0U9SbxjH3+v266I/oNvWZTfd2J1d0ZqbZEFwWrUImOKs4YwtumFKZvvlJJADR2TCQQlMPKg5NmpREgaekAPnBATKgmK1LqQlx6awHnZWF1Tr2Xfvvs+7z+dB0mlO7StpAh+M4S/2jbo3w8+xvJvlnSneEt+10zwer5ujNUOoChjALWFw5XOwdCAk/P57cGvPl+Oht0W7VJHN5NC1uW1BON4hGjXbwpVWMZhsy9v7sEIXAsDNYBXgdkEoIKyWD2CF8ut/aOXTZc/fBSgLWw1BgA4BDHOV0GkT90cBQpXahU5TFomsb38XhJC5/Tbh1P8c6rJlBeGfAeyMhUFwNVcs9lxV9Ot0dwmyd+mrNvRtbJ2fSPC6Z3Vsvub2z3sDFACAAYzk0+kUyxEkyfN7PopqNBro55A+P6yPKIrL5zF1HwjdeBJJCObIsZO79bo3sHhWhglo5WMV3mazuVPb4fLvSL8/FAkB1hK6rXQPwYhMyROK8VK5LAiH/jsMt0HQjxiN4/ePdoilllcqDyt3Mkg8mRBNbIhMb8RERkowQA/p76g0/UDDdCoNmDminM0qSK5vlpE5kugCHhNPxntwWmJPYTMZtYcFR6ABHQsVRlYLukVORaaULvqKI46keFSCv77kSPS6kxrPptLNDHgz16fWBtyxe6v5h08LUy+KI8ushqTPWWIX8Sg6b45IrGtyW6zXFb/hpQf9m3oqfWuB0fpSw0uZ4WB69En69uOk2rmO2V52PXj+A/mI4ESKpb2HAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -120,8 +120,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'IPTorrents',
'description': 'See <a href="http://www.iptorrents.com">IPTorrents</a>',
'description': '<a href="http://www.iptorrents.com">IPTorrents</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -32,8 +32,12 @@ class Base(TorrentMagnetProvider):
proxy_list = [
'https://kickass.to',
'http://kickass.pw',
'http://kickassto.come.in',
'http://katproxy.ws',
'http://www.kickassunblock.info',
'http://www.kickassproxy.info',
'http://katph.eu',
'http://kickassto.come.in',
]
def _search(self, media, quality, results):
@@ -128,8 +132,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'KickAssTorrents',
'description': 'See <a href="https://kat.ph/">KickAssTorrents</a>',
'description': '<a href="https://kat.ph/">KickAssTorrents</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACD0lEQVR42pXK20uTcRjA8d/fsJsuap0orBuFlm3hir3JJvQOVmuwllN20Lb2isI2nVHKjBqrCWYaNnNuBrkSWxglhDVJOkBdSWUOq5FgoiOrMdRJ2xPPxW+8OUf1ge/FcyCUSVe2qedK5U/OxNTTXRNXEQ52Glb4O6dNEfK1auJkvRY7+/zxnQbA/D596laXcY3OWOiaIX2393SGznUmxkUo/YkDgqHemuzobQ7+NV+reo5Q1mqp68GABdY3+/EloO+JeN4tEqiFU8f3CwhyWo9E7wfMgI0ELTDx0AvjIxcgvZoC9P7NMN7yMmrFeoKa68rfDfmrARsNN0Ihr55cx59ctZWSiwS5bLKpwW4dYJH+M/B6/CYszE0BFZ+egG+Ln+HRoBN/cpl1pV6COIMkOnBVA/w+fXgGKJVM4LxhumMleoL06hJ3wKcCfl+/TAKKx17gnFePRwkqxR4BQSpFkbCrrQJueI7mWpyfATQ9OQY43+uv/+PutBycJ3y2qn2x7jY50GJvnwLKZjOwspyE5I8F4N+1yr1uwqcs3ym63Hwo29EiAyzUWQVr6WVAS4lZCPutQG/2GtES2YiW3d3XflYKtL72kzAcdEDHeSa3czeIMyyz/TApRKvcFfE0isHbJMnrHCf6xTLb1ORvWNlWo91cvHrJUQo0o6ZoRi7dIiT/g2WEDi27Iyov21xMCvgNfXvtwIACfHwAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -187,8 +187,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'PassThePopcorn',
'description': 'See <a href="https://passthepopcorn.me">PassThePopcorn.me</a>',
'description': '<a href="https://passthepopcorn.me">PassThePopcorn.me</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAARklEQVQoz2NgIAP8BwMiGWRpIN1JNWn/t6T9f532+W8GkNt7vzz9UkfarZVpb68BuWlbnqW1nU7L2DMx7eCoBlpqGOppCQB83zIgIg+wWQAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -1,136 +0,0 @@
from urlparse import parse_qs
import re
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
import six
log = CPLog(__name__)
class Base(TorrentMagnetProvider):
urls = {
'test': 'https://publichd.se',
'detail': 'https://publichd.se/index.php?page=torrent-details&id=%s',
'search': 'https://publichd.se/index.php',
}
http_time_between_calls = 0
def search(self, movie, quality):
if not quality.get('hd', False):
return []
return super(Base, self).search(movie, quality)
def _search(self, media, quality, results):
query = self.buildUrl(media)
params = tryUrlencode({
'page': 'torrents',
'search': query,
'active': 1,
})
data = self.getHTMLData('%s?%s' % (self.urls['search'], params))
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'id': 'bgtorrlist2'})
entries = results_table.find_all('tr')
for result in entries[2:len(entries) - 1]:
info_url = result.find(href = re.compile('torrent-details'))
download = result.find(href = re.compile('magnet:'))
if info_url and download:
url = parse_qs(info_url['href'])
results.append({
'id': url['id'][0],
'name': six.text_type(info_url.string),
'url': download['href'],
'detail_url': self.urls['detail'] % url['id'][0],
'size': self.parseSize(result.find_all('td')[7].string),
'seeders': tryInt(result.find_all('td')[4].string),
'leechers': tryInt(result.find_all('td')[5].string),
'get_more_info': self.getMoreInfo
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getMoreInfo(self, item):
cache_key = 'publichd.%s' % item['id']
description = self.getCache(cache_key)
if not description:
try:
full_description = self.urlopen(item['detail_url'])
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'id': 'torrmain'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
except:
log.error('Failed getting more info for %s', item['name'])
description = ''
self.setCache(cache_key, description, timeout = 25920000)
item['description'] = description
return item
config = [{
'name': 'publichd',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'PublicHD',
'description': 'Public Torrent site with only HD content. See <a href="https://publichd.se/">PublicHD</a>',
'wizard': True,
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': True,
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]

View File

@@ -89,8 +89,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'SceneAccess',
'description': 'See <a href="https://sceneaccess.eu/">SceneAccess</a>',
'description': '<a href="https://sceneaccess.eu/">SceneAccess</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -24,15 +24,18 @@ class Base(TorrentMagnetProvider):
http_time_between_calls = 0
proxy_list = [
'https://tpb.ipredator.se',
'https://nobay.net',
'https://thebay.al',
'https://thepiratebay.se',
'http://pirateproxy.ca',
'http://tpb.al',
'http://thepiratebay.cd',
'http://thebootlegbay.com',
'http://www.tpb.gr',
'http://bayproxy.me',
'http://proxybay.eu',
'http://tpbproxy.co.uk',
'http://pirateproxy.in',
'http://www.getpirate.com',
'http://piratebay.io',
'http://bayproxy.li',
'http://proxybay.pw',
]
def _search(self, media, quality, results):
@@ -110,7 +113,7 @@ class Base(TorrentMagnetProvider):
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'class': 'nfo'})
description = ''
try:
try:
description = toUnicode(nfo_pre.text)
except:
pass
@@ -126,8 +129,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'ThePirateBay',
'description': 'The world\'s largest bittorrent tracker. See <a href="http://fucktimkuik.org/">ThePirateBay</a>',
'description': 'The world\'s largest bittorrent tracker. <a href="http://fucktimkuik.org/">ThePirateBay</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAA3UlEQVQY02P4DwT/YADIZvj//7qnozMYODmtAAusZoCDELDAegYGViZhAWZmRoYoqIDupfhNN1M3dTBEggXWMZg9jZRXV77YxhAOFpjDwMAPMoCXmcHsF1SAQZ6bQY2VgUEbKHClcAYzg3mINEO8jSCD478/DPsZmvqWblu1bOmStes3Pp0ezVDF4Gif0Hfx9///74/ObRZ2YNiZ47C8XIRBxFJR0jbSSUud4f9zAQWn8NTuziAt2zy5xIMM/z8LFX0E+fD/x0MRDCeA1v7Z++Y/FDzyvAtyBxIA+h8A8ZKLeT+lJroAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -90,8 +90,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentBytes',
'description': 'See <a href="http://torrentbytes.net">TorrentBytes</a>',
'description': '<a href="http://torrentbytes.net">TorrentBytes</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAeFBMVEUAAAAAAEQAA1QAEmEAKnQALHYAMoEAOokAQpIASYsASZgAS5UATZwATosATpgAVJ0AWZwAYZ4AZKAAaZ8Ab7IAcbMAfccAgcQAgcsAhM4AiscAjMkAmt0AoOIApecAp/EAqvQAs+kAt+wA3P8A4f8A//8VAAAfDbiaAl08AAAAjUlEQVQYGQXBO04DQRAFwHqz7Z8sECIl5f73ISRD5GBs7UxTlWfg9vYXnvJRQJqOL88D6BAwJtMMumHUVCl60aa6H93IrIv0b+157f1lpk+fm87lMWrZH0vncKbXdRUQrRmrh9C6Iwkq6rg4PXZcyXmbizzeV/g+rDra0rGve8jPKLSOJNi2AQAwAGjwD7ApPkEHdtPQAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -1,4 +1,3 @@
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -69,8 +68,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentDay',
'description': 'See <a href="http://www.td.af/">TorrentDay</a>',
'description': '<a href="http://www.td.af/">TorrentDay</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',

View File

@@ -24,9 +24,9 @@ class Base(TorrentProvider):
http_time_between_calls = 1 # Seconds
cat_backup_id = None
def _search(self, media, quality, results):
def _searchOnTitle(self, title, media, quality, results):
url = self.urls['search'] % self.buildUrl(media, quality)
url = self.urls['search'] % self.buildUrl(title, media, quality)
data = self.getHTMLData(url)
@@ -80,8 +80,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentLeech',
'description': 'See <a href="http://torrentleech.org">TorrentLeech</a>',
'description': '<a href="http://torrentleech.org">TorrentLeech</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACHUlEQVR4AZVSO48SYRSdGTCBEMKzILLAWiybkKAGMZRUUJEoDZX7B9zsbuQPYEEjNLTQkYgJDwsoSaxspEBsCITXjjNAIKi8AkzceXgmbHQ1NJ5iMufmO9/9zrmXlCSJ+B8o75J8Pp/NZj0eTzweBy0Wi4PBYD6f12o1r9ebTCZx+22HcrnMsuxms7m6urTZ7LPZDMVYLBZ8ZV3yo8aq9Pq0wzCMTqe77dDv9y8uLyAWBH6xWOyL0K/56fcb+rrPgPZ6PZfLRe1fsl6vCUmGKIqoqNXqdDr9Dbjps9znUV0uTqdTjuPkDoVCIfcuJ4gizjMMm8u9vW+1nr04czqdK56c37CbKY9j2+1WEARZ0Gq1RFHAz2q1qlQqXxoN69HRcDjUarW8ZD6QUigUOnY8uKYH8N1sNkul9yiGw+F6vS4Rxn8EsodEIqHRaOSnq9T7ajQazWQycEIR1AEBYDabSZJyHDucJyegwWBQr9ebTCaKvHd4cCQANUU9evwQ1Ofz4YvUKUI43GE8HouSiFiNRhOowWBIpVLyHITJkuW3PwgAEf3pgIwxF5r+OplMEsk3CPT5szCMnY7EwUdhwUh/CXiej0Qi3idPz89fdrpdbsfBzH7S3Q9K5pP4c0sAKpVKoVAQGO1ut+t0OoFAQHkH2Da/3/+but3uarWK0ZMQoNdyucRutdttmqZxMTzY7XaYxsrgtUjEZrNhkSwWyy/0NCatZumrNQAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -134,6 +134,7 @@ config = [{
'order': 10,
'description': 'CouchPotato torrent provider. Checkout <a href="https://github.com/RuudBurger/CouchPotatoServer/wiki/CouchPotato-Torrent-Provider">the wiki page about this provider</a> for more info.',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABSElEQVR4AZ2Nz0oCURTGv8t1YMpqUxt9ARFxoQ/gQtppgvUKcu/sxB5iBJkogspaBC6iVUplEC6kv+oiiKDNhAtt16roP0HQgdsMLgaxfvy4nHP4Pi48qE2g4v91JOqT1CH/UnA7w7icUlLawyEdj+ZI/7h6YluWbRiddHonHh9M70aj7VTKzuXuikUMci/EO/ACnAI15599oAk8AR/AgxBQNCzreD7bmpl+FOIVuAHqQDUcJo+AK+CZFKLt95/MpSmMt0TiW9POxse6UvYZ6zB2wFgjFiNpOGesR0rZ0PVPXf8KhUCl22CwClz4eN8weoZBb9c0bdPsOWvHx/cYu9Y0CoNoZTJrwAbn5DrnZc6XOV+igVbnsgo0IxEomlJuA1vUIYGyq3PZBChwmExCUSmVZgMBDIUCK4UCFIv5vHIhm/XUDeAf/ADbcpd5+aXSWQAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -48,9 +48,9 @@ class Base(TorrentProvider):
'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['download'] % link['href'],
'size': self.parseSize(result.find_all('td')[4].string),
'seeders': tryInt(result.find_all('td')[6].string),
'leechers': tryInt(result.find_all('td')[7].string),
'size': self.parseSize(result.find_all('td')[5].string),
'seeders': tryInt(result.find_all('td')[7].string),
'leechers': tryInt(result.find_all('td')[8].string),
})
except:
@@ -80,7 +80,9 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentShack',
'description': 'See <a href="https://www.torrentshack.net/">TorrentShack</a>',
'description': '<a href="https://www.torrentshack.net/">TorrentShack</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',

View File

@@ -80,11 +80,12 @@ config = [{
'name': 'Torrentz',
'description': 'Torrentz is a free, fast and powerful meta-search engine. <a href="https://torrentz.eu/">Torrentz</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAQklEQVQ4y2NgAALjtJn/ycEMlGiGG0IVAxiwAKzOxaKGARcgxgC8YNSAwWoAzuRMjgsIugqfAUR5CZcBRIcHsWEAADSA96Ig020yAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False
'default': True
},
{
'name': 'verified_only',

View File

@@ -2,13 +2,13 @@ import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
log = CPLog(__name__)
class Base(TorrentProvider):
class Base(TorrentMagnetProvider):
urls = {
'test': '%s/api',
@@ -49,7 +49,7 @@ class Base(TorrentProvider):
if result['Quality'] and result['Quality'] not in result['MovieTitle']:
title = result['MovieTitle'] + ' BrRip ' + result['Quality']
else:
else:
title = result['MovieTitle'] + ' BrRip'
results.append({
@@ -79,6 +79,7 @@ config = [{
'name': 'Yify',
'description': 'Free provider, less accurate. Small HD movies, encoded by <a href="https://yify-torrents.com/">Yify</a>.',
'wizard': False,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACL0lEQVR4AS1SPW/UQBAd23fxne/Ld2dvzvHuzPocEBAKokCBqGiQ6IgACYmvUKRBFEQgKKGg4BAlUoggggYUEQpSHOI7CIEoQs/fYcbLaU/efTvvvZlnA1qydoxU5kcxX0CkgmQZtPy0hCUjvK+WgEByOZ5dns1O5bzna8fRVkgsxH8B0YouIvBhdD5T11NiVOoKrsttyUcpRW0InUrFnwe9HzuP2uaQZYhF2LQ76TTXw2RVMTK8mYYbjfh+zNquMVCrqn93aArLSixPxnafdGDLaz1tjY5rmNa8z5BczEQOxQfCl1GyoqoWxYRN1bkh7ELw3q/vhP6HIL4TG9KumpjgvwuyM7OsjSj98E/vszMfZ7xvPtMaWxGO5crwIumKCR5HxDtJ0AWKGG204RfUd/3smJYqwem/Q7BTS1ZGfM4LNpVwuKAz6cMeROst0S2EwNE7GjTehO2H3dxqIpdkydat15G3F8SXBi4GlpBNlSz012L/k2+W0CLLk/jbcf13rf41yJeMQ8QWUZiHCfCA9ad+81nEKPtoS9mJOf9v0NmMJHgUT6xayheK9EIK7JJeU/AF4scDF7Y5SPlJrRcxJ+um4ibNEdObxLiIwJim+eT2AL5D9CIcnZ5zvSJi9eIlNHVVtZ831dk5svPgvjPWTq+ktWkd/kD0qtm71x+sDQe3kt6DXnM7Ct+GajmTxKlkAokWljyAKSm5oWa2w+BH4P2UuVub7eTyiGOQYapY/wEztHduSDYz5gAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',

View File

@@ -1,6 +1,6 @@
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import mergeDicts
from couchpotato.core.helpers.variable import mergeDicts, getImdb
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@@ -35,12 +35,21 @@ class Search(Plugin):
elif isinstance(types, (list, tuple, set)):
types = list(types)
imdb_identifier = getImdb(q)
if not types:
result = fireEvent('info.search', q = q, merge = True)
if imdb_identifier:
result = fireEvent('movie.info', identifier = imdb_identifier, merge = True)
result = {result['type']: [result]}
else:
result = fireEvent('info.search', q = q, merge = True)
else:
result = {}
for media_type in types:
result[media_type] = fireEvent('%s.search' % media_type)
if imdb_identifier:
result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier)
else:
result[media_type] = fireEvent('%s.search' % media_type, q = q)
return mergeDicts({
'success': True,

View File

@@ -3,7 +3,6 @@
vertical-align: middle;
text-align: right;
transition: all .4s cubic-bezier(0.9,0,0.1,1);
position: absolute;
z-index: 20;
border: 0 solid transparent;
border-bottom-width: 4px;

View File

@@ -13,6 +13,9 @@ var BlockSearch = new Class({
self.input = new Element('input', {
'placeholder': 'Search & add a new media',
'events': {
'input': self.keyup.bind(self),
'paste': self.keyup.bind(self),
'change': self.keyup.bind(self),
'keyup': self.keyup.bind(self),
'focus': function(){
if(focus_timer) clearTimeout(focus_timer);

View File

@@ -2,6 +2,7 @@ import os
import traceback
import time
from CodernityDB.database import RecordNotFound
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
@@ -90,7 +91,7 @@ class MovieBase(MovieTypeBase):
# Default profile and category
default_profile = {}
if not params.get('profile_id'):
if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False):
default_profile = fireEvent('profile.default', single = True)
cat_id = params.get('category_id')
@@ -105,7 +106,7 @@ class MovieBase(MovieTypeBase):
'imdb': params.get('identifier')
},
'status': status if status else 'active',
'profile_id': params.get('profile_id', default_profile.get('_id')),
'profile_id': params.get('profile_id') or default_profile.get('_id'),
'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
}
@@ -117,8 +118,17 @@ class MovieBase(MovieTypeBase):
media['info'] = info
new = False
previous_profile = None
try:
m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc']
try:
db.get('id', m.get('profile_id'))
previous_profile = m.get('profile_id')
except RecordNotFound:
pass
except:
log.error('Failed getting previous profile: %s', traceback.format_exc())
except:
new = True
m = db.insert(media)
@@ -146,9 +156,10 @@ class MovieBase(MovieTypeBase):
else:
fireEvent('release.delete', release['_id'], single = True)
m['profile_id'] = params.get('profile_id', default_profile.get('id'))
m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile
m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None)
m['last_edit'] = int(time.time())
m['tags'] = []
do_search = True
db.update(m)
@@ -225,7 +236,7 @@ class MovieBase(MovieTypeBase):
db.update(m)
fireEvent('media.restatus', m['_id'])
fireEvent('media.restatus', m['_id'], single = True)
m = db.get('id', media_id)

View File

@@ -252,7 +252,7 @@ MA.Release = new Class({
self.trynext_container.adopt(
new Element('span.or', {
'text': 'This movie is snatched, if anything went wrong, download'
'text': 'If anything went wrong, download'
}),
lr ? new Element('a.button.orange', {
'text': 'the same release again',
@@ -302,7 +302,7 @@ MA.Release = new Class({
self.movie.data.releases.each(function(release){
if(has_available && has_snatched) return;
if(['snatched', 'downloaded', 'seeding'].contains(release.status))
if(['snatched', 'downloaded', 'seeding', 'done'].contains(release.status))
has_snatched = true;
if(['available'].contains(release.status))

View File

@@ -357,18 +357,40 @@
top: 30px;
clear: both;
bottom: 30px;
overflow: hidden;
position: absolute;
}
.movies .data:hover .description {
overflow: auto;
}
.movies.list_list .movie:not(.details_view) .info .description,
.movies.mass_edit_list .info .description,
.movies.thumbs_list .info .description {
display: none;
}
.movies .data .eta {
display: none;
}
.movies.details_list .data .eta {
position: absolute;
bottom: 0;
right: 0;
display: block;
min-height: 20px;
text-align: right;
font-style: italic;
opacity: .8;
font-size: 11px;
}
.movies.details_list .movie:hover .data .eta {
display: none;
}
.movies.thumbs_list .data .eta {
display: block;
position: absolute;
bottom: 40px;
}
.movies .data .quality {
position: absolute;
bottom: 2px;

View File

@@ -136,6 +136,21 @@ var Movie = new Class({
self.el.addClass('status_'+self.get('status'));
var eta = null,
eta_date = null,
now = Math.round(+new Date()/1000);
if(self.data.info.release_date)
[self.data.info.release_date.dvd, self.data.info.release_date.theater].each(function(timestamp){
if (timestamp > 0 && (eta == null || Math.abs(timestamp - now) < Math.abs(eta - now)))
eta = timestamp;
});
if(eta){
eta_date = new Date(eta * 1000);
eta_date = eta_date.toLocaleString('en-us', { month: "long" }) + ' ' + eta_date.getFullYear();
}
self.el.adopt(
self.select_checkbox = new Element('input[type=checkbox].inlay', {
'events': {
@@ -158,9 +173,13 @@ var Movie = new Class({
'text': self.data.info.year || 'n/a'
})
),
self.description = new Element('div.description', {
self.description = new Element('div.description.tiny_scroll', {
'text': self.data.info.plot
}),
self.eta = eta_date && (now+8035200 > eta) ? new Element('div.eta', {
'text': eta_date,
'title': 'ETA'
}) : null,
self.quality = new Element('div.quality', {
'events': {
'click': function(e){

View File

@@ -45,6 +45,7 @@ class Charts(Plugin):
if catched_charts:
return catched_charts
charts = []
try:
self.update_in_progress = True
charts = fireEvent('automation.get_chart_list', merge = True)

View File

@@ -3,26 +3,21 @@
margin-bottom: 30px;
}
.charts > h2 {
height: 40px;
}
.charts > h2 {
height: 40px;
}
.charts .chart {
display: inline-block;
width: 50%;
vertical-align: top;
max-height: 510px;
overflow: hidden;
scrollbar-base-color: #4e5969;
}
.charts .chart {
display: inline-block;
width: 50%;
vertical-align: top;
max-height: 510px;
scrollbar-base-color: #4e5969;
}
.charts .chart:hover {
overflow-y: auto;
}
.charts .chart .media_result.hidden {
display: none;
}
.charts .chart .media_result.hidden {
display: none;
}
.charts .refresh {
clear:both;
@@ -36,30 +31,30 @@
text-align:center;
}
.charts .refresh a {
text-align: center;
padding: 0;
display: none;
width: 30px;
height: 30px;
position: absolute;
right: 10px;
top: -40px;
opacity: .7;
}
.charts .refresh a {
text-align: center;
padding: 0;
display: none;
width: 30px;
height: 30px;
position: absolute;
right: 10px;
top: -40px;
opacity: .7;
}
.charts .refresh a:hover {
opacity: 1;
}
.charts .refresh a:hover {
opacity: 1;
}
.charts p.no_charts_enabled {
padding: 0.7em 1em;
display: none;
}
.charts p.no_charts_enabled {
padding: 0.7em 1em;
display: none;
}
.charts .chart h3 a {
color: #fff;
}
.charts .chart h3 a {
color: #fff;
}
.charts .chart .media_result {
@@ -148,7 +143,6 @@
padding: 0 3px 10px 0;
}
.charts .chart .media_result .data:before {
bottom: 0;
content: '';
display: block;
height: 10px;

View File

@@ -22,9 +22,11 @@ var Charts = new Class({
'events': {
'click': function(e) {
e.preventDefault();
self.el.getChildren('div.chart').destroy();
self.el.getElements('.chart').destroy();
self.el_refreshing_text.show();
self.el_refresh_link.hide();
self.api_request = Api.request('charts.view', {
'data': { 'force_update': 1 },
'onComplete': self.fill.bind(self)
@@ -72,7 +74,7 @@ var Charts = new Class({
Object.each(json.charts, function(chart){
var c = new Element('div.chart').grab(
var c = new Element('div.chart.tiny_scroll').grab(
new Element('h3').grab( new Element('a', {
'text': chart.name,
'href': chart.url

View File

@@ -1,4 +1,5 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.library.base import LibraryBase
@@ -17,7 +18,9 @@ class MovieLibraryPlugin(LibraryBase):
if media.get('type') != 'movie':
return
default_title = getTitle(media)
titles = media['info'].get('titles', [])
titles.insert(0, default_title)
# Add year identifier to titles
if include_year:

View File

@@ -3,6 +3,7 @@ import re
from bs4 import BeautifulSoup
from couchpotato import fireEvent
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt
from couchpotato.core.logger import CPLog
@@ -28,6 +29,39 @@ class IMDBBase(Automation, RSS):
def getInfo(self, imdb_id):
return fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)
def getFromURL(self, url):
    """Fetch an IMDB list page and return the IMDB ids found on it.

    Tries a cheap string-split of the known list markup first; if the page
    layout doesn't match, falls back to parsing with BeautifulSoup and
    scanning the known list container classes. Returns a list of IMDB id
    strings (possibly empty).
    """
    log.debug('Getting IMDBs from: %s', url)
    html = self.getHTMLData(url)

    try:
        # Fast path: slice out the compact-list block between the list
        # container and the pagination markup, no HTML parsing needed.
        split = splitString(html, split_on = "<div class=\"list compact\">")[1]
        html = splitString(split, split_on = "<div class=\"pages\">")[0]
    except:
        # Fallback: layout changed; parse the "main" div and look for any of
        # the known IMDB list container classes.
        try:
            split = splitString(html, split_on = "<div id=\"main\">")

            if len(split) < 2:
                log.error('Failed parsing IMDB page "%s", unexpected html.', url)
                return []

            html = BeautifulSoup(split[1])
            for x in ['list compact', 'lister', 'list detail sub-list']:
                html2 = html.find('div', attrs = {
                    'class': x
                })

                if html2:
                    # Flatten the matched container back to a string so the
                    # regex-based getImdb() below can scan it.
                    html = html2.contents
                    html = ''.join([str(x) for x in html])
                    break
        except:
            # NOTE(review): relies on a module-level `traceback` import — confirm it is present at file top.
            log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))

    # `html` may still be a BeautifulSoup object here if no known container
    # matched; ss()/getImdb() operate on its string form in that case.
    html = ss(html)
    imdbs = getImdb(html, multiple = True) if html else []

    return imdbs
class IMDBWatchlist(IMDBBase):
@@ -65,16 +99,7 @@ class IMDBWatchlist(IMDBBase):
try:
w_url = '%s&start=%s' % (watchlist_url, start)
log.debug('Started IMDB watchlists: %s', w_url)
html = self.getHTMLData(w_url)
try:
split = splitString(html, split_on="<div class=\"list compact\">")[1]
html = splitString(split, split_on="<div class=\"pages\">")[0]
except:
pass
imdbs = getImdb(html, multiple = True) if html else []
imdbs = self.getFromURL(w_url)
for imdb in imdbs:
if imdb not in movies:
@@ -85,13 +110,14 @@ class IMDBWatchlist(IMDBBase):
log.debug('Found %s movies on %s', (len(imdbs), w_url))
if len(imdbs) < 250:
if len(imdbs) < 225:
break
start += 250
start = len(movies)
except:
log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))
break
return movies
@@ -109,12 +135,12 @@ class IMDBAutomation(IMDBBase):
'boxoffice': {
'order': 2,
'name': 'IMDB - Box Office',
'url': 'http://www.imdb.com/chart/',
'url': 'http://www.imdb.com/boxoffice/',
},
'rentals': {
'order': 3,
'name': 'IMDB - Top DVD rentals',
'url': 'http://m.imdb.com/boxoffice_json',
'url': 'http://www.imdb.com/boxoffice/rentals',
'type': 'json',
},
'top250': {
@@ -124,8 +150,6 @@ class IMDBAutomation(IMDBBase):
},
}
first_table = ['boxoffice']
def getIMDBids(self):
movies = []
@@ -135,36 +159,19 @@ class IMDBAutomation(IMDBBase):
url = chart.get('url')
if self.conf('automation_charts_%s' % name):
data = self.getHTMLData(url)
imdb_ids = self.getFromURL(url)
if data:
try:
html = BeautifulSoup(data)
try:
for imdb_id in imdb_ids:
info = self.getInfo(imdb_id)
if info and self.isMinimalMovie(info):
movies.append(imdb_id)
if chart.get('type', 'html') == 'html':
result_div = html.find('div', attrs = {'id': 'main'})
if self.shuttingDown():
break
try:
if url in self.first_table:
table = result_div.find('table')
result_div = table if table else result_div
except:
pass
imdb_ids = getImdb(str(result_div), multiple = True)
else:
imdb_ids = getImdb(str(data), multiple = True)
for imdb_id in imdb_ids:
info = self.getInfo(imdb_id)
if info and self.isMinimalMovie(info):
movies.append(imdb_id)
if self.shuttingDown():
break
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
return movies
@@ -182,42 +189,25 @@ class IMDBAutomation(IMDBBase):
chart['list'] = []
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
imdb_ids = self.getFromURL(url)
try:
try:
for imdb_id in imdb_ids[0:max_items]:
if chart.get('type', 'html') == 'html':
result_div = html.find('div', attrs = {'id': 'main'})
is_movie = fireEvent('movie.is_movie', identifier = imdb_id, single = True)
if not is_movie:
continue
try:
if url in self.first_table:
table = result_div.find('table')
result_div = table if table else result_div
except:
pass
info = self.getInfo(imdb_id)
chart['list'].append(info)
imdb_ids = getImdb(str(result_div), multiple = True)
else:
imdb_ids = getImdb(str(data), multiple = True)
if self.shuttingDown():
break
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
for imdb_id in imdb_ids[0:max_items]:
is_movie = fireEvent('movie.is_movie', identifier = imdb_id, single = True)
if not is_movie:
continue
info = self.getInfo(imdb_id)
chart['list'].append(info)
if self.shuttingDown():
break
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
if chart['list']:
movie_lists.append(chart)
if chart['list']:
movie_lists.append(chart)
return movie_lists
@@ -273,7 +263,7 @@ config = [{
'name': 'automation_charts_rentals',
'type': 'bool',
'label': 'DVD Rentals',
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals/">rentals</a> chart',
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals">rentals</a> chart',
'default': True,
},
{
@@ -322,7 +312,7 @@ config = [{
'name': 'chart_display_rentals',
'type': 'bool',
'label': 'DVD Rentals',
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals/">rentals</a> chart',
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals">rentals</a> chart',
'default': True,
},
{

View File

@@ -21,11 +21,15 @@ class Moviemeter(Automation, RSS):
for movie in rss_movies:
name_year = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
imdb = self.search(name_year.get('name'), name_year.get('year'))
title = self.getTextElement(movie, 'title')
name_year = fireEvent('scanner.name_year', title, single = True)
if name_year.get('name') and name_year.get('year'):
imdb = self.search(name_year.get('name'), name_year.get('year'))
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
else:
log.error('Failed getting name and year from: %s', title)
return movies

View File

@@ -0,0 +1,47 @@
from couchpotato import fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'PopularMovies'
class PopularMovies(Automation):
    """Automation provider importing Steven Lu's 'popular movies' JSON feed.

    The feed is a static S3-hosted JSON document of the form
    ``{"movies": [{"imdb_id": ...}, ...]}`` produced by
    https://github.com/sjlu/popular-movies.
    """

    interval = 1800  # seconds between automation runs
    url = 'https://s3.amazonaws.com/popular-movies/movies.json'

    def getIMDBids(self):
        """Return IMDB ids from the feed that pass the minimal-movie filter.

        Returns an empty list when the feed cannot be fetched or is empty,
        instead of raising on a None/falsy response from getJsonData.
        """
        movies = []

        retrieved_movies = self.getJsonData(self.url)
        if not retrieved_movies:
            # Download or JSON-parse failure: getJsonData returns a falsy
            # value, which previously crashed on .get('movies')/iteration.
            return movies

        for movie in retrieved_movies.get('movies') or []:
            imdb_id = movie.get('imdb_id')
            info = fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)

            # Guard against a failed info lookup before filtering, matching
            # the other automation providers (e.g. the IMDB chart provider).
            if info and self.isMinimalMovie(info):
                movies.append(imdb_id)

        return movies
# Settings-UI definition: adds a "Popular Movies" enabler on the Automation
# tab so the provider above can be switched on per user preference.
config = [{
    'name': 'popularmovies',
    'groups': [
        {
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'popularmovies_automation',
            'label': 'Popular Movies',
            'description': 'Imports the <a href="http://movies.stevenlu.com/">top titles of movies that have been in theaters</a>. Script provided by <a href="https://github.com/sjlu/popular-movies">Steven Lu</a>',
            'options': [
                {
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]

View File

@@ -26,7 +26,14 @@ class MovieResultModifier(Plugin):
'backdrop': [],
'poster_original': [],
'backdrop_original': [],
'actors': {}
'actors': {},
'landscape': [],
'logo': [],
'clear_art': [],
'disc_art': [],
'banner': [],
'extra_thumbs': [],
'extra_fanart': []
},
'runtime': 0,
'plot': '',

View File

@@ -29,7 +29,7 @@ class CouchPotatoApi(MovieProvider):
api_version = 1
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 1)
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info.release_date', self.getReleaseDate)
addEvent('info.search', self.search, priority = 1)

View File

@@ -0,0 +1,130 @@
import traceback
from couchpotato import tryInt
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
log = CPLog(__name__)
autoload = 'FanartTV'
class FanartTV(MovieProvider):
    """Movie info provider that enriches results with artwork from fanart.tv.

    Hooks into 'movie.info' (priority 1, so it runs before the main info
    providers) and contributes landscape/logo/disc/clear-art/banner images
    plus extra fanart for a given IMDB identifier.
    """

    urls = {
        'api': 'http://api.fanart.tv/webservice/movie/b28b14e9be662e027cfbc7c3dd600405/%s/JSON/all/1/2'
    }

    MAX_EXTRAFANART = 20  # cap on extra fanart images fetched per movie
    http_time_between_calls = 0

    def __init__(self):
        addEvent('movie.info', self.getArt, priority = 1)

    def getArt(self, identifier = None, **kwargs):
        """Return {'images': {...}} for the given IMDB id, or {} on failure."""
        log.debug("Getting Extra Artwork from Fanart.tv...")
        if not identifier:
            return {}

        images = {}

        try:
            url = self.urls['api'] % identifier
            fanart_data = self.getJsonData(url)

            if fanart_data:
                # The API responds with a single {movie_name: resource} pair.
                # next(iter(...)) works on both py2 lists and py3 dict views,
                # unlike the previous .items()[0] indexing.
                name, resource = next(iter(fanart_data.items()))
                log.debug('Found images for %s', name)
                images = self._parseMovie(resource)

        except:
            log.error('Failed getting extra art for %s: %s',
                      (identifier, traceback.format_exc()))
            return {}

        return {
            'images': images
        }

    def _parseMovie(self, movie):
        """Map the fanart.tv resource dict onto CouchPotato image buckets."""
        images = {
            'landscape': self._getMultImages(movie.get('moviethumb', []), 1),
            'logo': [],
            'disc_art': self._getMultImages(self._trimDiscs(movie.get('moviedisc', [])), 1),
            'clear_art': self._getMultImages(movie.get('hdmovieart', []), 1),
            'banner': self._getMultImages(movie.get('moviebanner', []), 1),
            'extra_fanart': [],
        }

        # Fall back to the SD variants when no HD art exists.
        if len(images['clear_art']) == 0:
            images['clear_art'] = self._getMultImages(movie.get('movieart', []), 1)

        images['logo'] = self._getMultImages(movie.get('hdmovielogo', []), 1)
        if len(images['logo']) == 0:
            images['logo'] = self._getMultImages(movie.get('movielogo', []), 1)

        # First background becomes the main backdrop, the rest extra fanart.
        fanarts = self._getMultImages(movie.get('moviebackground', []), self.MAX_EXTRAFANART + 1)

        if fanarts:
            images['backdrop_original'] = [fanarts[0]]
            images['extra_fanart'] = fanarts[1:]

        return images

    def _trimDiscs(self, disc_images):
        """
        Return a subset of disc_images: only bluray disc images, or all of
        them when no bluray art exists.
        """
        trimmed = [disc for disc in disc_images if disc.get('disc_type') == 'bluray']
        return trimmed if trimmed else disc_images

    def _getImage(self, images):
        """Return the url of the single most-liked image, or None."""
        image_url = None
        highscore = -1
        for image in images:
            if tryInt(image.get('likes')) > highscore:
                highscore = tryInt(image.get('likes'))
                image_url = image.get('url')

        return image_url

    def _getMultImages(self, images, n):
        """
        Return the urls of the best n English-language images, ranked by
        like count. If n < 0, all English images are returned.
        """
        pool = [image for image in images if image.get('lang') == 'en']

        # Stable sort keeps the feed's original order for equal like counts,
        # matching the previous repeated-max selection (which used a strict >).
        ranked = sorted(pool, key = lambda image: tryInt(image.get('likes')), reverse = True)
        if n >= 0:
            ranked = ranked[:n]

        return [image.get('url') for image in ranked]

    def isDisabled(self):
        # NOTE(review): the API key is hard-coded into urls['api'] above, so
        # this 'api_key' setting check looks vestigial — confirm the config
        # actually defines it before relying on this method.
        if self.conf('api_key') == '':
            log.error('No API key provided.')
            return True
        return False

View File

@@ -13,9 +13,10 @@ autoload = 'TheMovieDb'
class TheMovieDb(MovieProvider):
MAX_EXTRATHUMBS = 4
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info', self.getInfo, priority = 3)
addEvent('movie.info_by_tmdb', self.getInfo)
# Configure TMDB settings
@@ -97,16 +98,18 @@ class TheMovieDb(MovieProvider):
if not movie_data:
# Images
poster = self.getImage(movie, type = 'poster', size = 'poster')
poster = self.getImage(movie, type = 'poster', size = 'w154')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original', n = self.MAX_EXTRATHUMBS, skipfirst = True)
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {}
'actors': {},
'extra_thumbs': extra_thumbs
}
# Genres
@@ -150,8 +153,10 @@ class TheMovieDb(MovieProvider):
movie_data = dict((k, v) for k, v in movie_data.items() if v)
# Add alternative names
if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']:
movie_data['titles'].append(movie_data['original_title'])
if extended:
movie_data['titles'].append(movie.originaltitle)
for alt in movie.alternate_titles:
alt_name = alt.title
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
@@ -172,6 +177,30 @@ class TheMovieDb(MovieProvider):
return image_url
def getMultImages(self, movie, type = 'backdrops', size = 'original', n = -1, skipfirst = False):
    """
    Return up to n image urls of the given type (all images when n < 0,
    or when n exceeds what is available).

    If skipfirst is True, getattr(movie, type)[0] is skipped, because
    backdrops[0] is typically already used as the main backdrop.
    """
    image_urls = []
    try:
        images = getattr(movie, type)
        offset = int(skipfirst)

        # Number of images actually available once the skipped one is
        # excluded. The previous version used len(images) directly, which
        # indexed one past the end (IndexError, silently swallowed below)
        # whenever skipfirst was set together with n < 0 or n > available.
        available = max(len(images) - offset, 0)
        num_images = available if (n < 0 or n > available) else n

        for i in range(offset, offset + num_images):
            image_urls.append(images[i].geturl(size = size))
    except:
        # Best-effort: missing attribute or a geturl failure yields whatever
        # was collected so far.
        log.debug('Failed getting %i %s.%s for "%s"', (n, type, size, ss(str(movie))))

    return image_urls
def isDisabled(self):
if self.conf('api_key') == '':
log.error('No API key provided.')

View File

@@ -4,7 +4,7 @@ import traceback
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import getIdentifier
from couchpotato.core.helpers.variable import getIdentifier, underscoreToCamel
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.metadata.base import MetaDataBase
from couchpotato.environment import Env
@@ -38,75 +38,150 @@ class MovieMetaData(MetaDataBase):
movie_info = group['media'].get('info')
for file_type in ['nfo', 'thumbnail', 'fanart']:
for file_type in ['nfo']:
try:
# Get file path
name = getattr(self, 'get' + file_type.capitalize() + 'Name')(meta_name, root)
self._createType(meta_name, root, movie_info, group, file_type, 0)
except:
log.error('Unable to create %s file: %s', ('nfo', traceback.format_exc()))
if name and (self.conf('meta_' + file_type) or self.conf('meta_' + file_type) is None):
# Get file content
content = getattr(self, 'get' + file_type.capitalize())(movie_info = movie_info, data = group)
if content:
log.debug('Creating %s file: %s', (file_type, name))
if os.path.isfile(content):
content = sp(content)
name = sp(name)
shutil.copy2(content, name)
shutil.copyfile(content, name)
# Try and copy stats seperately
try: shutil.copystat(content, name)
except: pass
else:
self.createFile(name, content)
group['renamed_files'].append(name)
try:
os.chmod(sp(name), Env.getPermission('file'))
except:
log.debug('Failed setting permissions for %s: %s', (name, traceback.format_exc()))
for file_type in ['thumbnail', 'fanart', 'banner', 'disc_art', 'logo', 'clear_art', 'landscape', 'extra_thumbs', 'extra_fanart']:
try:
if file_type == 'thumbnail':
num_images = len(movie_info['images']['poster_original'])
elif file_type == 'fanart':
num_images = len(movie_info['images']['backdrop_original'])
else:
num_images = len(movie_info['images'][file_type])
for i in range(num_images):
self._createType(meta_name, root, movie_info, group, file_type, i)
except:
log.error('Unable to create %s file: %s', (file_type, traceback.format_exc()))
def _createType(self, meta_name, root, movie_info, group, file_type, i): # Get file path
camelcase_method = underscoreToCamel(file_type.capitalize())
name = getattr(self, 'get' + camelcase_method + 'Name')(meta_name, root, i)
if name and (self.conf('meta_' + file_type) or self.conf('meta_' + file_type) is None):
# Get file content
content = getattr(self, 'get' + camelcase_method)(movie_info = movie_info, data = group, i = i)
if content:
log.debug('Creating %s file: %s', (file_type, name))
if os.path.isfile(content):
content = sp(content)
name = sp(name)
if not os.path.exists(os.path.dirname(name)):
os.makedirs(os.path.dirname(name))
shutil.copy2(content, name)
shutil.copyfile(content, name)
# Try and copy stats seperately
try: shutil.copystat(content, name)
except: pass
else:
self.createFile(name, content)
group['renamed_files'].append(name)
try:
os.chmod(sp(name), Env.getPermission('file'))
except:
log.debug('Failed setting permissions for %s: %s', (name, traceback.format_exc()))
def getRootName(self, data = None):
if not data: data = {}
return os.path.join(data['destination_dir'], data['filename'])
def getFanartName(self, name, root):
def getFanartName(self, name, root, i):
return
def getThumbnailName(self, name, root):
def getThumbnailName(self, name, root, i):
return
def getNfoName(self, name, root):
def getBannerName(self, name, root, i):
return
def getNfo(self, movie_info = None, data = None):
def getClearArtName(self, name, root, i):
return
def getLogoName(self, name, root, i):
return
def getDiscArtName(self, name, root, i):
return
def getLandscapeName(self, name, root, i):
return
def getExtraThumbsName(self, name, root, i):
return
def getExtraFanartName(self, name, root, i):
return
def getNfoName(self, name, root, i):
return
def getNfo(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
def getThumbnail(self, movie_info = None, data = None, wanted_file_type = 'poster_original'):
def getThumbnail(self, movie_info = None, data = None, wanted_file_type = 'poster_original', i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
# See if it is in current files
files = data['media'].get('files')
if files.get('image_' + wanted_file_type):
if os.path.isfile(files['image_' + wanted_file_type][0]):
return files['image_' + wanted_file_type][0]
if os.path.isfile(files['image_' + wanted_file_type][i]):
return files['image_' + wanted_file_type][i]
# Download using existing info
try:
images = movie_info['images'][wanted_file_type]
file_path = fireEvent('file.download', url = images[0], single = True)
file_path = fireEvent('file.download', url = images[i], single = True)
return file_path
except:
pass
def getFanart(self, movie_info = None, data = None):
def getFanart(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original')
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original', i = i)
def getBanner(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'banner', i = i)
def getClearArt(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'clear_art', i = i)
def getLogo(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'logo', i = i)
def getDiscArt(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'disc_art', i = i)
def getLandscape(self, movie_info = None, data = None, i = 0):
    """Return the i-th landscape image, delegating to getThumbnail with the 'landscape' type."""
    movie_info = movie_info or {}
    data = data or {}
    return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'landscape', i = i)
def getExtraThumbs(self, movie_info = None, data = None, i = 0):
    """Return the i-th extra thumbnail, delegating to getThumbnail with the 'extra_thumbs' type."""
    movie_info = movie_info or {}
    data = data or {}
    return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_thumbs', i = i)
def getExtraFanart(self, movie_info = None, data = None, i = 0):
    """Return the i-th extra fanart image, delegating to getThumbnail with the 'extra_fanart' type."""
    movie_info = movie_info or {}
    data = data or {}
    return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_fanart', i = i)

View File

@@ -0,0 +1,36 @@
import os
from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData
autoload = 'MediaBrowser'
class MediaBrowser(MovieMetaData):
    """Metadata provider that writes images using MediaBrowser's fixed file names."""

    def getThumbnailName(self, name, root, i):
        # MediaBrowser ignores the movie name/index: the poster is always folder.jpg
        return os.path.join(root, 'folder.jpg')

    def getFanartName(self, name, root, i):
        # Fanart likewise uses the fixed name backdrop.jpg inside the movie folder
        return os.path.join(root, 'backdrop.jpg')
config = [{
'name': 'mediabrowser',
'groups': [
{
'tab': 'renamer',
'subtab': 'metadata',
'name': 'mediabrowser_metadata',
'label': 'MediaBrowser',
'description': 'Generate folder.jpg and backdrop.jpg',
'options': [
{
'name': 'meta_enabled',
'default': False,
'type': 'enabler',
},
],
},
],
}]

View File

@@ -0,0 +1,33 @@
import os
from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData
autoload = 'SonyPS3'
class SonyPS3(MovieMetaData):
    """Metadata provider that writes the poster as cover.jpg for Sony PS3 media browsing."""

    def getThumbnailName(self, name, root, i):
        # The PS3 ignores the movie name/index and only looks for cover.jpg
        return os.path.join(root, 'cover.jpg')
config = [{
'name': 'sonyps3',
'groups': [
{
'tab': 'renamer',
'subtab': 'metadata',
'name': 'sonyps3_metadata',
'label': 'Sony PS3',
'description': 'Generate cover.jpg',
'options': [
{
'name': 'meta_enabled',
'default': False,
'type': 'enabler',
},
],
},
],
}]

View File

@@ -8,7 +8,7 @@ autoload = 'WindowsMediaCenter'
class WindowsMediaCenter(MovieMetaData):
def getThumbnailName(self, name, root):
def getThumbnailName(self, name, root, i):
    # Windows Media Center ignores the movie name/index: the poster is always folder.jpg
    thumbnail_file = 'folder.jpg'
    return os.path.join(root, thumbnail_file)

View File

@@ -17,19 +17,43 @@ autoload = 'XBMC'
class XBMC(MovieMetaData):
def getFanartName(self, name, root):
def getFanartName(self, name, root, i):
    """Resolve the fanart file path from the user-configured filename template."""
    template = self.conf('meta_fanart_name')
    return self.createMetaName(template, name, root)
def getThumbnailName(self, name, root):
def getThumbnailName(self, name, root, i):
    """Resolve the thumbnail file path from the user-configured filename template."""
    template = self.conf('meta_thumbnail_name')
    return self.createMetaName(template, name, root)
def getNfoName(self, name, root):
def getNfoName(self, name, root, i):
    """Resolve the NFO file path from the user-configured filename template."""
    template = self.conf('meta_nfo_name')
    return self.createMetaName(template, name, root)
def getBannerName(self, name, root, i):
    """Resolve the banner file path from the configured template."""
    template = self.conf('meta_banner_name')
    return self.createMetaName(template, name, root)

def getClearArtName(self, name, root, i):
    """Resolve the clear-art file path from the configured template."""
    template = self.conf('meta_clear_art_name')
    return self.createMetaName(template, name, root)

def getLogoName(self, name, root, i):
    """Resolve the logo file path from the configured template."""
    template = self.conf('meta_logo_name')
    return self.createMetaName(template, name, root)

def getDiscArtName(self, name, root, i):
    """Resolve the disc-art file path from the configured template."""
    template = self.conf('meta_disc_art_name')
    return self.createMetaName(template, name, root)

def getLandscapeName(self, name, root, i):
    """Resolve the landscape file path from the configured template."""
    template = self.conf('meta_landscape_name')
    return self.createMetaName(template, name, root)

def getExtraThumbsName(self, name, root, i):
    """Resolve the i-th extra-thumbnail path; the template may contain an <i> index placeholder."""
    template = self.conf('meta_extra_thumbs_name')
    return self.createMetaNameMult(template, name, root, i)

def getExtraFanartName(self, name, root, i):
    """Resolve the i-th extra-fanart path; the template may contain an <i> index placeholder."""
    template = self.conf('meta_extra_fanart_name')
    return self.createMetaNameMult(template, name, root, i)
def createMetaName(self, basename, name, root):
    """Join root with the template basename, substituting %s with the movie name."""
    filename = basename.replace('%s', name)
    return os.path.join(root, filename)
def getNfo(self, movie_info = None, data = None):
def createMetaNameMult(self, basename, name, root, i):
    """Join root with the template basename, substituting %s with the movie name
    and <i> with the 1-based image index."""
    filename = basename.replace('%s', name).replace('<i>', str(i + 1))
    return os.path.join(root, filename)
def getNfo(self, movie_info=None, data=None, i=0):
if not data: data = {}
if not movie_info: movie_info = {}
@@ -129,10 +153,25 @@ class XBMC(MovieMetaData):
for image_url in movie_info['images']['poster_original']:
image = SubElement(nfoxml, 'thumb')
image.text = toUnicode(image_url)
fanart = SubElement(nfoxml, 'fanart')
for image_url in movie_info['images']['backdrop_original']:
image = SubElement(fanart, 'thumb')
image.text = toUnicode(image_url)
image_types = [
('fanart', 'backdrop_original'),
('banner', 'banner'),
('discart', 'disc_art'),
('logo', 'logo'),
('clearart', 'clear_art'),
('landscape', 'landscape'),
('extrathumb', 'extra_thumbs'),
('extrafanart', 'extra_fanart'),
]
for image_type in image_types:
sub, type = image_type
sub_element = SubElement(nfoxml, sub)
for image_url in movie_info['images'][type]:
image = SubElement(sub_element, 'thumb')
image.text = toUnicode(image_url)
# Add trailer if found
trailer_found = False
@@ -239,6 +278,92 @@ config = [{
'default': '%s.tbn',
'advanced': True,
},
{
'name': 'meta_banner',
'label': 'Banner',
'default': False,
'type': 'bool'
},
{
'name': 'meta_banner_name',
'label': 'Banner filename',
'default': 'banner.jpg',
'advanced': True,
},
{
'name': 'meta_clear_art',
'label': 'ClearArt',
'default': False,
'type': 'bool'
},
{
'name': 'meta_clear_art_name',
'label': 'ClearArt filename',
'default': 'clearart.png',
'advanced': True,
},
{
'name': 'meta_disc_art',
'label': 'DiscArt',
'default': False,
'type': 'bool'
},
{
'name': 'meta_disc_art_name',
'label': 'DiscArt filename',
'default': 'disc.png',
'advanced': True,
},
{
'name': 'meta_landscape',
'label': 'Landscape',
'default': False,
'type': 'bool'
},
{
'name': 'meta_landscape_name',
'label': 'Landscape filename',
'default': 'landscape.jpg',
'advanced': True,
},
{
'name': 'meta_logo',
'label': 'ClearLogo',
'default': False,
'type': 'bool'
},
{
'name': 'meta_logo_name',
'label': 'ClearLogo filename',
'default': 'logo.png',
'advanced': True,
},
{
'name': 'meta_extra_thumbs',
'label': 'Extrathumbs',
'default': False,
'type': 'bool'
},
{
'name': 'meta_extra_thumbs_name',
'label': 'Extrathumbs filename',
'description': '&lt;i&gt; is the image number, and must be included to have multiple images',
'default': 'extrathumbs/thumb<i>.jpg',
'advanced': True
},
{
'name': 'meta_extra_fanart',
'label': 'Extrafanart',
'default': False,
'type': 'bool'
},
{
'name': 'meta_extra_fanart_name',
'label': 'Extrafanart filename',
'default': 'extrafanart/extrafanart<i>.jpg',
'description': '&lt;i&gt; is the image number, and must be included to have multiple images',
'advanced': True
}
],
},
],

View File

@@ -1,14 +0,0 @@
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.torrent.publichd import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
log = CPLog(__name__)
autoload = 'PublicHD'
class PublicHD(MovieProvider, Base):
    """Movie-specific wrapper around the PublicHD torrent provider base."""

    def buildUrl(self, media):
        # Colons are stripped because they break the PublicHD search query
        query = fireEvent('library.query', media, single = True)
        return query.replace(':', '')

View File

@@ -1,5 +1,4 @@
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.event import fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.sceneaccess import Base
from couchpotato.core.media.movie.providers.base import MovieProvider

View File

@@ -1,4 +1,3 @@
from couchpotato import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.torrentleech import Base
@@ -21,8 +20,8 @@ class TorrentLeech(MovieProvider, Base):
([12], ['dvdr']),
]
def buildUrl(self, media, quality):
def buildUrl(self, title, media, quality):
return (
tryUrlencode(fireEvent('library.query', media, single = True)),
tryUrlencode(title.replace(':', '')),
self.getCatId(quality)[0]
)

View File

@@ -11,3 +11,6 @@ class TrailerProvider(Provider):
def __init__(self):
addEvent('trailer.search', self.search)
def search(self, *args, **kwargs):
pass

View File

@@ -1,5 +1,4 @@
from string import digits, ascii_letters
from urllib2 import HTTPError
import re
from bs4 import SoupStrainer, BeautifulSoup
@@ -7,6 +6,7 @@ from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import mergeDicts, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.trailer.base import TrailerProvider
from requests import HTTPError
log = CPLog(__name__)
@@ -21,6 +21,7 @@ class HDTrailers(TrailerProvider):
'backup': 'http://www.hd-trailers.net/blog/',
}
providers = ['apple.ico', 'yahoo.ico', 'moviefone.ico', 'myspace.ico', 'favicon.ico']
only_tables_tags = SoupStrainer('table')
def search(self, group):
@@ -67,8 +68,7 @@ class HDTrailers(TrailerProvider):
return results
try:
tables = SoupStrainer('div')
html = BeautifulSoup(data, parse_only = tables)
html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags)
result_table = html.find_all('h2', text = re.compile(movie_name))
for h2 in result_table:
@@ -90,8 +90,7 @@ class HDTrailers(TrailerProvider):
results = {'480p':[], '720p':[], '1080p':[]}
try:
tables = SoupStrainer('table')
html = BeautifulSoup(data, parse_only = tables)
html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags)
result_table = html.find('table', attrs = {'class':'bottomTable'})
for tr in result_table.find_all('tr'):

View File

@@ -58,13 +58,13 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
def searchAllView(self, **kwargs):
fireEventAsync('movie.searcher.all')
fireEventAsync('movie.searcher.all', manual = True)
return {
'success': not self.in_progress
}
def searchAll(self):
def searchAll(self, manual = False):
if self.in_progress:
log.info('Search already in progress')
@@ -91,7 +91,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
media = fireEvent('media.get', media_id, single = True)
try:
self.single(media, search_protocols)
self.single(media, search_protocols, manual = manual)
except IndexError:
log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc()))
fireEvent('movie.update_info', media_id)
@@ -109,7 +109,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
self.in_progress = False
def single(self, movie, search_protocols = None, manual = False):
def single(self, movie, search_protocols = None, manual = False, force_download = False):
# Find out search type
try:
@@ -120,30 +120,46 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
if not movie['profile_id'] or (movie['status'] == 'done' and not manual):
log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.')
fireEvent('media.restatus', movie['_id'], single = True)
return
default_title = getTitle(movie)
if not default_title:
log.error('No proper info found for movie, removing it from library to stop it from causing more issues.')
fireEvent('media.delete', movie['_id'], single = True)
return
# Update media status and check if it is still not done (due to the stop searching after feature
if fireEvent('media.restatus', movie['_id'], single = True) == 'done':
log.debug('No better quality found, marking movie %s as done.', default_title)
pre_releases = fireEvent('quality.pre_releases', single = True)
release_dates = fireEvent('movie.update_release_dates', movie['_id'], merge = True)
found_releases = []
previous_releases = movie.get('releases', [])
too_early_to_search = []
default_title = getTitle(movie)
if not default_title:
log.error('No proper info found for movie, removing it from library to cause it from having more issues.')
fireEvent('media.delete', movie['_id'], single = True)
return
outside_eta_results = 0
alway_search = self.conf('always_search')
ignore_eta = manual
total_result_count = 0
fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title)
# Ignore eta once every 7 days
if not alway_search:
prop_name = 'last_ignored_eta.%s' % movie['_id']
last_ignored_eta = float(Env.prop(prop_name, default = 0))
if last_ignored_eta > time.time() - 604800:
ignore_eta = True
Env.prop(prop_name, value = time.time())
db = get_db()
profile = db.get('id', movie['profile_id'])
ret = False
index = 0
for q_identifier in profile.get('qualities'):
for index, q_identifier in enumerate(profile.get('qualities', [])):
quality_custom = {
'index': index,
'quality': q_identifier,
@@ -152,11 +168,13 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
'3d': profile['3d'][index] if profile.get('3d') else False
}
index += 1
if not self.conf('always_search') and not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year']):
could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year'])
if not alway_search and could_not_be_released:
too_early_to_search.append(q_identifier)
continue
# Skip release, if ETA isn't ignored
if not ignore_eta:
continue
has_better_quality = 0
@@ -173,19 +191,24 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
# Don't search for quality lower then already available.
if has_better_quality > 0:
log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title))
fireEvent('media.restatus', movie['_id'])
fireEvent('media.restatus', movie['_id'], single = True)
break
quality = fireEvent('quality.single', identifier = q_identifier, single = True)
log.info('Search for %s in %s', (default_title, quality['label']))
log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if alway_search or ignore_eta else ''))
# Extend quality with profile customs
quality['custom'] = quality_custom
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
if len(results) == 0:
results_count = len(results)
total_result_count += results_count
if results_count == 0:
log.debug('Nothing found for %s in %s', (default_title, quality['label']))
# Keep track of releases found outside ETA window
outside_eta_results += results_count if could_not_be_released else 0
# Check if movie isn't deleted while searching
if not fireEvent('media.get', movie.get('_id'), single = True):
break
@@ -193,8 +216,13 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
# Add them to this movie releases list
found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
# Don't trigger download, but notify user of available releases
if could_not_be_released:
if results_count > 0:
log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title))
# Try find a valid result and download it
if fireEvent('release.try_download_result', results, movie, quality_custom, manual, single = True):
if (force_download or not could_not_be_released or alway_search) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True):
ret = True
# Remove releases that aren't found anymore
@@ -211,9 +239,19 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
if self.shuttingDown() or ret:
break
if total_result_count > 0:
fireEvent('media.tag', movie['_id'], 'recent', single = True)
if len(too_early_to_search) > 0:
log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
if outside_eta_results > 0:
message = 'Found %s releases for "%s" before ETA. Select and download via the dashboard.' % (outside_eta_results, default_title)
log.info(message)
if not manual:
fireEvent('media.available', message = message, data = {})
fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'_id': movie['_id']})
return ret
@@ -334,13 +372,13 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
def tryNextReleaseView(self, media_id = None, **kwargs):
trynext = self.tryNextRelease(media_id, manual = True)
trynext = self.tryNextRelease(media_id, manual = True, force_download = True)
return {
'success': trynext
}
def tryNextRelease(self, media_id, manual = False):
def tryNextRelease(self, media_id, manual = False, force_download = False):
try:
db = get_db()
@@ -352,7 +390,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
movie_dict = fireEvent('media.get', media_id, single = True)
log.info('Trying next release for: %s', getTitle(movie_dict))
self.single(movie_dict, manual = manual)
self.single(movie_dict, manual = manual, force_download = force_download)
return True

View File

@@ -1,4 +1,3 @@
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import splitString, removeDuplicate, getIdentifier
@@ -84,7 +83,6 @@ class Suggestion(Plugin):
# Get new results and add them
if len(new_suggestions) - 1 < limit:
db = get_db()
active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)
movies = [getIdentifier(x) for x in active_movies]
movies.extend(seen)

View File

@@ -15,6 +15,7 @@ class Notification(Provider):
test_message = 'ZOMG Lazors Pewpewpew!'
listen_to = [
'media.available',
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message.important',

View File

@@ -1,69 +0,0 @@
import time
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Boxcar'
class Boxcar(Notification):
    """Send push notifications through the legacy Boxcar provider API."""

    # Provider-specific notification endpoint (the provider key is CouchPotato's)
    url = 'https://boxcar.io/devices/providers/7MNNXY3UIzVBwvzkKwkC/notifications'

    def notify(self, message = '', data = None, listener = None):
        # `data`/`listener` are part of the Notification interface; Boxcar only
        # needs the message text and the configured account email.
        if not data: data = {}

        try:
            payload = {
                'email': self.conf('email'),
                'notification[from_screen_name]': self.default_title,
                'notification[message]': toUnicode(message.strip()),
                # Unique-ish id so Boxcar doesn't collapse repeated notifications
                'notification[from_remote_service_id]': int(time.time()),
            }
            self.urlopen(self.url, data = payload)
        except:
            log.error('Check your email and added services on boxcar.io')
            return False

        log.info('Boxcar notification successful.')
        return True

    def isEnabled(self):
        # Enabled only when the base enabler is on AND an email is configured
        return super(Boxcar, self).isEnabled() and self.conf('email')
config = [{
'name': 'boxcar',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'boxcar',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'email',
'description': 'Your Boxcar registration emailaddress.'
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]

View File

@@ -28,6 +28,7 @@ class CoreNotifier(Notification):
m_lock = None
listen_to = [
'media.available',
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message', 'core.message.important',
@@ -258,14 +259,14 @@ class CoreNotifier(Notification):
messages = []
# Get unread
# Get last message
if init:
db = get_db()
notifications = db.all('notification_unread', with_doc = True)
notifications = db.all('notification', with_doc = True)
for n in notifications:
if n['doc'].get('time') > (time.time() - 259200):
if n['doc'].get('time') > (time.time() - 604800):
messages.append(n['doc'])
return {

View File

@@ -122,9 +122,12 @@ var NotificationBase = new Class({
startPoll: function(){
var self = this;
if(self.stopped || (self.request && self.request.isRunning()))
if(self.stopped)
return;
if(self.request && self.request.isRunning())
self.request.cancel();
self.request = Api.request('nonblock/notification.listener', {
'onSuccess': function(json){
self.processData(json, false)
@@ -149,7 +152,7 @@ var NotificationBase = new Class({
var self = this;
// Process data
if(json){
if(json && json.result){
Array.each(json.result, function(result){
App.trigger(result._t || result.type, [result]);
if(result.message && result.read === undefined && !init)

View File

@@ -22,10 +22,11 @@ class NMJ(Notification):
# noinspection PyMissingConstructor
def __init__(self):
addEvent('renamer.after', self.addToLibrary)
addApiView(self.testNotifyName(), self.test)
addApiView('notify.nmj.auto_config', self.autoConfig)
addEvent('renamer.after', self.addToLibrary)
def autoConfig(self, host = 'localhost', **kwargs):
mount = ''

View File

@@ -14,7 +14,7 @@ autoload = 'Pushbullet'
class Pushbullet(Notification):
url = 'https://api.pushbullet.com/api/%s'
url = 'https://api.pushbullet.com/v2/%s'
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
@@ -25,11 +25,7 @@ class Pushbullet(Notification):
# Get all the device IDs linked to this user
if not len(devices):
response = self.request('devices')
if not response:
return False
devices += [device.get('id') for device in response['devices']]
devices = [None]
successful = 0
for device in devices:
@@ -88,7 +84,8 @@ config = [{
},
{
'name': 'api_key',
'label': 'User API Key'
'label': 'Access Token',
'description': 'Can be found on <a href="https://www.pushbullet.com/account" target="_blank">Account Settings</a>',
},
{
'name': 'devices',

View File

@@ -1,7 +1,7 @@
from httplib import HTTPSConnection
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.helpers.variable import getTitle, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
@@ -13,7 +13,6 @@ autoload = 'Pushover'
class Pushover(Notification):
app_token = 'YkxHMYDZp285L265L3IwH3LmzkTaCy'
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
@@ -22,15 +21,15 @@ class Pushover(Notification):
api_data = {
'user': self.conf('user_key'),
'token': self.app_token,
'token': self.conf('api_token'),
'message': toUnicode(message),
'priority': self.conf('priority'),
'sound': self.conf('sound'),
}
if data and data.get('identifier'):
if data and getIdentifier(data):
api_data.update({
'url': toUnicode('http://www.imdb.com/title/%s/' % data['identifier']),
'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)),
'url_title': toUnicode('%s on IMDb' % getTitle(data)),
})
@@ -49,7 +48,7 @@ class Pushover(Notification):
log.error('Pushover auth failed: %s', response.reason)
return False
else:
log.error('Pushover notification failed.')
log.error('Pushover notification failed: %s', request_status)
return False
@@ -70,6 +69,12 @@ config = [{
'name': 'user_key',
'description': 'Register on pushover.net to get one.'
},
{
'name': 'api_token',
'description': '<a href="https://pushover.net/apps/clone/couchpotato" target="_blank">Register on pushover.net</a> to get one.',
'advanced': True,
'default': 'YkxHMYDZp285L265L3IwH3LmzkTaCy',
},
{
'name': 'priority',
'default': 0,

View File

@@ -1,6 +1,7 @@
import os
import subprocess
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
@@ -16,7 +17,8 @@ class Synoindex(Notification):
index_path = '/usr/syno/bin/synoindex'
def __init__(self):
super(Synoindex, self).__init__()
addApiView(self.testNotifyName(), self.test)
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = None):

View File

@@ -8,7 +8,7 @@ from couchpotato.core.helpers.variable import splitString, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import requests
from requests.packages.urllib3.exceptions import MaxRetryError
from requests.packages.urllib3.exceptions import MaxRetryError, ConnectionError
log = CPLog(__name__)
@@ -36,7 +36,7 @@ class XBMC(Notification):
if self.use_json_notifications.get(host):
calls = [
('GUI.ShowNotification', {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}),
('GUI.ShowNotification', None, {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}),
]
if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
@@ -44,7 +44,7 @@ class XBMC(Notification):
if not self.conf('force_full_scan') and (self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0])):
param = {'directory': data['destination_dir']}
calls.append(('VideoLibrary.Scan', param))
calls.append(('VideoLibrary.Scan', None, param))
max_successful += len(calls)
response = self.request(host, calls)
@@ -52,7 +52,7 @@ class XBMC(Notification):
response = self.notifyXBMCnoJSON(host, {'title': self.default_title, 'message': message})
if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
response += self.request(host, [('VideoLibrary.Scan', {})])
response += self.request(host, [('VideoLibrary.Scan', None, {})])
max_successful += 1
max_successful += 1
@@ -75,7 +75,7 @@ class XBMC(Notification):
# XBMC JSON-RPC version request
response = self.request(host, [
('JSONRPC.Version', {})
('JSONRPC.Version', None, {})
])
for result in response:
if result.get('result') and type(result['result']['version']).__name__ == 'int':
@@ -112,7 +112,7 @@ class XBMC(Notification):
self.use_json_notifications[host] = True
# send the text message
resp = self.request(host, [('GUI.ShowNotification', {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})])
resp = self.request(host, [('GUI.ShowNotification', None, {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})])
for r in resp:
if r.get('result') and r['result'] == 'OK':
log.debug('Message delivered successfully!')
@@ -172,7 +172,7 @@ class XBMC(Notification):
# manually fake expected response array
return [{'result': 'Error'}]
except (MaxRetryError, requests.exceptions.Timeout):
except (MaxRetryError, requests.exceptions.Timeout, ConnectionError):
log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off')
return [{'result': 'Error'}]
except:
@@ -184,12 +184,13 @@ class XBMC(Notification):
data = []
for req in do_requests:
method, kwargs = req
method, id, kwargs = req
data.append({
'method': method,
'params': kwargs,
'jsonrpc': '2.0',
'id': method,
'id': id if id else method,
})
data = json.dumps(data)
@@ -223,7 +224,7 @@ config = [{
'list': 'notification_providers',
'name': 'xbmc',
'label': 'XBMC',
'description': 'v11 (Eden) and v12 (Frodo)',
'description': 'v11 (Eden), v12 (Frodo), v13 (Gotham)',
'options': [
{
'name': 'enabled',
@@ -256,7 +257,7 @@ config = [{
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Only scan new movie folder at remote XBMC servers. Works if movie location is the same.',
'description': ('Only scan new movie folder at remote XBMC servers.', 'Useful if the XBMC path is different from the path CPS uses.'),
},
{
'name': 'force_full_scan',
@@ -264,11 +265,11 @@ config = [{
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Do a full scan instead of only the new movie. Useful if the XBMC path is different from the path CPS uses.',
'description': ('Do a full scan instead of only the new movie.', 'Useful if the XBMC path is different from the path CPS uses.'),
},
{
'name': 'on_snatch',
'default': 0,
'default': False,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',

View File

@@ -1,3 +1,4 @@
from urllib import quote
from urlparse import urlparse
import glob
import inspect
@@ -5,7 +6,6 @@ import os.path
import re
import time
import traceback
import urllib2
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toSafeString, \
@@ -16,7 +16,6 @@ from couchpotato.environment import Env
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import MaxRetryError
from scandir import scandir
from tornado import template
from tornado.web import StaticFileHandler
@@ -41,7 +40,6 @@ class Plugin(object):
http_time_between_calls = 0
http_failed_request = {}
http_failed_disabled = {}
http_opener = requests.Session()
def __new__(cls, *args, **kwargs):
new_plugin = super(Plugin, cls).__new__(cls)
@@ -141,19 +139,25 @@ class Plugin(object):
return False
def deleteEmptyFolder(self, folder, show_error = True):
def deleteEmptyFolder(self, folder, show_error = True, only_clean = None):
folder = sp(folder)
for root, dirs, files in scandir.walk(folder):
for item in os.listdir(folder):
full_folder = os.path.join(folder, item)
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
if show_error:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
if not only_clean or (item in only_clean and os.path.isdir(full_folder)):
for root, dirs, files in os.walk(full_folder):
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
if show_error:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
try:
os.rmdir(folder)
@@ -163,7 +167,7 @@ class Plugin(object):
# http request
def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True):
url = urllib2.quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
if not headers: headers = {}
if not data: data = {}
@@ -179,7 +183,7 @@ class Plugin(object):
headers['Connection'] = headers.get('Connection', 'keep-alive')
headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
r = self.http_opener
r = Env.get('http_opener')
# Don't try for failed requests
if self.http_failed_disabled.get(host, 0) > 0:
@@ -201,11 +205,12 @@ class Plugin(object):
'data': data if len(data) > 0 else None,
'timeout': timeout,
'files': files,
'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates..
}
method = 'post' if len(data) > 0 or files else 'get'
log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
response = r.request(method, url, verify = False, **kwargs)
response = r.request(method, url, **kwargs)
if response.status_code == requests.codes.ok:
data = response.content
@@ -258,7 +263,7 @@ class Plugin(object):
def afterCall(self, handler):
self.isRunning('%s.%s' % (self.getName(), handler.__name__), False)
def doShutdown(self):
def doShutdown(self, *args, **kwargs):
self.shuttingDown(True)
return True

View File

@@ -3,6 +3,7 @@ import os
import string
from couchpotato.api import addApiView
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import getUserDir
from couchpotato.core.plugins.base import Plugin
import six
@@ -50,6 +51,7 @@ class FileBrowser(Plugin):
path = '/'
dirs = []
path = sp(path)
for f in os.listdir(path):
p = os.path.join(path, f)
if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)):

View File

@@ -5,11 +5,10 @@ from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import md5, getExt
from couchpotato.core.helpers.variable import md5, getExt, isSubFolder
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from scandir import scandir
from tornado.web import StaticFileHandler
@@ -33,6 +32,8 @@ class FileManager(Plugin):
fireEvent('schedule.interval', 'file.cleanup', self.cleanup, hours = 24)
addEvent('app.test', self.doSubfolderTest)
def cleanup(self):
# Wait a bit after starting before cleanup
@@ -49,9 +50,9 @@ class FileManager(Plugin):
for x in file_dict.keys():
files.extend(file_dict[x])
for f in scandir.scandir(cache_dir):
if os.path.splitext(f.name)[1] in ['.png', '.jpg', '.jpeg']:
file_path = os.path.join(cache_dir, f.name)
for f in os.listdir(cache_dir):
if os.path.splitext(f)[1] in ['.png', '.jpg', '.jpeg']:
file_path = os.path.join(cache_dir, f)
if toUnicode(file_path) not in files:
os.remove(file_path)
except:
@@ -77,3 +78,33 @@ class FileManager(Plugin):
self.createFile(dest, filedata, binary = True)
return dest
def doSubfolderTest(self):
tests = {
('/test/subfolder', '/test/sub'): False,
('/test/sub/folder', '/test/sub'): True,
('/test/sub/folder', '/test/sub2'): False,
('/sub/fold', '/test/sub/fold'): False,
('/sub/fold', '/test/sub/folder'): False,
('/opt/couchpotato', '/var/opt/couchpotato'): False,
('/var/opt', '/var/opt/couchpotato'): False,
('/CapItaLs/Are/OK', '/CapItaLs/Are/OK'): True,
('/CapItaLs/Are/OK', '/CapItaLs/Are/OK2'): False,
('/capitals/are/not/OK', '/capitals/are/NOT'): False,
('\\\\Mounted\\Volume\\Test', '\\\\Mounted\\Volume'): True,
('C:\\\\test\\path', 'C:\\\\test2'): False
}
failed = 0
for x in tests:
if isSubFolder(x[0], x[1]) is not tests[x]:
log.error('Failed subfolder test %s %s', x)
failed += 1
if failed > 0:
log.error('Subfolder test failed %s tests', failed)
else:
log.info('Subfolder test succeeded')
return failed == 0

View File

@@ -134,8 +134,8 @@ class Logging(Plugin):
logs_raw = toUnicode(log_content).split('[0m\n')
logs = []
for log in logs_raw:
split = splitString(log, '\x1b')
for log_line in logs_raw:
split = splitString(log_line, '\x1b')
if split:
try:
date, time, log_type = splitString(split[0], ' ')

View File

@@ -23,7 +23,7 @@
cursor: pointer;
}
.page.log .nav li:hover:not(.active, .filter) {
.page.log .nav li:hover:not(.active):not(.filter) {
background: rgba(255, 255, 255, 0.1);
}

View File

@@ -17,7 +17,7 @@ Page.Log = new Class({
Movie(s) I have this with: ...\n\
Quality of the movie being searched: ...\n\
Providers I use: ...\n\
Version of CouchPotato: ...\n\
Version of CouchPotato: {version}\n\
Running on: ...\n\
\n\
### Logs:\n\
@@ -207,7 +207,11 @@ Running on: ...\n\
showReport: function(text){
var self = this,
body = self.report_text.replace('{issue}', text);
version = Updater.getInfo(),
body = self.report_text
.replace('{issue}', text)
.replace('{version}', version ? version.version.repr : '...'),
textarea;
var overlay = new Element('div.report', {
'method': 'post',
@@ -240,7 +244,7 @@ Running on: ...\n\
'text': ' before posting, then copy the text below'
})
),
new Element('textarea', {
textarea = new Element('textarea', {
'text': body,
'events': {
'click': function(){
@@ -251,7 +255,17 @@ Running on: ...\n\
new Element('a.button', {
'target': '_blank',
'text': 'Create a new issue on GitHub with the text above',
'href': 'https://github.com/RuudBurger/CouchPotatoServer/issues/new?body=' + (body.length < 2000 ? encodeURIComponent(body) : 'Paste the text here')
'href': 'https://github.com/RuudBurger/CouchPotatoServer/issues/new',
'events': {
'click': function(e){
(e).stop();
var body = textarea.get('value'),
bdy = '?body=' + (body.length < 2000 ? encodeURIComponent(body) : 'Paste the text here'),
win = window.open(e.target.get('href') + bdy, '_blank');
win.focus();
}
}
})
)
);

View File

@@ -1,6 +1,4 @@
import ctypes
import os
import sys
import time
import traceback
@@ -8,7 +6,7 @@ from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent, fireEventAsync
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import splitString, getTitle, tryInt, getIdentifier
from couchpotato.core.helpers.variable import splitString, getTitle, tryInt, getIdentifier, getFreeSpace
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
@@ -33,7 +31,7 @@ class Manage(Plugin):
# Add files after renaming
def after_rename(message = None, group = None):
if not group: group = {}
return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'])
return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'], release_download = group['release_download'])
addEvent('renamer.after', after_rename, priority = 110)
addApiView('manage.update', self.updateLibraryView, docs = {
@@ -53,6 +51,20 @@ class Manage(Plugin):
if not Env.get('dev') and self.conf('startup_scan'):
addEvent('app.load', self.updateLibraryQuick)
addEvent('app.load', self.setCrons)
# Enable / disable interval
addEvent('setting.save.manage.library_refresh_interval.after', self.setCrons)
def setCrons(self):
fireEvent('schedule.remove', 'manage.update_library')
refresh = tryInt(self.conf('library_refresh_interval'))
if refresh > 0:
fireEvent('schedule.interval', 'manage.update_library', self.updateLibrary, hours = refresh, single = True)
return True
def getProgress(self, **kwargs):
return {
'progress': self.in_progress
@@ -71,7 +83,8 @@ class Manage(Plugin):
return self.updateLibrary(full = False)
def updateLibrary(self, full = True):
last_update = float(Env.prop('manage.last_update', default = 0))
last_update_key = 'manage.last_update%s' % ('_full' if full else '')
last_update = float(Env.prop(last_update_key, default = 0))
if self.in_progress:
log.info('Already updating library: %s', self.in_progress)
@@ -122,6 +135,7 @@ class Manage(Plugin):
# Get movies with done status
total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', release_status = 'done', status_or = True, single = True)
deleted_releases = []
for done_movie in done_movies:
if getIdentifier(done_movie) not in added_identifiers:
fireEvent('media.delete', media_id = done_movie['_id'], delete_from = 'all')
@@ -151,18 +165,24 @@ class Manage(Plugin):
already_used = used_files.get(release_file)
if already_used:
# delete current one
if already_used.get('last_edit', 0) < release.get('last_edit', 0):
fireEvent('release.delete', release['_id'], single = True)
# delete previous one
else:
fireEvent('release.delete', already_used['_id'], single = True)
release_id = release['_id'] if already_used.get('last_edit', 0) < release.get('last_edit', 0) else already_used['_id']
if release_id not in deleted_releases:
fireEvent('release.delete', release_id, single = True)
deleted_releases.append(release_id)
break
else:
used_files[release_file] = release
del used_files
Env.prop('manage.last_update', time.time())
# Break if CP wants to shut down
if self.shuttingDown():
break
if not self.shuttingDown():
db = get_db()
db.reindex()
Env.prop(last_update_key, time.time())
except:
log.error('Failed updating library: %s', (traceback.format_exc()))
@@ -192,14 +212,14 @@ class Manage(Plugin):
'to_go': total_found,
})
self.updateProgress(folder, to_go)
if group['media'] and group['identifier']:
added_identifiers.append(group['identifier'])
# Add it to release and update the info
fireEvent('release.add', group = group, update_info = False)
fireEvent('movie.update_info', identifier = group['identifier'], on_complete = self.createAfterUpdate(folder, group['identifier']))
else:
self.updateProgress(folder)
return addToLibrary
@@ -210,7 +230,6 @@ class Manage(Plugin):
if not self.in_progress or self.shuttingDown():
return
self.updateProgress(folder)
total = self.in_progress[folder]['total']
movie_dict = fireEvent('media.get', identifier, single = True)
@@ -218,10 +237,11 @@ class Manage(Plugin):
return afterUpdate
def updateProgress(self, folder):
def updateProgress(self, folder, to_go):
pr = self.in_progress[folder]
pr['to_go'] -= 1
if to_go < pr['to_go']:
pr['to_go'] = to_go
avg = (time.time() - pr['started']) / (pr['total'] - pr['to_go'])
pr['eta'] = tryInt(avg * pr['to_go'])
@@ -236,7 +256,7 @@ class Manage(Plugin):
return []
def scanFilesToLibrary(self, folder = None, files = None):
def scanFilesToLibrary(self, folder = None, files = None, release_download = None):
folder = os.path.normpath(folder)
@@ -245,34 +265,13 @@ class Manage(Plugin):
if groups:
for group in groups.values():
if group.get('media'):
fireEvent('release.add', group = group)
if release_download and release_download.get('release_id'):
fireEvent('release.add', group = group, update_id = release_download.get('release_id'))
else:
fireEvent('release.add', group = group)
def getDiskSpace(self):
free_space = {}
for folder in self.directories():
size = None
if os.path.isdir(folder):
if os.name == 'nt':
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(folder, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable
ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
used = total.value - free.value
return [total.value, used, free.value]
else:
s = os.statvfs(folder)
size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]
free_space[folder] = size
return free_space
return getFreeSpace(self.directories())
config = [{
@@ -308,6 +307,14 @@ config = [{
'advanced': True,
'description': 'Do a quick scan on startup. On slow systems better disable this.',
},
{
'label': 'Full library refresh',
'name': 'library_refresh_interval',
'type': 'int',
'default': 0,
'advanced': True,
'description': 'Do a full scan every X hours. (0 is disabled)',
},
],
},
],

View File

@@ -38,9 +38,18 @@ class ProfilePlugin(Plugin):
def forceDefaults(self):
db = get_db()
# Fill qualities and profiles if they are empty somehow..
if db.count(db.all, 'profile') == 0:
if db.count(db.all, 'quality') == 0:
fireEvent('quality.fill', single = True)
self.fill()
# Get all active movies without profile
try:
db = get_db()
medias = fireEvent('media.with_status', 'active', single = True)
profile_ids = [x.get('_id') for x in self.all()]
@@ -79,6 +88,7 @@ class ProfilePlugin(Plugin):
'core': kwargs.get('core', False),
'qualities': [],
'wait_for': [],
'stop_after': [],
'finish': [],
'3d': []
}
@@ -88,6 +98,7 @@ class ProfilePlugin(Plugin):
for type in kwargs.get('types', []):
profile['qualities'].append(type.get('quality'))
profile['wait_for'].append(tryInt(kwargs.get('wait_for', 0)))
profile['stop_after'].append(tryInt(kwargs.get('stop_after', 0)))
profile['finish'].append((tryInt(type.get('finish')) == 1) if order > 0 else True)
profile['3d'].append(tryInt(type.get('3d')))
order += 1
@@ -208,6 +219,7 @@ class ProfilePlugin(Plugin):
'qualities': profile.get('qualities'),
'finish': [],
'wait_for': [],
'stop_after': [],
'3d': []
}
@@ -215,6 +227,7 @@ class ProfilePlugin(Plugin):
for q in profile.get('qualities'):
pro['finish'].append(True)
pro['wait_for'].append(0)
pro['stop_after'].append(0)
pro['3d'].append(threed.pop() if threed else False)
db.insert(pro)

View File

@@ -43,9 +43,8 @@
}
.profile .wait_for {
position: absolute;
right: 60px;
top: 0;
padding-top: 0;
padding-bottom: 20px;
}
.profile .wait_for input {
@@ -159,9 +158,6 @@
}
#profile_ordering li {
cursor: -webkit-grab;
cursor: -moz-grab;
cursor: grab;
border-bottom: 1px solid rgba(255,255,255,0.2);
padding: 0 5px;
}
@@ -183,6 +179,9 @@
background: url('../../images/handle.png') center;
width: 20px;
float: right;
cursor: -webkit-grab;
cursor: -moz-grab;
cursor: grab;
}
#profile_ordering .formHint {

View File

@@ -37,20 +37,28 @@ var Profile = new Class({
'placeholder': 'Profile name'
})
),
new Element('div.wait_for.ctrlHolder').adopt(
new Element('span', {'text':'Wait'}),
new Element('input.inlay.xsmall', {
'type':'text',
'value': data.wait_for && data.wait_for.length > 0 ? data.wait_for[0] : 0
}),
new Element('span', {'text':'day(s) for a better quality.'})
),
new Element('div.qualities.ctrlHolder').adopt(
new Element('label', {'text': 'Search for'}),
self.type_container = new Element('ol.types'),
new Element('div.formHint', {
'html': "Search these qualities (2 minimum), from top to bottom. Use the checkbox, to stop searching after it found this quality."
})
),
new Element('div.wait_for.ctrlHolder').adopt(
// "Wait the entered number of days for a checked quality, before downloading a lower quality release."
new Element('span', {'text':'Wait'}),
new Element('input.inlay.wait_for_input.xsmall', {
'type':'text',
'value': data.wait_for && data.wait_for.length > 0 ? data.wait_for[0] : 0
}),
new Element('span', {'text':'day(s) for a better quality '}),
new Element('span.advanced', {'text':'and keep searching'}),
// "After a checked quality is found and downloaded, continue searching for even better quality releases for the entered number of days."
new Element('input.inlay.xsmall.stop_after_input.advanced', {
'type':'text',
'value': data.stop_after && data.stop_after.length > 0 ? data.stop_after[0] : 0
}),
new Element('span.advanced', {'text':'day(s) for a better (checked) quality.'})
)
);
@@ -116,7 +124,8 @@ var Profile = new Class({
var data = {
'id' : self.data._id,
'label' : self.el.getElement('.quality_label input').get('value'),
'wait_for' : self.el.getElement('.wait_for input').get('value'),
'wait_for' : self.el.getElement('.wait_for_input').get('value'),
'stop_after' : self.el.getElement('.stop_after_input').get('value'),
'types': []
};

View File

@@ -1,11 +1,12 @@
import traceback
import re
from CodernityDB.database import RecordNotFound
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, ss
from couchpotato.core.helpers.variable import mergeDicts, getExt, tryInt
from couchpotato.core.helpers.variable import mergeDicts, getExt, tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.quality.index import QualityIndex
@@ -21,23 +22,23 @@ class QualityPlugin(Plugin):
}
qualities = [
{'identifier': 'bd50', 'hd': True, 'allow_3d': True, 'size': (20000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':['iso', 'img'], 'tags': ['bdmv', 'certificate', ('complete', 'bluray'), 'avc', 'mvc']},
{'identifier': '1080p', 'hd': True, 'allow_3d': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts', 'x264', 'h264']},
{'identifier': 'bd50', 'hd': True, 'allow_3d': True, 'size': (20000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25', ('br', 'disk')], 'allow': ['1080p'], 'ext':['iso', 'img'], 'tags': ['bdmv', 'certificate', ('complete', 'bluray'), 'avc', 'mvc']},
{'identifier': '1080p', 'hd': True, 'allow_3d': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts', 'ts'], 'tags': ['m2ts', 'x264', 'h264']},
{'identifier': '720p', 'hd': True, 'allow_3d': True, 'size': (3000, 10000), 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264']},
{'identifier': 'brrip', 'hd': True, 'allow_3d': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':[], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': ['br2dvd'], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [], 'allow': [], 'ext':[], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'brrip', 'hd': True, 'allow_3d': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip', ('br', 'rip')], 'allow': ['720p', '1080p'], 'ext':['mp4', 'avi'], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': ['br2dvd', ('dvd', 'r')], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [('dvd', 'rip')], 'allow': [], 'ext':['avi'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': ['webrip', ('web', 'rip')]},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':[]},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':[]},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':[]},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':[]}
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr', '720p'], 'ext':[]},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': ['720p'], 'ext':[]},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': ['720p'], 'ext':[]},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': ['720p'], 'ext':[]}
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
threed_tags = {
'sbs': [('half', 'sbs'), 'hsbs', ('full', 'sbs'), 'fsbs'],
'ou': [('half', 'ou'), 'hou', ('full', 'ou'), 'fou'],
'3d': ['2d3d', '3d2d'],
'3d': ['2d3d', '3d2d', '3d'],
}
cached_qualities = None
@@ -51,6 +52,7 @@ class QualityPlugin(Plugin):
addEvent('quality.order', self.getOrder)
addEvent('quality.ishigher', self.isHigher)
addEvent('quality.isfinish', self.isFinish)
addEvent('quality.fill', self.fill)
addApiView('quality.size.save', self.saveSize)
addApiView('quality.list', self.allView, docs = {
@@ -93,15 +95,14 @@ class QualityPlugin(Plugin):
db = get_db()
qualities = db.all('quality', with_doc = True)
temp = []
for quality in qualities:
quality = quality['doc']
q = mergeDicts(self.getQuality(quality.get('identifier')), quality)
for quality in self.qualities:
quality_doc = db.get('quality', quality.get('identifier'), with_doc = True)['doc']
q = mergeDicts(quality, quality_doc)
temp.append(q)
self.cached_qualities = temp
if len(temp) == len(self.qualities):
self.cached_qualities = temp
return temp
@@ -152,24 +153,31 @@ class QualityPlugin(Plugin):
order = 0
for q in self.qualities:
db.insert({
'_t': 'quality',
'order': order,
'identifier': q.get('identifier'),
'size_min': tryInt(q.get('size')[0]),
'size_max': tryInt(q.get('size')[1]),
})
existing = None
try:
existing = db.get('quality', q.get('identifier'))
except RecordNotFound:
pass
log.info('Creating profile: %s', q.get('label'))
db.insert({
'_t': 'profile',
'order': order + 20, # Make sure it goes behind other profiles
'core': True,
'qualities': [q.get('identifier')],
'label': toUnicode(q.get('label')),
'finish': [True],
'wait_for': [0],
})
if not existing:
db.insert({
'_t': 'quality',
'order': order,
'identifier': q.get('identifier'),
'size_min': tryInt(q.get('size')[0]),
'size_max': tryInt(q.get('size')[1]),
})
log.info('Creating profile: %s', q.get('label'))
db.insert({
'_t': 'profile',
'order': order + 20, # Make sure it goes behind other profiles
'core': True,
'qualities': [q.get('identifier')],
'label': toUnicode(q.get('label')),
'finish': [True],
'wait_for': [0],
})
order += 1
@@ -200,22 +208,34 @@ class QualityPlugin(Plugin):
for cur_file in files:
words = re.split('\W+', cur_file.lower())
name_year = fireEvent('scanner.name_year', cur_file, file_name = cur_file, single = True)
threed_words = words
if name_year and name_year.get('name'):
split_name = splitString(name_year.get('name'), ' ')
threed_words = [x for x in words if x not in split_name]
for quality in qualities:
contains_score = self.containsTagScore(quality, words, cur_file)
threedscore = self.contains3D(quality, words, cur_file) if quality.get('allow_3d') else (0, None)
threedscore = self.contains3D(quality, threed_words, cur_file) if quality.get('allow_3d') else (0, None)
self.calcScore(score, quality, contains_score, threedscore)
# Evaluate score based on size
size_scores = []
for quality in qualities:
size_score = self.guessSizeScore(quality, size = size)
self.calcScore(score, quality, size_score, penalty = False)
# Try again with loose testing
for quality in qualities:
# Evaluate score based on size
size_score = self.guessSizeScore(quality, size = size)
loose_score = self.guessLooseScore(quality, extra = extra)
self.calcScore(score, quality, loose_score, penalty = False)
if size_score > 0:
size_scores.append(quality)
self.calcScore(score, quality, size_score + loose_score, penalty = False)
# Add additional size score if only 1 size validated
if len(size_scores) == 1:
self.calcScore(score, size_scores[0], 10, penalty = False)
del size_scores
# Return nothing if all scores are <= 0
has_non_zero = 0
@@ -241,6 +261,9 @@ class QualityPlugin(Plugin):
cur_file = ss(cur_file)
score = 0
extension = words[-1]
words = words[:-1]
points = {
'identifier': 10,
'label': 10,
@@ -260,7 +283,7 @@ class QualityPlugin(Plugin):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type)
if isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower():
if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words:
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type) / 2
@@ -270,8 +293,8 @@ class QualityPlugin(Plugin):
# Check extention
for ext in quality.get('ext', []):
if ext == words[-1]:
log.debug('Found %s extension in %s', (ext, cur_file))
if ext == extension:
log.debug('Found %s with .%s extension in %s', (quality['identifier'], ext, cur_file))
score += points['ext']
return score
@@ -283,14 +306,14 @@ class QualityPlugin(Plugin):
tags = self.threed_tags.get(key, [])
for tag in tags:
if (isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words)) or (isinstance(tag, (str, unicode)) and ss(tag.lower()) in cur_file.lower()):
if isinstance(tag, tuple):
if len(set(words) & set(tag)) == len(tag):
log.debug('Found %s in %s', (tag, cur_file))
return 1, key
elif tag in words:
log.debug('Found %s in %s', (tag, cur_file))
return 1, key
if list(set([key]) & set(words)):
log.debug('Found %s in %s', (key, cur_file))
return 1, key
return 0, None
def guessLooseScore(self, quality, extra = None):
@@ -325,6 +348,8 @@ class QualityPlugin(Plugin):
if tryInt(quality['size_min']) <= tryInt(size) <= tryInt(quality['size_max']):
log.debug('Found %s via release size: %s MB < %s MB < %s MB', (quality['identifier'], quality['size_min'], size, quality['size_max']))
score += 5
else:
score -= 5
return score
@@ -351,28 +376,34 @@ class QualityPlugin(Plugin):
# Give panelty for all lower qualities
for q in self.qualities[self.order.index(quality.get('identifier'))+1:]:
score[q.get('identifier')]['score'] -= 1
if score.get(q.get('identifier')):
score[q.get('identifier')]['score'] -= 1
def isFinish(self, quality, profile):
def isFinish(self, quality, profile, release_age = 0):
if not isinstance(profile, dict) or not profile.get('qualities'):
return False
# No profile so anything (scanned) is good enough
return True
try:
quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0]
return profile['finish'][quality_order]
index = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else False) == bool(quality.get('is_3d', False))][0]
if index == 0 or (profile['finish'][index] and int(release_age) >= int(profile.get('stop_after', [0])[0])):
return True
return False
except:
return False
def isHigher(self, quality, compare_with, profile = None):
if not isinstance(profile, dict) or not profile.get('qualities'):
profile = {'qualities': self.order}
profile = fireEvent('profile.default', single = True)
# Try to find quality in profile, if not found: a quality we do not want is lower than anything else
try:
quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0]
except:
log.debug('Quality %s not found in profile identifiers %s', (quality['identifier'] + (' 3D' if quality.get('is_3d', 0) else ''), \
[identifier + ('3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
[identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
return 'lower'
# Try to find compare quality in profile, if not found: anything is higher than a not wanted quality
@@ -412,6 +443,20 @@ class QualityPlugin(Plugin):
'Movie Name (2013) 2D + 3D': {'size': 49000, 'quality': 'bd50', 'is_3d': True},
'Movie Monuments 2013 BrRip 1080p': {'size': 1800, 'quality': 'brrip'},
'Movie Monuments 2013 BrRip 720p': {'size': 1300, 'quality': 'brrip'},
'The.Movie.2014.3D.1080p.BluRay.AVC.DTS-HD.MA.5.1-GroupName': {'size': 30000, 'quality': 'bd50', 'is_3d': True},
'/home/namehou/Movie Monuments (2013)/Movie Monuments.mkv': {'size': 4500, 'quality': '1080p', 'is_3d': False},
'/home/namehou/Movie Monuments (2013)/Movie Monuments Full-OU.mkv': {'size': 4500, 'quality': '1080p', 'is_3d': True},
'/volume1/Public/3D/Moviename/Moviename (2009).3D.SBS.ts': {'size': 7500, 'quality': '1080p', 'is_3d': True},
'/volume1/Public/Moviename/Moviename (2009).ts': {'size': 5500, 'quality': '1080p'},
'/movies/BluRay HDDVD H.264 MKV 720p EngSub/QuiQui le fou (criterion collection #123, 1915)/QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'},
'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'},
'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) half-sbs 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True},
'Moviename 2014 720p HDCAM XviD DualAudio': {'size': 4000, 'quality': 'cam'},
'Moviename (2014) - 720p CAM x264': {'size': 2250, 'quality': 'cam'},
'Movie Name (2014).mp4': {'size': 750, 'quality': 'brrip'},
'Moviename.2014.720p.R6.WEB-DL.x264.AC3-xyz': {'size': 750, 'quality': 'r5'},
'Movie name 2014 New Source 720p HDCAM x264 AC3 xyz': {'size': 750, 'quality': 'cam'},
'Movie.Name.2014.720p.HD.TS.AC3.x264': {'size': 750, 'quality': 'ts'}
}
correct = 0
@@ -419,7 +464,10 @@ class QualityPlugin(Plugin):
test_quality = self.guess(files = [name], extra = tests[name].get('extra', None), size = tests[name].get('size', None)) or {}
success = test_quality.get('identifier') == tests[name]['quality'] and test_quality.get('is_3d') == tests[name].get('is_3d', False)
if not success:
log.error('%s failed check, thinks it\'s %s', (name, self.guess([name]).get('identifier')))
log.error('%s failed check, thinks it\'s "%s" expecting "%s"', (name,
test_quality.get('identifier') + (' 3D' if test_quality.get('is_3d') else ''),
tests[name]['quality'] + (' 3D' if tests[name].get('is_3d') else '')
))
correct += success

View File

@@ -8,6 +8,7 @@ var QualityBase = new Class({
self.qualities = data.qualities;
self.profiles_list = null;
self.profiles = [];
Array.each(data.profiles, self.createProfilesClass.bind(self));
@@ -29,9 +30,14 @@ var QualityBase = new Class({
},
getQuality: function(identifier){
return this.qualities.filter(function(q){
return q.identifier == identifier;
}).pick();
try {
return this.qualities.filter(function(q){
return q.identifier == identifier;
}).pick();
}
catch(e){}
return {}
},
addSettings: function(){
@@ -101,14 +107,13 @@ var QualityBase = new Class({
createProfileOrdering: function(){
var self = this;
var profile_list;
self.settings.createGroup({
'label': 'Profile Defaults',
'description': '(Needs refresh \'' +(App.isMac() ? 'CMD+R' : 'F5')+ '\' after editing)'
'description': '(Needs refresh \'' +(App.isMac() ? 'CMD+R' : 'F5')+ '\' after editing)'
}).adopt(
new Element('.ctrlHolder#profile_ordering').adopt(
new Element('label[text=Order]'),
profile_list = new Element('ul'),
self.profiles_list = new Element('ul'),
new Element('p.formHint', {
'html': 'Change the order the profiles are in the dropdown list. Uncheck to hide it completely.<br />First one will be default.'
})
@@ -128,29 +133,37 @@ var QualityBase = new Class({
'text': profile.data.label
}),
new Element('span.handle')
).inject(profile_list);
).inject(self.profiles_list);
new Form.Check(check);
});
// Sortable
self.profile_sortable = new Sortables(profile_list, {
var sorted_changed = false;
self.profile_sortable = new Sortables(self.profiles_list, {
'revert': true,
'handle': '',
'handle': '.handle',
'opacity': 0.5,
'onComplete': self.saveProfileOrdering.bind(self)
'onSort': function(){
sorted_changed = true;
},
'onComplete': function(){
if(sorted_changed){
self.saveProfileOrdering();
sorted_changed = false;
}
}
});
},
saveProfileOrdering: function(){
var self = this;
var self = this,
ids = [],
hidden = [];
var ids = [];
var hidden = [];
self.profile_sortable.list.getElements('li').each(function(el, nr){
self.profiles_list.getElements('li').each(function(el, nr){
ids.include(el.get('data-id'));
hidden[nr] = +!el.getElement('input[type=checkbox]').get('checked');
});

View File

@@ -3,11 +3,11 @@ import os
import time
import traceback
from CodernityDB.database import RecordDeleted
from CodernityDB.database import RecordDeleted, RecordNotFound
from couchpotato import md5, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toUnicode, sp
from couchpotato.core.helpers.encoding import toUnicode, sp
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@@ -58,7 +58,7 @@ class Release(Plugin):
addEvent('release.for_media', self.forMedia)
# Clean releases that didn't have activity in the last week
addEvent('app.load', self.cleanDone)
addEvent('app.load', self.cleanDone, priority = 1000)
fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 12)
def cleanDone(self):
@@ -79,6 +79,13 @@ class Release(Plugin):
try:
db.get('id', release.get('key'))
media_exist.append(release.get('key'))
try:
if release['doc'].get('status') == 'ignore':
release['doc']['status'] = 'ignored'
db.update(release['doc'])
except:
log.error('Failed fixing mis-status tag: %s', traceback.format_exc())
except RecordDeleted:
db.delete(release['doc'])
log.debug('Deleted orphaned release: %s', release['doc'])
@@ -87,9 +94,6 @@ class Release(Plugin):
del media_exist
# Reindex statuses
db.reindex_index('media_status')
# get movies last_edit more than a week ago
medias = fireEvent('media.with_status', 'done', single = True)
@@ -103,11 +107,13 @@ class Release(Plugin):
if rel['status'] in ['available']:
self.delete(rel['_id'])
# Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the move
# Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the media
elif rel['status'] in ['snatched', 'downloaded']:
self.updateStatus(rel['_id'], status = 'ignore')
self.updateStatus(rel['_id'], status = 'ignored')
def add(self, group, update_info = True):
fireEvent('media.untag', media.get('_id'), 'recent', single = True)
def add(self, group, update_info = True, update_id = None):
try:
db = get_db()
@@ -123,33 +129,49 @@ class Release(Plugin):
'profile_id': None,
}, search_after = False, update_after = update_info, notify_after = False, status = 'done', single = True)
# Add Release
release = {
'_t': 'release',
'media_id': media['_id'],
'identifier': release_identifier,
'quality': group['meta_data']['quality'].get('identifier'),
'is_3d': group['meta_data']['quality'].get('is_3d', 0),
'last_edit': int(time.time()),
'status': 'done'
}
try:
r = db.get('release_identifier', release_identifier, with_doc = True)['doc']
r['media_id'] = media['_id']
except:
r = db.insert(release)
release = None
if update_id:
try:
release = db.get('id', update_id)
release.update({
'identifier': release_identifier,
'last_edit': int(time.time()),
'status': 'done',
})
except:
log.error('Failed updating existing release: %s', traceback.format_exc())
else:
# Update with ref and _id
release.update({
'_id': r['_id'],
'_rev': r['_rev'],
})
# Add Release
if not release:
release = {
'_t': 'release',
'media_id': media['_id'],
'identifier': release_identifier,
'quality': group['meta_data']['quality'].get('identifier'),
'is_3d': group['meta_data']['quality'].get('is_3d', 0),
'last_edit': int(time.time()),
'status': 'done'
}
try:
r = db.get('release_identifier', release_identifier, with_doc = True)['doc']
r['media_id'] = media['_id']
except:
log.debug('Failed updating release by identifier "%s". Inserting new.', release_identifier)
r = db.insert(release)
# Update with ref and _id
release.update({
'_id': r['_id'],
'_rev': r['_rev'],
})
# Empty out empty file groups
release['files'] = dict((k, v) for k, v in group['files'].items() if v)
release['files'] = dict((k, [toUnicode(x) for x in v]) for k, v in group['files'].items() if v)
db.update(release)
fireEvent('media.restatus', media['_id'])
fireEvent('media.restatus', media['_id'], single = True)
return True
except:
@@ -170,6 +192,9 @@ class Release(Plugin):
rel = db.get('id', release_id)
db.delete(rel)
return True
except RecordDeleted:
log.debug('Already deleted: %s', release_id)
return True
except:
log.error('Failed: %s', traceback.format_exc())
@@ -302,7 +327,7 @@ class Release(Plugin):
log_movie = '%s (%s) in %s' % (getTitle(media), media['info']['year'], rls['quality'])
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = media)
# Mark release as snatched
if renamer_enabled:
@@ -313,22 +338,14 @@ class Release(Plugin):
if media['status'] == 'active':
profile = db.get('id', media['profile_id'])
finished = False
if rls['quality'] in profile['qualities']:
nr = profile['qualities'].index(rls['quality'])
finished = profile['finish'][nr]
if finished:
if fireEvent('quality.isfinish', {'identifier': rls['quality'], 'is_3d': rls.get('is_3d', False)}, profile, single = True):
log.info('Renamer disabled, marking media as finished: %s', log_movie)
# Mark release done
self.updateStatus(rls['_id'], status = 'done')
# Mark media done
mdia = db.get('id', media['_id'])
mdia['status'] = 'done'
mdia['last_edit'] = int(time.time())
db.update(mdia)
fireEvent('media.restatus', media['_id'], single = True)
return True
@@ -341,7 +358,7 @@ class Release(Plugin):
return True
def tryDownloadResult(self, results, media, quality_custom, manual = False):
def tryDownloadResult(self, results, media, quality_custom):
wait_for = False
let_through = False
@@ -355,7 +372,11 @@ class Release(Plugin):
continue
if rel['score'] <= 0:
log.info('Ignored, score to low: %s', rel['name'])
log.info('Ignored, score "%s" to low: %s', (rel['score'], rel['name']))
continue
if rel['size'] <= 50:
log.info('Ignored, size "%sMB" to low: %s', (rel['size'], rel['name']))
continue
rel['wait_for'] = False
@@ -375,7 +396,7 @@ class Release(Plugin):
wait_for = True
continue
downloaded = fireEvent('release.download', data = rel, media = media, manual = manual, single = True)
downloaded = fireEvent('release.download', data = rel, media = media, single = True)
if downloaded is True:
return True
elif downloaded != 'try_next':
@@ -453,7 +474,7 @@ class Release(Plugin):
rel = db.get('id', release_id)
if rel and rel.get('status') != status:
release_name = rel['info'].get('name')
release_name = None
if rel.get('files'):
for file_type in rel.get('files', {}):
if file_type == 'movie':
@@ -461,6 +482,9 @@ class Release(Plugin):
release_name = os.path.basename(release_file)
break
if not release_name and rel.get('info'):
release_name = rel['info'].get('name')
#update status in Db
log.debug('Marking release %s as %s', (release_name, status))
rel['status'] = status
@@ -484,8 +508,15 @@ class Release(Plugin):
status = list(status if isinstance(status, (list, tuple)) else [status])
for s in status:
for ms in db.get_many('release_status', s, with_doc = with_doc):
yield ms['doc'] if with_doc else ms
for ms in db.get_many('release_status', s):
if with_doc:
try:
doc = db.get('id', ms['_id'])
yield doc
except RecordNotFound:
log.debug('Record not found, skipping: %s', ms['_id'])
else:
yield ms
def forMedia(self, media_id):

View File

@@ -14,7 +14,6 @@ from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from scandir import scandir
from unrar2 import RarFile
import six
from six.moves import filter
@@ -124,11 +123,6 @@ class Renamer(Plugin):
no_process = [to_folder]
cat_list = fireEvent('category.all', single = True) or []
no_process.extend([item['destination'] for item in cat_list])
try:
if Env.setting('library', section = 'manage').strip():
no_process.extend([sp(manage_folder) for manage_folder in splitString(Env.setting('library', section = 'manage'), '::')])
except:
pass
# Check to see if the no_process folders are inside the "from" folder.
if not os.path.isdir(base_folder) or not os.path.isdir(to_folder):
@@ -137,7 +131,7 @@ class Renamer(Plugin):
else:
for item in no_process:
if isSubFolder(item, base_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder.')
log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder. "%s" in "%s"', (item, base_folder))
return
# Check to see if the no_process folders are inside the provided media_folder
@@ -169,7 +163,7 @@ class Renamer(Plugin):
if media_folder:
for item in no_process:
if isSubFolder(item, media_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder.')
log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder. "%s" in "%s"', (item, media_folder))
return
# Make sure a checkSnatched marked all downloads/seeds as such
@@ -195,7 +189,7 @@ class Renamer(Plugin):
else:
# Get all files from the specified folder
try:
for root, folders, names in scandir.walk(media_folder):
for root, folders, names in os.walk(media_folder):
files.extend([sp(os.path.join(root, name)) for name in names])
except:
log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc()))
@@ -203,14 +197,18 @@ class Renamer(Plugin):
db = get_db()
# Extend the download info with info stored in the downloaded release
keep_original = self.moveTypeIsLinked()
is_torrent = False
if release_download:
release_download = self.extendReleaseDownload(release_download)
is_torrent = self.downloadIsTorrent(release_download)
keep_original = True if is_torrent and self.conf('file_action') not in ['move'] else keep_original
# Unpack any archives
extr_files = None
if self.conf('unrar'):
folder, media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files,
cleanup = self.conf('cleanup') and not self.downloadIsTorrent(release_download))
cleanup = self.conf('cleanup') and not keep_original)
groups = fireEvent('scanner.scan', folder = folder if folder else base_folder,
files = files, release_download = release_download, return_ignored = False, single = True) or []
@@ -228,6 +226,7 @@ class Renamer(Plugin):
for group_identifier in groups:
group = groups[group_identifier]
group['release_download'] = None
rename_files = {}
remove_files = []
remove_releases = []
@@ -326,7 +325,7 @@ class Renamer(Plugin):
if file_type is 'nfo' and not self.conf('rename_nfo'):
log.debug('Skipping, renaming of %s disabled', file_type)
for current_file in group['files'][file_type]:
if self.conf('cleanup') and (not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)):
if self.conf('cleanup') and (not keep_original or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
continue
@@ -446,19 +445,22 @@ class Renamer(Plugin):
# Before renaming, remove the lower quality files
remove_leftovers = True
# Mark movie "done" once it's found the quality with the finish check
# Get media quality profile
profile = None
try:
if media.get('status') == 'active' and media.get('profile_id'):
if media.get('profile_id'):
try:
profile = db.get('id', media['profile_id'])
if fireEvent('quality.isfinish', group['meta_data']['quality'], profile, single = True):
mdia = db.get('id', media['_id'])
mdia['status'] = 'done'
mdia['last_edit'] = int(time.time())
db.update(mdia)
except:
# Set profile to None as it does not exist anymore
mdia = db.get('id', media['_id'])
mdia['profile_id'] = None
db.update(mdia)
log.error('Error getting quality profile for %s: %s', (media_title, traceback.format_exc()))
else:
log.debug('Media has no quality profile: %s', media_title)
except Exception as e:
log.error('Failed marking movie finished: %s', (traceback.format_exc()))
# Mark media for dashboard
mark_as_recent = False
# Go over current movie releases
for release in fireEvent('release.for_media', media['_id'], single = True):
@@ -468,7 +470,7 @@ class Renamer(Plugin):
# This is where CP removes older, lesser quality releases or releases that are not wanted anymore
is_higher = fireEvent('quality.ishigher', \
group['meta_data']['quality'], {'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, profile, single = True)
group['meta_data']['quality'], {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, single = True)
if is_higher == 'higher':
log.info('Removing lesser or not wanted quality %s for %s.', (media_title, release.get('quality')))
@@ -493,7 +495,7 @@ class Renamer(Plugin):
self.tagRelease(group = group, tag = 'exists')
# Notify on rename fail
download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (media_title, group['meta_data']['quality']['label'], release.get('identifier'))
download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (media_title, group['meta_data']['quality']['label'], release.get('quality'))
fireEvent('movie.renaming.canceled', message = download_message, data = group)
remove_leftovers = False
@@ -505,13 +507,22 @@ class Renamer(Plugin):
if release_download['status'] == 'completed':
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
group['release_download'] = release_download
mark_as_recent = True
elif release_download['status'] == 'seeding':
# Set the release to seeding
fireEvent('release.update_status', release['_id'], status = 'seeding', single = True)
mark_as_recent = True
elif release.get('identifier') == group['meta_data']['quality']['identifier']:
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
elif release.get('quality') == group['meta_data']['quality']['identifier']:
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
group['release_download'] = release_download
mark_as_recent = True
# Mark media for dashboard
if mark_as_recent:
fireEvent('media.tag', group['media'].get('_id'), 'recent', single = True)
# Remove leftover files
if not remove_leftovers: # Don't remove anything
@@ -520,7 +531,7 @@ class Renamer(Plugin):
log.debug('Removing leftover files')
for current_file in group['files']['leftover']:
if self.conf('cleanup') and not self.conf('move_leftover') and \
(not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)):
(not keep_original or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
# Remove files
@@ -567,7 +578,7 @@ class Renamer(Plugin):
self.makeDir(os.path.dirname(dst))
try:
self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(release_download) or self.fileIsAdded(src, group))
self.moveFile(src, dst, use_default = not is_torrent or self.fileIsAdded(src, group))
group['renamed_files'].append(dst)
except:
log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc()))
@@ -583,7 +594,7 @@ class Renamer(Plugin):
self.untagRelease(group = group, tag = 'failed_rename')
# Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent
if self.movieInFromFolder(media_folder) and self.downloadIsTorrent(release_download):
if self.movieInFromFolder(media_folder) and keep_original:
self.tagRelease(group = group, tag = 'renamed_already')
# Remove matching releases
@@ -594,7 +605,7 @@ class Renamer(Plugin):
except:
log.error('Failed removing %s: %s', (release, traceback.format_exc()))
if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(release_download):
if group['dirname'] and group['parentdir'] and not keep_original:
if media_folder:
# Delete the movie folder
group_folder = media_folder
@@ -609,7 +620,7 @@ class Renamer(Plugin):
log.error('Failed removing %s: %s', (group_folder, traceback.format_exc()))
# Notify on download, search for trailers etc
download_message = 'Downloaded %s (%s)' % (media_title, replacements['quality'])
download_message = 'Downloaded %s (%s%s)' % (media_title, replacements['quality'], (' ' + replacements['3d']) if replacements['3d'] else '')
try:
fireEvent('renamer.after', message = download_message, group = group, in_order = True)
except:
@@ -664,7 +675,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Tag all files in release folder
elif release_download['folder']:
for root, folders, names in scandir.walk(sp(release_download['folder'])):
for root, folders, names in os.walk(sp(release_download['folder'])):
tag_files.extend([os.path.join(root, name) for name in names])
for filename in tag_files:
@@ -704,7 +715,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Untag all files in release folder
else:
for root, folders, names in scandir.walk(folder):
for root, folders, names in os.walk(folder):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
if not folder:
@@ -712,7 +723,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Find all .ignore files in folder
ignore_files = []
for root, dirnames, filenames in scandir.walk(folder):
for root, dirnames, filenames in os.walk(folder):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and delete if found
@@ -741,11 +752,11 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Find tag on all files in release folder
else:
for root, folders, names in scandir.walk(folder):
for root, folders, names in os.walk(folder):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
# Find all .ignore files in folder
for root, dirnames, filenames in scandir.walk(folder):
for root, dirnames, filenames in os.walk(folder):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and return True found
@@ -756,10 +767,15 @@ Remove it if you want it to be renamed (again, or at least let it try again)
return False
def moveFile(self, old, dest, forcemove = False):
def moveFile(self, old, dest, use_default = False):
dest = sp(dest)
try:
if forcemove or self.conf('file_action') not in ['copy', 'link']:
move_type = self.conf('file_action')
if use_default:
move_type = self.conf('default_file_action')
if move_type not in ['copy', 'link']:
try:
shutil.move(old, dest)
except:
@@ -768,16 +784,16 @@ Remove it if you want it to be renamed (again, or at least let it try again)
os.unlink(old)
else:
raise
elif self.conf('file_action') == 'copy':
elif move_type == 'copy':
shutil.copy(old, dest)
elif self.conf('file_action') == 'link':
else:
# First try to hardlink
try:
log.debug('Hardlinking file "%s" to "%s"...', (old, dest))
link(old, dest)
except:
# Try to simlink next
log.debug('Couldn\'t hardlink file "%s" to "%s". Simlinking instead. Error: %s.', (old, dest, traceback.format_exc()))
log.debug('Couldn\'t hardlink file "%s" to "%s". Symlinking instead. Error: %s.', (old, dest, traceback.format_exc()))
shutil.copy(old, dest)
try:
symlink(dest, old + '.link')
@@ -1077,6 +1093,9 @@ Remove it if you want it to be renamed (again, or at least let it try again)
return False
return src in group['before_rename']
def moveTypeIsLinked(self):
return self.conf('default_file_action') in ['copy', 'link']
def statusInfoComplete(self, release_download):
return release_download.get('id') and release_download.get('downloader') and release_download.get('folder')
@@ -1102,7 +1121,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
check_file_date = False
if not files:
for root, folders, names in scandir.walk(folder):
for root, folders, names in os.walk(folder):
files.extend([sp(os.path.join(root, name)) for name in names])
# Find all archive files
@@ -1128,14 +1147,20 @@ Remove it if you want it to be renamed (again, or at least let it try again)
log.info('Archive %s found. Extracting...', os.path.basename(archive['file']))
try:
rar_handle = RarFile(archive['file'])
rar_handle = RarFile(archive['file'], custom_path = self.conf('unrar_path'))
extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder))
self.makeDir(extr_path)
for packedinfo in rar_handle.infolist():
if not packedinfo.isdir and not os.path.isfile(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))):
extr_file_path = sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))
if not packedinfo.isdir and not os.path.isfile(extr_file_path):
log.debug('Extracting %s...', packedinfo.filename)
rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False)
extr_files.append(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename))))
if self.conf('unrar_modify_date'):
try:
os.utime(extr_file_path, (os.path.getatime(archive['file']), os.path.getmtime(archive['file'])))
except:
log.error('Rar modify date enabled, but failed: %s', traceback.format_exc())
extr_files.append(extr_file_path)
del rar_handle
except Exception as e:
log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc()))
@@ -1270,6 +1295,18 @@ config = [{
'description': 'Extract rar files if found.',
'default': False,
},
{
'advanced': True,
'name': 'unrar_path',
'description': 'Custom path to unrar bin',
},
{
'advanced': True,
'name': 'unrar_modify_date',
'type': 'bool',
'description': ('Set modify date of unrar-ed files to the rar-file\'s date.', 'This will allow XBMC to recognize extracted files as recently added even if the movie was released some time ago.'),
'default': False,
},
{
'name': 'cleanup',
'type': 'bool',
@@ -1320,14 +1357,23 @@ config = [{
'label': 'Folder-Separator',
'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
},
{
'name': 'default_file_action',
'label': 'Default File Action',
'default': 'move',
'type': 'dropdown',
'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
'description': ('<strong>Link</strong>, <strong>Copy</strong> or <strong>Move</strong> after download completed.',
'Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link">sym link</a> and falls back to Copy.'),
'advanced': True,
},
{
'name': 'file_action',
'label': 'Torrent File Action',
'default': 'link',
'type': 'dropdown',
'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
'description': ('<strong>Link</strong>, <strong>Copy</strong> or <strong>Move</strong> after download completed.',
'Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link">sym link</a> and falls back to Copy. It is perfered to use link when downloading torrents as it will save you space, while still beeing able to seed.'),
'description': 'See above. It is prefered to use link when downloading torrents as it will save you space, while still beeing able to seed.',
'advanced': True,
},
{

View File

@@ -13,7 +13,6 @@ from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from enzyme.exceptions import NoParserError, ParseError
from guessit import guess_movie_info
from scandir import scandir
from subliminal.videos import Video
import enzyme
from six.moves import filter, map, zip
@@ -106,7 +105,7 @@ class Scanner(Plugin):
'HDTV': ['hdtv']
}
clean = '([ _\,\.\(\)\[\]\-]|^)(3d|hsbs|sbs|ou|extended.cut|directors.cut|french|fr|swedisch|sw|danish|dutch|nl|swesub|subs|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \
clean = '([ _\,\.\(\)\[\]\-]|^)(3d|hsbs|sbs|half.sbs|full.sbs|ou|half.ou|full.ou|extended|extended.cut|directors.cut|french|fr|swedisch|sw|danish|dutch|nl|swesub|subs|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \
'|hdtvrip|webdl|web.dl|webrip|web.rip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|hc|\[.*\])(?=[ _\,\.\(\)\[\]\-]|$)'
multipart_regex = [
'[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
@@ -150,7 +149,7 @@ class Scanner(Plugin):
check_file_date = True
try:
files = []
for root, dirs, walk_files in scandir.walk(folder, followlinks=True):
for root, dirs, walk_files in os.walk(folder, followlinks=True):
files.extend([sp(os.path.join(sp(root), ss(filename))) for filename in walk_files])
# Break if CP wants to shut down
@@ -365,6 +364,7 @@ class Scanner(Plugin):
if return_ignored is False and identifier in ignored_identifiers:
log.debug('Ignore file found, ignoring release: %s', identifier)
total_found -= 1
continue
# Group extra (and easy) files first
@@ -385,6 +385,7 @@ class Scanner(Plugin):
if len(group['files']['movie']) == 0:
log.error('Couldn\'t find any movie files for %s', identifier)
total_found -= 1
continue
log.debug('Getting metadata for %s', identifier)
@@ -430,7 +431,7 @@ class Scanner(Plugin):
# Notify parent & progress on something found
if on_found:
on_found(group, total_found, total_found - len(processed_movies))
on_found(group, total_found, len(valid_files))
# Wait for all the async events calm down a bit
while threading.activeCount() > 100 and not self.shuttingDown():
@@ -472,7 +473,7 @@ class Scanner(Plugin):
data['size'] = data.get('size', 0) + self.getFileSize(cur_file)
data['quality'] = None
quality = fireEvent('quality.guess', size = data['size'], files = files, extra = data, single = True)
quality = fireEvent('quality.guess', size = data.get('size'), files = files, extra = data, single = True)
# Use the quality that we snatched but check if it matches our guess
if release_download and release_download.get('quality'):
@@ -633,7 +634,14 @@ class Scanner(Plugin):
name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None)
if name_year.get('name') and name_year.get('year'):
movie = fireEvent('movie.search', q = '%(name)s %(year)s' % name_year, merge = True, limit = 1)
search_q = '%(name)s %(year)s' % name_year
movie = fireEvent('movie.search', q = search_q, merge = True, limit = 1)
# Try with other
if len(movie) == 0 and name_year.get('other') and name_year['other'].get('name') and name_year['other'].get('year'):
search_q2 = '%(name)s %(year)s' % name_year.get('other')
if search_q2 != search_q:
movie = fireEvent('movie.search', q = search_q2, merge = True, limit = 1)
if len(movie) > 0:
imdb_id = movie[0].get('imdb')
@@ -851,7 +859,9 @@ class Scanner(Plugin):
if key in filename.lower() and key != 'default':
return self.resolutions[key]
except:
return self.resolutions['default']
pass
return self.resolutions['default']
def getGroup(self, file):
try:
@@ -900,6 +910,7 @@ class Scanner(Plugin):
log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc()))
# Backup to simple
release_name = os.path.basename(release_name.replace('\\', '/'))
cleaned = ' '.join(re.split('\W+', simplifyString(release_name)))
cleaned = re.sub(self.clean, ' ', cleaned)
@@ -934,8 +945,11 @@ class Scanner(Plugin):
pass
if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')):
cp_guess['other'] = guess
return cp_guess
elif guess == {}:
cp_guess['other'] = guess
return cp_guess
guess['other'] = cp_guess
return guess

View File

@@ -32,7 +32,7 @@ class Trailer(Plugin):
destination = os.path.join(group['destination_dir'], filename)
if not os.path.isfile(destination):
trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True)
if os.path.getsize(trailer_file) < (1024 * 1024): # Don't trust small trailers (1MB), try next one
if trailer_file and os.path.getsize(trailer_file) < (1024 * 1024): # Don't trust small trailers (1MB), try next one
os.unlink(trailer_file)
continue
else:

Some files were not shown because too many files have changed in this diff Show More