Merge branch 'refs/heads/develop'

This commit is contained in:
Ruud
2013-09-08 22:17:03 +02:00
251 changed files with 16229 additions and 6347 deletions

View File

@@ -4,16 +4,15 @@ from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import md5
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from sqlalchemy.engine import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm.session import sessionmaker
from tornado import template
from tornado.web import RequestHandler
import os
import time
import traceback
log = CPLog(__name__)
views = {}
template_loader = template.Loader(os.path.join(os.path.dirname(__file__), 'templates'))
@@ -25,7 +24,12 @@ class WebHandler(RequestHandler):
if not views.get(route):
page_not_found(self)
return
self.write(views[route]())
try:
self.write(views[route]())
except:
log.error('Failed doing web request "%s": %s', (route, traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
def addView(route, func, static = False):
views[route] = func
@@ -58,16 +62,22 @@ addView('docs', apiDocs)
class KeyHandler(RequestHandler):
def get(self, *args, **kwargs):
api = None
username = Env.setting('username')
password = Env.setting('password')
if (self.get_argument('u') == md5(username) or not username) and (self.get_argument('p') == password or not password):
api = Env.setting('api_key')
try:
username = Env.setting('username')
password = Env.setting('password')
if (self.get_argument('u') == md5(username) or not username) and (self.get_argument('p') == password or not password):
api = Env.setting('api_key')
self.write({
'success': api is not None,
'api_key': api
})
except:
log.error('Failed doing key request: %s', (traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
self.write({
'success': api is not None,
'api_key': api
})
def page_not_found(rh):
index_url = Env.get('web_base')

View File

@@ -1,38 +1,63 @@
from couchpotato.core.helpers.request import getParams
from couchpotato.core.logger import CPLog
from functools import wraps
from threading import Thread
from tornado.gen import coroutine
from tornado.web import RequestHandler, asynchronous
import json
import threading
import tornado
import traceback
import urllib
log = CPLog(__name__)
api = {}
api_locks = {}
api_nonblock = {}
api_docs = {}
api_docs_missing = []
def run_async(func):
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
# NonBlock API handler
class NonBlockHandler(RequestHandler):
stoppers = []
stopper = None
@asynchronous
def get(self, route, *args, **kwargs):
route = route.strip('/')
start, stop = api_nonblock[route]
self.stoppers.append(stop)
self.stopper = stop
start(self.onNewMessage, last_id = self.get_argument("last_id", None))
start(self.onNewMessage, last_id = self.get_argument('last_id', None))
def onNewMessage(self, response):
if self.request.connection.stream.closed():
return
self.finish(response)
try:
self.finish(response)
except:
log.error('Failed doing nonblock request: %s', (traceback.format_exc()))
self.finish({'success': False, 'error': 'Failed returning results'})
def on_connection_close(self):
for stop in self.stoppers:
stop(self.onNewMessage)
if self.stopper:
self.stopper(self.onNewMessage)
self.stoppers = []
self.stopper = None
def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
api_nonblock[route] = func_tuple
@@ -45,38 +70,61 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
# Blocking API handler
class ApiHandler(RequestHandler):
@coroutine
def get(self, route, *args, **kwargs):
route = route.strip('/')
if not api.get(route):
self.write('API call doesn\'t seem to exist')
return
kwargs = {}
for x in self.request.arguments:
kwargs[x] = urllib.unquote(self.get_argument(x))
api_locks[route].acquire()
# Split array arguments
kwargs = getParams(kwargs)
try:
# Remove t random string
try: del kwargs['t']
except: pass
kwargs = {}
for x in self.request.arguments:
kwargs[x] = urllib.unquote(self.get_argument(x))
# Check JSONP callback
result = api[route](**kwargs)
jsonp_callback = self.get_argument('callback_func', default = None)
# Split array arguments
kwargs = getParams(kwargs)
if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
elif isinstance(result, (tuple)) and result[0] == 'redirect':
self.redirect(result[1])
else:
self.write(result)
# Remove t random string
try: del kwargs['t']
except: pass
# Add async callback handler
@run_async
def run_handler(callback):
try:
result = api[route](**kwargs)
callback(result)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
callback({'success': False, 'error': 'Failed returning results'})
result = yield tornado.gen.Task(run_handler)
# Check JSONP callback
jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1])
else:
self.write(result)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
api_locks[route].release()
def addApiView(route, func, static = False, docs = None, **kwargs):
if static: func(route)
else: api[route] = func
else:
api[route] = func
api_locks[route] = threading.Lock()
if docs:
api_docs[route[4:] if route[0:4] == 'api.' else route] = docs

View File

@@ -124,7 +124,7 @@ class Core(Plugin):
time.sleep(1)
log.debug('Save to shutdown/restart')
log.debug('Safe to shutdown/restart')
try:
IOLoop.current().stop()

View File

@@ -80,7 +80,7 @@ class ClientScript(Plugin):
for static_type in self.core_static:
for rel_path in self.core_static.get(static_type):
file_path = os.path.join(Env.get('app_dir'), 'couchpotato', 'static', rel_path)
core_url = 'api/%s/static/%s?%s' % (Env.setting('api_key'), rel_path, tryInt(os.path.getmtime(file_path)))
core_url = 'api/%s/static/%s' % (Env.setting('api_key'), rel_path)
if static_type == 'script':
self.registerScript(core_url, file_path, position = 'front')
@@ -111,7 +111,7 @@ class ClientScript(Plugin):
data = jsmin(f)
else:
data = self.prefix(f)
data = cssmin(f)
data = cssmin(data)
data = data.replace('../images/', '../static/images/')
data = data.replace('../fonts/', '../static/fonts/')
data = data.replace('../../static/', '../static/') # Replace inside plugins
@@ -165,6 +165,8 @@ class ClientScript(Plugin):
def register(self, api_path, file_path, type, location):
api_path = '%s?%s' % (api_path, tryInt(os.path.getmtime(file_path)))
if not self.urls[type].get(location):
self.urls[type][location] = []
self.urls[type][location].append(api_path)

View File

@@ -132,6 +132,7 @@ class BaseUpdater(Plugin):
update_failed = False
update_version = None
last_check = 0
auto_register_static = False
def doUpdate(self):
pass

View File

@@ -10,10 +10,15 @@ def requires_auth(handler_class):
def wrap_execute(handler_execute):
def require_basic_auth(handler, kwargs):
if Env.setting('username') and Env.setting('password'):
auth_header = handler.request.headers.get('Authorization')
auth_decoded = base64.decodestring(auth_header[6:]) if auth_header else None
username = ''
password = ''
if auth_decoded:
username, password = auth_decoded.split(':', 2)

View File

@@ -11,7 +11,7 @@ log = CPLog(__name__)
class Downloader(Provider):
type = []
protocol = []
http_time_between_calls = 0
torrent_sources = [
@@ -36,18 +36,23 @@ class Downloader(Provider):
def __init__(self):
addEvent('download', self._download)
addEvent('download.enabled', self._isEnabled)
addEvent('download.enabled_types', self.getEnabledDownloadType)
addEvent('download.enabled_protocols', self.getEnabledProtocol)
addEvent('download.status', self._getAllDownloadStatus)
addEvent('download.remove_failed', self._removeFailed)
addEvent('download.pause', self._pause)
addEvent('download.process_complete', self._processComplete)
def getEnabledDownloadType(self):
for download_type in self.type:
if self.isEnabled(manual = True, data = {'type': download_type}):
return self.type
def getEnabledProtocol(self):
for download_protocol in self.protocol:
if self.isEnabled(manual = True, data = {'protocol': download_protocol}):
return self.protocol
return []
def _download(self, data = {}, movie = {}, manual = False, filedata = None):
def _download(self, data = None, movie = None, manual = False, filedata = None):
if not movie: movie = {}
if not data: data = {}
if self.isDisabled(manual, data):
return
return self.download(data = data, movie = movie, filedata = filedata)
@@ -65,19 +70,35 @@ class Downloader(Provider):
if self.isDisabled(manual = True, data = {}):
return
if self.conf('delete_failed', default = True):
return self.removeFailed(item)
if item and item.get('downloader') == self.getName():
if self.conf('delete_failed'):
return self.removeFailed(item)
return False
return False
return
def removeFailed(self, item):
return
def isCorrectType(self, item_type):
is_correct = item_type in self.type
def _processComplete(self, item):
if self.isDisabled(manual = True, data = {}):
return
if item and item.get('downloader') == self.getName():
if self.conf('remove_complete', default = False):
return self.processComplete(item = item, delete_files = self.conf('delete_files', default = False))
return False
return
def processComplete(self, item, delete_files):
return
def isCorrectProtocol(self, item_protocol):
is_correct = item_protocol in self.protocol
if not is_correct:
log.debug("Downloader doesn't support this type")
log.debug("Downloader doesn't support this protocol")
return is_correct
@@ -101,7 +122,7 @@ class Downloader(Provider):
except:
log.debug('Torrent hash "%s" wasn\'t found on: %s', (torrent_hash, source))
log.error('Failed converting magnet url to torrent: %s', (torrent_hash))
log.error('Failed converting magnet url to torrent: %s', torrent_hash)
return False
def downloadReturnId(self, download_id):
@@ -110,20 +131,38 @@ class Downloader(Provider):
'id': download_id
}
def isDisabled(self, manual, data):
def isDisabled(self, manual = False, data = None):
if not data: data = {}
return not self.isEnabled(manual, data)
def _isEnabled(self, manual, data = {}):
def _isEnabled(self, manual, data = None):
if not data: data = {}
if not self.isEnabled(manual, data):
return
return True
def isEnabled(self, manual, data = {}):
def isEnabled(self, manual = False, data = None):
if not data: data = {}
d_manual = self.conf('manual', default = False)
return super(Downloader, self).isEnabled() and \
((d_manual and manual) or (d_manual is False)) and \
(not data or self.isCorrectType(data.get('type')))
(d_manual and manual or d_manual is False) and \
(not data or self.isCorrectProtocol(data.get('protocol')))
def _pause(self, item, pause = True):
if self.isDisabled(manual = True, data = {}):
return
if item and item.get('downloader') == self.getName():
self.pause(item, pause)
return True
return False
def pause(self, item, pause):
return
class StatusList(list):

View File

@@ -7,22 +7,25 @@ import traceback
log = CPLog(__name__)
class Blackhole(Downloader):
type = ['nzb', 'torrent', 'torrent_magnet']
protocol = ['nzb', 'torrent', 'torrent_magnet']
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
if not data: data = {}
directory = self.conf('directory')
if not directory or not os.path.isdir(directory):
log.error('No directory set for blackhole %s download.', data.get('type'))
log.error('No directory set for blackhole %s download.', data.get('protocol'))
else:
try:
if not filedata or len(filedata) < 50:
try:
if data.get('type') == 'torrent_magnet':
if data.get('protocol') == 'torrent_magnet':
filedata = self.magnetToTorrent(data.get('url'))
data['type'] = 'torrent'
data['protocol'] = 'torrent'
except:
log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
@@ -34,7 +37,7 @@ class Blackhole(Downloader):
try:
if not os.path.isfile(fullPath):
log.info('Downloading %s to %s.', (data.get('type'), fullPath))
log.info('Downloading %s to %s.', (data.get('protocol'), fullPath))
with open(fullPath, 'wb') as f:
f.write(filedata)
os.chmod(fullPath, Env.getPermission('file'))
@@ -53,20 +56,21 @@ class Blackhole(Downloader):
return False
def getEnabledDownloadType(self):
def getEnabledProtocol(self):
if self.conf('use_for') == 'both':
return super(Blackhole, self).getEnabledDownloadType()
return super(Blackhole, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent':
return ['torrent', 'torrent_magnet']
else:
return ['nzb']
def isEnabled(self, manual, data = {}):
for_type = ['both']
if data and 'torrent' in data.get('type'):
for_type.append('torrent')
def isEnabled(self, manual = False, data = None):
if not data: data = {}
for_protocol = ['both']
if data and 'torrent' in data.get('protocol'):
for_protocol.append('torrent')
elif data:
for_type.append(data.get('type'))
for_protocol.append(data.get('protocol'))
return super(Blackhole, self).isEnabled(manual, data) and \
((self.conf('use_for') in for_type))
((self.conf('use_for') in for_protocol))

View File

@@ -0,0 +1,90 @@
from .main import Deluge
def start():
    """Plugin entry point: return a new Deluge downloader instance."""
    return Deluge()
# Settings exposed on the CouchPotato "Downloaders" tab for the Deluge plugin.
config = [{
    'name': 'deluge',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'deluge',
            'label': 'Deluge',
            'description': 'Use <a href="http://www.deluge-torrent.org/" target="_blank">Deluge</a> to download torrents.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'torrent',
                },
                {
                    'name': 'host',
                    'default': 'localhost:58846',
                    'description': 'Hostname with port. Usually <strong>localhost:58846</strong>',
                },
                {
                    'name': 'username',
                },
                {
                    'name': 'password',
                    'type': 'password',
                },
                {
                    'name': 'directory',
                    'type': 'directory',
                    'description': 'Download to this directory. Keep empty for default Deluge download directory.',
                },
                {
                    'name': 'completed_directory',
                    'type': 'directory',
                    'description': 'Move completed torrent to this directory. Keep empty for default Deluge options.',
                    'advanced': True,
                },
                {
                    'name': 'label',
                    'description': 'Label to add to torrents in the Deluge UI.',
                },
                {
                    'name': 'remove_complete',
                    'label': 'Remove torrent',
                    'type': 'bool',
                    'default': True,
                    'advanced': True,
                    'description': 'Remove the torrent from Deluge after it has finished seeding.',
                },
                {
                    'name': 'delete_files',
                    'label': 'Remove files',
                    'default': True,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also remove the leftover files.',
                },
                {
                    'name': 'paused',
                    'type': 'bool',
                    'advanced': True,
                    'default': False,
                    'description': 'Add the torrent paused.',
                },
                {
                    'name': 'manual',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
                },
                {
                    'name': 'delete_failed',
                    'default': True,
                    'advanced': True,
                    'type': 'bool',
                    'description': 'Delete a release after the download has failed.',
                },
            ],
        }
    ],
}]

View File

@@ -0,0 +1,244 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt, ss
from couchpotato.core.helpers.variable import tryFloat
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
from synchronousdeluge import DelugeClient
import os.path
import traceback
log = CPLog(__name__)
class Deluge(Downloader):
    """Downloader backend that talks to a Deluge daemon over its RPC API."""

    protocol = ['torrent', 'torrent_magnet']
    log = CPLog(__name__)
    # Lazily-created DelugeRPC wrapper, shared by all calls on this instance.
    drpc = None

    def connect(self):
        """Build (or reuse) the DelugeRPC helper from the configured host.

        Returns the DelugeRPC instance, or False when the host setting
        lacks a valid numeric port.
        """
        # Load host from config and split out port.
        host = self.conf('host').split(':')

        # Guard the port lookup: a host value without ':' would otherwise
        # raise IndexError instead of logging a friendly config error.
        if len(host) < 2 or not isInt(host[1]):
            log.error('Config properties are not filled in correctly, port is missing.')
            return False

        if not self.drpc:
            self.drpc = DelugeRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))

        return self.drpc

    def download(self, data, movie, filedata = None):
        """Send a torrent file or magnet link to Deluge.

        Returns the download-id dict from downloadReturnId, or False.
        """
        log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), data.get('protocol')))

        if not self.connect():
            return False

        if not filedata and data.get('protocol') == 'torrent':
            log.error('Failed sending torrent, no data')
            return False

        # Set parameters for Deluge
        options = {
            'add_paused': self.conf('paused', default = 0),
            'label': self.conf('label')
        }

        if self.conf('directory'):
            if os.path.isdir(self.conf('directory')):
                options['download_location'] = self.conf('directory')
            else:
                log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory'))

        if self.conf('completed_directory'):
            if os.path.isdir(self.conf('completed_directory')):
                options['move_completed'] = 1
                options['move_completed_path'] = self.conf('completed_directory')
            else:
                # Bug fix: this branch used to log the *download* directory.
                log.error('Completed download directory from Deluge settings: %s doesn\'t exist', self.conf('completed_directory'))

        if data.get('seed_ratio'):
            options['stop_at_ratio'] = 1
            options['stop_ratio'] = tryFloat(data.get('seed_ratio'))

        # Deluge only has seed time as a global option. Might be added
        # in a future API release.
        # if data.get('seed_time'):

        # Send request to Deluge
        if data.get('protocol') == 'torrent_magnet':
            remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
        else:
            filename = self.createFileName(data, filedata, movie)
            remote_torrent = self.drpc.add_torrent_file(filename, b64encode(filedata), options)

        if not remote_torrent:
            log.error('Failed sending torrent to Deluge')
            return False

        log.info('Torrent sent to Deluge successfully.')
        return self.downloadReturnId(remote_torrent)

    def getAllDownloadStatus(self):
        """Return a StatusList describing every torrent Deluge knows about.

        Returns None when the renamer "from" folder is missing, False on
        connection failure or an empty/errored queue.
        """
        log.debug('Checking Deluge download status.')

        if not os.path.isdir(Env.setting('from', 'renamer')):
            log.error('Renamer "from" folder doesn\'t exist.')
            return

        if not self.connect():
            return False

        statuses = StatusList(self)

        queue = self.drpc.get_alltorrents()
        if not queue:
            log.debug('Nothing in queue or error')
            return False

        for torrent_id in queue:
            item = queue[torrent_id]
            log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (item['name'], item['hash'], item['save_path'], item['move_completed_path'], item['hash'], item['progress'], item['state'], item['eta'], item['ratio'], item['stop_ratio'], item['is_seed'], item['is_finished'], item['paused']))

            # Deluge has no easy way to work out if a torrent is stalled or failing.
            #status = 'failed'
            status = 'busy'
            if item['is_seed'] and tryFloat(item['ratio']) < tryFloat(item['stop_ratio']):
                # We have item['seeding_time'] to work out what the seeding time is, but we do not
                # have access to the downloader seed_time, as with deluge we have no way to pass it
                # when the torrent is added. So Deluge will only look at the ratio.
                # See above comment in download().
                status = 'seeding'
            elif item['is_seed'] and item['is_finished'] and item['paused'] and item['state'] == 'Paused':
                status = 'completed'

            download_dir = item['save_path']
            if item['move_on_completed']:
                download_dir = item['move_completed_path']

            statuses.append({
                'id': item['hash'],
                'name': item['name'],
                'status': status,
                'original_status': item['state'],
                'seed_ratio': item['ratio'],
                'timeleft': str(timedelta(seconds = item['eta'])),
                'folder': ss(os.path.join(download_dir, item['name'])),
            })

        return statuses

    def pause(self, item, pause = True):
        """Pause (or resume, when pause is False) the torrent by its id."""
        if pause:
            return self.drpc.pause_torrent([item['id']])
        else:
            return self.drpc.resume_torrent([item['id']])

    def removeFailed(self, item):
        """Remove a failed download from Deluge, deleting its data too."""
        log.info('%s failed downloading, deleting...', item['name'])
        return self.drpc.remove_torrent(item['id'], True)

    def processComplete(self, item, delete_files = False):
        """Remove a finished torrent; optionally delete its files as well."""
        log.debug('Requesting Deluge to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else ''))
        return self.drpc.remove_torrent(item['id'], remove_local_data = delete_files)
class DelugeRPC(object):
    """Small wrapper around synchronousdeluge's DelugeClient.

    Every public method opens a fresh connection, performs one RPC call,
    and disconnects in a finally block.
    """

    host = 'localhost'
    port = 58846
    username = None
    password = None
    client = None

    def __init__(self, host = 'localhost', port = 58846, username = None, password = None):
        super(DelugeRPC, self).__init__()
        self.host = host
        self.port = port
        self.username = username
        self.password = password

    def connect(self):
        """Open a new client connection; port may be a string from config."""
        self.client = DelugeClient()
        self.client.connect(self.host, int(self.port), self.username, self.password)

    def add_torrent_magnet(self, torrent, options):
        """Add a magnet link; returns the new torrent id or False on error."""
        torrent_id = False
        try:
            self.connect()
            torrent_id = self.client.core.add_torrent_magnet(torrent, options).get()
            # Apply the configured label via the Label plugin, when set.
            if options['label']:
                self.client.label.set_torrent(torrent_id, options['label']).get()
        except Exception, err:
            log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

        return torrent_id

    def add_torrent_file(self, filename, torrent, options):
        """Add a base64-encoded .torrent; returns its id or False on error."""
        torrent_id = False
        try:
            self.connect()
            torrent_id = self.client.core.add_torrent_file(filename, torrent, options).get()
            # Apply the configured label via the Label plugin, when set.
            if options['label']:
                self.client.label.set_torrent(torrent_id, options['label']).get()
        except Exception, err:
            log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

        return torrent_id

    def get_alltorrents(self):
        """Return the status dict of all torrents, or False on error."""
        ret = False
        try:
            self.connect()
            # Empty filter and empty key list: fetch everything for every torrent.
            ret = self.client.core.get_torrents_status({}, {}).get()
        except Exception, err:
            log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

        return ret

    def pause_torrent(self, torrent_ids):
        """Pause the given list of torrent ids (best effort, errors logged)."""
        try:
            self.connect()
            self.client.core.pause_torrent(torrent_ids).get()
        except Exception, err:
            log.error('Failed to pause torrent: %s %s', (err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

    def resume_torrent(self, torrent_ids):
        """Resume the given list of torrent ids (best effort, errors logged)."""
        try:
            self.connect()
            self.client.core.resume_torrent(torrent_ids).get()
        except Exception, err:
            log.error('Failed to resume torrent: %s %s', (err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

    def remove_torrent(self, torrent_id, remove_local_data):
        """Remove one torrent; optionally delete its data. Returns the RPC
        result, or False on error."""
        ret = False
        try:
            self.connect()
            ret = self.client.core.remove_torrent(torrent_id, remove_local_data).get()
        except Exception, err:
            log.error('Failed to remove torrent: %s %s', (err, traceback.format_exc()))
        finally:
            if self.client:
                self.disconnect()

        return ret

    def disconnect(self):
        """Close the current client connection."""
        self.client.disconnect()

View File

@@ -42,6 +42,7 @@ config = [{
},
{
'name': 'priority',
'advanced': True,
'default': '0',
'type': 'dropdown',
'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100)],
@@ -57,6 +58,7 @@ config = [{
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},

View File

@@ -12,13 +12,16 @@ import xmlrpclib
log = CPLog(__name__)
class NZBGet(Downloader):
type = ['nzb']
protocol = ['nzb']
url = 'http://%(username)s:%(password)s@%(host)s/xmlrpc'
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
if not data: data = {}
if not filedata:
log.error('Unable to get NZB file: %s', traceback.format_exc())
@@ -32,7 +35,7 @@ class NZBGet(Downloader):
rpc = xmlrpclib.ServerProxy(url)
try:
if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
log.info('Successfully connected to NZBGet')
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
@@ -73,7 +76,7 @@ class NZBGet(Downloader):
rpc = xmlrpclib.ServerProxy(url)
try:
if rpc.writelog('INFO', 'CouchPotato connected to check status'):
log.info('Successfully connected to NZBGet')
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
@@ -142,7 +145,7 @@ class NZBGet(Downloader):
'status': 'completed' if item['ParStatus'] == 'SUCCESS' and item['ScriptStatus'] == 'SUCCESS' else 'failed',
'original_status': item['ParStatus'] + ', ' + item['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)),
'folder': item['DestDir']
'folder': ss(item['DestDir'])
})
return statuses
@@ -151,12 +154,12 @@ class NZBGet(Downloader):
log.info('%s failed downloading, deleting...', item['name'])
url = self.url % {'host': self.conf('host'), 'password': self.conf('password')}
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
rpc = xmlrpclib.ServerProxy(url)
try:
if rpc.writelog('INFO', 'CouchPotato connected to delete some history'):
log.info('Successfully connected to NZBGet')
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
@@ -171,11 +174,15 @@ class NZBGet(Downloader):
try:
history = rpc.history()
nzb_id = None
path = None
for hist in history:
if hist['Parameters'] and hist['Parameters']['couchpotato'] and hist['Parameters']['couchpotato'] == item['id']:
nzb_id = hist['ID']
path = hist['DestDir']
if rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]):
if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]):
shutil.rmtree(path, True)
except:
log.error('Failed deleting: %s', traceback.format_exc(0))

View File

@@ -38,6 +38,7 @@ config = [{
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},

View File

@@ -16,13 +16,16 @@ import urllib2
log = CPLog(__name__)
class NZBVortex(Downloader):
type = ['nzb']
protocol = ['nzb']
api_level = None
session_id = None
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
if not data: data = {}
# Send the nzb
try:
@@ -55,8 +58,8 @@ class NZBVortex(Downloader):
'name': item['uiTitle'],
'status': status,
'original_status': item['state'],
'timeleft': -1,
'folder': item['destinationPath'],
'timeleft':-1,
'folder': ss(item['destinationPath']),
})
return statuses
@@ -96,9 +99,10 @@ class NZBVortex(Downloader):
return False
def call(self, call, parameters = {}, repeat = False, auth = True, *args, **kwargs):
def call(self, call, parameters = None, repeat = False, auth = True, *args, **kwargs):
# Login first
if not parameters: parameters = {}
if not self.session_id and auth:
self.login()
@@ -121,7 +125,7 @@ class NZBVortex(Downloader):
# Try login and do again
if not repeat:
self.login()
return self.call(call, parameters = parameters, repeat = True, *args, **kwargs)
return self.call(call, parameters = parameters, repeat = True, **kwargs)
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
except:
@@ -147,7 +151,8 @@ class NZBVortex(Downloader):
return self.api_level
def isEnabled(self, manual, data):
def isEnabled(self, manual = False, data = None):
if not data: data = {}
return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()

View File

@@ -6,12 +6,15 @@ import traceback
log = CPLog(__name__)
class Pneumatic(Downloader):
type = ['nzb']
protocol = ['nzb']
strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s'
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
if not data: data = {}
directory = self.conf('directory')
if not directory or not os.path.isdir(directory):
@@ -26,7 +29,7 @@ class Pneumatic(Downloader):
try:
if not os.path.isfile(fullPath):
log.info('Downloading %s to %s.', (data.get('type'), fullPath))
log.info('Downloading %s to %s.', (data.get('protocol'), fullPath))
with open(fullPath, 'wb') as f:
f.write(filedata)

View File

@@ -0,0 +1,71 @@
from .main import rTorrent
def start():
    """Plugin entry point: return a new rTorrent downloader instance."""
    return rTorrent()
# Settings exposed on the CouchPotato "Downloaders" tab for the rTorrent plugin.
config = [{
    'name': 'rtorrent',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'rtorrent',
            'label': 'rTorrent',
            'description': '',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'torrent',
                },
                {
                    'name': 'url',
                    'default': 'http://localhost:80/RPC2',
                },
                {
                    'name': 'username',
                },
                {
                    'name': 'password',
                    'type': 'password',
                },
                {
                    'name': 'label',
                    'description': 'Label to apply on added torrents.',
                },
                {
                    'name': 'remove_complete',
                    'label': 'Remove torrent',
                    'default': False,
                    'advanced': True,
                    'type': 'bool',
                    'description': 'Remove the torrent after it finishes seeding.',
                },
                {
                    'name': 'delete_files',
                    'label': 'Remove files',
                    'default': True,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also remove the leftover files.',
                },
                {
                    'name': 'paused',
                    'type': 'bool',
                    'advanced': True,
                    'default': False,
                    'description': 'Add the torrent paused.',
                },
                {
                    'name': 'manual',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
                },
            ],
        }
    ],
}]

View File

@@ -0,0 +1,202 @@
from base64 import b16encode, b32decode
from datetime import timedelta
from hashlib import sha1
import shutil
from couchpotato.core.helpers.encoding import ss
from rtorrent.err import MethodError
from bencode import bencode, bdecode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.logger import CPLog
from rtorrent import RTorrent
log = CPLog(__name__)
class rTorrent(Downloader):
protocol = ['torrent', 'torrent_magnet']
rt = None
def connect(self):
# Already connected?
if self.rt is not None:
return self.rt
# Ensure url is set
if not self.conf('url'):
log.error('Config properties are not filled in correctly, url is missing.')
return False
if self.conf('username') and self.conf('password'):
self.rt = RTorrent(
self.conf('url'),
self.conf('username'),
self.conf('password')
)
else:
self.rt = RTorrent(self.conf('url'))
return self.rt
def _update_provider_group(self, name, data):
if data.get('seed_time'):
log.info('seeding time ignored, not supported')
if not name:
return False
if not self.connect():
return False
views = self.rt.get_views()
if name not in views:
self.rt.create_group(name)
group = self.rt.get_group(name)
try:
if data.get('seed_ratio'):
ratio = int(float(data.get('seed_ratio')) * 100)
log.debug('Updating provider ratio to %s, group name: %s', (ratio, name))
# Explicitly set all group options to ensure it is setup correctly
group.set_upload('1M')
group.set_min(ratio)
group.set_max(ratio)
group.set_command('d.stop')
group.enable()
else:
# Reset group action and disable it
group.set_command()
group.disable()
except MethodError, err:
log.error('Unable to set group options: %s', err.message)
return False
return True
def download(self, data, movie, filedata = None):
log.debug('Sending "%s" to rTorrent.', (data.get('name')))
if not self.connect():
return False
group_name = 'cp_' + data.get('provider').lower()
if not self._update_provider_group(group_name, data):
return False
torrent_params = {}
if self.conf('label'):
torrent_params['label'] = self.conf('label')
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Try download magnet torrents
if data.get('protocol') == 'torrent_magnet':
filedata = self.magnetToTorrent(data.get('url'))
if filedata is False:
return False
data['protocol'] = 'torrent'
info = bdecode(filedata)["info"]
torrent_hash = sha1(bencode(info)).hexdigest().upper()
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
# Send request to rTorrent
try:
# Send torrent to rTorrent
torrent = self.rt.load_torrent(filedata)
# Set label
if self.conf('label'):
torrent.set_custom(1, self.conf('label'))
# Set Ratio Group
torrent.set_visible(group_name)
# Start torrent
if not self.conf('paused', default = 0):
torrent.start()
return self.downloadReturnId(torrent_hash)
except Exception, err:
log.error('Failed to send torrent to rTorrent: %s', err)
return False
def getAllDownloadStatus(self):
log.debug('Checking rTorrent download status.')
if not self.connect():
return False
try:
torrents = self.rt.get_torrents()
statuses = StatusList(self)
for item in torrents:
status = 'busy'
if item.complete:
if item.active:
status = 'seeding'
else:
status = 'completed'
statuses.append({
'id': item.info_hash,
'name': item.name,
'status': status,
'seed_ratio': item.ratio,
'original_status': item.state,
'timeleft': str(timedelta(seconds = float(item.left_bytes) / item.down_rate)) if item.down_rate > 0 else -1,
'folder': ss(item.directory)
})
return statuses
except Exception, err:
log.error('Failed to get status from rTorrent: %s', err)
return False
def pause(self, download_info, pause = True):
    """Pause (default) or resume the torrent identified by download_info['id'].

    :return: result of the rTorrent pause/resume call, or False when the
             client is unreachable or the torrent cannot be found.
    """
    if not self.connect():
        return False

    torrent = self.rt.find_torrent(download_info['id'])
    if torrent is None:
        return False

    return torrent.pause() if pause else torrent.resume()
def removeFailed(self, item):
    """Handle a failed download: remove the torrent and delete its files."""
    log.info('%s failed downloading, deleting...', item['name'])
    removed = self.processComplete(item, delete_files = True)
    return removed
def processComplete(self, item, delete_files):
    """Erase a finished torrent from rTorrent and, when asked, its files on disk.

    :param item: status dict with at least 'id', 'name' and 'folder' keys
    :param delete_files: also remove the downloaded payload from disk
    :return: True when the torrent was erased, False otherwise
    """
    log.debug('Requesting rTorrent to remove the torrent %s%s.',
        (item['name'], ' and cleanup the downloaded files' if delete_files else ''))

    if self.connect():
        torrent = self.rt.find_torrent(item['id'])

        if torrent is not None:
            # erase() only detaches the torrent from the client; the payload
            # on disk stays untouched unless we remove it ourselves below.
            torrent.erase()

            if delete_files:
                # Second argument True == ignore_errors, so a partially
                # removed folder doesn't turn the cleanup into a failure.
                shutil.rmtree(item['folder'], True)

            return True

    return False

View File

@@ -34,6 +34,15 @@ config = [{
'label': 'Category',
'description': 'The category CP places the nzb in. Like <strong>movies</strong> or <strong>couchpotato</strong>',
},
{
'name': 'priority',
'label': 'Priority',
'type': 'dropdown',
'default': '0',
'advanced': True,
'values': [('Paused', -2), ('Low', -1), ('Normal', 0), ('High', 1), ('Forced', 2)],
'description': 'Add to the queue with this priority.',
},
{
'name': 'manual',
'default': False,
@@ -41,9 +50,18 @@ config = [{
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'remove_complete',
'advanced': True,
'label': 'Remove NZB',
'default': False,
'type': 'bool',
'description': 'Remove the NZB from history after it completed.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},

View File

@@ -10,11 +10,14 @@ import traceback
log = CPLog(__name__)
class Sabnzbd(Downloader):
type = ['nzb']
protocol = ['nzb']
def download(self, data = {}, movie = {}, filedata = None):
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
if not data: data = {}
log.info('Sending "%s" to SABnzbd.', data.get('name'))
@@ -22,11 +25,13 @@ class Sabnzbd(Downloader):
'cat': self.conf('category'),
'mode': 'addurl',
'nzbname': self.createNzbName(data, movie),
'priority': self.conf('priority'),
}
nzb_filename = None
if filedata:
if len(filedata) < 50:
log.error('No proper nzb available: %s', (filedata))
log.error('No proper nzb available: %s', filedata)
return False
# If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
@@ -36,7 +41,7 @@ class Sabnzbd(Downloader):
req_params['name'] = data.get('url')
try:
if req_params.get('mode') is 'addfile':
if nzb_filename and req_params.get('mode') is 'addfile':
sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True)
else:
sab_data = self.call(req_params)
@@ -107,7 +112,7 @@ class Sabnzbd(Downloader):
'status': status,
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': item['storage'],
'folder': ss(item['storage']),
})
return statuses
@@ -129,6 +134,22 @@ class Sabnzbd(Downloader):
return True
def processComplete(self, item, delete_files = False):
log.debug('Requesting SabNZBd to remove the NZB %s.', item['name'])
try:
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '0',
'value': item['id']
}, use_json = False)
except:
log.error('Failed removing: %s', traceback.format_exc(0))
return False
return True
def call(self, request_params, use_json = True, **kwargs):
url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(mergeDicts(request_params, {

View File

@@ -9,13 +9,15 @@ log = CPLog(__name__)
class Synology(Downloader):
type = ['nzb', 'torrent', 'torrent_magnet']
protocol = ['nzb', 'torrent', 'torrent_magnet']
log = CPLog(__name__)
def download(self, data, movie, filedata = None):
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
if not data: data = {}
response = False
log.error('Sending "%s" (%s) to Synology.', (data['name'], data['type']))
log.error('Sending "%s" (%s) to Synology.', (data['name'], data['protocol']))
# Load host from config and split out port.
host = self.conf('host').split(':')
@@ -26,42 +28,44 @@ class Synology(Downloader):
try:
# Send request to Synology
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
if data['type'] == 'torrent_magnet':
if data['protocol'] == 'torrent_magnet':
log.info('Adding torrent URL %s', data['url'])
response = srpc.create_task(url = data['url'])
elif data['type'] in ['nzb', 'torrent']:
log.info('Adding %s' % data['type'])
elif data['protocol'] in ['nzb', 'torrent']:
log.info('Adding %s' % data['protocol'])
if not filedata:
log.error('No %s data found' % data['type'])
log.error('No %s data found' % data['protocol'])
else:
filename = data['name'] + '.' + data['type']
filename = data['name'] + '.' + data['protocol']
response = srpc.create_task(filename = filename, filedata = filedata)
except Exception, err:
log.error('Exception while adding torrent: %s', err)
finally:
return response
def getEnabledDownloadType(self):
def getEnabledProtocol(self):
if self.conf('use_for') == 'both':
return super(Synology, self).getEnabledDownloadType()
return super(Synology, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent':
return ['torrent', 'torrent_magnet']
else:
return ['nzb']
def isEnabled(self, manual, data = {}):
for_type = ['both']
if data and 'torrent' in data.get('type'):
for_type.append('torrent')
def isEnabled(self, manual = False, data = None):
if not data: data = {}
for_protocol = ['both']
if data and 'torrent' in data.get('protocol'):
for_protocol.append('torrent')
elif data:
for_type.append(data.get('type'))
for_protocol.append(data.get('protocol'))
return super(Synology, self).isEnabled(manual, data) and\
((self.conf('use_for') in for_type))
((self.conf('use_for') in for_protocol))
class SynologyRPC(object):
'''SynologyRPC lite library'''
"""SynologyRPC lite library"""
def __init__(self, host = 'localhost', port = 5000, username = None, password = None):
@@ -98,7 +102,7 @@ class SynologyRPC(object):
req = requests.post(url, data = args, files = files)
req.raise_for_status()
response = json.loads(req.text)
if response['success'] == True:
if response['success']:
log.info('Synology action successfull')
return response
except requests.ConnectionError, err:
@@ -111,11 +115,11 @@ class SynologyRPC(object):
return response
def create_task(self, url = None, filename = None, filedata = None):
''' Creates new download task in Synology DownloadStation. Either specify
""" Creates new download task in Synology DownloadStation. Either specify
url or pair (filename, filedata).
Returns True if task was created, False otherwise
'''
"""
result = False
# login
if self._login():

View File

@@ -25,6 +25,13 @@ config = [{
'default': 'localhost:9091',
'description': 'Hostname with port. Usually <strong>localhost:9091</strong>',
},
{
'name': 'rpc_url',
'type': 'string',
'default': 'transmission',
'advanced': True,
'description': 'Change if you don\'t run Transmission RPC at the default url.',
},
{
'name': 'username',
},
@@ -32,30 +39,33 @@ config = [{
'name': 'password',
'type': 'password',
},
{
'name': 'paused',
'type': 'bool',
'default': False,
'description': 'Add the torrent paused.',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Download to this directory. Keep empty for default Transmission download directory.',
},
{
'name': 'ratio',
'default': 10,
'type': 'float',
'name': 'remove_complete',
'label': 'Remove torrent',
'default': True,
'advanced': True,
'description': 'Stop transfer when reaching ratio',
'type': 'bool',
'description': 'Remove the torrent from Transmission after it finished seeding.',
},
{
'name': 'ratiomode',
'default': 0,
'type': 'int',
'name': 'delete_files',
'label': 'Remove files',
'default': True,
'type': 'bool',
'advanced': True,
'description': '0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.',
'description': 'Also remove the leftover files.',
},
{
'name': 'paused',
'type': 'bool',
'advanced': True,
'default': False,
'description': 'Add the torrent paused.',
},
{
'name': 'manual',
@@ -64,6 +74,20 @@ config = [{
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'stalled_as_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Consider a stalled torrent as failed',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],

View File

@@ -1,6 +1,7 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.helpers.encoding import isInt, ss
from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
@@ -8,7 +9,6 @@ import httplib
import json
import os.path
import re
import traceback
import urllib2
log = CPLog(__name__)
@@ -16,151 +16,140 @@ log = CPLog(__name__)
class Transmission(Downloader):
type = ['torrent', 'torrent_magnet']
protocol = ['torrent', 'torrent_magnet']
log = CPLog(__name__)
trpc = None
def download(self, data, movie, filedata = None):
log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('type')))
def connect(self):
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
# Set parameters for Transmission
params = {
'paused': self.conf('paused', default = 0),
}
if not self.trpc:
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url'), username = self.conf('username'), password = self.conf('password'))
if len(self.conf('directory', default = '')) > 0:
folder_name = self.createFileName(data, filedata, movie)[:-len(data.get('type')) - 1]
params['download-dir'] = os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep)
return self.trpc
torrent_params = {}
if self.conf('ratio'):
torrent_params = {
'seedRatioLimit': self.conf('ratio'),
'seedRatioMode': self.conf('ratiomode')
}
def download(self, data, movie, filedata = None):
if not filedata and data.get('type') == 'torrent':
log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('protocol')))
if not self.connect():
return False
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Send request to Transmission
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
if data.get('type') == 'torrent_magnet':
remote_torrent = trpc.add_torrent_uri(data.get('url'), arguments = params)
torrent_params['trackerAdd'] = self.torrent_trackers
# Set parameters for adding torrent
params = {
'paused': self.conf('paused', default = False)
}
if self.conf('directory'):
if os.path.isdir(self.conf('directory')):
params['download-dir'] = self.conf('directory')
else:
remote_torrent = trpc.add_torrent_file(b64encode(filedata), arguments = params)
log.error('Download directory from Transmission settings: %s doesn\'t exist', self.conf('directory'))
if not remote_torrent:
return False
# Change parameters of torrent
torrent_params = {}
if data.get('seed_ratio'):
torrent_params['seedRatioLimit'] = tryFloat(data.get('seed_ratio'))
torrent_params['seedRatioMode'] = 1
# Change settings of added torrents
elif torrent_params:
trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
if data.get('seed_time'):
torrent_params['seedIdleLimit'] = tryInt(data.get('seed_time')) * 60
torrent_params['seedIdleMode'] = 1
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
except:
log.error('Failed to change settings for transfer: %s', traceback.format_exc())
# Send request to Transmission
if data.get('protocol') == 'torrent_magnet':
remote_torrent = self.trpc.add_torrent_uri(data.get('url'), arguments = params)
torrent_params['trackerAdd'] = self.torrent_trackers
else:
remote_torrent = self.trpc.add_torrent_file(b64encode(filedata), arguments = params)
if not remote_torrent:
log.error('Failed sending torrent to Transmission')
return False
# Change settings of added torrents
if torrent_params:
self.trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
def getAllDownloadStatus(self):
log.debug('Checking Transmission download status.')
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
if not self.connect():
return False
# Go through Queue
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return_params = {
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isFinished', 'downloadDir', 'uploadRatio']
}
queue = trpc.get_alltorrents(return_params)
except Exception, err:
log.error('Failed getting queue: %s', err)
return False
if not queue:
return []
statuses = StatusList(self)
# Get torrents status
# CouchPotato Status
#status = 'busy'
#status = 'failed'
#status = 'completed'
# Transmission Status
#status = 0 => "Torrent is stopped"
#status = 1 => "Queued to check files"
#status = 2 => "Checking files"
#status = 3 => "Queued to download"
#status = 4 => "Downloading"
#status = 4 => "Queued to seed"
#status = 6 => "Seeding"
#To do :
# add checking file
# manage no peer in a range time => fail
return_params = {
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit']
}
queue = self.trpc.get_alltorrents(return_params)
if not (queue and queue.get('torrents')):
log.debug('Nothing in queue or error')
return False
for item in queue['torrents']:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / confRatio=%s / isFinished=%s', (item['name'], item['id'], item['downloadDir'], item['hashString'], item['percentDone'], item['status'], item['eta'], item['uploadRatio'], self.conf('ratio'), item['isFinished']))
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / isFinished=%s',
(item['name'], item['id'], item['downloadDir'], item['hashString'], item['percentDone'], item['status'], item['eta'], item['uploadRatio'], item['isFinished']))
if not os.path.isdir(Env.setting('from', 'renamer')):
log.error('Renamer "from" folder doesn\'t to exist.')
return
if (item['percentDone'] * 100) >= 100 and (item['status'] == 6 or item['status'] == 0) and item['uploadRatio'] > self.conf('ratio'):
try:
trpc.stop_torrent(item['hashString'], {})
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'completed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': os.path.join(item['downloadDir'], item['name']),
})
except Exception, err:
log.error('Failed to stop and remove torrent "%s" with error: %s', (item['name'], err))
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'failed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
})
else:
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'busy',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = item['eta'])), # Is ETA in seconds??
})
status = 'busy'
if item['isStalled'] and self.conf('stalled_as_failed'):
status = 'failed'
elif item['status'] == 0 and item['percentDone'] == 1:
status = 'completed'
elif item['status'] in [5, 6]:
status = 'seeding'
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': status,
'original_status': item['status'],
'seed_ratio': item['uploadRatio'],
'timeleft': str(timedelta(seconds = item['eta'])),
'folder': ss(os.path.join(item['downloadDir'], item['name'])),
})
return statuses
def pause(self, item, pause = True):
if pause:
return self.trpc.stop_torrent(item['id'])
else:
return self.trpc.start_torrent(item['id'])
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
return self.trpc.remove_torrent(item['hashString'], True)
def processComplete(self, item, delete_files = False):
log.debug('Requesting Transmission to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.trpc.remove_torrent(item['hashString'], delete_files)
class TransmissionRPC(object):
"""TransmissionRPC lite library"""
def __init__(self, host = 'localhost', port = 9091, username = None, password = None):
def __init__(self, host = 'localhost', port = 9091, rpc_url = 'transmission', username = None, password = None):
super(TransmissionRPC, self).__init__()
self.url = 'http://' + host + ':' + str(port) + '/transmission/rpc'
self.url = 'http://' + host + ':' + str(port) + '/' + rpc_url + '/rpc'
self.tag = 0
self.session_id = 0
self.session = {}
@@ -184,7 +173,7 @@ class TransmissionRPC(object):
log.debug('request: %s', json.dumps(ojson))
log.debug('response: %s', json.dumps(response))
if response['result'] == 'success':
log.debug('Transmission action successfull')
log.debug('Transmission action successful')
return response['arguments']
else:
log.debug('Unknown failure sending command to Transmission. Return text is: %s', response['result'])
@@ -236,13 +225,15 @@ class TransmissionRPC(object):
post_data = {'arguments': arguments, 'method': 'torrent-get', 'tag': self.tag}
return self._request(post_data)
def stop_torrent(self, torrent_id, arguments):
arguments['ids'] = torrent_id
post_data = {'arguments': arguments, 'method': 'torrent-stop', 'tag': self.tag}
def stop_torrent(self, torrent_id):
post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-stop', 'tag': self.tag}
return self._request(post_data)
def remove_torrent(self, torrent_id, remove_local_data, arguments):
arguments['ids'] = torrent_id
arguments['delete-local-data'] = remove_local_data
post_data = {'arguments': arguments, 'method': 'torrent-remove', 'tag': self.tag}
def start_torrent(self, torrent_id):
post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-start', 'tag': self.tag}
return self._request(post_data)
def remove_torrent(self, torrent_id, delete_local_data):
post_data = {'arguments': {'ids': torrent_id, 'delete-local-data': delete_local_data}, 'method': 'torrent-remove', 'tag': self.tag}
return self._request(post_data)

View File

@@ -11,7 +11,7 @@ config = [{
'list': 'download_providers',
'name': 'utorrent',
'label': 'uTorrent',
'description': 'Use <a href="http://www.utorrent.com/" target="_blank">uTorrent</a> to download torrents.',
'description': 'Use <a href="http://www.utorrent.com/" target="_blank">uTorrent</a> (3.0+) to download torrents.',
'wizard': True,
'options': [
{
@@ -36,9 +36,26 @@ config = [{
'name': 'label',
'description': 'Label to add torrent as.',
},
{
'name': 'remove_complete',
'label': 'Remove torrent',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Remove the torrent from uTorrent after it finished seeding.',
},
{
'name': 'delete_files',
'label': 'Remove files',
'default': True,
'type': 'bool',
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'paused',
'type': 'bool',
'advanced': True,
'default': False,
'description': 'Add the torrent paused.',
},
@@ -49,6 +66,13 @@ config = [{
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],

View File

@@ -1,7 +1,8 @@
from base64 import b16encode, b32decode
from bencode import bencode, bdecode
from bencode import bencode as benc, bdecode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt, ss
from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.logger import CPLog
from datetime import timedelta
from hashlib import sha1
@@ -9,123 +10,170 @@ from multipartpost import MultipartPostHandler
import cookielib
import httplib
import json
import os
import re
import stat
import time
import urllib
import urllib2
log = CPLog(__name__)
class uTorrent(Downloader):
type = ['torrent', 'torrent_magnet']
protocol = ['torrent', 'torrent_magnet']
utorrent_api = None
def download(self, data, movie, filedata = None):
log.debug('Sending "%s" (%s) to uTorrent.', (data.get('name'), data.get('type')))
def connect(self):
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return self.utorrent_api
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
if not data: data = {}
log.debug('Sending "%s" (%s) to uTorrent.', (data.get('name'), data.get('protocol')))
if not self.connect():
return False
settings = self.utorrent_api.get_settings()
if not settings:
return False
#Fix settings in case they are not set for CPS compatibility
new_settings = {}
if not (settings.get('seed_prio_limitul') == 0 and settings['seed_prio_limitul_flag']):
new_settings['seed_prio_limitul'] = 0
new_settings['seed_prio_limitul_flag'] = True
log.info('Updated uTorrent settings to set a torrent to complete after it the seeding requirements are met.')
if settings.get('bt.read_only_on_complete'): #This doesn't work as this option seems to be not available through the api. Mitigated with removeReadOnly function
new_settings['bt.read_only_on_complete'] = False
log.info('Updated uTorrent settings to not set the files to read only after completing.')
if new_settings:
self.utorrent_api.set_settings(new_settings)
torrent_params = {}
if self.conf('label'):
torrent_params['label'] = self.conf('label')
if not filedata and data.get('type') == 'torrent':
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
if data.get('type') == 'torrent_magnet':
if data.get('protocol') == 'torrent_magnet':
torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers)
else:
info = bdecode(filedata)["info"]
torrent_hash = sha1(bencode(info)).hexdigest().upper()
torrent_hash = sha1(benc(info)).hexdigest().upper()
torrent_filename = self.createFileName(data, filedata, movie)
if data.get('seed_ratio'):
torrent_params['seed_override'] = 1
torrent_params['seed_ratio'] = tryInt(tryFloat(data['seed_ratio']) * 1000)
if data.get('seed_time'):
torrent_params['seed_override'] = 1
torrent_params['seed_time'] = tryInt(data['seed_time']) * 3600
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
# Send request to uTorrent
try:
if not self.utorrent_api:
self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
if data.get('protocol') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(data.get('url'))
else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata)
if data.get('type') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(data.get('url'))
else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata)
# Change settings of added torrent
self.utorrent_api.set_torrent(torrent_hash, torrent_params)
if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash)
# Change settings of added torrents
self.utorrent_api.set_torrent(torrent_hash, torrent_params)
if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash)
return self.downloadReturnId(torrent_hash)
except Exception, err:
log.error('Failed to send torrent to uTorrent: %s', err)
return False
return self.downloadReturnId(torrent_hash)
def getAllDownloadStatus(self):
log.debug('Checking uTorrent download status.')
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
try:
self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
except Exception, err:
log.error('Failed to get uTorrent object: %s', err)
return False
data = ''
try:
data = self.utorrent_api.get_status()
queue = json.loads(data)
if queue.get('error'):
log.error('Error getting data from uTorrent: %s', queue.get('error'))
return False
except Exception, err:
log.error('Failed to get status from uTorrent: %s', err)
return False
if queue.get('torrents', []) == []:
log.debug('Nothing in queue')
if not self.connect():
return False
statuses = StatusList(self)
data = self.utorrent_api.get_status()
if not data:
log.error('Error getting data from uTorrent')
return False
queue = json.loads(data)
if queue.get('error'):
log.error('Error getting data from uTorrent: %s', queue.get('error'))
return False
if not queue.get('torrents'):
log.debug('Nothing in queue')
return False
# Get torrents
for item in queue.get('torrents', []):
for item in queue['torrents']:
# item[21] = Paused | Downloading | Seeding | Finished
status = 'busy'
if item[21] == 'Finished' or item[21] == 'Seeding':
if 'Finished' in item[21]:
status = 'completed'
self.removeReadOnly(item[26])
elif 'Seeding' in item[21]:
status = 'seeding'
self.removeReadOnly(item[26])
statuses.append({
'id': item[0],
'name': item[2],
'status': status,
'seed_ratio': float(item[7]) / 1000,
'original_status': item[1],
'timeleft': str(timedelta(seconds = item[10])),
'folder': item[26],
'folder': ss(item[26]),
})
return statuses
def pause(self, item, pause = True):
if not self.connect():
return False
return self.utorrent_api.pause_torrent(item['id'], pause)
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
if not self.connect():
return False
return self.utorrent_api.remove_torrent(item['id'], remove_data = True)
def processComplete(self, item, delete_files = False):
log.debug('Requesting uTorrent to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else ''))
if not self.connect():
return False
return self.utorrent_api.remove_torrent(item['id'], remove_data = delete_files)
def removeReadOnly(self, folder):
#Removes all read-only flags in a folder
if folder and os.path.isdir(folder):
for root, folders, filenames in os.walk(folder):
for filename in filenames:
os.chmod(os.path.join(root, filename), stat.S_IWRITE)
class uTorrentAPI(object):
@@ -190,8 +238,22 @@ class uTorrentAPI(object):
action += "&s=%s&v=%s" % (k, v)
return self._request(action)
def pause_torrent(self, hash):
action = "action=pause&hash=%s" % hash
def pause_torrent(self, hash, pause = True):
if pause:
action = "action=pause&hash=%s" % hash
else:
action = "action=unpause&hash=%s" % hash
return self._request(action)
def stop_torrent(self, hash):
action = "action=stop&hash=%s" % hash
return self._request(action)
def remove_torrent(self, hash, remove_data = False):
if remove_data:
action = "action=removedata&hash=%s" % hash
else:
action = "action=remove&hash=%s" % hash
return self._request(action)
def get_status(self):
@@ -219,3 +281,13 @@ class uTorrentAPI(object):
log.error('Failed to get settings from uTorrent: %s', err)
return settings_dict
def set_settings(self, settings_dict = None):
if not settings_dict: settings_dict = {}
for key in settings_dict:
if isinstance(settings_dict[key], bool):
settings_dict[key] = 1 if settings_dict[key] else 0
action = 'action=setsetting' + ''.join(['&s=%s&v=%s' % (key, value) for (key, value) in settings_dict.items()])
return self._request(action)

View File

@@ -21,9 +21,11 @@ def addEvent(name, handler, priority = 100):
def createHandle(*args, **kwargs):
h = None
try:
# Open handler
has_parent = hasattr(handler, 'im_self')
parent = None
if has_parent:
parent = handler.im_self
bc = hasattr(parent, 'beforeCall')
@@ -33,7 +35,7 @@ def addEvent(name, handler, priority = 100):
h = runHandler(name, handler, *args, **kwargs)
# Close handler
if has_parent:
if parent and has_parent:
ac = hasattr(parent, 'afterCall')
if ac: parent.afterCall(handler)
except:
@@ -53,11 +55,6 @@ def removeEvent(name, handler):
def fireEvent(name, *args, **kwargs):
if not events.has_key(name): return
e = Event(name = name, threads = 10, asynch = kwargs.get('async', False), exc_info = True, traceback = True, lock = threading.RLock())
for event in events[name]:
e.handle(event['handler'], priority = event['priority'])
#log.debug('Firing event %s', name)
try:
@@ -67,7 +64,6 @@ def fireEvent(name, *args, **kwargs):
'single': False, # Return single handler
'merge': False, # Merge items
'in_order': False, # Fire them in specific order, waits for the other to finish
'async': False
}
# Do options
@@ -78,12 +74,32 @@ def fireEvent(name, *args, **kwargs):
options[x] = val
except: pass
# Make sure only 1 event is fired at a time when order is wanted
kwargs['event_order_lock'] = threading.RLock() if options['in_order'] or options['single'] else None
kwargs['event_return_on_result'] = options['single']
if len(events[name]) == 1:
# Fire
result = e(*args, **kwargs)
single = None
try:
single = events[name][0]['handler'](*args, **kwargs)
except:
log.error('Failed running single event: %s', traceback.format_exc())
# Don't load thread for single event
result = {
'single': (single is not None, single),
}
else:
e = Event(name = name, threads = 10, exc_info = True, traceback = True, lock = threading.RLock())
for event in events[name]:
e.handle(event['handler'], priority = event['priority'])
# Make sure only 1 event is fired at a time when order is wanted
kwargs['event_order_lock'] = threading.RLock() if options['in_order'] or options['single'] else None
kwargs['event_return_on_result'] = options['single']
# Fire
result = e(*args, **kwargs)
if options['single'] and not options['merge']:
results = None

View File

@@ -11,7 +11,8 @@ log = CPLog(__name__)
def toSafeString(original):
valid_chars = "-_.() %s%s" % (ascii_letters, digits)
cleanedFilename = unicodedata.normalize('NFKD', toUnicode(original)).encode('ASCII', 'ignore')
return ''.join(c for c in cleanedFilename if c in valid_chars)
valid_string = ''.join(c for c in cleanedFilename if c in valid_chars)
return ' '.join(valid_string.split())
def simplifyString(original):
string = stripAccents(original.lower())
@@ -62,7 +63,7 @@ def stripAccents(s):
def tryUrlencode(s):
new = u''
if isinstance(s, (dict)):
if isinstance(s, dict):
for key, value in s.iteritems():
new += u'&%s=%s' % (key, tryUrlencode(value))

View File

@@ -8,7 +8,7 @@ def getParams(params):
reg = re.compile('^[a-z0-9_\.]+$')
current = temp = {}
temp = {}
for param, value in sorted(params.iteritems()):
nest = re.split("([\[\]]+)", param)

View File

@@ -6,7 +6,7 @@ log = CPLog(__name__)
class RSS(object):
def getTextElements(self, xml, path):
''' Find elements and return tree'''
""" Find elements and return tree"""
textelements = []
try:
@@ -28,7 +28,7 @@ class RSS(object):
return elements
def getElement(self, xml, path):
''' Find element and return text'''
""" Find element and return text"""
try:
return xml.find(path)
@@ -36,7 +36,7 @@ class RSS(object):
return
def getTextElement(self, xml, path):
''' Find element and return text'''
""" Find element and return text"""
try:
return xml.find(path).text

View File

@@ -106,6 +106,11 @@ def md5(text):
def sha1(text):
return hashlib.sha1(text).hexdigest()
def isLocalIP(ip):
ip = ip.lstrip('htps:/')
regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/'
return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.'
def getExt(filename):
return os.path.splitext(filename)[1][1:]
@@ -113,8 +118,8 @@ def cleanHost(host):
if not host.startswith(('http://', 'https://')):
host = 'http://' + host
if not host.endswith('/'):
host += '/'
host = host.rstrip('/')
host += '/'
return host
@@ -128,7 +133,7 @@ def getImdb(txt, check_inside = True, multiple = False):
try:
ids = re.findall('(tt\d{7})', txt)
if multiple:
return ids if len(ids) > 0 else []
return list(set(ids)) if len(ids) > 0 else []
return ids[0]
except IndexError:
pass
@@ -140,7 +145,11 @@ def tryInt(s):
except: return 0
def tryFloat(s):
try: return float(s) if '.' in s else tryInt(s)
try:
if isinstance(s, str):
return float(s) if '.' in s else tryInt(s)
else:
return float(s)
except: return 0
def natsortKey(s):
@@ -170,11 +179,15 @@ def getTitle(library_dict):
def possibleTitles(raw_title):
titles = []
titles = [
toSafeString(raw_title).lower(),
raw_title.lower(),
simplifyString(raw_title)
]
titles.append(toSafeString(raw_title).lower())
titles.append(raw_title.lower())
titles.append(simplifyString(raw_title))
# replace some chars
new_title = raw_title.replace('&', 'and')
titles.append(simplifyString(new_title))
return list(set(titles))

View File

@@ -6,15 +6,24 @@ import traceback
log = CPLog(__name__)
class Loader(object):
class Loader(object):
plugins = {}
providers = {}
modules = {}
def preload(self, root = ''):
def addPath(self, root, base_path, priority, recursive = False):
for filename in os.listdir(os.path.join(root, *base_path)):
path = os.path.join(os.path.join(root, *base_path), filename)
if os.path.isdir(path) and filename[:2] != '__':
if u'__init__.py' in os.listdir(path):
new_base_path = ''.join(s + '.' for s in base_path) + filename
self.paths[new_base_path.replace('.', '_')] = (priority, new_base_path, path)
if recursive:
self.addPath(root, base_path + [filename], priority, recursive = True)
def preload(self, root = ''):
core = os.path.join(root, 'couchpotato', 'core')
self.paths = {
@@ -25,12 +34,10 @@ class Loader(object):
}
# Add providers to loader
provider_dir = os.path.join(root, 'couchpotato', 'core', 'providers')
for provider in os.listdir(provider_dir):
path = os.path.join(provider_dir, provider)
if os.path.isdir(path):
self.paths[provider + '_provider'] = (25, 'couchpotato.core.providers.' + provider, path)
self.addPath(root, ['couchpotato', 'core', 'providers'], 25, recursive = False)
# Add media to loader
self.addPath(root, ['couchpotato', 'core', 'media'], 25, recursive = True)
for plugin_type, plugin_tuple in self.paths.iteritems():
priority, module, dir_name = plugin_tuple
@@ -43,7 +50,13 @@ class Loader(object):
for module_name, plugin in sorted(self.modules[priority].iteritems()):
# Load module
try:
m = getattr(self.loadModule(module_name), plugin.get('name'))
if plugin.get('name')[:2] == '__':
continue
m = self.loadModule(module_name)
if m is None:
continue
m = getattr(m, plugin.get('name'))
log.info('Loading %s: %s', (plugin['type'], plugin['name']))
@@ -53,7 +66,7 @@ class Loader(object):
self.loadPlugins(m, plugin.get('name'))
except ImportError as e:
# todo:: subclass ImportError for missing requirements.
if (e.message.lower().startswith("missing")):
if e.message.lower().startswith("missing"):
log.error(e.message)
pass
# todo:: this needs to be more descriptive.
@@ -73,19 +86,21 @@ class Loader(object):
splitted = module.split('.')
for sub in splitted[1:]:
m = getattr(m, sub)
if hasattr(m, 'config'):
fireEvent('settings.options', splitted[-1] + '_config', getattr(m, 'config'))
except:
raise
for cur_file in glob.glob(os.path.join(dir_name, '*')):
name = os.path.basename(cur_file)
if os.path.isdir(os.path.join(dir_name, name)):
if os.path.isdir(os.path.join(dir_name, name)) and name != 'static' and os.path.isfile(os.path.join(cur_file, '__init__.py')):
module_name = '%s.%s' % (module, name)
self.addModule(priority, plugin_type, module_name, name)
def loadSettings(self, module, name, save = True):
if not hasattr(module, 'config'):
log.debug('Skip loading settings for plugin %s as it has no config section' % module.__file__)
return False
try:
for section in module.config:
fireEvent('settings.options', section['name'], section)
@@ -100,15 +115,14 @@ class Loader(object):
return False
def loadPlugins(self, module, name):
if not hasattr(module, 'start'):
log.debug('Skip startup for plugin %s as it has no start section' % module.__file__)
return False
try:
klass = module.start()
klass.registerPlugin()
if klass and getattr(klass, 'auto_register_static'):
klass.registerStatic(module.__file__)
module.start()
return True
except Exception, e:
except:
log.error('Failed loading plugin "%s": %s', (module.__file__, traceback.format_exc()))
return False
@@ -131,5 +145,8 @@ class Loader(object):
for sub in splitted[1:-1]:
m = getattr(m, sub)
return m
except ImportError:
log.debug('Skip loading module plugin %s: %s', (name, traceback.format_exc()))
return None
except:
raise

View File

@@ -0,0 +1,13 @@
from couchpotato.core.event import addEvent
from couchpotato.core.plugins.base import Plugin
class MediaBase(Plugin):
_type = None
def initType(self):
addEvent('media.types', self.getType)
def getType(self):
return self._type

View File

@@ -0,0 +1,13 @@
from couchpotato.core.event import addEvent
from couchpotato.core.plugins.base import Plugin
class LibraryBase(Plugin):
_type = None
def initType(self):
addEvent('library.types', self.getType)
def getType(self):
return self._type

View File

@@ -0,0 +1,75 @@
from .main import Searcher
def start():
return Searcher()
config = [{
'name': 'searcher',
'order': 20,
'groups': [
{
'tab': 'searcher',
'name': 'searcher',
'label': 'Basics',
'description': 'General search options',
'options': [
{
'name': 'preferred_method',
'label': 'First search',
'description': 'Which of the methods do you prefer',
'default': 'both',
'type': 'dropdown',
'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrents', 'torrent')],
},
],
}, {
'tab': 'searcher',
'subtab': 'category',
'subtab_label': 'Categories',
'name': 'filter',
'label': 'Global filters',
'description': 'Prefer, ignore & required words in release names',
'options': [
{
'name': 'preferred_words',
'label': 'Preferred',
'default': '',
'placeholder': 'Example: CtrlHD, Amiable, Wiki',
'description': 'Words that give the releases a higher score.'
},
{
'name': 'required_words',
'label': 'Required',
'default': '',
'placeholder': 'Example: DTS, AC3 & English',
'description': 'Release should contain at least one set of words. Sets are separated by "," and each word within a set must be separated with "&"'
},
{
'name': 'ignored_words',
'label': 'Ignored',
'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs',
'description': 'Ignores releases that match any of these sets. (Works like explained above)'
},
],
},
],
}, {
'name': 'nzb',
'groups': [
{
'tab': 'searcher',
'name': 'searcher',
'label': 'NZB',
'wizard': True,
'options': [
{
'name': 'retention',
'label': 'Usenet Retention',
'default': 1500,
'type': 'int',
'unit': 'days'
},
],
},
],
}]

View File

@@ -0,0 +1,45 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class SearcherBase(Plugin):
in_progress = False
def __init__(self):
super(SearcherBase, self).__init__()
addEvent('searcher.progress', self.getProgress)
addEvent('%s.searcher.progress' % self.getType(), self.getProgress)
self.initCron()
def initCron(self):
""" Set the searcher cronjob
Make sure to reset cronjob after setting has changed
"""
_type = self.getType()
def setCrons():
fireEvent('schedule.cron', '%s.searcher.all' % _type, self.searchAll,
day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute'))
addEvent('app.load', setCrons)
addEvent('setting.save.%s_searcher.cron_day.after' % _type, setCrons)
addEvent('setting.save.%s_searcher.cron_hour.after' % _type, setCrons)
addEvent('setting.save.%s_searcher.cron_minute.after' % _type, setCrons)
def getProgress(self, **kwargs):
""" Return progress of current searcher"""
progress = {
self.getType(): self.in_progress
}
return progress

View File

@@ -0,0 +1,234 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.variable import md5, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.base import SearcherBase
from couchpotato.core.settings.model import Movie, Release, ReleaseInfo
from couchpotato.environment import Env
from inspect import ismethod, isfunction
import datetime
import re
import time
import traceback
log = CPLog(__name__)
class Searcher(SearcherBase):
def __init__(self):
addEvent('searcher.protocols', self.getSearchProtocols)
addEvent('searcher.contains_other_quality', self.containsOtherQuality)
addEvent('searcher.correct_year', self.correctYear)
addEvent('searcher.correct_name', self.correctName)
addEvent('searcher.download', self.download)
addApiView('searcher.full_search', self.searchAllView, docs = {
'desc': 'Starts a full search for all media',
})
addApiView('searcher.progress', self.getProgressForAll, docs = {
'desc': 'Get the progress of all media searches',
'return': {'type': 'object', 'example': """{
'movie': False || object, total & to_go,
'show': False || object, total & to_go,
}"""},
})
def searchAllView(self):
results = {}
for _type in fireEvent('media.types'):
results[_type] = fireEvent('%s.searcher.all_view' % _type)
return results
def getProgressForAll(self):
progress = fireEvent('searcher.progress', merge = True)
return progress
def download(self, data, movie, manual = False):
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if downloader_enabled:
snatched_status = fireEvent('status.get', 'snatched', single = True)
# Download movie to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
download_result = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True)
log.debug('Downloader result: %s', download_result)
if download_result:
try:
# Mark release as snatched
db = get_session()
rls = db.query(Release).filter_by(identifier = md5(data['url'])).first()
if rls:
renamer_enabled = Env.setting('enabled', 'renamer')
done_status = fireEvent('status.get', 'done', single = True)
rls.status_id = done_status.get('id') if not renamer_enabled else snatched_status.get('id')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict())
# If renamer isn't used, mark movie done
if not renamer_enabled:
active_status = fireEvent('status.get', 'active', single = True)
done_status = fireEvent('status.get', 'done', single = True)
try:
if movie['status_id'] == active_status.get('id'):
for profile_type in movie['profile']['types']:
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking movie as finished: %s', log_movie)
# Mark release done
rls.status_id = done_status.get('id')
rls.last_edit = int(time.time())
db.commit()
# Mark movie done
mvie = db.query(Movie).filter_by(id = movie['id']).first()
mvie.status_id = done_status.get('id')
mvie.last_edit = int(time.time())
db.commit()
except:
log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc())
except:
log.error('Failed marking movie finished: %s', traceback.format_exc())
return True
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('protocol', '')))
return False
def getSearchProtocols(self):
download_protocols = fireEvent('download.enabled_protocols', merge = True)
provider_protocols = fireEvent('provider.enabled_protocols', merge = True)
if download_protocols and len(list(set(provider_protocols) & set(download_protocols))) == 0:
log.error('There aren\'t any providers enabled for your downloader (%s). Check your settings.', ','.join(download_protocols))
return []
for useless_provider in list(set(provider_protocols) - set(download_protocols)):
log.debug('Provider for "%s" enabled, but no downloader.', useless_provider)
search_protocols = download_protocols
if len(search_protocols) == 0:
log.error('There aren\'t any downloaders enabled. Please pick one in settings.')
return []
return search_protocols
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None):
if not preferred_quality: preferred_quality = {}
name = nzb['name']
size = nzb.get('size', 0)
nzb_words = re.split('\W+', simplifyString(name))
qualities = fireEvent('quality.all', single = True)
found = {}
for quality in qualities:
# Main in words
if quality['identifier'] in nzb_words:
found[quality['identifier']] = True
# Alt in words
if list(set(nzb_words) & set(quality['alternative'])):
found[quality['identifier']] = True
# Try guessing via quality tags
guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
if guess:
found[guess['identifier']] = True
# Hack for older movies that don't contain quality tag
year_name = fireEvent('scanner.name_year', name, single = True)
if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
if size > 3000: # Assume dvdr
log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size)
found['dvdr'] = True
else: # Assume dvdrip
log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', size)
found['dvdrip'] = True
# Allow other qualities
for allowed in preferred_quality.get('allow'):
if found.get(allowed):
del found[allowed]
return not (found.get(preferred_quality['identifier']) and len(found) == 1)
def correctYear(self, haystack, year, year_range):
if not isinstance(haystack, (list, tuple, set)):
haystack = [haystack]
year_name = {}
for string in haystack:
year_name = fireEvent('scanner.name_year', string, single = True)
if year_name and ((year - year_range) <= year_name.get('year') <= (year + year_range)):
log.debug('Movie year matches range: %s looking for %s', (year_name.get('year'), year))
return True
log.debug('Movie year doesn\'t matche range: %s looking for %s', (year_name.get('year'), year))
return False
def correctName(self, check_name, movie_name):
check_names = [check_name]
# Match names between "
try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
except: pass
# Match longest name between []
try: check_names.append(max(check_name.split('['), key = len))
except: pass
for check_name in list(set(check_names)):
check_movie = fireEvent('scanner.name_year', check_name, single = True)
try:
check_words = filter(None, re.split('\W+', check_movie.get('name', '')))
movie_words = filter(None, re.split('\W+', simplifyString(movie_name)))
if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0:
return True
except:
pass
return False
class SearchSetupError(Exception):
pass

View File

@@ -0,0 +1,6 @@
from couchpotato.core.media import MediaBase
class MovieTypeBase(MediaBase):
_type = 'movie'

View File

@@ -0,0 +1,6 @@
from .main import MovieBase
def start():
return MovieBase()
config = []

View File

@@ -2,9 +2,10 @@ from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.helpers.variable import getImdb, splitString
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt, \
mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.core.settings.model import Library, LibraryTitle, Movie, \
Release
from couchpotato.environment import Env
@@ -16,17 +17,23 @@ import time
log = CPLog(__name__)
class MoviePlugin(Plugin):
class MovieBase(MovieTypeBase):
default_dict = {
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}, 'files':{}, 'info': {}},
'library': {'titles': {}, 'files':{}},
'files': {},
'status': {}
'status': {},
'category': {},
}
def __init__(self):
# Initialize this type
super(MovieBase, self).__init__()
self.initType()
addApiView('movie.search', self.search, docs = {
'desc': 'Search the movie providers for a movie',
'params': {
@@ -139,7 +146,7 @@ class MoviePlugin(Plugin):
imdb_id = getImdb(str(movie_id))
if(imdb_id):
if imdb_id:
m = db.query(Movie).filter(Movie.library.has(identifier = imdb_id)).first()
else:
m = db.query(Movie).filter_by(id = movie_id).first()
@@ -161,19 +168,33 @@ class MoviePlugin(Plugin):
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
# query movie ids
q = db.query(Movie) \
.outerjoin(Movie.releases, Movie.library, Library.titles) \
.filter(LibraryTitle.default == True) \
.with_entities(Movie.id) \
.group_by(Movie.id)
# Filter on movie status
if status and len(status) > 0:
q = q.filter(or_(*[Movie.status.has(identifier = s) for s in status]))
statuses = fireEvent('status.get', status, single = len(status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Movie.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
q = q.filter(or_(*[Release.status.has(identifier = s) for s in release_status]))
q = q.join(Movie.releases)
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Release.status_id.in_(statuses))
# Only join when searching / ordering
if starts_with or search or order != 'release_order':
q = q.join(Movie.library, Library.titles) \
.filter(LibraryTitle.default == True)
# Add search filters
filter_or = []
if starts_with:
starts_with = toUnicode(starts_with.lower())
@@ -188,47 +209,79 @@ class MoviePlugin(Plugin):
if search:
filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%'))
if filter_or:
if len(filter_or) > 0:
q = q.filter(or_(*filter_or))
total_count = q.count()
if total_count == 0:
return 0, []
if order == 'release_order':
q = q.order_by(desc(Release.last_edit))
else:
q = q.order_by(asc(LibraryTitle.simple_title))
q = q.subquery()
q2 = db.query(Movie).join((q, q.c.id == Movie.id)) \
.options(joinedload_all('releases')) \
.options(joinedload_all('profile.types')) \
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = splt[0]
offset = 0 if len(splt) is 1 else splt[1]
q = q.limit(limit).offset(offset)
# Get all movie_ids in sorted order
movie_ids = [m.id for m in q.all()]
# List release statuses
releases = db.query(Release) \
.filter(Release.movie_id.in_(movie_ids)) \
.all()
release_statuses = dict((m, set()) for m in movie_ids)
releases_count = dict((m, 0) for m in movie_ids)
for release in releases:
release_statuses[release.movie_id].add('%d,%d' % (release.status_id, release.quality_id))
releases_count[release.movie_id] += 1
# Get main movie data
q2 = db.query(Movie) \
.options(joinedload_all('library.titles')) \
.options(joinedload_all('library.files')) \
.options(joinedload_all('status')) \
.options(joinedload_all('files'))
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = splt[0]
offset = 0 if len(splt) is 1 else splt[1]
q2 = q2.limit(limit).offset(offset)
q2 = q2.filter(Movie.id.in_(movie_ids))
results = q2.all()
movies = []
# Create dict by movie id
movie_dict = {}
for movie in results:
movies.append(movie.to_dict({
'profile': {'types': {}},
'releases': {'files':{}, 'info': {}},
movie_dict[movie.id] = movie
# List movies based on movie_ids order
movies = []
for movie_id in movie_ids:
releases = []
for r in release_statuses.get(movie_id):
x = splitString(r)
releases.append({'status_id': x[0], 'quality_id': x[1]})
# Merge releases with movie dict
movies.append(mergeDicts(movie_dict[movie_id].to_dict({
'library': {'titles': {}, 'files':{}},
'files': {},
}), {
'releases': releases,
'releases_count': releases_count.get(movie_id),
}))
db.expire_all()
return (total_count, movies)
return total_count, movies
def availableChars(self, status = None, release_status = None):
chars = ''
status = status or []
release_status = release_status or []
db = get_session()
@@ -238,37 +291,53 @@ class MoviePlugin(Plugin):
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
q = db.query(Movie) \
.outerjoin(Movie.releases, Movie.library, Library.titles, Movie.status) \
.options(joinedload_all('library.titles'))
q = db.query(Movie)
# Filter on movie status
if status and len(status) > 0:
q = q.filter(or_(*[Movie.status.has(identifier = s) for s in status]))
statuses = fireEvent('status.get', status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Movie.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
q = q.filter(or_(*[Release.status.has(identifier = s) for s in release_status]))
results = q.all()
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
for movie in results:
char = movie.library.titles[0].simple_title[0]
char = char if char in ascii_lowercase else '#'
if char not in chars:
chars += str(char)
q = q.join(Movie.releases) \
.filter(Release.status_id.in_(statuses))
q = q.join(Library, LibraryTitle) \
.with_entities(LibraryTitle.simple_title) \
.filter(LibraryTitle.default == True)
titles = q.all()
chars = set()
for title in titles:
try:
char = title[0][0]
char = char if char in ascii_lowercase else '#'
chars.add(str(char))
except:
log.error('Failed getting title for %s', title.libraries_id)
if len(chars) == 25:
break
db.expire_all()
return ''.join(sorted(chars, key = str.lower))
return ''.join(sorted(chars))
def listView(self, **kwargs):
status = splitString(kwargs.get('status', None))
release_status = splitString(kwargs.get('release_status', None))
limit_offset = kwargs.get('limit_offset', None)
starts_with = kwargs.get('starts_with', None)
search = kwargs.get('search', None)
order = kwargs.get('order', None)
status = splitString(kwargs.get('status'))
release_status = splitString(kwargs.get('release_status'))
limit_offset = kwargs.get('limit_offset')
starts_with = kwargs.get('starts_with')
search = kwargs.get('search')
order = kwargs.get('order')
total_movies, movies = self.list(
status = status,
@@ -313,7 +382,7 @@ class MoviePlugin(Plugin):
if title.default: default_title = title.title
fireEvent('notify.frontend', type = 'movie.busy.%s' % x, data = True)
fireEventAsync('library.update', identifier = movie.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))
fireEventAsync('library.update.movie', identifier = movie.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))
db.expire_all()
return {
@@ -339,7 +408,8 @@ class MoviePlugin(Plugin):
'movies': movies,
}
def add(self, params = {}, force_readd = True, search_after = True, update_library = False, status_id = None):
def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None):
if not params: params = {}
if not params.get('identifier'):
msg = 'Can\'t add movie without imdb identifier.'
@@ -358,23 +428,26 @@ class MoviePlugin(Plugin):
pass
library = fireEvent('library.add', single = True, attrs = params, update_after = update_library)
library = fireEvent('library.add.movie', single = True, attrs = params, update_after = update_library)
# Status
status_active, snatched_status, ignored_status, done_status, downloaded_status = \
fireEvent('status.get', ['active', 'snatched', 'ignored', 'done', 'downloaded'], single = True)
default_profile = fireEvent('profile.default', single = True)
cat_id = params.get('category_id')
db = get_session()
m = db.query(Movie).filter_by(library_id = library.get('id')).first()
added = True
do_search = False
search_after = search_after and self.conf('search_on_add', section = 'moviesearcher')
if not m:
m = Movie(
library_id = library.get('id'),
profile_id = params.get('profile_id', default_profile.get('id')),
status_id = status_id if status_id else status_active.get('id'),
category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None,
)
db.add(m)
db.commit()
@@ -383,7 +456,7 @@ class MoviePlugin(Plugin):
if search_after:
onComplete = self.createOnComplete(m.id)
fireEventAsync('library.update', params.get('identifier'), default_title = params.get('title', ''), on_complete = onComplete)
fireEventAsync('library.update.movie', params.get('identifier'), default_title = params.get('title', ''), on_complete = onComplete)
search_after = False
elif force_readd:
@@ -396,6 +469,7 @@ class MoviePlugin(Plugin):
fireEvent('release.delete', release.id, single = True)
m.profile_id = params.get('profile_id', default_profile.get('id'))
m.category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None
else:
log.debug('Movie already exists, not updating: %s', params)
added = False
@@ -452,6 +526,10 @@ class MoviePlugin(Plugin):
m.profile_id = kwargs.get('profile_id')
cat_id = kwargs.get('category_id')
if cat_id is not None:
m.category_id = tryInt(cat_id) if tryInt(cat_id) > 0 else None
# Remove releases
for rel in m.releases:
if rel.status_id is available_status.get('id'):
@@ -468,7 +546,7 @@ class MoviePlugin(Plugin):
fireEvent('movie.restatus', m.id)
movie_dict = m.to_dict(self.default_dict)
fireEventAsync('searcher.single', movie_dict, on_complete = self.createNotifyFront(movie_id))
fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(movie_id))
db.expire_all()
return {
@@ -503,7 +581,7 @@ class MoviePlugin(Plugin):
total_deleted = 0
new_movie_status = None
for release in movie.releases:
if delete_from in ['wanted', 'snatched']:
if delete_from in ['wanted', 'snatched', 'late']:
if release.status_id != done_status.get('id'):
db.delete(release)
total_deleted += 1
@@ -544,7 +622,7 @@ class MoviePlugin(Plugin):
log.debug('Can\'t restatus movie, doesn\'t seem to exist.')
return False
log.debug('Changing status for %s', (m.library.titles[0].title))
log.debug('Changing status for %s', m.library.titles[0].title)
if not m.profile:
m.status_id = done_status.get('id')
else:
@@ -566,7 +644,7 @@ class MoviePlugin(Plugin):
def onComplete():
db = get_session()
movie = db.query(Movie).filter_by(id = movie_id).first()
fireEventAsync('searcher.single', movie.to_dict(self.default_dict), on_complete = self.createNotifyFront(movie_id))
fireEventAsync('movie.searcher.single', movie.to_dict(self.default_dict), on_complete = self.createNotifyFront(movie_id))
db.expire_all()
return onComplete

View File

@@ -273,8 +273,25 @@ var MovieList = new Class({
})
).addClass('search');
var available_chars;
self.filter_menu.addEvent('open', function(){
self.navigation_search_input.focus();
// Get available chars and highlight
if(!available_chars && (self.navigation.isDisplayed() || self.navigation.isVisible()))
Api.request('movie.available_chars', {
'data': Object.merge({
'status': self.options.status
}, self.filter),
'onSuccess': function(json){
available_chars = json.chars
json.chars.split('').each(function(c){
self.letters[c.capitalize()].addClass('available')
})
}
});
});
self.filter_menu.addLink(
@@ -311,21 +328,6 @@ var MovieList = new Class({
}).inject(self.navigation_alpha);
});
// Get available chars and highlight
if(self.navigation.isDisplayed() || self.navigation.isVisible())
Api.request('movie.available_chars', {
'data': Object.merge({
'status': self.options.status
}, self.filter),
'onSuccess': function(json){
json.chars.split('').each(function(c){
self.letters[c.capitalize()].addClass('available')
})
}
});
// Add menu or hide
if (self.options.menu.length > 0)
self.options.menu.each(function(menu_item){
@@ -566,7 +568,7 @@ var MovieList = new Class({
}
self.store(json.movies);
self.addMovies(json.movies, json.total);
self.addMovies(json.movies, json.total || json.movies.length);
if(self.scrollspy) {
self.load_more.set('text', 'load more movies');
self.scrollspy.start();

View File

@@ -1,5 +1,5 @@
var MovieAction = new Class({
Implements: [Options],
class_name: 'action icon2',
@@ -124,6 +124,46 @@ MA.Release = new Class({
else
self.showHelper();
App.addEvent('movie.searcher.ended.'+self.movie.data.id, function(notification){
self.releases = null;
if(self.options_container){
self.options_container.destroy();
self.options_container = null;
}
});
},
show: function(e){
var self = this;
if(e)
(e).preventDefault();
if(self.releases)
self.createReleases();
else {
self.movie.busy(true);
Api.request('release.for_movie', {
'data': {
'id': self.movie.data.id
},
'onComplete': function(json){
self.movie.busy(false, 1);
if(json && json.releases){
self.releases = json.releases;
self.createReleases();
}
else
alert('Something went wrong, check the logs.');
}
});
}
},
createReleases: function(){
@@ -145,7 +185,7 @@ MA.Release = new Class({
new Element('span.provider', {'text': 'Provider'})
).inject(self.release_container)
self.movie.data.releases.sortBy('-info.score').each(function(release){
self.releases.each(function(release){
var status = Status.get(release.status_id),
quality = Quality.getProfile(release.quality_id) || {},
@@ -211,13 +251,11 @@ MA.Release = new Class({
}
});
if(self.last_release){
if(self.last_release)
self.release_container.getElement('#release_'+self.last_release.id).addClass('last_release');
}
if(self.next_release){
if(self.next_release)
self.release_container.getElement('#release_'+self.next_release.id).addClass('next_release');
}
if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){
@@ -230,7 +268,9 @@ MA.Release = new Class({
self.last_release ? new Element('a.button.orange', {
'text': 'the same release again',
'events': {
'click': self.trySameRelease.bind(self)
'click': function(){
self.download(self.last_release);
}
}
}) : null,
self.next_release && self.last_release ? new Element('span.or', {
@@ -239,7 +279,9 @@ MA.Release = new Class({
self.next_release ? [new Element('a.button.green', {
'text': self.last_release ? 'another release' : 'the best release',
'events': {
'click': self.tryNextRelease.bind(self)
'click': function(){
self.download(self.next_release);
}
}
}),
new Element('span.or', {
@@ -248,18 +290,15 @@ MA.Release = new Class({
)
}
self.last_release = null;
self.next_release = null;
}
},
show: function(e){
var self = this;
if(e)
(e).preventDefault();
self.createReleases();
// Show it
self.options_container.inject(self.movie, 'top');
self.movie.slide('in', self.options_container);
},
showHelper: function(e){
@@ -267,15 +306,29 @@ MA.Release = new Class({
if(e)
(e).preventDefault();
self.createReleases();
var has_available = false,
has_snatched = false;
if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){
self.movie.data.releases.each(function(release){
if(has_available && has_snatched) return;
var status = Status.get(release.status_id);
if(['snatched', 'downloaded', 'seeding'].contains(status.identifier))
has_snatched = true;
if(['available'].contains(status.identifier))
has_available = true;
});
if(has_available || has_snatched){
self.trynext_container = new Element('div.buttons.trynext').inject(self.movie.info_container);
self.trynext_container.adopt(
self.next_release ? [new Element('a.icon2.readd', {
'text': self.last_release ? 'Download another release' : 'Download the best release',
has_available ? [new Element('a.icon2.readd', {
'text': has_snatched ? 'Download another release' : 'Download the best release',
'events': {
'click': self.tryNextRelease.bind(self)
}
@@ -291,24 +344,7 @@ MA.Release = new Class({
new Element('a.icon2.completed', {
'text': 'mark this movie done',
'events': {
'click': function(){
Api.request('movie.delete', {
'data': {
'id': self.movie.get('id'),
'delete_from': 'wanted'
},
'onComplete': function(){
var movie = $(self.movie);
movie.set('tween', {
'duration': 300,
'onComplete': function(){
self.movie.destroy()
}
});
movie.tween('height', 0);
}
});
}
'click': self.markMovieDone.bind(self)
}
})
)
@@ -326,14 +362,14 @@ MA.Release = new Class({
var release_el = self.release_container.getElement('#release_'+release.id),
icon = release_el.getElement('.download.icon2');
self.movie.busy(true);
icon.addClass('icon spinner').removeClass('download');
Api.request('release.download', {
'data': {
'id': release.id
},
'onComplete': function(json){
self.movie.busy(false);
icon.removeClass('icon spinner');
if(json.success)
icon.addClass('completed');
@@ -365,24 +401,36 @@ MA.Release = new Class({
},
tryNextRelease: function(movie_id){
markMovieDone: function(){
var self = this;
self.createReleases();
if(self.last_release)
self.ignore(self.last_release);
if(self.next_release)
self.download(self.next_release);
Api.request('movie.delete', {
'data': {
'id': self.movie.get('id'),
'delete_from': 'wanted'
},
'onComplete': function(){
var movie = $(self.movie);
movie.set('tween', {
'duration': 300,
'onComplete': function(){
self.movie.destroy()
}
});
movie.tween('height', 0);
}
});
},
trySameRelease: function(movie_id){
tryNextRelease: function(movie_id){
var self = this;
if(self.last_release)
self.download(self.last_release);
Api.request('movie.searcher.try_next', {
'data': {
'id': self.movie.get('id')
}
});
}
@@ -408,7 +456,7 @@ MA.Trailer = new Class({
watch: function(offset){
var self = this;
var data_url = 'http://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18'
var data_url = 'https://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18'
var url = data_url.substitute({
'title': encodeURI(self.getTitle()),
'year': self.get('year'),
@@ -521,6 +569,11 @@ MA.Edit = new Class({
self.profile_select = new Element('select', {
'name': 'profile'
}),
self.category_select = new Element('select', {
'name': 'category'
}).grab(
new Element('option', {'value': -1, 'text': 'None'})
),
new Element('a.button.edit', {
'text': 'Save & Search',
'events': {
@@ -540,7 +593,34 @@ MA.Edit = new Class({
});
Quality.getActiveProfiles().each(function(profile){
// Fill categories
var categories = CategoryList.getAll();
if(categories.length == 0)
self.category_select.hide();
else {
self.category_select.show();
categories.each(function(category){
var category_id = category.data.id;
new Element('option', {
'value': category_id,
'text': category.data.label
}).inject(self.category_select);
if(self.movie.category && self.movie.category.data && self.movie.category.data.id == category_id)
self.category_select.set('value', category_id);
});
}
// Fill profiles
var profiles = Quality.getActiveProfiles();
if(profiles.length == 1)
self.profile_select.hide();
profiles.each(function(profile){
var profile_id = profile.id ? profile.id : profile.data.id;
@@ -549,8 +629,9 @@ MA.Edit = new Class({
'text': profile.label ? profile.label : profile.data.label
}).inject(self.profile_select);
if(self.movie.profile && self.movie.profile.data && self.movie.profile.data.id == profile_id)
if(self.movie.get('profile_id') == profile_id)
self.profile_select.set('value', profile_id);
});
}
@@ -566,7 +647,8 @@ MA.Edit = new Class({
'data': {
'id': self.movie.get('id'),
'default_title': self.title_select.get('value'),
'profile_id': self.profile_select.get('value')
'profile_id': self.profile_select.get('value'),
'category_id': self.category_select.get('value')
},
'useSpinner': true,
'spinnerTarget': $(self.movie),
@@ -697,6 +779,7 @@ MA.Delete = new Class({
var self = this;
(e).preventDefault();
self.movie.removeView();
self.movie.slide('out');
},
@@ -745,16 +828,45 @@ MA.Files = new Class({
self.el = new Element('a.directory', {
'title': 'Available files',
'events': {
'click': self.showFiles.bind(self)
'click': self.show.bind(self)
}
});
},
showFiles: function(e){
show: function(e){
var self = this;
(e).preventDefault();
if(self.releases)
self.showFiles();
else {
self.movie.busy(true);
Api.request('release.for_movie', {
'data': {
'id': self.movie.data.id
},
'onComplete': function(json){
self.movie.busy(false, 1);
if(json && json.releases){
self.releases = json.releases;
self.showFiles();
}
else
alert('Something went wrong, check the logs.');
}
});
}
},
showFiles: function(){
var self = this;
if(!self.options_container){
self.options_container = new Element('div.options').adopt(
self.files_container = new Element('div.files.table')
@@ -767,7 +879,7 @@ MA.Files = new Class({
new Element('span.is_available', {'text': 'Available'})
).inject(self.files_container)
Array.each(self.movie.data.releases, function(release){
Array.each(self.releases, function(release){
var rel = new Element('div.release').inject(self.files_container);

View File

@@ -425,7 +425,9 @@
}
.movies .data .quality .available { background-color: #578bc3; }
.movies .data .quality .snatched { background-color: #369545; }
.movies .data .quality .failed { background-color: #a43d34; }
.movies .data .quality .snatched { background-color: #a2a232; }
.movies .data .quality .seeding { background-color: #0a6819; }
.movies .data .quality .done {
background-color: #369545;
opacity: 1;
@@ -639,6 +641,12 @@
position: absolute;
z-index: 10;
}
@media only screen and (device-width: 768px) {
.trailer_container iframe {
margin-top: 25px;
}
}
.trailer_container.hide {
height: 0 !important;
}

View File

@@ -14,6 +14,7 @@ var Movie = new Class({
self.el = new Element('div.movie');
self.profile = Quality.getProfile(data.profile_id) || {};
self.category = CategoryList.getCategory(data.category_id) || {};
self.parent(self, options);
self.addEvents();
@@ -28,14 +29,14 @@ var Movie = new Class({
self.update.delay(2000, self, notification);
});
['movie.busy', 'searcher.started'].each(function(listener){
['movie.busy', 'movie.searcher.started'].each(function(listener){
App.addEvent(listener+'.'+self.data.id, function(notification){
if(notification.data)
self.busy(true)
});
})
App.addEvent('searcher.ended.'+self.data.id, function(notification){
App.addEvent('movie.searcher.ended.'+self.data.id, function(notification){
if(notification.data)
self.busy(false)
});
@@ -52,12 +53,12 @@ var Movie = new Class({
// Remove events
App.removeEvents('movie.update.'+self.data.id);
['movie.busy', 'searcher.started'].each(function(listener){
['movie.busy', 'movie.searcher.started'].each(function(listener){
App.removeEvents(listener+'.'+self.data.id);
})
},
busy: function(set_busy){
busy: function(set_busy, timeout){
var self = this;
if(!set_busy){
@@ -71,9 +72,9 @@ var Movie = new Class({
self.spinner.el.destroy();
self.spinner = null;
self.mask = null;
}, 400);
}, timeout || 400);
}
}, 1000)
}, timeout || 1000)
}
else if(!self.spinner) {
self.createMask();
@@ -111,6 +112,7 @@ var Movie = new Class({
self.removeView();
self.profile = Quality.getProfile(self.data.profile_id) || {};
self.category = CategoryList.getCategory(self.data.category_id) || {};
self.create();
self.busy(false);
@@ -177,20 +179,21 @@ var Movie = new Class({
});
// Add releases
self.data.releases.each(function(release){
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}
});
if(self.data.releases)
self.data.releases.each(function(release){
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}
});
Object.each(self.options.actions, function(action, key){
self.action[key.toLowerCase()] = action = new self.options.actions[key](self)

View File

@@ -159,13 +159,15 @@
display: inline-block;
margin-right: 10px;
}
.movie_result .options select[name=title] { width: 180px; }
.movie_result .options select[name=title] { width: 170px; }
.movie_result .options select[name=profile] { width: 90px; }
.movie_result .options select[name=category] { width: 80px; }
@media all and (max-width: 480px) {
.movie_result .options select[name=title] { width: 90px; }
.movie_result .options select[name=profile] { width: 60px; }
.movie_result .options select[name=profile] { width: 50px; }
.movie_result .options select[name=category] { width: 50px; }
}
@@ -217,26 +219,51 @@
position: absolute;
top: 20%;
left: 15px;
right: 60px;
right: 7px;
vertical-align: middle;
}
.movie_result .info h2 {
margin: 0;
font-weight: normal;
font-size: 20px;
padding: 0;
}
.search_form .info h2 {
position: absolute;
width: 100%;
}
.movie_result .info h2 .title {
display: block;
margin: 0;
text-overflow: ellipsis;
overflow: hidden;
white-space: nowrap;
width: 100%;
}
.movie_result .info h2 span {
padding: 0 5px;
position: absolute;
right: -60px;
}
.search_form .info h2 .title {
position: absolute;
width: 88%;
}
.movie_result .info h2 .year {
padding: 0 5px;
text-align: center;
position: absolute;
width: 12%;
right: 0;
}
@media all and (max-width: 480px) {
.search_form .info h2 .year {
font-size: 12px;
margin-top: 7px;
}
}
.search_form .mask,
.movie_result .mask {

View File

@@ -215,10 +215,11 @@ Block.Search.Item = new Class({
'click': self.showOptions.bind(self)
}
}).adopt(
new Element('div.info').adopt(
self.title = new Element('h2', {
'text': info.titles && info.titles.length > 0 ? info.titles[0] : 'Unknown'
}).adopt(
self.info_container = new Element('div.info').adopt(
new Element('h2').adopt(
self.title = new Element('span.title', {
'text': info.titles && info.titles.length > 0 ? info.titles[0] : 'Unknown'
}),
self.year = info.year ? new Element('span.year', {
'text': info.year
}) : null
@@ -274,7 +275,9 @@ Block.Search.Item = new Class({
add: function(e){
var self = this;
(e).preventDefault();
if(e)
(e).preventDefault();
self.loadingMask();
@@ -282,7 +285,8 @@ Block.Search.Item = new Class({
'data': {
'identifier': self.info.imdb,
'title': self.title_select.get('value'),
'profile_id': self.profile_select.get('value')
'profile_id': self.profile_select.get('value'),
'category_id': self.category_select.get('value')
},
'onComplete': function(json){
self.options_el.empty();
@@ -322,10 +326,10 @@ Block.Search.Item = new Class({
self.options_el.grab(
new Element('div', {
'class': self.info.in_wanted && self.info.in_wanted.profile || in_library ? 'in_library_wanted' : ''
'class': self.info.in_wanted && self.info.in_wanted.profile_id || in_library ? 'in_library_wanted' : ''
}).adopt(
self.info.in_wanted && self.info.in_wanted.profile ? new Element('span.in_wanted', {
'text': 'Already in wanted list: ' + self.info.in_wanted.profile.label
self.info.in_wanted && self.info.in_wanted.profile_id ? new Element('span.in_wanted', {
'text': 'Already in wanted list: ' + Quality.getProfile(self.info.in_wanted.profile_id).get('label')
}) : (in_library ? new Element('span.in_library', {
'text': 'Already in library: ' + in_library.join(', ')
}) : null),
@@ -335,7 +339,12 @@ Block.Search.Item = new Class({
self.profile_select = new Element('select', {
'name': 'profile'
}),
new Element('a.button', {
self.category_select = new Element('select', {
'name': 'category'
}).grab(
new Element('option', {'value': -1, 'text': 'None'})
),
self.add_button = new Element('a.button', {
'text': 'Add',
'events': {
'click': self.add.bind(self)
@@ -350,7 +359,28 @@ Block.Search.Item = new Class({
}).inject(self.title_select)
})
Quality.getActiveProfiles().each(function(profile){
// Fill categories
var categories = CategoryList.getAll();
if(categories.length == 0)
self.category_select.hide();
else {
self.category_select.show();
categories.each(function(category){
new Element('option', {
'value': category.data.id,
'text': category.data.label
}).inject(self.category_select);
});
}
// Fill profiles
var profiles = Quality.getActiveProfiles();
if(profiles.length == 1)
self.profile_select.hide();
profiles.each(function(profile){
new Element('option', {
'value': profile.id ? profile.id : profile.data.id,
'text': profile.label ? profile.label : profile.data.label
@@ -358,6 +388,11 @@ Block.Search.Item = new Class({
});
self.options_el.addClass('set');
if(categories.length == 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
!(self.info.in_wanted && self.info.in_wanted.profile_id || in_library))
self.add();
}
},

View File

@@ -0,0 +1,6 @@
from .main import MovieLibraryPlugin
def start():
return MovieLibraryPlugin()
config = []

View File

@@ -2,7 +2,7 @@ from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.media._base.library import LibraryBase
from couchpotato.core.settings.model import Library, LibraryTitle, File
from string import ascii_letters
import time
@@ -10,16 +10,20 @@ import traceback
log = CPLog(__name__)
class LibraryPlugin(Plugin):
class MovieLibraryPlugin(LibraryBase):
default_dict = {'titles': {}, 'files':{}}
def __init__(self):
addEvent('library.add', self.add)
addEvent('library.update', self.update)
addEvent('library.update_release_date', self.updateReleaseDate)
addEvent('library.add.movie', self.add)
addEvent('library.update.movie', self.update)
addEvent('library.update.movie.release_date', self.updateReleaseDate)
def add(self, attrs = {}, update_after = True):
def add(self, attrs = None, update_after = True):
if not attrs: attrs = {}
primary_provider = attrs.get('primary_provider', 'imdb')
db = get_session()
@@ -32,7 +36,7 @@ class LibraryPlugin(Plugin):
plot = toUnicode(attrs.get('plot')),
tagline = toUnicode(attrs.get('tagline')),
status_id = status.get('id'),
info = {},
info = {}
)
title = LibraryTitle(
@@ -48,7 +52,7 @@ class LibraryPlugin(Plugin):
# Update library info
if update_after is not False:
handle = fireEventAsync if update_after is 'async' else fireEvent
handle('library.update', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))
handle('library.update.movie', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))
library_dict = l.to_dict(self.default_dict)
@@ -57,29 +61,30 @@ class LibraryPlugin(Plugin):
def update(self, identifier, default_title = '', force = False):
if self.shuttingDown():
return
db = get_session()
library = db.query(Library).filter_by(identifier = identifier).first()
done_status = fireEvent('status.get', 'done', single = True)
library_dict = None
if library:
library_dict = library.to_dict(self.default_dict)
do_update = True
if library.status_id == done_status.get('id') and not force:
do_update = False
else:
info = fireEvent('movie.info', merge = True, identifier = identifier)
info = fireEvent('movie.info', merge = True, identifier = identifier)
# Don't need those here
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
# Don't need those here
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
if not info or len(info) == 0:
log.error('Could not update, no movie info to work with: %s', identifier)
return False
if not info or len(info) == 0:
log.error('Could not update, no movie info to work with: %s', identifier)
return False
# Main info
if do_update:

View File

@@ -0,0 +1,73 @@
from .main import MovieSearcher
import random
def start():
return MovieSearcher()
config = [{
'name': 'moviesearcher',
'order': 20,
'groups': [
{
'tab': 'searcher',
'name': 'movie_searcher',
'label': 'Movie search',
'description': 'Search options for movies',
'advanced': True,
'options': [
{
'name': 'always_search',
'default': False,
'migrate_from': 'searcher',
'type': 'bool',
'label': 'Always search',
'description': 'Search for movies even before there is a ETA. Enabling this will probably get you a lot of fakes.',
},
{
'name': 'run_on_launch',
'migrate_from': 'searcher',
'label': 'Run on launch',
'advanced': True,
'default': 0,
'type': 'bool',
'description': 'Force run the searcher after (re)start.',
},
{
'name': 'search_on_add',
'label': 'Search after add',
'advanced': True,
'default': 1,
'type': 'bool',
'description': 'Disable this to only search for movies on cron.',
},
{
'name': 'cron_day',
'migrate_from': 'searcher',
'label': 'Day',
'advanced': True,
'default': '*',
'type': 'string',
'description': '<strong>*</strong>: Every day, <strong>*/2</strong>: Every 2 days, <strong>1</strong>: Every first of the month. See <a href="http://packages.python.org/APScheduler/cronschedule.html">APScheduler</a> for details.',
},
{
'name': 'cron_hour',
'migrate_from': 'searcher',
'label': 'Hour',
'advanced': True,
'default': random.randint(0, 23),
'type': 'string',
'description': '<strong>*</strong>: Every hour, <strong>*/8</strong>: Every 8 hours, <strong>3</strong>: At 3, midnight.',
},
{
'name': 'cron_minute',
'migrate_from': 'searcher',
'label': 'Minute',
'advanced': True,
'default': random.randint(0, 59),
'type': 'string',
'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour."
},
],
},
],
}]

View File

@@ -3,15 +3,14 @@ from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.variable import md5, getTitle, splitString, \
possibleTitles
possibleTitles, getImdb
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.media._base.searcher.base import SearcherBase
from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.core.settings.model import Movie, Release, ReleaseInfo
from couchpotato.environment import Env
from datetime import date
from inspect import ismethod, isfunction
from sqlalchemy.exc import InterfaceError
import datetime
import random
import re
import time
@@ -20,30 +19,32 @@ import traceback
log = CPLog(__name__)
class Searcher(Plugin):
class MovieSearcher(SearcherBase, MovieTypeBase):
in_progress = False
def __init__(self):
addEvent('searcher.all', self.allMovies)
addEvent('searcher.single', self.single)
addEvent('searcher.correct_movie', self.correctMovie)
addEvent('searcher.download', self.download)
addEvent('searcher.try_next_release', self.tryNextRelease)
addEvent('searcher.could_be_released', self.couldBeReleased)
super(MovieSearcher, self).__init__()
addApiView('searcher.try_next', self.tryNextReleaseView, docs = {
addEvent('movie.searcher.all', self.searchAll)
addEvent('movie.searcher.all_view', self.searchAllView)
addEvent('movie.searcher.single', self.single)
addEvent('movie.searcher.correct_movie', self.correctMovie)
addEvent('movie.searcher.try_next_release', self.tryNextRelease)
addEvent('movie.searcher.could_be_released', self.couldBeReleased)
addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = {
'desc': 'Marks the snatched results as ignored and try the next best release',
'params': {
'id': {'desc': 'The id of the movie'},
},
})
addApiView('searcher.full_search', self.allMoviesView, docs = {
addApiView('movie.searcher.full_search', self.searchAllView, docs = {
'desc': 'Starts a full search for all wanted movies',
})
addApiView('searcher.progress', self.getProgress, docs = {
addApiView('movie.searcher.progress', self.getProgress, docs = {
'desc': 'Get the progress of current full search',
'return': {'type': 'object', 'example': """{
'progress': False || object, total & to_go,
@@ -51,42 +52,25 @@ class Searcher(Plugin):
})
if self.conf('run_on_launch'):
addEvent('app.load', self.allMovies)
addEvent('app.load', self.searchAll)
addEvent('app.load', self.setCrons)
addEvent('setting.save.searcher.cron_day.after', self.setCrons)
addEvent('setting.save.searcher.cron_hour.after', self.setCrons)
addEvent('setting.save.searcher.cron_minute.after', self.setCrons)
def searchAllView(self, **kwargs):
def setCrons(self):
fireEvent('schedule.cron', 'searcher.all', self.allMovies, day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute'))
def allMoviesView(self, **kwargs):
in_progress = self.in_progress
if not in_progress:
fireEventAsync('searcher.all')
fireEvent('notify.frontend', type = 'searcher.started', data = True, message = 'Full search started')
else:
fireEvent('notify.frontend', type = 'searcher.already_started', data = True, message = 'Full search already in progress')
fireEventAsync('movie.searcher.all')
return {
'success': not in_progress
'success': not self.in_progress
}
def getProgress(self, **kwargs):
return {
'progress': self.in_progress
}
def allMovies(self):
def searchAll(self):
if self.in_progress:
log.info('Search already in progress')
fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress')
return
self.in_progress = True
fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started')
db = get_session()
@@ -101,21 +85,22 @@ class Searcher(Plugin):
}
try:
search_types = self.getSearchTypes()
search_protocols = fireEvent('searcher.protocols', single = True)
for movie in movies:
movie_dict = movie.to_dict({
'category': {},
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
'files': {},
})
try:
self.single(movie_dict, search_types)
self.single(movie_dict, search_protocols)
except IndexError:
log.error('Forcing library update for %s, if you see this often, please report: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
fireEvent('library.update', movie_dict['library']['identifier'], force = True)
fireEvent('library.update.movie', movie_dict['library']['identifier'], force = True)
except:
log.error('Search failed for %s: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
@@ -130,25 +115,25 @@ class Searcher(Plugin):
self.in_progress = False
def single(self, movie, search_types = None):
def single(self, movie, search_protocols = None, manual = False):
# Find out search type
try:
if not search_types:
search_types = self.getSearchTypes()
if not search_protocols:
search_protocols = fireEvent('searcher.protocols', single = True)
except SearchSetupError:
return
done_status = fireEvent('status.get', 'done', single = True)
if not movie['profile'] or movie['status_id'] == done_status.get('id'):
if not movie['profile'] or (movie['status_id'] == done_status.get('id') and not manual):
log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.')
return
db = get_session()
pre_releases = fireEvent('quality.pre_releases', single = True)
release_dates = fireEvent('library.update_release_date', identifier = movie['library']['identifier'], merge = True)
release_dates = fireEvent('library.update.movie.release_date', identifier = movie['library']['identifier'], merge = True)
available_status, ignored_status, failed_status = fireEvent('status.get', ['available', 'ignored', 'failed'], single = True)
found_releases = []
@@ -160,7 +145,7 @@ class Searcher(Plugin):
fireEvent('movie.delete', movie['id'], single = True)
return
fireEvent('notify.frontend', type = 'searcher.started.%s' % movie['id'], data = True, message = 'Searching for "%s"' % default_title)
fireEvent('notify.frontend', type = 'movie.searcher.started.%s' % movie['id'], data = True, message = 'Searching for "%s"' % default_title)
ret = False
@@ -183,18 +168,18 @@ class Searcher(Plugin):
quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)
results = []
for search_type in search_types:
type_results = fireEvent('%s.search' % search_type, movie, quality, merge = True)
if type_results:
results += type_results
for search_protocol in search_protocols:
protocol_results = fireEvent('provider.search.%s.movie' % search_protocol, movie, quality, merge = True)
if protocol_results:
results += protocol_results
sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
if len(sorted_results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))
download_preference = self.conf('preferred_method')
download_preference = self.conf('preferred_method', section = 'searcher')
if download_preference != 'both':
sorted_results = sorted(sorted_results, key = lambda k: k['type'], reverse = (download_preference == 'torrent'))
sorted_results = sorted(sorted_results, key = lambda k: k['protocol'][:3], reverse = (download_preference == 'torrent'))
# Check if movie isn't deleted while searching
if not db.query(Movie).filter_by(id = movie.get('id')).first():
@@ -252,7 +237,7 @@ class Searcher(Plugin):
log.info('Ignored, score to low: %s', nzb['name'])
continue
downloaded = self.download(data = nzb, movie = movie)
downloaded = fireEvent('searcher.download', data = nzb, movie = movie, manual = manual, single = True)
if downloaded is True:
ret = True
break
@@ -276,107 +261,10 @@ class Searcher(Plugin):
if len(too_early_to_search) > 0:
log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
fireEvent('notify.frontend', type = 'searcher.ended.%s' % movie['id'], data = True)
fireEvent('notify.frontend', type = 'movie.searcher.ended.%s' % movie['id'], data = True)
return ret
def download(self, data, movie, manual = False):
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if downloader_enabled:
snatched_status = fireEvent('status.get', 'snatched', single = True)
# Download movie to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
download_result = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True)
log.debug('Downloader result: %s', download_result)
if download_result:
try:
# Mark release as snatched
db = get_session()
rls = db.query(Release).filter_by(identifier = md5(data['url'])).first()
if rls:
renamer_enabled = Env.setting('enabled', 'renamer')
done_status = fireEvent('status.get', 'done', single = True)
rls.status_id = done_status.get('id') if not renamer_enabled else snatched_status.get('id')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict())
# If renamer isn't used, mark movie done
if not renamer_enabled:
active_status = fireEvent('status.get', 'active', single = True)
done_status = fireEvent('status.get', 'done', single = True)
try:
if movie['status_id'] == active_status.get('id'):
for profile_type in movie['profile']['types']:
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking movie as finished: %s', log_movie)
# Mark release done
rls.status_id = done_status.get('id')
rls.last_edit = int(time.time())
db.commit()
# Mark movie done
mvie = db.query(Movie).filter_by(id = movie['id']).first()
mvie.status_id = done_status.get('id')
mvie.last_edit = int(time.time())
db.commit()
except:
log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc())
except:
log.error('Failed marking movie finished: %s', traceback.format_exc())
return True
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('type', '')))
return False
def getSearchTypes(self):
download_types = fireEvent('download.enabled_types', merge = True)
provider_types = fireEvent('provider.enabled_types', merge = True)
if download_types and len(list(set(provider_types) & set(download_types))) == 0:
log.error('There aren\'t any providers enabled for your downloader (%s). Check your settings.', ','.join(download_types))
raise NoProviders
for useless_provider in list(set(provider_types) - set(download_types)):
log.debug('Provider for "%s" enabled, but no downloader.', useless_provider)
search_types = download_types
if len(search_types) == 0:
log.error('There aren\'t any downloaders enabled. Please pick one in settings.')
raise NoDownloaders
return search_types
def correctMovie(self, nzb = None, movie = None, quality = None, **kwargs):
imdb_results = kwargs.get('imdb_results', False)
@@ -392,24 +280,30 @@ class Searcher(Plugin):
nzb_words = re.split('\W+', nzb_name)
# Make sure it has required words
required_words = splitString(self.conf('required_words').lower())
required_words = splitString(self.conf('required_words', section = 'searcher').lower())
try: required_words = list(set(required_words + splitString(movie['category']['required'].lower())))
except: pass
req_match = 0
for req_set in required_words:
req = splitString(req_set, '&')
req_match += len(list(set(nzb_words) & set(req))) == len(req)
if self.conf('required_words') and req_match == 0:
if len(required_words) > 0 and req_match == 0:
log.info2('Wrong: Required word missing: %s', nzb['name'])
return False
# Ignore releases
ignored_words = splitString(self.conf('ignored_words').lower())
ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
try: ignored_words = list(set(ignored_words + splitString(movie['category']['ignored'].lower())))
except: pass
ignored_match = 0
for ignored_set in ignored_words:
ignored = splitString(ignored_set, '&')
ignored_match += len(list(set(nzb_words) & set(ignored))) == len(ignored)
if self.conf('ignored_words') and ignored_match:
if len(ignored_words) > 0 and ignored_match:
log.info2("Wrong: '%s' contains 'ignored words'", (nzb['name']))
return False
@@ -423,7 +317,7 @@ class Searcher(Plugin):
preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)
# Contains lower quality string
if self.containsOtherQuality(nzb, movie_year = movie['library']['year'], preferred_quality = preferred_quality):
if fireEvent('searcher.contains_other_quality', nzb, movie_year = movie['library']['year'], preferred_quality = preferred_quality, single = True):
log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
return False
@@ -453,112 +347,25 @@ class Searcher(Plugin):
return True
# Check if nzb contains imdb link
if self.checkIMDB([nzb.get('description', '')], movie['library']['identifier']):
if getImdb(nzb.get('description', '')) == movie['library']['identifier']:
return True
for raw_title in movie['library']['titles']:
for movie_title in possibleTitles(raw_title['title']):
movie_words = re.split('\W+', simplifyString(movie_title))
if self.correctName(nzb['name'], movie_title):
if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True):
# if no IMDB link, at least check year range 1
if len(movie_words) > 2 and self.correctYear([nzb['name']], movie['library']['year'], 1):
if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], movie['library']['year'], 1, single = True):
return True
# if no IMDB link, at least check year
if len(movie_words) <= 2 and self.correctYear([nzb['name']], movie['library']['year'], 0):
if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], movie['library']['year'], 0, single = True):
return True
log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], movie_name, movie['library']['year']))
return False
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = {}):
name = nzb['name']
size = nzb.get('size', 0)
nzb_words = re.split('\W+', simplifyString(name))
qualities = fireEvent('quality.all', single = True)
found = {}
for quality in qualities:
# Main in words
if quality['identifier'] in nzb_words:
found[quality['identifier']] = True
# Alt in words
if list(set(nzb_words) & set(quality['alternative'])):
found[quality['identifier']] = True
# Try guessing via quality tags
guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
if guess:
found[guess['identifier']] = True
# Hack for older movies that don't contain quality tag
year_name = fireEvent('scanner.name_year', name, single = True)
if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
if size > 3000: # Assume dvdr
log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', (size))
found['dvdr'] = True
else: # Assume dvdrip
log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', (size))
found['dvdrip'] = True
# Allow other qualities
for allowed in preferred_quality.get('allow'):
if found.get(allowed):
del found[allowed]
return not (found.get(preferred_quality['identifier']) and len(found) == 1)
def checkIMDB(self, haystack, imdbId):
for string in haystack:
if 'imdb.com/title/' + imdbId in string:
return True
return False
def correctYear(self, haystack, year, year_range):
for string in haystack:
year_name = fireEvent('scanner.name_year', string, single = True)
if year_name and ((year - year_range) <= year_name.get('year') <= (year + year_range)):
log.debug('Movie year matches range: %s looking for %s', (year_name.get('year'), year))
return True
log.debug('Movie year doesn\'t matche range: %s looking for %s', (year_name.get('year'), year))
return False
def correctName(self, check_name, movie_name):
check_names = [check_name]
# Match names between "
try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
except: pass
# Match longest name between []
try: check_names.append(max(check_name.split('['), key = len))
except: pass
for check_name in list(set(check_names)):
check_movie = fireEvent('scanner.name_year', check_name, single = True)
try:
check_words = filter(None, re.split('\W+', check_movie.get('name', '')))
movie_words = filter(None, re.split('\W+', simplifyString(movie_name)))
if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0:
return True
except:
pass
return False
def couldBeReleased(self, is_pre_release, dates, year = None):
now = int(time.time())
@@ -569,7 +376,7 @@ class Searcher(Plugin):
else:
# For movies before 1972
if dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
return True
if is_pre_release:
@@ -596,7 +403,7 @@ class Searcher(Plugin):
def tryNextReleaseView(self, id = None, **kwargs):
trynext = self.tryNextRelease(id)
trynext = self.tryNextRelease(id, manual = True)
return {
'success': trynext
@@ -604,14 +411,14 @@ class Searcher(Plugin):
def tryNextRelease(self, movie_id, manual = False):
snatched_status, ignored_status = fireEvent('status.get', ['snatched', 'ignored'], single = True)
snatched_status, done_status, ignored_status = fireEvent('status.get', ['snatched', 'done', 'ignored'], single = True)
try:
db = get_session()
rels = db.query(Release).filter_by(
status_id = snatched_status.get('id'),
movie_id = movie_id
).all()
rels = db.query(Release) \
.filter_by(movie_id = movie_id) \
.filter(Release.status_id.in_([snatched_status.get('id'), done_status.get('id')])) \
.all()
for rel in rels:
rel.status_id = ignored_status.get('id')
@@ -619,7 +426,7 @@ class Searcher(Plugin):
movie_dict = fireEvent('movie.get', movie_id, single = True)
log.info('Trying next release for: %s', getTitle(movie_dict['library']))
fireEvent('searcher.single', movie_dict)
fireEvent('movie.searcher.single', movie_dict, manual = manual)
return True
@@ -629,9 +436,3 @@ class Searcher(Plugin):
class SearchSetupError(Exception):
pass
class NoDownloaders(SearchSetupError):
pass
class NoProviders(SearchSetupError):
pass

View File

@@ -0,0 +1,17 @@
from migrate.changeset.schema import create_column
from sqlalchemy.schema import MetaData, Column, Table, Index
from sqlalchemy.types import Integer
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
category_column = Column('category_id', Integer)
movie = Table('movie', meta, category_column)
create_column(category_column, movie)
Index('ix_movie_category_id', movie.c.category_id).create()
def downgrade(migrate_engine):
pass

View File

@@ -32,7 +32,9 @@ class Notification(Provider):
addEvent(listener, self.createNotifyHandler(listener))
def createNotifyHandler(self, listener):
def notify(message = None, group = {}, data = None):
def notify(message = None, group = None, data = None):
if not group: group = {}
if not self.conf('on_snatch', default = True) and listener == 'movie.snatched':
return
return self._notify(message = message, data = data if data else group, listener = listener)
@@ -45,9 +47,10 @@ class Notification(Provider):
def _notify(self, *args, **kwargs):
if self.isEnabled():
return self.notify(*args, **kwargs)
return False
def notify(self, message = '', data = {}, listener = None):
pass
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
def test(self, **kwargs):

View File

@@ -10,7 +10,8 @@ class Boxcar(Notification):
url = 'https://boxcar.io/devices/providers/7MNNXY3UIzVBwvzkKwkC/notifications'
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
try:
message = message.strip()

View File

@@ -7,6 +7,7 @@ from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from couchpotato.core.settings.model import Notification as Notif
from couchpotato.environment import Env
from operator import itemgetter
from sqlalchemy.sql.expression import or_
import threading
import time
@@ -18,9 +19,7 @@ log = CPLog(__name__)
class CoreNotifier(Notification):
m_lock = threading.Lock()
messages = []
listeners = []
m_lock = None
def __init__(self):
super(CoreNotifier, self).__init__()
@@ -51,10 +50,15 @@ class CoreNotifier(Notification):
addApiView('notification.listener', self.listener)
fireEvent('schedule.interval', 'core.check_messages', self.checkMessages, hours = 12, single = True)
fireEvent('schedule.interval', 'core.clean_messages', self.cleanMessages, seconds = 15, single = True)
addEvent('app.load', self.clean)
addEvent('app.load', self.checkMessages)
self.messages = []
self.listeners = []
self.m_lock = threading.Lock()
def clean(self):
db = get_session()
@@ -113,7 +117,7 @@ class CoreNotifier(Notification):
prop_name = 'messages.last_check'
last_check = tryInt(Env.prop(prop_name, default = 0))
messages = fireEvent('cp.messages', last_check = last_check, single = True)
messages = fireEvent('cp.messages', last_check = last_check, single = True) or []
for message in messages:
if message.get('time') > last_check:
@@ -124,7 +128,8 @@ class CoreNotifier(Notification):
Env.prop(prop_name, value = last_check)
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
db = get_session()
@@ -145,7 +150,8 @@ class CoreNotifier(Notification):
return True
def frontend(self, type = 'notification', data = {}, message = None):
def frontend(self, type = 'notification', data = None, message = None):
if not data: data = {}
log.debug('Notifying frontend')
@@ -169,8 +175,8 @@ class CoreNotifier(Notification):
except:
log.debug('Failed sending to listener: %s', traceback.format_exc())
self.listeners = []
self.m_lock.release()
self.cleanMessages()
log.debug('Done notifying frontend')
@@ -184,11 +190,14 @@ class CoreNotifier(Notification):
'result': messages,
})
self.m_lock.acquire()
self.listeners.append((callback, last_id))
self.m_lock.release()
def removeListener(self, callback):
self.m_lock.acquire()
for list_tuple in self.listeners:
try:
listener, last_id = list_tuple
@@ -196,15 +205,18 @@ class CoreNotifier(Notification):
self.listeners.remove(list_tuple)
except:
log.debug('Failed removing listener: %s', traceback.format_exc())
self.m_lock.release()
def cleanMessages(self):
if len(self.messages) == 0:
return
log.debug('Cleaning messages')
self.m_lock.acquire()
for message in self.messages:
if message['time'] < (time.time() - 15):
self.messages.remove(message)
time_ago = (time.time() - 15)
self.messages[:] = [m for m in self.messages if (m['time'] > time_ago)]
self.m_lock.release()
log.debug('Done cleaning messages')
@@ -215,16 +227,16 @@ class CoreNotifier(Notification):
self.m_lock.acquire()
recent = []
index = 0
for i in xrange(len(self.messages)):
index = len(self.messages) - i - 1
if self.messages[index]["message_id"] == last_id: break
recent = self.messages[index:]
try:
index = map(itemgetter('message_id'), self.messages).index(last_id)
recent = self.messages[index + 1:]
except:
pass
self.m_lock.release()
log.debug('Returning for %s %s messages', (last_id, len(recent or [])))
log.debug('Returning for %s %s messages', (last_id, len(recent)))
return recent or []
return recent
def listener(self, init = False, **kwargs):
@@ -237,6 +249,7 @@ class CoreNotifier(Notification):
notifications = db.query(Notif) \
.filter(or_(Notif.read == False, Notif.added > (time.time() - 259200))) \
.all()
for n in notifications:
ndict = n.to_dict()
ndict['type'] = 'notification'

View File

@@ -11,7 +11,8 @@ log = CPLog(__name__)
class Email(Notification):
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
# Extract all the settings from settings
from_address = self.conf('from')

View File

@@ -43,7 +43,8 @@ class Growl(Notification):
else:
log.error('Failed register of growl: %s', traceback.format_exc())
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
self.register()

View File

@@ -23,16 +23,15 @@ class NMJ(Notification):
def autoConfig(self, host = 'localhost', **kwargs):
database = ''
mount = ''
try:
terminal = telnetlib.Telnet(host)
except Exception:
log.error('Warning: unable to get a telnet session to %s', (host))
log.error('Warning: unable to get a telnet session to %s', host)
return self.failed()
log.debug('Connected to %s via telnet', (host))
log.debug('Connected to %s via telnet', host)
terminal.read_until('sh-3.00# ')
terminal.write('cat /tmp/source\n')
terminal.write('cat /tmp/netshare\n')
@@ -46,7 +45,7 @@ class NMJ(Notification):
device = match.group(2)
log.info('Found NMJ database %s on device %s', (database, device))
else:
log.error('Could not get current NMJ database on %s, NMJ is probably not running!', (host))
log.error('Could not get current NMJ database on %s, NMJ is probably not running!', host)
return self.failed()
if device.startswith('NETWORK_SHARE/'):
@@ -54,7 +53,7 @@ class NMJ(Notification):
if match:
mount = match.group().replace('127.0.0.1', host)
log.info('Found mounting url on the Popcorn Hour in configuration: %s', (mount))
log.info('Found mounting url on the Popcorn Hour in configuration: %s', mount)
else:
log.error('Detected a network share on the Popcorn Hour, but could not get the mounting url')
return self.failed()
@@ -65,17 +64,18 @@ class NMJ(Notification):
'mount': mount,
}
def addToLibrary(self, message = None, group = {}):
def addToLibrary(self, message = None, group = None):
if self.isDisabled(): return
if not group: group = {}
host = self.conf('host')
mount = self.conf('mount')
database = self.conf('database')
if mount:
log.debug('Try to mount network drive via url: %s', (mount))
log.debug('Try to mount network drive via url: %s', mount)
try:
data = self.urlopen(mount)
self.urlopen(mount)
except:
return False
@@ -98,11 +98,11 @@ class NMJ(Notification):
et = etree.fromstring(response)
result = et.findtext('returnValue')
except SyntaxError, e:
log.error('Unable to parse XML returned from the Popcorn Hour: %s', (e))
log.error('Unable to parse XML returned from the Popcorn Hour: %s', e)
return False
if int(result) > 0:
log.error('Popcorn Hour returned an errorcode: %s', (result))
log.error('Popcorn Hour returned an errorcode: %s', result)
return False
else:
log.info('NMJ started background scan')

View File

@@ -12,7 +12,8 @@ class Notifo(Notification):
url = 'https://api.notifo.com/v1/send_notification'
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
try:
params = {

View File

@@ -8,19 +8,17 @@ log = CPLog(__name__)
class NotifyMyAndroid(Notification):
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
nma = pynma.PyNMA()
keys = splitString(self.conf('api_key'))
nma.addkey(keys)
nma.developerkey(self.conf('dev_key'))
# hacky fix for the event type
# as it seems to be part of the message now
self.event = message.split(' ')[0]
response = nma.push(
application = self.default_title,
event = self.event,
event = message.split(' ')[0],
description = message,
priority = self.conf('priority'),
batch_mode = len(keys) > 1

View File

@@ -1,9 +1,10 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.helpers.variable import cleanHost, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from urllib2 import URLError
from urlparse import urlparse
from xml.dom import minidom
import traceback
@@ -16,16 +17,17 @@ class Plex(Notification):
super(Plex, self).__init__()
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = {}):
def addToLibrary(self, message = None, group = None):
if self.isDisabled(): return
if not group: group = {}
log.info('Sending notification to Plex')
hosts = [cleanHost(x.strip() + ':32400') for x in self.conf('host').split(",")]
hosts = self.getHosts(port = 32400)
for host in hosts:
source_type = ['movie']
base_url = '%slibrary/sections' % host
base_url = '%s/library/sections' % host
refresh_url = '%s/%%s/refresh' % base_url
try:
@@ -36,7 +38,7 @@ class Plex(Notification):
for s in sections:
if s.getAttribute('type') in source_type:
url = refresh_url % s.getAttribute('key')
x = self.urlopen(url)
self.urlopen(url)
except:
log.error('Plex library update failed for %s, Media Server not running: %s', (host, traceback.format_exc(1)))
@@ -44,9 +46,10 @@ class Plex(Notification):
return True
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
hosts = [x.strip() + ':3000' for x in self.conf('host').split(",")]
hosts = self.getHosts(port = 3000)
successful = 0
for host in hosts:
if self.send({'command': 'ExecBuiltIn', 'parameter': 'Notification(CouchPotato, %s)' % message}, host):
@@ -56,8 +59,7 @@ class Plex(Notification):
def send(self, command, host):
url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, tryUrlencode(command))
url = '%s/xbmcCmds/xbmcHttp/?%s' % (host, tryUrlencode(command))
headers = {}
try:
@@ -88,3 +90,18 @@ class Plex(Notification):
return {
'success': success or success2
}
def getHosts(self, port = None):
raw_hosts = splitString(self.conf('host'))
hosts = []
for h in raw_hosts:
h = cleanHost(h)
p = urlparse(h)
h = h.rstrip('/')
if port and not p.port:
h += ':%s' % port
hosts.append(h)
return hosts

View File

@@ -12,7 +12,8 @@ class Prowl(Notification):
'api': 'https://api.prowlapp.com/publicapi/add'
}
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
data = {
'apikey': self.conf('api_key'),

View File

@@ -11,7 +11,8 @@ class Pushalot(Notification):
'api': 'https://pushalot.com/api/sendmessage'
}
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
data = {
'AuthorizationToken': self.conf('auth_token'),

View File

@@ -11,7 +11,8 @@ class Pushover(Notification):
app_token = 'YkxHMYDZp285L265L3IwH3LmzkTaCy'
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
http_handler = HTTPSConnection("api.pushover.net:443")

View File

@@ -15,8 +15,9 @@ class Synoindex(Notification):
super(Synoindex, self).__init__()
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = {}):
def addToLibrary(self, message = None, group = None):
if self.isDisabled(): return
if not group: group = {}
command = [self.index_path, '-A', group.get('destination_dir')]
log.info('Executing synoindex command: %s ', command)
@@ -27,9 +28,8 @@ class Synoindex(Notification):
return True
except OSError, e:
log.error('Unable to run synoindex: %s', e)
return False
return True
return False
def test(self, **kwargs):
return {

View File

@@ -11,7 +11,8 @@ class Toasty(Notification):
'api': 'http://api.supertoasty.com/notify/%s?%s'
}
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
data = {
'title': self.default_title,

View File

@@ -13,7 +13,8 @@ class Trakt(Notification):
listen_to = ['movie.downloaded']
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
post_data = {
'username': self.conf('automation_username'),

View File

@@ -4,7 +4,8 @@ from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from couchpotato.environment import Env
from pytwitter import Api, parse_qsl
from pytwitter import Api
from urlparse import parse_qsl
import oauth2
log = CPLog(__name__)
@@ -29,7 +30,8 @@ class Twitter(Notification):
addApiView('notify.%s.auth_url' % self.getName().lower(), self.getAuthorizationUrl)
addApiView('notify.%s.credentials' % self.getName().lower(), self.getCredentials)
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
api = Api(self.consumer_key, self.consumer_secret, self.conf('access_token_key'), self.conf('access_token_secret'))
@@ -50,7 +52,7 @@ class Twitter(Notification):
try:
if direct_message:
for user in direct_message_users.split():
api.PostDirectMessage(user, '[%s] %s' % (self.default_title, message))
api.PostDirectMessage('[%s] %s' % (self.default_title, message), screen_name = user)
else:
update_message = '[%s] %s' % (self.default_title, message)
if len(update_message) > 140:

View File

@@ -38,6 +38,14 @@ config = [{
'advanced': True,
'description': 'Only update the first host when movie snatched, useful for synced XBMC',
},
{
'name': 'remote_dir_scan',
'label': 'Remote Folder Scan',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Only scan new movie folder at remote XBMC servers. Works if movie location is the same.',
},
{
'name': 'on_snatch',
'default': 0,

View File

@@ -13,11 +13,12 @@ log = CPLog(__name__)
class XBMC(Notification):
listen_to = ['renamer.after']
listen_to = ['renamer.after', 'movie.snatched']
use_json_notifications = {}
http_time_between_calls = 0
def notify(self, message = '', data = {}, listener = None):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
hosts = splitString(self.conf('host'))
@@ -33,15 +34,19 @@ class XBMC(Notification):
('GUI.ShowNotification', {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}),
]
if not self.conf('only_first') or hosts.index(host) == 0:
calls.append(('VideoLibrary.Scan', {}))
if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
param = {}
if self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0]):
param = {'directory': data['destination_dir']}
calls.append(('VideoLibrary.Scan', param))
max_successful += len(calls)
response = self.request(host, calls)
else:
response = self.notifyXBMCnoJSON(host, {'title':self.default_title, 'message':message})
if not self.conf('only_first') or hosts.index(host) == 0:
if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
response += self.request(host, [('VideoLibrary.Scan', {})])
max_successful += 1
@@ -49,9 +54,9 @@ class XBMC(Notification):
try:
for result in response:
if (result.get('result') and result['result'] == 'OK'):
if result.get('result') and result['result'] == 'OK':
successful += 1
elif (result.get('error')):
elif result.get('error'):
log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))
except:
@@ -68,7 +73,7 @@ class XBMC(Notification):
('JSONRPC.Version', {})
])
for result in response:
if (result.get('result') and type(result['result']['version']).__name__ == 'int'):
if result.get('result') and type(result['result']['version']).__name__ == 'int':
# only v2 and v4 return an int object
# v6 (as of XBMC v12(Frodo)) is required to send notifications
xbmc_rpc_version = str(result['result']['version'])
@@ -81,15 +86,15 @@ class XBMC(Notification):
# send the text message
resp = self.notifyXBMCnoJSON(host, {'title':self.default_title, 'message':message})
for result in resp:
if (result.get('result') and result['result'] == 'OK'):
if result.get('result') and result['result'] == 'OK':
log.debug('Message delivered successfully!')
success = True
break
elif (result.get('error')):
elif result.get('error'):
log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))
break
elif (result.get('result') and type(result['result']['version']).__name__ == 'dict'):
elif result.get('result') and type(result['result']['version']).__name__ == 'dict':
# XBMC JSON-RPC v6 returns an array object containing
# major, minor and patch number
xbmc_rpc_version = str(result['result']['version']['major'])
@@ -104,16 +109,16 @@ class XBMC(Notification):
# send the text message
resp = self.request(host, [('GUI.ShowNotification', {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})])
for result in resp:
if (result.get('result') and result['result'] == 'OK'):
if result.get('result') and result['result'] == 'OK':
log.debug('Message delivered successfully!')
success = True
break
elif (result.get('error')):
elif result.get('error'):
log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))
break
# error getting version info (we do have contact with XBMC though)
elif (result.get('error')):
elif result.get('error'):
log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))
log.debug('Use JSON notifications: %s ', self.use_json_notifications)

View File

@@ -26,6 +26,10 @@ class Automation(Plugin):
movie_ids = []
for imdb_id in movies:
if self.shuttingDown():
break
prop_name = 'automation.added.%s' % imdb_id
added = Env.prop(prop_name, default = False)
if not added:
@@ -35,5 +39,11 @@ class Automation(Plugin):
Env.prop(prop_name, True)
for movie_id in movie_ids:
if self.shuttingDown():
break
movie_dict = fireEvent('movie.get', movie_id, single = True)
fireEvent('searcher.single', movie_dict)
fireEvent('movie.searcher.single', movie_dict)
return True

View File

@@ -2,7 +2,7 @@ from StringIO import StringIO
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString, \
toUnicode
from couchpotato.core.helpers.variable import getExt, md5
from couchpotato.core.helpers.variable import getExt, md5, isLocalIP
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from multipartpost import MultipartPostHandler
@@ -12,6 +12,7 @@ from urlparse import urlparse
import cookielib
import glob
import gzip
import inspect
import math
import os.path
import re
@@ -24,10 +25,14 @@ log = CPLog(__name__)
class Plugin(object):
_class_name = None
plugin_path = None
enabled_option = 'enabled'
auto_register_static = True
_needs_shutdown = False
_running = None
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20130519 Firefox/24.0'
http_last_use = {}
@@ -35,16 +40,29 @@ class Plugin(object):
http_failed_request = {}
http_failed_disabled = {}
def __new__(typ, *args, **kwargs):
new_plugin = super(Plugin, typ).__new__(typ)
new_plugin.registerPlugin()
return new_plugin
def registerPlugin(self):
addEvent('app.do_shutdown', self.doShutdown)
addEvent('plugin.running', self.isRunning)
self._running = []
def conf(self, attr, value = None, default = None):
return Env.setting(attr, self.getName().lower(), value = value, default = default)
if self.auto_register_static:
self.registerStatic(inspect.getfile(self.__class__))
def conf(self, attr, value = None, default = None, section = None):
class_name = self.getName().lower().split(':')
return Env.setting(attr, section = section if section else class_name[0].lower(), value = value, default = default)
def getName(self):
return self.__class__.__name__
return self._class_name or self.__class__.__name__
def setName(self, name):
self._class_name = name
def renderTemplate(self, parent_file, templ, **params):
@@ -124,7 +142,7 @@ class Plugin(object):
if self.http_failed_disabled[host] > (time.time() - 900):
log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
if not show_error:
raise
raise Exception('Disabled calls to %s for 15 minutes because so many failed requests')
else:
return ''
else:
@@ -187,7 +205,7 @@ class Plugin(object):
self.http_failed_request[host] += 1
# Disable temporarily
if self.http_failed_request[host] > 5:
if self.http_failed_request[host] > 5 and not isLocalIP(host):
self.http_failed_disabled[host] = time.time()
except:
@@ -241,8 +259,8 @@ class Plugin(object):
def getCache(self, cache_key, url = None, **kwargs):
cache_key = md5(ss(cache_key))
cache = Env.get('cache').get(cache_key)
cache_key_md5 = md5(ss(cache_key))
cache = Env.get('cache').get(cache_key_md5)
if cache:
if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
return cache
@@ -266,8 +284,9 @@ class Plugin(object):
return ''
def setCache(self, cache_key, value, timeout = 300):
cache_key_md5 = md5(ss(cache_key))
log.debug('Setting cache %s', cache_key)
Env.get('cache').set(cache_key, value, timeout)
Env.get('cache').set(cache_key_md5, value, timeout)
return value
def createNzbName(self, data, movie):
@@ -276,9 +295,9 @@ class Plugin(object):
def createFileName(self, data, filedata, movie):
name = os.path.join(self.createNzbName(data, movie))
if data.get('type') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
if data.get('protocol', data.get('type')) == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
return '%s.%s' % (name, 'rar')
return '%s.%s' % (name, data.get('type'))
return '%s.%s' % (name, data.get('protocol'))
def cpTag(self, movie):
if Env.setting('enabled', 'renamer'):
@@ -290,4 +309,4 @@ class Plugin(object):
return not self.isEnabled()
def isEnabled(self):
return self.conf(self.enabled_option) or self.conf(self.enabled_option) == None
return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None

View File

@@ -12,7 +12,7 @@ if os.name == 'nt':
except:
# todo:: subclass ImportError for missing dependencies, vs. broken plugins?
raise ImportError("Missing the win32file module, which is a part of the prerequisite \
pywin32 package. You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/");
pywin32 package. You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/")
else:
import win32file #@UnresolvedImport

View File

@@ -0,0 +1,6 @@
from .main import CategoryPlugin
def start():
return CategoryPlugin()
config = []

View File

@@ -0,0 +1,122 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie, Category
log = CPLog(__name__)
class CategoryPlugin(Plugin):
    """CRUD + ordering for movie categories, exposed over the internal API.

    Registered API views:
      category.list       -> allView()
      category.save       -> save()
      category.save_order -> saveOrder()
      category.delete     -> delete()
    Also answers the in-process 'category.all' event.
    """

    def __init__(self):
        addEvent('category.all', self.all)

        addApiView('category.save', self.save)
        addApiView('category.save_order', self.saveOrder)
        addApiView('category.delete', self.delete)

        addApiView('category.list', self.allView, docs = {
            'desc': 'List all available categories',
            'return': {'type': 'object', 'example': """{
            'success': True,
            'list': array, categories
}"""}
        })

    def allView(self, **kwargs):
        # Thin API wrapper: same data as all(), wrapped in a success envelope.
        return {
            'success': True,
            'list': self.all()
        }

    def all(self):
        """Return every category as a plain dict, detached from the session."""
        db = get_session()
        categories = db.query(Category).all()

        temp = []
        for category in categories:
            temp.append(category.to_dict())

        # Expire so later requests re-read fresh rows from the DB.
        db.expire_all()
        return temp

    def save(self, **kwargs):
        """Create or update a category from API kwargs; returns the saved dict.

        If no row matches kwargs['id'] a new Category is inserted, so this
        doubles as the create endpoint.
        """
        db = get_session()

        c = db.query(Category).filter_by(id = kwargs.get('id')).first()
        if not c:
            c = Category()
            db.add(c)

        # Keep the existing order when none is supplied; fall back to 0.
        c.order = kwargs.get('order', c.order if c.order else 0)
        c.label = toUnicode(kwargs.get('label'))
        c.path = toUnicode(kwargs.get('path'))
        c.ignored = toUnicode(kwargs.get('ignored'))
        c.preferred = toUnicode(kwargs.get('preferred'))
        c.required = toUnicode(kwargs.get('required'))
        c.destination = toUnicode(kwargs.get('destination'))

        db.commit()

        # Serialize before returning so the caller never touches ORM state.
        category_dict = c.to_dict()

        return {
            'success': True,
            'category': category_dict
        }

    def saveOrder(self, **kwargs):
        """Persist the drag-and-drop ordering: kwargs['ids'] in display order."""
        db = get_session()

        order = 0
        for category_id in kwargs.get('ids', []):
            c = db.query(Category).filter_by(id = category_id).first()
            c.order = order
            order += 1

        db.commit()

        return {
            'success': True
        }

    def delete(self, id = None, **kwargs):
        """Delete the category with the given id and detach it from its movies.

        Returns {'success': bool, 'message': str}; failures are logged, not raised.
        """
        db = get_session()

        success = False
        message = ''
        try:
            c = db.query(Category).filter_by(id = id).first()
            db.delete(c)
            db.commit()

            # Force defaults on all empty category movies
            self.removeFromMovie(id)

            success = True
        except Exception, e:
            # NOTE(review): relies on log.error returning the formatted
            # message so it can be passed back to the frontend — confirm
            # against CPLog's implementation.
            message = log.error('Failed deleting category: %s', e)

        db.expire_all()
        return {
            'success': success,
            'message': message
        }

    def removeFromMovie(self, category_id):
        """Null out category_id on every movie that pointed at the deleted row."""
        db = get_session()
        movies = db.query(Movie).filter(Movie.category_id == category_id).all()

        if len(movies) > 0:
            for movie in movies:
                movie.category_id = None
            db.commit()

View File

@@ -0,0 +1,82 @@
/* Styles for the Categories settings tab (category list + drag ordering). */

/* "Create a category" call-to-action shown above/below the list. */
.add_new_category {
	padding: 20px;
	display: block;
	text-align: center;
	font-size: 20px;
	border-bottom: 1px solid rgba(255,255,255,0.2);
}

/* One editable category row; positioned so the delete icon can be absolute. */
.category {
	border-bottom: 1px solid rgba(255,255,255,0.2);
	position: relative;
}

	/* Delete icon, pinned to the right edge of the row. */
	.category > .delete {
		position: absolute;
		padding: 16px;
		right: 0;
		cursor: pointer;
		opacity: 0.6;
		color: #fd5353;
	}

	.category > .delete:hover {
		opacity: 1;
	}

	/* Suppress the global form-row hover highlight inside a category. */
	.category .ctrlHolder:hover {
		background: none;
	}

	/* Field hints are nearly invisible until the row is hovered. */
	.category .formHint {
		width: 250px !important;
		margin: 0 !important;
		opacity: 0.1;
	}

	.category:hover .formHint {
		opacity: 1;
	}

/* Drag-and-drop ordering widget. */
#category_ordering {
}

	#category_ordering ul {
		float: left;
		margin: 0;
		width: 275px;
		padding: 0;
	}

	/* Sortable rows: grab cursor signals draggability. */
	#category_ordering li {
		cursor: -webkit-grab;
		cursor: -moz-grab;
		cursor: grab;
		border-bottom: 1px solid rgba(255,255,255,0.2);
		padding: 0 5px;
	}

	#category_ordering li:last-child { border: 0; }

	#category_ordering li .check {
		margin: 2px 10px 0 0;
		vertical-align: top;
	}

	#category_ordering li > span {
		display: inline-block;
		height: 20px;
		vertical-align: top;
		line-height: 20px;
	}

	/* Drag handle, image shared with the profile (quality) ordering UI. */
	#category_ordering li .handle {
		background: url('../../static/profile_plugin/handle.png') center;
		width: 20px;
		float: right;
	}

	#category_ordering .formHint {
		clear: none;
		float: right;
		width: 250px;
		margin: 0;
	}

View File

@@ -0,0 +1,332 @@
// Singleton that owns the "Categories" settings tab: builds the editable
// category list, the drag-ordering widget, and injects per-category
// destination fields into the renamer settings group.
var CategoryListBase = new Class({

	initialize: function(){
		var self = this;

		// Defer all UI work until the app shell has loaded.
		App.addEvent('load', self.addSettings.bind(self));
	},

	// Seed the list from the backend payload; each entry becomes a Category.
	setup: function(categories){
		var self = this;

		self.categories = []

		Array.each(categories, self.createCategory.bind(self));

	},

	addSettings: function(){
		var self = this;

		self.settings = App.getPage('Settings')
		// Build our own subtab once the settings page constructs its tabs.
		self.settings.addEvent('create', function(){
			var tab = self.settings.createSubTab('category', {
				'label': 'Categories',
				'name': 'category',
				'subtab_label': 'Category & filtering'
			}, self.settings.tabs.searcher ,'searcher');

			self.tab = tab.tab;
			self.content = tab.content;

			self.createList();
			self.createOrdering();

		})

		// Add categories in renamer
		self.settings.addEvent('create', function(){
			var renamer_group = self.settings.tabs.renamer.groups.renamer;
			self.categories.each(function(category){

				// One directory picker per category, saved straight back
				// onto the category record when edited.
				var input = new Option.Directory('section_name', 'option.name', category.get('destination'), {
					'name': category.get('label')
				});
				input.inject(renamer_group.getElement('.renamer_to'));
				input.fireEvent('injected');

				input.save = function(){
					category.data.destination = input.getValue();
					category.save();
				};

			});
		})

	},

	createList: function(){
		var self = this;

		var count = self.categories.length;

		// Group header + container + "create" link; clicking the link
		// appends a fresh, unsaved category row.
		self.settings.createGroup({
			'label': 'Categories',
			'description': 'Create categories, each one extending global filters. (Needs refresh \'' +(App.isMac() ? 'CMD+R' : 'F5')+ '\' after editing)'
		}).inject(self.content).adopt(
			self.category_container = new Element('div.container'),
			new Element('a.add_new_category', {
				'text': count > 0 ? 'Create another category' : 'Click here to create a category.',
				'events': {
					'click': function(){
						var category = self.createCategory();
						$(category).inject(self.category_container)
					}
				}
			})
		);

		// Add categories, that aren't part of the core (for editing)
		Array.each(self.categories, function(category){
			$(category).inject(self.category_container)
		});

	},

	// Look up a wrapped Category by backend id.
	getCategory: function(id){
		return this.categories.filter(function(category){
			return category.data.id == id
		}).pick()
	},

	getAll: function(){
		return this.categories;
	},

	// Wrap raw data in a Category; new (unsaved) ones get a random id.
	createCategory: function(data){
		var self = this;

		var data = data || {'id': randomString()}

		var category = new Category(data)
		self.categories.include(category)

		return category;
	},

	createOrdering: function(){
		var self = this;

		var category_list;
		var group = self.settings.createGroup({
			'label': 'Category ordering'
		}).adopt(
			new Element('.ctrlHolder#category_ordering').adopt(
				new Element('label[text=Order]'),
				category_list = new Element('ul'),
				new Element('p.formHint', {
					'html': 'Change the order the categories are in the dropdown list.<br />First one will be default.'
				})
			)
		).inject(self.content)

		// One <li> per category, carrying the id for saveOrdering.
		Array.each(self.categories, function(category){
			new Element('li', {'data-id': category.data.id}).adopt(
				new Element('span.category_label', {
					'text': category.data.label
				}),
				new Element('span.handle')
			).inject(category_list);
		});

		// Sortable
		self.category_sortable = new Sortables(category_list, {
			'revert': true,
			'handle': '',
			'opacity': 0.5,
			'onComplete': self.saveOrdering.bind(self)
		});
	},

	// Persist the current <li> order to the backend.
	saveOrdering: function(){
		var self = this;

		var ids = [];
		self.category_sortable.list.getElements('li').each(function(el, nr){
			ids.include(el.get('data-id'));
		});

		Api.request('category.save_order', {
			'data': {
				'ids': ids
			}
		});

	}

})

window.CategoryList = new CategoryListBase();
// One editable category row: renders the form, debounces saves, and
// handles deletion (with confirmation for persisted categories).
var Category = new Class({

	data: {},

	initialize: function(data){
		var self = this;

		self.data = data;

		self.create();

		// Selects save immediately; text inputs debounce 300ms (see save()).
		self.el.addEvents({
			'change:relay(select)': self.save.bind(self, 0),
			'keyup:relay(input[type=text])': self.save.bind(self, [300])
		});

	},

	// Build the DOM for this category's edit form.
	create: function(){
		var self = this;

		var data = self.data;

		self.el = new Element('div.category').adopt(
			self.delete_button = new Element('span.delete.icon2', {
				'events': {
					'click': self.del.bind(self)
				}
			}),
			new Element('.category_label.ctrlHolder').adopt(
				new Element('label', {'text':'Name'}),
				new Element('input.inlay', {
					'type':'text',
					'value': data.label,
					'placeholder': 'Example: Kids, Horror or His'
				}),
				new Element('p.formHint', {'text': 'See global filters for explanation.'})
			),
			new Element('.category_preferred.ctrlHolder').adopt(
				new Element('label', {'text':'Preferred'}),
				new Element('input.inlay', {
					'type':'text',
					'value': data.preferred,
					'placeholder': 'Blu-ray, DTS'
				})
			),
			new Element('.category_required.ctrlHolder').adopt(
				new Element('label', {'text':'Required'}),
				new Element('input.inlay', {
					'type':'text',
					'value': data.required,
					'placeholder': 'Example: DTS, AC3 & English'
				})
			),
			new Element('.category_ignored.ctrlHolder').adopt(
				new Element('label', {'text':'Ignored'}),
				new Element('input.inlay', {
					'type':'text',
					'value': data.ignored,
					'placeholder': 'Example: dubbed, swesub, french'
				})
			)
		);

		self.makeSortable()

	},

	// Debounced save: collapses rapid keystrokes into one API call.
	save: function(delay){
		var self = this;

		if(self.save_timer) clearTimeout(self.save_timer);
		self.save_timer = (function(){

			var data = self.getData();

			Api.request('category.save', {
				'data': self.getData(),
				'useSpinner': true,
				'spinnerOptions': {
					'target': self.el
				},
				'onComplete': function(json){
					if(json.success){
						// Adopt the server copy (real id for new categories).
						self.data = json.category;
					}
				}
			});

		}).delay(delay || 0, self)

	},

	// Collect current form values; destination is managed in the renamer tab.
	getData: function(){
		var self = this;

		var data = {
			'id' : self.data.id,
			'label' : self.el.getElement('.category_label input').get('value'),
			'required' : self.el.getElement('.category_required input').get('value'),
			'preferred' : self.el.getElement('.category_preferred input').get('value'),
			'ignored' : self.el.getElement('.category_ignored input').get('value'),
			'destination': self.data.destination
		}

		return data
	},

	del: function(){
		var self = this;

		// Never-saved category: nothing on the server, just drop the row.
		if(self.data.label == undefined){
			self.el.destroy();
			return;
		}

		var label = self.el.getElement('.category_label input').get('value');
		var qObj = new Question('Are you sure you want to delete <strong>"'+label+'"</strong>?', '', [{
			'text': 'Delete "'+label+'"',
			'class': 'delete',
			'events': {
				'click': function(e){
					(e).preventDefault();
					Api.request('category.delete', {
						'data': {
							'id': self.data.id
						},
						'useSpinner': true,
						'spinnerOptions': {
							'target': self.el
						},
						'onComplete': function(json){
							if(json.success) {
								qObj.close();
								self.el.destroy();
							} else {
								alert(json.message);
							}
						}
					});
				}
			}
		}, {
			'text': 'Cancel',
			'cancel': true
		}]);

	},

	makeSortable: function(){
		var self = this;

		// NOTE(review): self.category_container is never set on Category —
		// it exists on CategoryListBase. This Sortables likely binds to
		// undefined; confirm whether this call is dead code.
		self.sortable = new Sortables(self.category_container, {
			'revert': true,
			'handle': '.handle',
			'opacity': 0.5,
			'onComplete': self.save.bind(self, 300)
		});
	},

	get: function(attr){
		return this.data[attr]
	},

	toElement: function(){
		return this.el
	}

});

Binary file not shown.

After

Width:  |  Height:  |  Size: 160 B

View File

@@ -4,8 +4,9 @@ from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie
from couchpotato.core.settings.model import Movie, Library, LibraryTitle
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import asc
import random as rndm
import time
@@ -40,67 +41,81 @@ class Dashboard(Plugin):
profile_pre[profile.get('id')] = contains
# Get all active movies
active_status, snatched_status, downloaded_status, available_status = fireEvent('status.get', ['active', 'snatched', 'downloaded', 'available'], single = True)
subq = db.query(Movie).filter(Movie.status_id == active_status.get('id')).subquery()
q = db.query(Movie).join((subq, subq.c.id == Movie.id)) \
.options(joinedload_all('releases')) \
.options(joinedload_all('profile.types')) \
.options(joinedload_all('library.titles')) \
.options(joinedload_all('library.files')) \
.options(joinedload_all('status')) \
.options(joinedload_all('files'))
# Add limit
limit = 12
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = tryInt(splt[0])
all_movies = q.all()
# Get all active movies
active_status = fireEvent('status.get', ['active'], single = True)
q = db.query(Movie) \
.join(Library) \
.filter(Movie.status_id == active_status.get('id')) \
.with_entities(Movie.id, Movie.profile_id, Library.info, Library.year) \
.group_by(Movie.id)
if random:
rndm.shuffle(all_movies)
if not random:
q = q.join(LibraryTitle) \
.filter(LibraryTitle.default == True) \
.order_by(asc(LibraryTitle.simple_title))
active = q.all()
movies = []
for movie in all_movies:
pp = profile_pre.get(movie.profile.id)
eta = movie.library.info.get('release_date', {}) or {}
coming_soon = False
# Theater quality
if pp.get('theater') and fireEvent('searcher.could_be_released', True, eta, movie.library.year, single = True):
coming_soon = True
if pp.get('dvd') and fireEvent('searcher.could_be_released', False, eta, movie.library.year, single = True):
coming_soon = True
if len(active) > 0:
# Skip if movie is snatched/downloaded/available
skip = False
for release in movie.releases:
if release.status_id in [snatched_status.get('id'), downloaded_status.get('id'), available_status.get('id')]:
skip = True
break
if skip:
continue
# Do the shuffle
if random:
rndm.shuffle(active)
if coming_soon:
temp = movie.to_dict({
'profile': {'types': {}},
'releases': {'files':{}, 'info': {}},
'library': {'titles': {}, 'files':{}},
'files': {},
})
movie_ids = []
for movie in active:
movie_id, profile_id, info, year = movie
# Don't list older movies
if ((not late and ((not eta.get('dvd') and not eta.get('theater')) or (eta.get('dvd') and eta.get('dvd') > (now - 2419200)))) or \
(late and (eta.get('dvd', 0) > 0 or eta.get('theater')) and eta.get('dvd') < (now - 2419200))):
movies.append(temp)
pp = profile_pre.get(profile_id)
if not pp: continue
if len(movies) >= limit:
break
eta = info.get('release_date', {}) or {}
coming_soon = False
# Theater quality
if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, year, single = True):
coming_soon = True
elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, year, single = True):
coming_soon = True
if coming_soon:
# Don't list older movies
if ((not late and (not eta.get('dvd') and not eta.get('theater') or eta.get('dvd') and eta.get('dvd') > (now - 2419200))) or
(late and (eta.get('dvd', 0) > 0 or eta.get('theater')) and eta.get('dvd') < (now - 2419200))):
movie_ids.append(movie_id)
if len(movie_ids) >= limit:
break
if len(movie_ids) > 0:
# Get all movie information
movies_raw = db.query(Movie) \
.options(joinedload_all('library.titles')) \
.options(joinedload_all('library.files')) \
.options(joinedload_all('files')) \
.filter(Movie.id.in_(movie_ids)) \
.all()
# Create dict by movie id
movie_dict = {}
for movie in movies_raw:
movie_dict[movie.id] = movie
for movie_id in movie_ids:
movies.append(movie_dict[movie_id].to_dict({
'library': {'titles': {}, 'files':{}},
'files': {},
}))
db.expire_all()
return {
'success': True,
'empty': len(movies) == 0,

View File

@@ -83,7 +83,8 @@ class FileManager(Plugin):
Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), route), StaticFileHandler, {'path': Env.get('cache_dir')})])
def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = {}):
def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None):
if not urlopen_kwargs: urlopen_kwargs = {}
if not dest: # to Cache
dest = os.path.join(Env.get('cache_dir'), '%s.%s' % (md5(url), getExt(url)))
@@ -100,7 +101,9 @@ class FileManager(Plugin):
self.createFile(dest, filedata, binary = True)
return dest
def add(self, path = '', part = 1, type_tuple = (), available = 1, properties = {}):
def add(self, path = '', part = 1, type_tuple = (), available = 1, properties = None):
if not properties: properties = {}
type_id = self.getType(type_tuple).get('id')
db = get_session()

View File

@@ -1,6 +0,0 @@
from .main import LibraryPlugin
def start():
return LibraryPlugin()
config = []

View File

@@ -90,7 +90,6 @@ class Logging(Plugin):
if not os.path.isfile(path):
break
reversed_lines = []
f = open(path, 'r')
reversed_lines = toUnicode(f.read()).split('[0m\n')
reversed_lines.reverse()
@@ -120,7 +119,7 @@ class Logging(Plugin):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
if not os.path.isfile(path):
break
continue
try:

View File

@@ -28,6 +28,14 @@ config = [{
'description': 'Remove movie from db if it can\'t be found after re-scan.',
'default': True,
},
{
'label': 'Scan at startup',
'name': 'startup_scan',
'type': 'bool',
'default': True,
'advanced': True,
'description': 'Do a quick scan on startup. On slow systems better disable this.',
},
],
},
],

View File

@@ -26,7 +26,8 @@ class Manage(Plugin):
addEvent('manage.diskspace', self.getDiskSpace)
# Add files after renaming
def after_rename(message = None, group = {}):
def after_rename(message = None, group = None):
if not group: group = {}
return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'])
addEvent('renamer.after', after_rename, priority = 110)
@@ -44,7 +45,7 @@ class Manage(Plugin):
}"""},
})
if not Env.get('dev'):
if not Env.get('dev') and self.conf('startup_scan'):
addEvent('app.load', self.updateLibraryQuick)
def getProgress(self, **kwargs):
@@ -168,7 +169,9 @@ class Manage(Plugin):
fireEvent('notify.frontend', type = 'manage.updating', data = False)
self.in_progress = False
def createAddToLibrary(self, folder, added_identifiers = []):
def createAddToLibrary(self, folder, added_identifiers = None):
if not added_identifiers: added_identifiers = []
def addToLibrary(group, total_found, to_go):
if self.in_progress[folder]['total'] is None:
self.in_progress[folder] = {
@@ -182,9 +185,9 @@ class Manage(Plugin):
# Add it to release and update the info
fireEvent('release.add', group = group)
fireEventAsync('library.update', identifier = identifier, on_complete = self.createAfterUpdate(folder, identifier))
fireEventAsync('library.update.movie', identifier = identifier, on_complete = self.createAfterUpdate(folder, identifier))
else:
self.in_progress[folder]['to_go'] = self.in_progress[folder]['to_go'] - 1
self.in_progress[folder]['to_go'] -= 1
return addToLibrary
@@ -192,7 +195,10 @@ class Manage(Plugin):
# Notify frontend
def afterUpdate():
self.in_progress[folder]['to_go'] = self.in_progress[folder]['to_go'] - 1
if not self.in_progress or self.shuttingDown():
return
self.in_progress[folder]['to_go'] -= 1
total = self.in_progress[folder]['total']
movie_dict = fireEvent('movie.get', identifier, single = True)

View File

@@ -1,6 +0,0 @@
from .main import MoviePlugin
def start():
return MoviePlugin()
config = []

View File

@@ -5,6 +5,7 @@ from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Profile, ProfileType, Movie
from sqlalchemy.orm import joinedload_all
log = CPLog(__name__)
@@ -55,7 +56,9 @@ class ProfilePlugin(Plugin):
def all(self):
db = get_session()
profiles = db.query(Profile).all()
profiles = db.query(Profile) \
.options(joinedload_all('types')) \
.all()
temp = []
for profile in profiles:
@@ -104,7 +107,9 @@ class ProfilePlugin(Plugin):
def default(self):
db = get_session()
default = db.query(Profile).first()
default = db.query(Profile) \
.options(joinedload_all('types')) \
.first()
default_dict = default.to_dict(self.to_dict)
db.expire_all()
@@ -155,7 +160,7 @@ class ProfilePlugin(Plugin):
def fill(self):
db = get_session();
db = get_session()
profiles = [{
'label': 'Best',

View File

@@ -19,10 +19,10 @@ class QualityPlugin(Plugin):
{'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]},
{'identifier': '1080p', 'hd': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts']},
{'identifier': '720p', 'hd': True, 'size': (3000, 10000), 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':['avi']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':['avi'], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': [], 'allow': [], 'ext':['iso', 'img'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': ['dvdrip'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener'], 'allow': ['dvdr', 'dvd'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvd'], 'ext':['avi', 'mpg', 'mpeg'], 'tags': ['webrip', ('web', 'rip')]},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
@@ -102,7 +102,7 @@ class QualityPlugin(Plugin):
def fill(self):
db = get_session();
db = get_session()
order = 0
for q in self.qualities:
@@ -152,45 +152,61 @@ class QualityPlugin(Plugin):
return True
def guess(self, files, extra = {}):
def guess(self, files, extra = None):
if not extra: extra = {}
# Create hash for cache
hash = md5(str([f.replace('.' + getExt(f), '') for f in files]))
cached = self.getCache(hash)
if cached and extra is {}: return cached
cache_key = md5(str([f.replace('.' + getExt(f), '') for f in files]))
cached = self.getCache(cache_key)
if cached and len(extra) == 0: return cached
qualities = self.all()
for cur_file in files:
words = re.split('\W+', cur_file.lower())
for quality in self.all():
found = {}
for quality in qualities:
contains = self.containsTag(quality, words, cur_file)
if contains:
found[quality['identifier']] = True
# Check tags
for quality in qualities:
# Check identifier
if quality['identifier'] in words:
log.debug('Found via identifier "%s" in %s', (quality['identifier'], cur_file))
return self.setCache(hash, quality)
if len(found) == 0 or len(found) == 1 and found.get(quality['identifier']):
log.debug('Found via identifier "%s" in %s', (quality['identifier'], cur_file))
return self.setCache(cache_key, quality)
if list(set(quality.get('alternative', [])) & set(words)):
log.debug('Found %s via alt %s in %s', (quality['identifier'], quality.get('alternative'), cur_file))
return self.setCache(hash, quality)
for tag in quality.get('tags', []):
if isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words):
log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file))
return self.setCache(hash, quality)
if list(set(quality.get('tags', [])) & set(words)):
log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file))
return self.setCache(hash, quality)
# Check alt and tags
contains = self.containsTag(quality, words, cur_file)
if contains:
return self.setCache(cache_key, quality)
# Try again with loose testing
quality = self.guessLoose(hash, files = files, extra = extra)
quality = self.guessLoose(cache_key, files = files, extra = extra)
if quality:
return self.setCache(hash, quality)
return self.setCache(cache_key, quality)
log.debug('Could not identify quality for: %s', files)
return None
def guessLoose(self, hash, files = None, extra = None):
def containsTag(self, quality, words, cur_file = ''):
# Check alt and tags
for tag_type in ['alternative', 'tags']:
for alt in quality.get(tag_type, []):
if isinstance(alt, tuple) and '.'.join(alt) in '.'.join(words):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
return True
if list(set(quality.get(tag_type, [])) & set(words)):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
return True
return
def guessLoose(self, cache_key, files = None, extra = None):
if extra:
for quality in self.all():
@@ -198,15 +214,15 @@ class QualityPlugin(Plugin):
# Check width resolution, range 20
if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0)))
return self.setCache(hash, quality)
return self.setCache(cache_key, quality)
# Check height resolution, range 20
if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20):
log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0)))
return self.setCache(hash, quality)
return self.setCache(cache_key, quality)
if 480 <= extra.get('resolution_width', 0) <= 720:
log.debug('Found as dvdrip')
return self.setCache(hash, self.single('dvdrip'))
return self.setCache(cache_key, self.single('dvdrip'))
return None

View File

@@ -41,7 +41,8 @@ var QualityBase = new Class({
self.settings.addEvent('create', function(){
var tab = self.settings.createSubTab('profile', {
'label': 'Quality',
'name': 'profile'
'name': 'profile',
'subtab_label': 'Qualities'
}, self.settings.tabs.searcher ,'searcher');
self.tab = tab.tab;
@@ -102,7 +103,8 @@ var QualityBase = new Class({
var profile_list;
var group = self.settings.createGroup({
'label': 'Profile Defaults'
'label': 'Profile Defaults',
'description': '(Needs refresh \'' +(App.isMac() ? 'CMD+R' : 'F5')+ '\' after editing)'
}).adopt(
new Element('.ctrlHolder#profile_ordering').adopt(
new Element('label[text=Order]'),

View File

@@ -6,8 +6,10 @@ from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.scanner.main import Scanner
from couchpotato.core.settings.model import File, Release as Relea, Movie
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import and_, or_
import os
import traceback
log = CPLog(__name__)
@@ -35,6 +37,12 @@ class Release(Plugin):
'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
}
})
addApiView('release.for_movie', self.forMovie, docs = {
'desc': 'Returns all releases for a movie. Ordered by score(desc)',
'params': {
'id': {'type': 'id', 'desc': 'ID of the movie'}
}
})
addEvent('release.delete', self.delete)
addEvent('release.clean', self.clean)
@@ -88,8 +96,8 @@ class Release(Plugin):
added_files = db.query(File).filter(or_(*[File.id == x for x in added_files])).all()
rel.files.extend(added_files)
db.commit()
except Exception, e:
log.debug('Failed to attach "%s" to release: %s', (cur_file, e))
except:
log.debug('Failed to attach "%s" to release: %s', (added_files, traceback.format_exc()))
fireEvent('movie.restatus', movie.id)
@@ -174,7 +182,7 @@ class Release(Plugin):
# Get matching provider
provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
if item['type'] != 'torrent_magnet':
if item.get('protocol', item.get('type')) != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
success = fireEvent('searcher.download', data = item, movie = rel.movie.to_dict({
@@ -203,3 +211,22 @@ class Release(Plugin):
return {
'success': False
}
def forMovie(self, id = None, **kwargs):
db = get_session()
releases_raw = db.query(Relea) \
.options(joinedload_all('info')) \
.options(joinedload_all('files')) \
.filter(Relea.movie_id == id) \
.all()
releases = [r.to_dict({'info':{}, 'files':{}}) for r in releases_raw]
releases = sorted(releases, key = lambda k: k['info'].get('score', 0), reverse = True)
return {
'releases': releases,
'success': True
}

15
couchpotato/core/plugins/renamer/__init__.py Normal file → Executable file
View File

@@ -27,6 +27,7 @@ rename_options = {
'imdb_id': 'IMDB id (tt0123456)',
'cd': 'CD number (cd1)',
'cd_nr': 'Just the cd nr. (1)',
'mpaa': 'MPAA Rating',
},
}
@@ -54,7 +55,7 @@ config = [{
{
'name': 'to',
'type': 'directory',
'description': 'Folder where the movies should be moved to.',
'description': 'Default folder where the movies are moved to.',
},
{
'name': 'folder_name',
@@ -72,6 +73,12 @@ config = [{
'type': 'choice',
'options': rename_options
},
{
'name': 'unrar',
'type': 'bool',
'description': 'Extract rar files if found.',
'default': False,
},
{
'name': 'cleanup',
'type': 'bool',
@@ -119,10 +126,10 @@ config = [{
{
'name': 'file_action',
'label': 'Torrent File Action',
'default': 'move',
'default': 'link',
'type': 'dropdown',
'values': [('Move', 'move'), ('Copy', 'copy'), ('Hard link', 'hardlink'), ('Sym link', 'symlink'), ('Move & Sym link', 'move_symlink')],
'description': 'Define which kind of file operation you want to use for torrents. Before you start using <a href="http://en.wikipedia.org/wiki/Hard_link">hard links</a> or <a href="http://en.wikipedia.org/wiki/Sym_link">sym links</a>, PLEASE read about their possible drawbacks.',
'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
'description': '<strong>Link</strong> or <strong>Copy</strong> after downloading completed (and allow for seeding), or <strong>Move</strong> after seeding completed. Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link">sym link</a> and falls back to Copy.',
'advanced': True,
},
{

View File

@@ -9,7 +9,9 @@ from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Library, File, Profile, Release, \
ReleaseInfo
from couchpotato.environment import Env
from unrar2 import RarFile
import errno
import fnmatch
import os
import re
import shutil
@@ -38,7 +40,6 @@ class Renamer(Plugin):
addEvent('renamer.check_snatched', self.checkSnatched)
addEvent('app.load', self.scan)
addEvent('app.load', self.checkSnatched)
addEvent('app.load', self.setCrons)
# Enable / disable interval
@@ -60,23 +61,24 @@ class Renamer(Plugin):
def scanView(self, **kwargs):
async = tryInt(kwargs.get('async', None))
movie_folder = kwargs.get('movie_folder', None)
downloader = kwargs.get('downloader', None)
download_id = kwargs.get('download_id', None)
async = tryInt(kwargs.get('async', 0))
movie_folder = kwargs.get('movie_folder')
downloader = kwargs.get('downloader')
download_id = kwargs.get('download_id')
download_info = {'folder': movie_folder} if movie_folder else None
if download_info:
download_info.update({'id': download_id, 'downloader': downloader} if download_id else {})
fire_handle = fireEvent if not async else fireEventAsync
fire_handle('renamer.scan',
movie_folder = movie_folder,
download_info = {'id': download_id, 'downloader': downloader} if download_id else None
)
fire_handle('renamer.scan', download_info)
return {
'success': True
}
def scan(self, movie_folder = None, download_info = None):
def scan(self, download_info = None):
if self.isDisabled():
return
@@ -85,6 +87,8 @@ class Renamer(Plugin):
log.info('Renamer is already running, if you see this often, check the logs above for errors.')
return
movie_folder = download_info and download_info.get('folder')
# Check to see if the "to" folder is inside the "from" folder.
if movie_folder and not os.path.isdir(movie_folder) or not os.path.isdir(self.conf('from')) or not os.path.isdir(self.conf('to')):
l = log.debug if movie_folder else log.error
@@ -93,10 +97,14 @@ class Renamer(Plugin):
elif self.conf('from') in self.conf('to'):
log.error('The "to" can\'t be inside of the "from" folder. You\'ll get an infinite loop.')
return
elif (movie_folder and movie_folder in [self.conf('to'), self.conf('from')]):
elif movie_folder and movie_folder in [self.conf('to'), self.conf('from')]:
log.error('The "to" and "from" folders can\'t be inside of or the same as the provided movie folder.')
return
# Make sure a checkSnatched marked all downloads/seeds as such
if not download_info and self.conf('run_every') > 0:
fireEvent('renamer.check_snatched')
self.renaming_started = True
# make sure the movie folder name is included in the search
@@ -119,10 +127,15 @@ class Renamer(Plugin):
# Extend the download info with info stored in the downloaded release
download_info = self.extendDownloadInfo(download_info)
# Unpack any archives
extr_files = None
if self.conf('unrar'):
folder, movie_folder, files, extr_files = self.extractFiles(folder = folder, movie_folder = movie_folder, files = files,
cleanup = self.conf('cleanup') and not self.downloadIsTorrent(download_info))
groups = fireEvent('scanner.scan', folder = folder if folder else self.conf('from'),
files = files, download_info = download_info, return_ignored = False, single = True)
destination = self.conf('to')
folder_name = self.conf('folder_name')
file_name = self.conf('file_name')
trailer_name = self.conf('trailer_name')
@@ -148,17 +161,35 @@ class Renamer(Plugin):
continue
# Rename the files using the library data
else:
group['library'] = fireEvent('library.update', identifier = group['library']['identifier'], single = True)
group['library'] = fireEvent('library.update.movie', identifier = group['library']['identifier'], single = True)
if not group['library']:
log.error('Could not rename, no library item to work with: %s', group_identifier)
continue
library = group['library']
library_ent = db.query(Library).filter_by(identifier = group['library']['identifier']).first()
movie_title = getTitle(library)
# Overwrite destination when set in category
destination = self.conf('to')
for movie in library_ent.movies:
if movie.category and movie.category.destination and len(movie.category.destination) > 0:
destination = movie.category.destination
log.debug('Setting category destination for "%s": %s' % (movie_title, destination))
else:
log.debug('No category destination found for "%s"' % movie_title)
break
# Find subtitle for renaming
group['before_rename'] = []
fireEvent('renamer.before', group)
# Add extracted files to the before_rename list
if extr_files:
group['before_rename'].extend(extr_files)
# Remove weird chars from moviename
movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', movie_title)
@@ -185,6 +216,7 @@ class Renamer(Plugin):
'imdb_id': library['identifier'],
'cd': '',
'cd_nr': '',
'mpaa': library['info'].get('mpaa', ''),
}
for file_type in group['files']:
@@ -192,8 +224,8 @@ class Renamer(Plugin):
# Move nfo depending on settings
if file_type is 'nfo' and not self.conf('rename_nfo'):
log.debug('Skipping, renaming of %s disabled', file_type)
if self.conf('cleanup'):
for current_file in group['files'][file_type]:
for current_file in group['files'][file_type]:
if self.conf('cleanup') and (not self.downloadIsTorrent(download_info) or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
continue
@@ -307,19 +339,18 @@ class Renamer(Plugin):
cd += 1
# Before renaming, remove the lower quality files
library = db.query(Library).filter_by(identifier = group['library']['identifier']).first()
remove_leftovers = True
# Add it to the wanted list before we continue
if len(library.movies) == 0:
if len(library_ent.movies) == 0:
profile = db.query(Profile).filter_by(core = True, label = group['meta_data']['quality']['label']).first()
fireEvent('movie.add', params = {'identifier': group['library']['identifier'], 'profile_id': profile.id}, search_after = False)
db.expire_all()
library = db.query(Library).filter_by(identifier = group['library']['identifier']).first()
library_ent = db.query(Library).filter_by(identifier = group['library']['identifier']).first()
for movie in library.movies:
for movie in library_ent.movies:
# Mark movie "done" onces it found the quality with the finish check
# Mark movie "done" once it's found the quality with the finish check
try:
if movie.status_id == active_status.get('id') and movie.profile:
for profile_type in movie.profile.types:
@@ -357,7 +388,7 @@ class Renamer(Plugin):
self.tagDir(group, 'exists')
# Notify on rename fail
download_message = 'Renaming of %s (%s) canceled, exists in %s already.' % (movie.library.titles[0].title, group['meta_data']['quality']['label'], release.quality.label)
download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (movie.library.titles[0].title, group['meta_data']['quality']['label'], release.quality.label)
fireEvent('movie.renaming.canceled', message = download_message, data = group)
remove_leftovers = False
@@ -374,14 +405,15 @@ class Renamer(Plugin):
db.commit()
# Remove leftover files
if self.conf('cleanup') and not self.conf('move_leftover') and remove_leftovers and \
not (self.conf('file_action') != 'move' and self.downloadIsTorrent(download_info)):
log.debug('Removing leftover files')
for current_file in group['files']['leftover']:
remove_files.append(current_file)
elif not remove_leftovers: # Don't remove anything
if not remove_leftovers: # Don't remove anything
break
log.debug('Removing leftover files')
for current_file in group['files']['leftover']:
if self.conf('cleanup') and not self.conf('move_leftover') and \
(not self.downloadIsTorrent(download_info) or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
# Remove files
delete_folders = []
for src in remove_files:
@@ -425,14 +457,15 @@ class Renamer(Plugin):
self.makeDir(os.path.dirname(dst))
try:
self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(download_info))
self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(download_info) or self.fileIsAdded(src, group))
group['renamed_files'].append(dst)
except:
log.error('Failed moving the file "%s" : %s', (os.path.basename(src), traceback.format_exc()))
self.tagDir(group, 'failed_rename')
if self.conf('file_action') != 'move' and self.downloadIsTorrent(download_info):
self.tagDir(group, 'renamed already')
# Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent
if self.movieInFromFolder(movie_folder) and self.downloadIsTorrent(download_info):
self.tagDir(group, 'renamed_already')
# Remove matching releases
for release in remove_releases:
@@ -442,7 +475,7 @@ class Renamer(Plugin):
except:
log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc()))
if group['dirname'] and group['parentdir']:
if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(download_info):
try:
log.info('Deleting folder: %s', group['parentdir'])
self.deleteEmptyFolder(group['parentdir'])
@@ -462,7 +495,9 @@ class Renamer(Plugin):
self.renaming_started = False
def getRenameExtras(self, extra_type = '', replacements = {}, folder_name = '', file_name = '', destination = '', group = {}, current_file = '', remove_multiple = False):
def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False):
if not group: group = {}
if not replacements: replacements = {}
replacements = replacements.copy()
rename_files = {}
@@ -483,9 +518,15 @@ class Renamer(Plugin):
def tagDir(self, group, tag):
ignore_file = None
for movie_file in sorted(list(group['files']['movie'])):
ignore_file = '%s.ignore' % os.path.splitext(movie_file)[0]
break
if isinstance(group, dict):
for movie_file in sorted(list(group['files']['movie'])):
ignore_file = '%s.%s.ignore' % (os.path.splitext(movie_file)[0], tag)
break
else:
if not os.path.isdir(group) or not tag:
return
ignore_file = os.path.join(group, '%s.ignore' % tag)
text = """This file is from CouchPotato
It has marked this release as "%s"
@@ -496,21 +537,48 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if ignore_file:
self.createFile(ignore_file, text)
def untagDir(self, folder, tag = ''):
if not os.path.isdir(folder):
return
# Remove any .ignore files
for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, '*%s.ignore' % tag):
os.remove((os.path.join(root, filename)))
def hastagDir(self, folder, tag = ''):
if not os.path.isdir(folder):
return False
# Find any .ignore files
for root, dirnames, filenames in os.walk(folder):
if fnmatch.filter(filenames, '*%s.ignore' % tag):
return True
return False
def moveFile(self, old, dest, forcemove = False):
dest = ss(dest)
try:
if forcemove:
shutil.move(old, dest)
elif self.conf('file_action') == 'hardlink':
link(old, dest)
elif self.conf('file_action') == 'symlink':
symlink(old, dest)
elif self.conf('file_action') == 'copy':
shutil.copy(old, dest)
elif self.conf('file_action') == 'move_symlink':
shutil.move(old, dest)
symlink(dest, old)
elif self.conf('file_action') == 'link':
# First try to hardlink
try:
log.debug('Hardlinking file "%s" to "%s"...', (old, dest))
link(old, dest)
except:
# Try to simlink next
log.debug('Couldn\'t hardlink file "%s" to "%s". Simlinking instead. Error: %s. ', (old, dest, traceback.format_exc()))
shutil.copy(old, dest)
try:
symlink(dest, old + '.link')
os.unlink(old)
os.rename(old + '.link', old)
except:
log.error('Couldn\'t symlink file "%s" to "%s". Copied instead. Error: %s. ', (old, dest, traceback.format_exc()))
else:
shutil.move(old, dest)
@@ -536,9 +604,9 @@ Remove it if you want it to be renamed (again, or at least let it try again)
return True
def doReplace(self, string, replacements, remove_multiple = False):
'''
"""
replace confignames with the real thing
'''
"""
replacements = replacements.copy()
if remove_multiple:
@@ -584,19 +652,21 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if self.checking_snatched:
log.debug('Already checking snatched')
return False
self.checking_snatched = True
snatched_status, ignored_status, failed_status, done_status = \
fireEvent('status.get', ['snatched', 'ignored', 'failed', 'done'], single = True)
snatched_status, ignored_status, failed_status, done_status, seeding_status, downloaded_status = \
fireEvent('status.get', ['snatched', 'ignored', 'failed', 'done', 'seeding', 'downloaded'], single = True)
db = get_session()
rels = db.query(Release).filter_by(status_id = snatched_status.get('id')).all()
rels.extend(db.query(Release).filter_by(status_id = seeding_status.get('id')).all())
scan_items = []
scan_required = False
if rels:
self.checking_snatched = True
log.debug('Checking status snatched releases...')
statuses = fireEvent('download.status', merge = True)
@@ -608,17 +678,6 @@ Remove it if you want it to be renamed (again, or at least let it try again)
for rel in rels:
rel_dict = rel.to_dict({'info': {}})
# Get current selected title
default_title = getTitle(rel.movie.library)
# Check if movie has already completed and is manage tab (legacy db correction)
if rel.movie.status_id == done_status.get('id'):
log.debug('Found a completed movie with a snatched release : %s. Setting release status to ignored...' , default_title)
rel.status_id = ignored_status.get('id')
rel.last_edit = int(time.time())
db.commit()
continue
movie_dict = fireEvent('movie.get', rel.movie_id, single = True)
# check status
@@ -640,7 +699,34 @@ Remove it if you want it to be renamed (again, or at least let it try again)
log.debug('Found %s: %s, time to go: %s', (item['name'], item['status'].upper(), timeleft))
if item['status'] == 'busy':
pass
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if item['folder'] and self.conf('from') in item['folder']:
self.tagDir(item['folder'], 'downloading')
elif item['status'] == 'seeding':
#If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.movie.status_id == done_status.get('id') and self.statusInfoComplete(item):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (item['name'], item['seed_ratio']))
# Remove the downloading tag
self.untagDir(item['folder'], 'downloading')
rel.status_id = seeding_status.get('id')
rel.last_edit = int(time.time())
db.commit()
# Scan and set the torrent to paused if required
item.update({'pause': True, 'scan': True, 'process_complete': False})
scan_items.append(item)
else:
if rel.status_id != seeding_status.get('id'):
rel.status_id = seeding_status.get('id')
rel.last_edit = int(time.time())
db.commit()
#let it seed
log.debug('%s is seeding with ratio: %s', (item['name'], item['seed_ratio']))
elif item['status'] == 'failed':
fireEvent('download.remove_failed', item, single = True)
rel.status_id = failed_status.get('id')
@@ -648,11 +734,39 @@ Remove it if you want it to be renamed (again, or at least let it try again)
db.commit()
if self.conf('next_on_failed'):
fireEvent('searcher.try_next_release', movie_id = rel.movie_id)
fireEvent('movie.searcher.try_next_release', movie_id = rel.movie_id)
elif item['status'] == 'completed':
log.info('Download of %s completed!', item['name'])
if item['id'] and item['downloader'] and item['folder']:
fireEventAsync('renamer.scan', movie_folder = item['folder'], download_info = item)
if self.statusInfoComplete(item):
# If the release has been seeding, process now the seeding is done
if rel.status_id == seeding_status.get('id'):
if rel.movie.status_id == done_status.get('id'):
# Set the release to done as the movie has already been renamed
rel.status_id = downloaded_status.get('id')
rel.last_edit = int(time.time())
db.commit()
# Allow the downloader to clean-up
item.update({'pause': False, 'scan': False, 'process_complete': True})
scan_items.append(item)
else:
# Set the release to snatched so that the renamer can process the release as if it was never seeding
rel.status_id = snatched_status.get('id')
rel.last_edit = int(time.time())
db.commit()
# Scan and Allow the downloader to clean-up
item.update({'pause': False, 'scan': True, 'process_complete': True})
scan_items.append(item)
else:
# Remove the downloading tag
self.untagDir(item['folder'], 'downloading')
# Scan and Allow the downloader to clean-up
item.update({'pause': False, 'scan': True, 'process_complete': True})
scan_items.append(item)
else:
scan_required = True
@@ -665,6 +779,23 @@ Remove it if you want it to be renamed (again, or at least let it try again)
except:
log.error('Failed checking for release in downloader: %s', traceback.format_exc())
# The following can either be done here, or inside the scanner if we pass it scan_items in one go
for item in scan_items:
# Ask the renamer to scan the item
if item['scan']:
if item['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', item = item, pause = True, single = True)
fireEvent('renamer.scan', download_info = item)
if item['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', item = item, pause = False, single = True)
if item['process_complete']:
#First make sure the files were succesfully processed
if not self.hastagDir(item['folder'], 'failed_rename'):
# Remove the seeding tag if it exists
self.untagDir(item['folder'], 'renamed_already')
# Ask the downloader to process the item
fireEvent('download.process_complete', item = item, single = True)
if scan_required:
fireEvent('renamer.scan')
@@ -699,10 +830,146 @@ Remove it if you want it to be renamed (again, or at least let it try again)
download_info.update({
'imdb_id': rls.movie.library.identifier,
'quality': rls.quality.identifier,
'type': rls_dict.get('info', {}).get('type')
'protocol': rls_dict.get('info', {}).get('protocol') or rls_dict.get('info', {}).get('type'),
})
return download_info
def downloadIsTorrent(self, download_info):
return download_info and download_info.get('type') in ['torrent', 'torrent_magnet']
return download_info and download_info.get('protocol') in ['torrent', 'torrent_magnet']
def fileIsAdded(self, src, group):
if not group or not group.get('before_rename'):
return False
return src in group['before_rename']
def statusInfoComplete(self, item):
return item['id'] and item['downloader'] and item['folder']
def movieInFromFolder(self, movie_folder):
return movie_folder and self.conf('from') in movie_folder or not movie_folder
def extractFiles(self, folder = None, movie_folder = None, files = None, cleanup = False):
if not files: files = []
# RegEx for finding rar files
archive_regex = '(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))'
extr_files = []
# Check input variables
if not folder:
folder = self.conf('from')
check_file_date = True
if movie_folder:
check_file_date = False
if not files:
for root, folders, names in os.walk(folder):
files.extend([os.path.join(root, name) for name in names])
# Find all archive files
archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)]
#Extract all found archives
for archive in archives:
# Check if it has already been processed by CPS
if self.hastagDir(os.path.dirname(archive['file'])):
continue
# Find all related archive files
archive['files'] = [name for name in files if re.search(restfile_regex % re.escape(archive['base']), name)]
archive['files'].append(archive['file'])
# Check if archive is fresh and maybe still copying/moving/downloading, ignore files newer than 1 minute
if check_file_date:
file_too_new = False
for cur_file in archive['files']:
if not os.path.isfile(cur_file):
file_too_new = time.time()
break
file_time = [os.path.getmtime(cur_file), os.path.getctime(cur_file)]
for t in file_time:
if t > time.time() - 60:
file_too_new = tryInt(time.time() - t)
break
if file_too_new:
break
if file_too_new:
try:
time_string = time.ctime(file_time[0])
except:
try:
time_string = time.ctime(file_time[1])
except:
time_string = 'unknown'
log.info('Archive seems to be still copying/moving/downloading or just copied/moved/downloaded (created on %s), ignoring for now: %s', (time_string, os.path.basename(archive['file'])))
continue
log.info('Archive %s found. Extracting...', os.path.basename(archive['file']))
try:
rar_handle = RarFile(archive['file'])
extr_path = os.path.join(self.conf('from'), os.path.relpath(os.path.dirname(archive['file']), folder))
self.makeDir(extr_path)
for packedinfo in rar_handle.infolist():
if not packedinfo.isdir and not os.path.isfile(os.path.join(extr_path, os.path.basename(packedinfo.filename))):
log.debug('Extracting %s...', packedinfo.filename)
rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False)
extr_files.append(os.path.join(extr_path, os.path.basename(packedinfo.filename)))
del rar_handle
except Exception, e:
log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc()))
continue
# Delete the archive files
for filename in archive['files']:
if cleanup:
try:
os.remove(filename)
except Exception, e:
log.error('Failed to remove %s: %s %s', (filename, e, traceback.format_exc()))
continue
files.remove(filename)
# Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided)
if extr_files and os.path.normpath(os.path.normcase(folder)) != os.path.normpath(os.path.normcase(self.conf('from'))):
for leftoverfile in list(files):
move_to = os.path.join(self.conf('from'), os.path.relpath(leftoverfile, folder))
try:
self.makeDir(os.path.dirname(move_to))
self.moveFile(leftoverfile, move_to, cleanup)
except Exception, e:
log.error('Failed moving left over file %s to %s: %s %s', (leftoverfile, move_to, e, traceback.format_exc()))
# As we probably tried to overwrite the nfo file, check if it exists and then remove the original
if os.path.isfile(move_to):
if cleanup:
log.info('Deleting left over file %s instead...', leftoverfile)
os.unlink(leftoverfile)
else:
continue
files.remove(leftoverfile)
extr_files.append(move_to)
if cleanup:
# Remove all left over folders
log.debug('Removing old movie folder %s...', movie_folder)
self.deleteEmptyFolder(movie_folder)
movie_folder = os.path.join(self.conf('from'), os.path.relpath(movie_folder, folder))
folder = self.conf('from')
if extr_files:
files.extend(extr_files)
# Cleanup files and folder if movie_folder was not provided
if not movie_folder:
files = []
folder = None
return folder, movie_folder, files, extr_files

View File

@@ -120,13 +120,17 @@ class Scanner(Plugin):
files = []
for root, dirs, walk_files in os.walk(folder):
files.extend(os.path.join(root, filename) for filename in walk_files)
# Break if CP wants to shut down
if self.shuttingDown():
break
except:
log.error('Failed getting files from %s: %s', (folder, traceback.format_exc()))
else:
check_file_date = False
files = [ss(x) for x in files]
db = get_session()
for file_path in files:
@@ -225,6 +229,10 @@ class Scanner(Plugin):
# Remove the found files from the leftover stack
leftovers = leftovers - set(found_files)
exts = [getExt(ff) for ff in found_files]
if 'ignore' in exts:
ignored_identifiers.append(identifier)
# Break if CP wants to shut down
if self.shuttingDown():
break
@@ -251,6 +259,10 @@ class Scanner(Plugin):
# Remove the found files from the leftover stack
leftovers = leftovers - set([ff])
ext = getExt(ff)
if ext == 'ignore':
ignored_identifiers.append(new_identifier)
# Break if CP wants to shut down
if self.shuttingDown():
break
@@ -269,7 +281,7 @@ class Scanner(Plugin):
except:
break
# Check if movie is fresh and maybe still unpacking, ignore files new then 1 minute
# Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute
file_too_new = False
for cur_file in group['unsorted_files']:
if not os.path.isfile(cur_file):
@@ -321,14 +333,18 @@ class Scanner(Plugin):
del movie_files
total_found = len(valid_files)
# Make sure only one movie was found if a download ID is provided
if download_info and not len(valid_files) == 1:
if download_info and total_found == 0:
log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', download_info.get('imdb_id'))
elif download_info and total_found > 1:
log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (download_info.get('imdb_id'), len(valid_files)))
download_info = None
# Determine file types
db = get_session()
processed_movies = {}
total_found = len(valid_files)
while True and not self.shuttingDown():
try:
identifier, group = valid_files.popitem()
@@ -413,7 +429,7 @@ class Scanner(Plugin):
if len(processed_movies) > 0:
log.info('Found %s movies in the folder %s', (len(processed_movies), folder))
else:
log.debug('Found no movies in the folder %s', (folder))
log.debug('Found no movies in the folder %s', folder)
return processed_movies
@@ -492,6 +508,7 @@ class Scanner(Plugin):
detected_languages = {}
# Subliminal scanner
paths = None
try:
paths = group['files']['movie']
scan_result = []
@@ -544,12 +561,14 @@ class Scanner(Plugin):
break
# Check and see if nfo contains the imdb-id
nfo_file = None
if not imdb_id:
try:
for nfo_file in files['nfo']:
for nf in files['nfo']:
imdb_id = getImdb(nfo_file)
if imdb_id:
log.debug('Found movie via nfo file: %s', nfo_file)
log.debug('Found movie via nfo file: %s', nf)
nfo_file = nf
break
except:
pass
@@ -569,26 +588,16 @@ class Scanner(Plugin):
# Check if path is already in db
if not imdb_id:
db = get_session()
for cur_file in files['movie']:
f = db.query(File).filter_by(path = toUnicode(cur_file)).first()
for cf in files['movie']:
f = db.query(File).filter_by(path = toUnicode(cf)).first()
try:
imdb_id = f.library[0].identifier
log.debug('Found movie via database: %s', cur_file)
log.debug('Found movie via database: %s', cf)
cur_file = cf
break
except:
pass
# Search based on OpenSubtitleHash
if not imdb_id and not group['is_dvd']:
for cur_file in files['movie']:
movie = fireEvent('movie.by_hash', file = cur_file, merge = True)
if len(movie) > 0:
imdb_id = movie[0].get('imdb')
if imdb_id:
log.debug('Found movie via OpenSubtitleHash: %s', cur_file)
break
# Search based on identifiers
if not imdb_id:
for identifier in group['identifiers']:
@@ -609,7 +618,7 @@ class Scanner(Plugin):
log.debug('Identifier to short to use for search: %s', identifier)
if imdb_id:
return fireEvent('library.add', attrs = {
return fireEvent('library.add.movie', attrs = {
'identifier': imdb_id
}, update_after = False, single = True)
@@ -675,10 +684,9 @@ class Scanner(Plugin):
return getExt(s.lower()) in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn']
files = set(filter(test, files))
images = {}
# Fanart
images['backdrop'] = set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, 0, 5), files))
images = {
'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, 0, 5), files))
}
# Rest
images['rest'] = files - images['backdrop']
@@ -750,7 +758,7 @@ class Scanner(Plugin):
# Year
year = self.findYear(identifier)
if year:
if year and identifier[:4] != year:
identifier = '%s %s' % (identifier.split(year)[0].strip(), year)
else:
identifier = identifier.split('::')[0]

View File

@@ -1,11 +1,12 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.helpers.variable import getTitle, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.score.scores import nameScore, nameRatioScore, \
sizeScore, providerScore, duplicateScore, partialIgnoredScore, namePositionScore, \
halfMultipartScore
from couchpotato.environment import Env
log = CPLog(__name__)
@@ -16,9 +17,14 @@ class Score(Plugin):
addEvent('score.calculate', self.calculate)
def calculate(self, nzb, movie):
''' Calculate the score of a NZB, used for sorting later '''
""" Calculate the score of a NZB, used for sorting later """
score = nameScore(toUnicode(nzb['name'] + ' ' + nzb.get('name_extra', '')), movie['library']['year'])
# Merge global and category
preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower())
try: preferred_words = list(set(preferred_words + splitString(movie['category']['preferred'].lower())))
except: pass
score = nameScore(toUnicode(nzb['name']), movie['library']['year'], preferred_words)
for movie_title in movie['library']['titles']:
score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))
@@ -40,8 +46,13 @@ class Score(Plugin):
# Duplicates in name
score += duplicateScore(nzb['name'], getTitle(movie['library']))
# Merge global and category
ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower())
try: ignored_words = list(set(ignored_words + splitString(movie['category']['ignored'].lower())))
except: pass
# Partial ignored words
score += partialIgnoredScore(nzb['name'], getTitle(movie['library']))
score += partialIgnoredScore(nzb['name'], getTitle(movie['library']), ignored_words)
# Ignore single downloads from multipart
score += halfMultipartScore(nzb['name'])

View File

@@ -1,6 +1,6 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.helpers.variable import tryInt
from couchpotato.environment import Env
import re
@@ -23,8 +23,8 @@ name_scores = [
]
def nameScore(name, year):
''' Calculate score for words in the NZB name '''
def nameScore(name, year, preferred_words):
""" Calculate score for words in the NZB name """
score = 0
name = name.lower()
@@ -34,20 +34,18 @@ def nameScore(name, year):
v = value.split(':')
add = int(v.pop())
if v.pop() in name:
score = score + add
score += add
# points if the year is correct
if str(year) in name:
score = score + 5
score += 5
# Contains preferred word
nzb_words = re.split('\W+', simplifyString(name))
preferred_words = splitString(Env.setting('preferred_words', section = 'searcher'))
score += 100 * len(list(set(nzb_words) & set(preferred_words)))
return score
def nameRatioScore(nzb_name, movie_name):
nzb_words = re.split('\W+', fireEvent('scanner.create_file_identifier', nzb_name, single = True))
movie_words = re.split('\W+', simplifyString(movie_name))
@@ -70,9 +68,12 @@ def namePositionScore(nzb_name, movie_name):
name_year = fireEvent('scanner.name_year', nzb_name, single = True)
# Give points for movies beginning with the correct name
name_split = simplifyString(nzb_name).split(simplifyString(movie_name))
if name_split[0].strip() == '':
score += 10
split_by = simplifyString(movie_name)
name_split = []
if len(split_by) > 0:
name_split = simplifyString(nzb_name).split(split_by)
if name_split[0].strip() == '':
score += 10
# If year is second in line, give more points
if len(name_split) > 1 and name_year:
@@ -134,13 +135,11 @@ def duplicateScore(nzb_name, movie_name):
return len(list(set(duplicates) - set(movie_words))) * -4
def partialIgnoredScore(nzb_name, movie_name):
def partialIgnoredScore(nzb_name, movie_name, ignored_words):
nzb_name = nzb_name.lower()
movie_name = movie_name.lower()
ignored_words = [x.strip().lower() for x in Env.setting('ignored_words', section = 'searcher').split(',')]
score = 0
for ignored_word in ignored_words:
if ignored_word in nzb_name and ignored_word not in movie_name:
@@ -148,6 +147,7 @@ def partialIgnoredScore(nzb_name, movie_name):
return score
def halfMultipartScore(nzb_name):
wrong_found = 0

View File

@@ -1,113 +0,0 @@
from .main import Searcher
import random
def start():
return Searcher()
config = [{
'name': 'searcher',
'order': 20,
'groups': [
{
'tab': 'searcher',
'name': 'searcher',
'label': 'Search',
'description': 'Options for the searchers',
'options': [
{
'name': 'preferred_words',
'label': 'Preferred words',
'default': '',
'description': 'These words will give the releases a higher score.'
},
{
'name': 'required_words',
'label': 'Required words',
'default': '',
'placeholder': 'Example: DTS, AC3 & English',
'description': 'A release should contain at least one set of words. Sets are separated by "," and each word within a set must be separated with "&"'
},
{
'name': 'ignored_words',
'label': 'Ignored words',
'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs',
'description': 'Ignores releases that match any of these sets. (Works like explained above)'
},
{
'name': 'preferred_method',
'label': 'First search',
'description': 'Which of the methods do you prefer',
'default': 'both',
'type': 'dropdown',
'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrents', 'torrent')],
},
{
'name': 'always_search',
'default': False,
'advanced': True,
'type': 'bool',
'label': 'Always search',
'description': 'Search for movies even before there is a ETA. Enabling this will probably get you a lot of fakes.',
},
],
}, {
'tab': 'searcher',
'name': 'cronjob',
'label': 'Cronjob',
'advanced': True,
'description': 'Cron settings for the searcher see: <a href="http://packages.python.org/APScheduler/cronschedule.html">APScheduler</a> for details.',
'options': [
{
'name': 'run_on_launch',
'label': 'Run on launch',
'advanced': True,
'default': 0,
'type': 'bool',
'description': 'Force run the searcher after (re)start.',
},
{
'name': 'cron_day',
'label': 'Day',
'advanced': True,
'default': '*',
'type': 'string',
'description': '<strong>*</strong>: Every day, <strong>*/2</strong>: Every 2 days, <strong>1</strong>: Every first of the month.',
},
{
'name': 'cron_hour',
'label': 'Hour',
'advanced': True,
'default': random.randint(0, 23),
'type': 'string',
'description': '<strong>*</strong>: Every hour, <strong>*/8</strong>: Every 8 hours, <strong>3</strong>: At 3, midnight.',
},
{
'name': 'cron_minute',
'label': 'Minute',
'advanced': True,
'default': random.randint(0, 59),
'type': 'string',
'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour."
},
],
},
],
}, {
'name': 'nzb',
'groups': [
{
'tab': 'searcher',
'name': 'nzb',
'label': 'NZB',
'wizard': True,
'options': [
{
'name': 'retention',
'default': 1000,
'type': 'int',
'unit': 'days'
},
],
},
],
}]

View File

@@ -23,6 +23,7 @@ class StatusPlugin(Plugin):
'ignored': 'Ignored',
'available': 'Available',
'suggest': 'Suggest',
'seeding': 'Seeding',
}
status_cached = {}
@@ -74,7 +75,7 @@ class StatusPlugin(Plugin):
def get(self, identifiers):
if not isinstance(identifiers, (list)):
if not isinstance(identifiers, list):
identifiers = [identifiers]
db = get_session()

View File

@@ -36,13 +36,12 @@ class Subtitle(Plugin):
files = []
for file in release.files.filter(FileType.status.has(identifier = 'movie')).all():
files.append(file.path);
files.append(file.path)
# get subtitles for those files
subliminal.list_subtitles(files, cache_dir = Env.get('cache_dir'), multi = True, languages = self.getLanguages(), services = self.services)
def searchSingle(self, group):
if self.isDisabled(): return
try:
@@ -60,6 +59,7 @@ class Subtitle(Plugin):
for d_sub in downloaded:
log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files))
group['files']['subtitle'].append(d_sub.path)
group['before_rename'].append(d_sub.path)
group['subtitle_language'][d_sub.path] = [d_sub.language.alpha2]
return True

View File

@@ -1,13 +1,14 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.variable import splitString, md5
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie
from couchpotato.environment import Env
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_
class Suggestion(Plugin):
def __init__(self):
@@ -15,38 +16,40 @@ class Suggestion(Plugin):
addApiView('suggestion.view', self.suggestView)
addApiView('suggestion.ignore', self.ignoreView)
def suggestView(self, **kwargs):
def suggestView(self, limit = 6, **kwargs):
movies = splitString(kwargs.get('movies', ''))
ignored = splitString(kwargs.get('ignored', ''))
limit = kwargs.get('limit', 6)
if not movies or len(movies) == 0:
db = get_session()
active_movies = db.query(Movie) \
.filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
movies = [x.library.identifier for x in active_movies]
if not ignored or len(ignored) == 0:
ignored = splitString(Env.prop('suggest_ignore', default = ''))
cached_suggestion = self.getCache('suggestion_cached')
if cached_suggestion:
suggestions = cached_suggestion
else:
if not movies or len(movies) == 0:
db = get_session()
active_movies = db.query(Movie) \
.options(joinedload_all('library')) \
.filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
movies = [x.library.identifier for x in active_movies]
if not ignored or len(ignored) == 0:
ignored = splitString(Env.prop('suggest_ignore', default = ''))
suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
self.setCache(md5(ss('suggestion_cached')), suggestions, timeout = 6048000) # Cache for 10 weeks
self.setCache('suggestion_cached', suggestions, timeout = 6048000) # Cache for 10 weeks
return {
'success': True,
'count': len(suggestions),
'suggestions': suggestions[:limit]
'suggestions': suggestions[:int(limit)]
}
def ignoreView(self, imdb = None, limit = 6, remove_only = False, **kwargs):
ignored = splitString(Env.prop('suggest_ignore', default = ''))
new_suggestions = []
if imdb:
if not remove_only:
ignored.append(imdb)
@@ -86,6 +89,6 @@ class Suggestion(Plugin):
if suggestions:
new_suggestions.extend(suggestions)
self.setCache(md5(ss('suggestion_cached')), new_suggestions, timeout = 6048000)
self.setCache('suggestion_cached', new_suggestions, timeout = 6048000)
return new_suggestions

View File

@@ -16,52 +16,90 @@
width: 50%;
}
}
@media all and (max-width: 600px) {
.suggestions .movie_result {
width: 100%;
}
}
.suggestions .movie_result .data {
left: 100px;
background: #4e5969;
border: none;
}
.suggestions .movie_result .data .info {
top: 15px;
left: 15px;
right: 15px;
bottom: 15px;
overflow: hidden;
}
.suggestions .movie_result .data .info h2 {
white-space: normal;
max-height: 120px;
font-size: 18px;
line-height: 18px;
}
.suggestions .movie_result .data .info .rating,
.suggestions .movie_result .data .info .genres,
.suggestions .movie_result .data .info .year {
position: static;
display: block;
margin: 5px 0 0;
padding: 0;
opacity: .6;
}
.suggestions .movie_result .data .info .year {
margin: 10px 0 0;
}
.suggestions .movie_result .data .info .rating {
font-size: 20px;
float: right;
margin-top: -20px;
}
.suggestions .movie_result .data .info .rating:before {
content: "\e031";
font-family: 'Elusive-Icons';
font-size: 14px;
margin: 0 5px 0 0;
vertical-align: bottom;
}
.suggestions .movie_result .data .info .genres {
font-size: 11px;
font-style: italic;
text-align: right;
}
.suggestions .movie_result .data {
cursor: default;
cursor: default;
}
.suggestions .movie_result .options {
left: 100px;
}
.suggestions .movie_result .options select[name=title] { width: 100%; }
.suggestions .movie_result .options select[name=profile] { width: 100%; }
.suggestions .movie_result .options select[name=category] { width: 100%; }
.suggestions .movie_result .button {
position: absolute;
margin: 2px 0 0 0;
right: 15px;
bottom: 15px;
}
.suggestions .movie_result .thumbnail {
width: 100px;
}
.suggestions .movie_result .actions {
position: absolute;
bottom: 10px;
@@ -75,10 +113,9 @@
.suggestions .movie_result .data.open .actions {
display: none;
}
.suggestions .movie_result .actions a {
margin-left: 10px;
vertical-align: middle;
}

View File

@@ -43,6 +43,8 @@ var SuggestList = new Class({
fill: function(json){
var self = this;
if(!json) return;
Object.each(json.suggestions, function(movie){
@@ -71,9 +73,23 @@ var SuggestList = new Class({
)
);
m.data_container.removeEvents('click');
// Add rating
m.info_container.adopt(
m.rating = m.info.rating && m.info.rating.imdb.length == 2 && parseFloat(m.info.rating.imdb[0]) > 0 ? new Element('span.rating', {
'text': parseFloat(m.info.rating.imdb[0]),
'title': parseInt(m.info.rating.imdb[1]) + ' votes'
}) : null,
m.genre = m.info.genres && m.info.genres.length > 0 ? new Element('span.genres', {
'text': m.info.genres.slice(0, 3).join(', ')
}) : null
)
$(m).inject(self.el);
});
self.fireEvent('loaded');
},

View File

@@ -12,8 +12,8 @@ class Trailer(Plugin):
def __init__(self):
addEvent('renamer.after', self.searchSingle)
def searchSingle(self, message = None, group = {}):
def searchSingle(self, message = None, group = None):
if not group: group = {}
if self.isDisabled() or len(group['files']['trailer']) > 0: return
trailers = fireEvent('trailer.search', group = group, merge = True)
@@ -40,4 +40,3 @@ class Trailer(Plugin):
break
return True

Some files were not shown because too many files have changed in this diff Show More