Compare commits

..

172 Commits

Author SHA1 Message Date
Ruud 6772b9d965 Don't migrate when db is closed 2014-07-17 23:09:26 +02:00
Ruud 5df14d67e1 One up 2014-07-17 22:28:32 +02:00
Ruud 73abd1f022 Merge branch 'refs/heads/master' into desktop 2014-07-17 22:27:23 +02:00
Ruud e75a8529c9 Try fix migration failure from 2.5.1 2014-07-17 22:26:23 +02:00
Ruud 07a7f8cbcf Change fanart api url 2014-07-16 10:32:02 +02:00
Ruud 9b35a0fb20 Only trigger onClose when it's set 2014-07-08 21:21:22 +02:00
Ruud 0622e6e5ab One up 2014-06-29 23:16:09 +02:00
Ruud f16931906f Don't remove pyc files when using desktop updater 2014-06-29 23:15:36 +02:00
Ruud 68dcba8853 One up 2/2 2014-06-29 21:56:51 +02:00
Ruud ae8f66df1a Exit main loop on crash 2014-06-29 21:56:39 +02:00
Ruud 5237ead5cb Merge branch 'refs/heads/develop' into desktop 2014-06-29 17:01:47 +02:00
Ruud 45b2dff6d2 Merge branch 'refs/heads/develop' 2014-06-29 11:01:09 +02:00
Ruud 30d56b5d2c Merge branch 'refs/heads/develop' 2014-06-29 00:02:55 +02:00
Ruud 5ff6824ae9 Merge branch 'refs/heads/develop' 2014-06-25 18:26:10 +02:00
Ruud 0210859155 Merge branch 'refs/heads/develop' 2014-06-25 09:17:12 +02:00
Ruud 665478db13 Merge branch 'refs/heads/develop' 2014-06-23 23:45:03 +02:00
Ruud 84c366ab54 Merge branch 'master' of github.com:RuudBurger/CouchPotatoServer 2014-06-23 20:47:30 +02:00
Ruud 908e5eae77 Merge branch 'refs/heads/develop' 2014-06-23 20:47:06 +02:00
Ruud c4aaa10308 One up 2014-06-23 20:00:06 +02:00
Ruud d10536a829 Remove path from getOptions 2014-06-23 20:00:00 +02:00
Ruud 1e7fa82e11 Merge branch 'refs/heads/develop' into desktop 2014-06-23 19:01:58 +02:00
Ruud 1d448f3d9c Merge branch 'refs/heads/develop' 2014-06-23 14:29:20 +02:00
Ruud 338b5f427a Merge branch 'refs/heads/develop' 2014-06-23 13:37:50 +02:00
Ruud 59e3e73c4c Merge branch 'refs/heads/develop' 2014-06-23 01:19:05 +02:00
Ruud cb2614127c Merge branch 'refs/heads/develop' 2014-06-22 21:14:44 +02:00
Ruud fdbd826917 Merge branch 'refs/heads/develop' 2014-06-22 20:35:30 +02:00
Ruud 31daf4915e Merge branch 'refs/heads/develop' 2014-06-20 21:31:48 +02:00
Ruud 4ca7691afd Merge branch 'refs/heads/develop' 2014-06-20 21:08:33 +02:00
Ruud 64d3ecd9b8 Merge branch 'refs/heads/develop' 2014-06-20 14:52:15 +02:00
Ruud d55df3240f Merge branch 'refs/heads/develop' 2014-06-20 14:14:26 +02:00
Ruud 52214e4938 Merge branch 'refs/heads/develop' 2014-06-20 12:22:13 +02:00
Ruud b45307e493 Merge branch 'refs/heads/develop' 2014-06-11 23:51:05 +02:00
Ruud 4320369448 Merge branch 'refs/heads/develop' 2014-06-11 10:15:31 +02:00
Ruud f560dc093c Merge branch 'refs/heads/develop' 2014-06-10 22:54:14 +02:00
Ruud d26a2b1480 Merge branch 'refs/heads/develop' 2014-06-07 20:44:49 +02:00
Ruud e11b07b559 Don't save profile order twice 2014-06-06 17:26:45 +02:00
Ruud b6ee8ef4d4 Merge branch 'refs/heads/develop' 2014-06-06 11:24:24 +02:00
Ruud f80559d380 Merge branch 'refs/heads/develop' 2014-06-03 22:31:20 +02:00
Ruud 8530b00e7b Merge branch 'refs/heads/develop' 2014-06-03 17:18:11 +02:00
Ruud 5851e1e69f Merge branch 'refs/heads/develop' 2014-06-02 23:51:01 +02:00
Ruud 686bfd62eb Merge branch 'refs/heads/develop' 2014-06-02 15:10:29 +02:00
Ruud 9b82603c26 Merge branch 'refs/heads/develop' 2014-06-02 14:20:50 +02:00
Ruud f41792915f Merge branch 'refs/heads/develop' 2014-06-02 12:59:47 +02:00
Ruud 2fa77fb610 Merge branch 'refs/heads/develop' 2014-06-02 10:40:07 +02:00
Ruud e64d0e33fc Merge branch 'refs/heads/develop' 2014-06-01 14:31:39 +02:00
Ruud b168643600 Merge branch 'refs/heads/develop'
Conflicts:
	couchpotato/core/helpers/variable.py
2014-05-31 22:50:02 +02:00
Ruud 240283405e variable 'year' referenced before assignment 2014-05-07 11:50:36 +02:00
Ruud b69f8b7ed5 Files not properly send to sabnzbd 2014-03-19 22:33:14 +01:00
Ruud fbccba77a7 64Bit installer setup 2014-03-16 13:00:09 +01:00
Ruud d3efda74b2 One up 2014-03-16 09:44:44 +01:00
Ruud 66b849cb29 Merge branch 'refs/heads/master' into desktop
Conflicts:
	version.py
2014-03-16 09:43:32 +01:00
Ruud b19f98ef5b Merge branch 'refs/heads/develop' 2014-03-15 12:35:28 +01:00
Ruud c389790cf2 Merge branch 'refs/heads/develop' 2014-03-03 22:19:29 +01:00
Ruud d7445dfa80 Merge branch 'refs/heads/develop' 2014-02-26 14:00:56 +01:00
Ruud 36782768a4 Merge branch 'refs/heads/develop' 2014-02-25 21:37:29 +01:00
Ruud 2c9d487614 Update build url 2014-02-25 21:20:59 +01:00
Ruud b9a724c8bb Merge branch 'refs/heads/develop' 2014-02-16 09:43:03 +01:00
Ruud 68d826ca1c Merge branch 'refs/heads/develop' 2014-02-15 19:48:07 +01:00
Ruud d6921882e1 Merge branch 'refs/heads/develop' 2014-02-14 19:39:47 +01:00
Ruud 2cfff73486 Merge branch 'refs/heads/develop' 2014-01-18 19:54:32 +01:00
Ruud 0c7dda8d44 Merge branch 'refs/heads/develop' 2014-01-17 23:17:41 +01:00
Ruud dbaa377770 version.master 2014-01-17 16:29:29 +01:00
Ruud 47d2b81d1c Merge branch 'refs/heads/develop' 2014-01-17 16:28:59 +01:00
Ruud f79fcda27f Small one up 2013-11-17 21:22:24 +01:00
Ruud cdbcad2238 Merge branch 'refs/heads/develop' into desktop 2013-11-17 21:20:30 +01:00
Ruud 5d913e87c3 One up! 2013-11-17 20:20:18 +01:00
Ruud 16f02bda27 Merge branch 'refs/heads/develop' into desktop 2013-11-17 20:03:22 +01:00
Ruud 8d108b92bf One Up 2013-09-23 21:48:12 +02:00
Ruud 46783028b1 Merge branch 'refs/heads/develop' into desktop 2013-09-23 21:36:45 +02:00
Ruud d08c7c57a8 One up! 2013-09-20 17:46:54 +02:00
Ruud eeeb845ef3 Simplify string before checking on imdb 2013-09-20 17:30:11 +02:00
Ruud 651a063f94 Fix about submenu 2013-09-20 16:33:01 +02:00
Ruud f20aaa2d9d Hide IE clear button on search 2013-09-20 16:23:42 +02:00
Ruud ba925ec191 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	couchpotato/core/plugins/suggestion/main.py
2013-09-20 16:12:40 +02:00
Ruud 3b7376fd18 One up 2013-07-06 01:01:26 +02:00
Ruud c31b10c798 Ignore current suggested results 2013-07-06 00:49:11 +02:00
Ruud acda664686 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2013-07-05 22:43:54 +02:00
Ruud e2852407ea One up 2013-06-03 22:22:44 +02:00
Ruud 88e738c6cd Don't show double updater name 2013-06-03 22:22:35 +02:00
Ruud eaae8bdb0b Merge branch 'refs/heads/develop' into desktop 2013-06-03 22:00:21 +02:00
Ruud 821f68909d One up 2013-05-05 21:19:10 +02:00
Ruud 2b8dfed475 Merge branch 'refs/heads/master' into desktop
Conflicts:
	version.py
2013-05-05 20:31:28 +02:00
Ruud 607b5ea766 Run exe after install 2013-03-19 21:22:07 +01:00
Ruud 88579cd71a One up 2013-03-19 20:52:07 +01:00
Ruud 6c57316ce6 Use https for changelog 2013-03-19 20:46:00 +01:00
Ruud 6702683da3 Merge branch 'refs/heads/develop' into desktop 2013-03-19 20:34:38 +01:00
Ruud 1ed58586a1 Force install install in AppData
Add images to installer
2013-03-18 23:56:54 +01:00
Ruud f08ccd4fd8 One up installer 2013-03-17 22:34:04 +01:00
Ruud 312562a9f5 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2013-03-17 16:42:53 +01:00
Ruud 9e260a89af One up 2013-01-26 14:51:39 +01:00
Ruud d233e4d22e Merge branch 'refs/heads/develop' into desktop 2013-01-26 13:54:56 +01:00
Ruud 23893dbcb9 Merge branch 'refs/heads/develop' into desktop 2013-01-25 20:13:58 +01:00
Ruud 506871b506 One up 2013-01-23 23:10:55 +01:00
Ruud 6115917660 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2013-01-23 22:57:07 +01:00
Ruud 21df8819d3 Merge branch 'refs/heads/develop' into desktop 2013-01-23 22:55:09 +01:00
Ruud fb3f3e11f6 Merge branch 'refs/heads/develop' into desktop 2013-01-22 21:40:40 +01:00
Ruud 178c8942c3 Merge branch 'refs/heads/develop' into desktop 2013-01-14 19:54:22 +01:00
Ruud 51e747049d One up 2013-01-07 23:10:42 +01:00
Ruud 0582f7d694 Urlencode spotweb id. fix #1213 2013-01-07 23:10:06 +01:00
Ruud fa7cac7538 Merge branch 'refs/heads/develop' into desktop 2013-01-07 22:41:55 +01:00
Ruud 9a314cfbc4 One up 2012-12-29 00:03:45 +01:00
Ruud 5941d0bf77 Add version to update url 2012-12-29 00:03:36 +01:00
Ruud d326c1c25c Merge branch 'refs/heads/master' into desktop
Conflicts:
	version.py
2012-12-28 23:31:08 +01:00
Ruud 96472a9a8f One up 2012-12-16 23:51:58 +01:00
Ruud 27252561e2 Merge branch 'refs/heads/develop' into desktop 2012-12-16 23:51:24 +01:00
Ruud c9e732651f One up 2012-12-01 12:16:58 +01:00
Ruud 7849e7170d Uninstall only create files, no wildcard *.* 2012-12-01 12:16:51 +01:00
Ruud 087894eb4e Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2012-12-01 11:50:08 +01:00
Ruud 25f1b8c7a7 Fedora init fix #1009 2012-11-02 18:32:15 +01:00
Ruud e71da1f14d Use proper description for binary build. fix #1005 2012-11-02 18:24:13 +01:00
Ruud 938b14ba18 One up installer 2012-10-29 20:45:17 +01:00
Ruud d6522d8f38 One up installer 2012-10-27 18:49:44 +02:00
Ruud 78eab890e7 Merge branch 'refs/heads/develop' into desktop 2012-10-27 18:25:36 +02:00
Ruud 1a56191f83 Don't unzip 2012-10-27 18:22:50 +02:00
Ruud 41c0f34d95 Properly restart 2012-10-27 18:22:40 +02:00
Ruud 37bf205d7a Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2012-10-27 11:56:57 +02:00
Ruud aa1fa3eb9a Add description 2012-09-19 15:42:33 +02:00
Ruud 0e2f8a612c Extract zip after build, for testing 2012-09-19 15:29:07 +02:00
Ruud 465e7b2abc Merge branch 'refs/heads/develop' into desktop 2012-09-16 12:36:17 +02:00
Ruud 578fb45785 Installer 1 up 2012-09-16 11:35:56 +02:00
Ruud 96995bbbe5 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2012-09-16 10:45:19 +02:00
Ruud 4cfdafebbc Merge branch 'refs/heads/develop' into desktop 2012-09-14 13:15:47 +02:00
Ruud b97acb8ef5 Merge branch 'refs/heads/develop' into desktop 2012-09-14 13:08:19 +02:00
Ruud d68d2dfdb6 Updated installer 2012-09-09 21:48:38 +02:00
Ruud 39b269a454 Merge branch 'refs/heads/develop' into desktop 2012-09-09 17:32:47 +02:00
Ruud ac081d3e10 Getting ready for build 2012-09-09 17:28:23 +02:00
Ruud 5d4efb60cf Merge branch 'refs/heads/develop' into desktop 2012-09-08 16:01:49 +02:00
Ruud cc408b980c Merge branch 'refs/heads/develop' into desktop
Conflicts:
	couchpotato/core/_base/updater/main.py
2012-08-05 16:18:35 +02:00
Ruud 59590b3ac9 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	couchpotato/core/_base/updater/main.py
2012-07-14 00:35:00 +02:00
Ruud ff759dacf3 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	couchpotato/core/_base/updater/main.py
2012-07-11 22:43:45 +02:00
Ruud a328e44130 Merge branch 'desktop' of github.com:RuudBurger/CouchPotatoServer into desktop 2012-05-15 23:23:56 +02:00
Ruud 7924cac5f9 Update installer version 2012-05-15 23:21:24 +02:00
Ruud 1cef3b0c93 remove --nogit tag 2012-05-15 23:21:24 +02:00
Ruud 3cd59edc8b Import errors
File icon
2012-05-15 23:21:24 +02:00
Ruud 0d624af01d Working PNG 2012-05-15 23:21:24 +02:00
Ruud a09132570c Change branch to desktop 2012-05-15 23:21:14 +02:00
Ruud ee3fc38432 Better setup 2012-05-15 23:21:14 +02:00
Ruud dbf0192c8e Inno setup, start 2012-05-15 23:21:14 +02:00
Ruud 6962cfc3f5 new Desktop runner 2012-05-15 23:21:14 +02:00
Ruud e096ec3b5b Desktop files 2012-05-15 23:20:05 +02:00
Ruud b30a74ae0c Merge branch 'refs/heads/develop' into desktop 2012-05-15 23:15:17 +02:00
Ruud 978eeb16c9 Update installer version 2012-05-15 23:14:20 +02:00
Ruud e5c9d91657 Merge branch 'refs/heads/develop' into desktop 2012-05-15 22:27:22 +02:00
Ruud fa81c3a07a Merge branch 'refs/heads/develop' into desktop
Conflicts:
	version.py
2012-05-14 22:00:02 +02:00
Ruud 9cdd520d41 Merge branch 'refs/heads/develop' into desktop 2012-05-14 20:22:55 +02:00
Ruud 55d7898771 Merge branch 'refs/heads/develop' into desktop 2012-05-13 12:56:45 +02:00
Ruud b8256bef97 Merge branch 'refs/heads/develop' into desktop 2012-05-12 00:35:52 +02:00
Ruud 5be9dc0b4a Merge branch 'refs/heads/develop' into desktop 2012-05-09 22:20:53 +02:00
Ruud 7d0be0cefb remove --nogit tag 2012-05-07 22:55:54 +02:00
Ruud f7ce1edb13 Merge branch 'refs/heads/develop' into desktop 2012-05-07 22:44:01 +02:00
Ruud 5ad9280b60 Merge branch 'refs/heads/develop' into desktop 2012-05-07 22:27:55 +02:00
Ruud 2b353f1b20 Merge branch 'refs/heads/develop' into desktop 2012-05-04 17:29:15 +02:00
Ruud 75ab90b87b Merge branch 'refs/heads/develop' into desktop 2012-05-02 21:40:19 +02:00
Ruud 0219296120 Import errors
File icon
2012-05-02 21:34:45 +02:00
Ruud 20032b3a31 Working PNG 2012-05-01 07:35:44 +02:00
Ruud ea9e9a8c90 Updater base 2012-05-01 07:35:27 +02:00
Ruud f7b0ee145b Change branch to desktop 2012-04-30 21:37:04 +02:00
Ruud cc866738ee Merge branch 'refs/heads/develop' into desktop 2012-04-30 21:32:56 +02:00
Ruud eadccf6e33 Merge branch 'refs/heads/develop' into desktop 2012-04-29 00:00:25 +02:00
Ruud b70b66e567 Merge branch 'refs/heads/develop' into desktop 2012-04-28 23:14:59 +02:00
Ruud 5b6792dc20 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	CouchPotato.py
	couchpotato/core/plugins/renamer/main.py
	couchpotato/core/plugins/trailer/__init__.py
2012-04-07 21:35:36 +02:00
Ruud f498e7343a Better setup 2012-02-25 01:48:58 +01:00
Ruud 6962f441e6 Inno setup, start 2012-02-21 18:50:34 +01:00
Ruud 1def62b1b1 new Desktop runner 2012-02-19 17:13:37 +01:00
Ruud a4a4a6a185 Merge branch 'refs/heads/develop' into desktop
Conflicts:
	CouchPotato.py
2012-02-19 13:14:56 +01:00
Ruud d4c9469c1a Remove nfo when not renaming as .orig.nfo 2012-02-19 12:53:55 +01:00
Ruud 3e2d4c5d7b Initial trailer support 2012-02-19 12:48:54 +01:00
Ruud d03f711d69 kwargs in file.download for urlopen 2012-02-19 12:45:22 +01:00
Ruud 44dd8d9b96 Merge lists, not overwrite 2012-02-19 12:37:25 +01:00
Ruud 549a3be0d8 Merge branch 'refs/heads/develop' into desktop 2012-02-12 00:10:56 +01:00
Ruud 1bb2edf8ec Merge branch 'refs/heads/develop' into desktop 2012-02-11 23:33:14 +01:00
Ruud 84c6f36315 Desktop files 2012-02-11 23:06:14 +01:00
293 changed files with 16150 additions and 10046 deletions
+1
View File
@@ -10,6 +10,7 @@ import socket
import subprocess import subprocess
import sys import sys
import traceback import traceback
import time
# Root path # Root path
base_path = dirname(os.path.abspath(__file__)) base_path = dirname(os.path.abspath(__file__))
+235
View File
@@ -0,0 +1,235 @@
# CouchPotato desktop (wx) entry module: resolves the application base path
# for both frozen (packaged) and from-source runs, and puts the bundled
# 'libs' directory on sys.path before importing couchpotato itself.
from esky.util import appdir_from_executable #@UnresolvedImport
from threading import Thread
from version import VERSION
from wx.lib.softwareupdate import SoftwareUpdate
import os
import sys
import time
import webbrowser
import wx

# Include proper dirs
if hasattr(sys, 'frozen'):
    # Frozen build: locate the app root relative to the bundled 'libs'
    # package rather than this file (which lives inside the archive).
    import libs
    base_path = os.path.dirname(os.path.dirname(os.path.abspath(libs.__file__)))
else:
    # Running from source: the app root is this file's directory.
    base_path = os.path.dirname(os.path.abspath(__file__))

lib_dir = os.path.join(base_path, 'libs')

# Make the app root and bundled third-party libs importable before the
# couchpotato import below.
sys.path.insert(0, base_path)
sys.path.insert(0, lib_dir)

from couchpotato.environment import Env
class TaskBarIcon(wx.TaskBarIcon):
    """System-tray icon with a small popup menu (Open / About / Quit).

    The menu entries start disabled and the Open entry shows 'Loading...'
    until enable() is called once the application reports it has loaded.
    """

    # Menu item ids; TBMENU_EXIT reuses the stock wx exit id.
    TBMENU_OPEN = wx.NewId()
    TBMENU_SETTINGS = wx.NewId()
    TBMENU_EXIT = wx.ID_EXIT

    closed = False    # guards onTaskBarClose against running twice
    menu = False      # lazily created wx.Menu (False until first popup)
    enabled = False   # becomes True once enable() has been called

    def __init__(self, frame):
        wx.TaskBarIcon.__init__(self)
        self.frame = frame

        icon = wx.Icon('icon.png', wx.BITMAP_TYPE_PNG)
        self.SetIcon(icon)

        # Both left and right click pop up the same menu.
        self.Bind(wx.EVT_TASKBAR_LEFT_UP, self.OnTaskBarClick)
        self.Bind(wx.EVT_TASKBAR_RIGHT_UP, self.OnTaskBarClick)
        self.Bind(wx.EVT_MENU, self.onOpen, id = self.TBMENU_OPEN)
        self.Bind(wx.EVT_MENU, self.onSettings, id = self.TBMENU_SETTINGS)
        self.Bind(wx.EVT_MENU, self.onTaskBarClose, id = self.TBMENU_EXIT)

    def OnTaskBarClick(self, evt):
        # Show the popup menu at the cursor position.
        # NOTE(review): CreatePopupMenu caches the menu in self.menu, but it
        # is Destroy()ed right after popping up while self.menu still
        # references it -- confirm wx tolerates re-showing it on next click.
        menu = self.CreatePopupMenu()
        self.PopupMenu(menu)
        menu.Destroy()

    def enable(self):
        # Called once the app has loaded: activate the menu entries.
        self.enabled = True

        if self.menu:
            self.open_menu.Enable(True)
            self.setting_menu.Enable(True)
            self.open_menu.SetText('Open')

    def CreatePopupMenu(self):
        # wx calls this to build the tray menu; build once and reuse.
        if not self.menu:
            self.menu = wx.Menu()
            self.open_menu = self.menu.Append(self.TBMENU_OPEN, 'Open')
            self.setting_menu = self.menu.Append(self.TBMENU_SETTINGS, 'About')
            self.exit_menu = self.menu.Append(self.TBMENU_EXIT, 'Quit')

            # Not loaded yet: grey out the entries and show a loading label.
            if not self.enabled:
                self.open_menu.Enable(False)
                self.setting_menu.Enable(False)
                self.open_menu.SetText('Loading...')

        return self.menu

    def onOpen(self, event):
        # Open the web interface in the default browser.
        url = self.frame.parent.getSetting('base_url')
        webbrowser.open(url)

    def onSettings(self, event):
        # Open the about page of the web interface in the default browser.
        url = self.frame.parent.getSetting('base_url') + 'settings/about/'
        webbrowser.open(url)

    def onTaskBarClose(self, evt):
        # Idempotent shutdown: remove the tray icon and close the frame.
        if self.closed:
            return

        self.closed = True
        self.RemoveIcon()
        wx.CallAfter(self.frame.Close)

    def makeIcon(self, img):
        # Scale the image to the platform's expected tray icon size.
        if "wxMSW" in wx.PlatformInfo:
            img = img.Scale(16, 16)
        elif "wxGTK" in wx.PlatformInfo:
            img = img.Scale(22, 22)
        # NOTE(review): img.CopyFromBitmap() is called without an argument;
        # converting a wx.Image is normally ConvertToBitmap(). Confirm this
        # method is actually called anywhere.
        icon = wx.IconFromBitmap(img.CopyFromBitmap())
        return icon
class MainFrame(wx.Frame):
    """Top-level frame (kept out of the taskbar) that owns the tray icon.

    It gives the application a window to bind EVT_CLOSE on and acts as the
    parent/owner of the TaskBarIcon.
    """

    def __init__(self, parent):
        wx.Frame.__init__(self, None, style = wx.FRAME_NO_TASKBAR)

        self.parent = parent            # the CouchPotatoApp instance
        self.tbicon = TaskBarIcon(self)
class WorkerThread(Thread):
    """Daemon thread that runs the CouchPotato server.

    Starts itself on construction; when runCouchPotato returns (or crashes)
    it closes the desktop frame and exits the wx main loop.
    """

    def __init__(self, desktop):
        Thread.__init__(self)
        self.daemon = True            # don't block interpreter exit
        self._desktop = desktop       # the CouchPotatoApp instance
        self.start()                  # thread starts running immediately

    def run(self):
        # Get options via arg
        from couchpotato.runner import getOptions
        args = ['--quiet']
        self.options = getOptions(args)

        # Load settings
        settings = Env.get('settings')
        settings.setFile(self.options.config_file)

        # Create data dir if needed; fall back to the platform default
        # location when no data_dir is configured.
        self.data_dir = os.path.expanduser(Env.setting('data_dir'))
        if self.data_dir == '':
            from couchpotato.core.helpers.variable import getDataDir
            self.data_dir = getDataDir()

        if not os.path.isdir(self.data_dir):
            os.makedirs(self.data_dir)

        # Create logging dir
        self.log_dir = os.path.join(self.data_dir, 'logs');
        if not os.path.isdir(self.log_dir):
            os.mkdir(self.log_dir)

        try:
            from couchpotato.runner import runCouchPotato
            # Blocks for the lifetime of the server.
            runCouchPotato(self.options, base_path, args, data_dir = self.data_dir, log_dir = self.log_dir, Env = Env, desktop = self._desktop)
        except:
            # Swallow any crash so the desktop shutdown below still runs.
            pass

        # Server stopped: tear down the GUI and leave the wx main loop.
        self._desktop.frame.Close()
        self._desktop.ExitMainLoop()
class CouchPotatoApp(wx.App, SoftwareUpdate):
    """wx application wrapper around the CouchPotato server.

    Runs the server in a background WorkerThread, shows a tray icon via
    MainFrame, and wires up the SoftwareUpdate auto-updater.
    """

    settings = {}              # settings pushed in via setSettings()
    events = {}                # named callbacks, e.g. 'onClose'
    restart = False            # set by afterShutdown(); read by __main__
    closing = False            # guards onClose against re-entry
    triggered_onClose = False  # ensures the 'onClose' event fires only once

    def OnInit(self):
        # Updater: point SoftwareUpdate at the update feed for this VERSION.
        base_url = 'https://api.couchpota.to/updates/%s'
        self.InitUpdates(base_url % VERSION + '/', 'https://couchpota.to/updates/%s' % 'changelog.html',
            icon = wx.Icon('icon.png'))

        self.frame = MainFrame(self)
        self.frame.Bind(wx.EVT_CLOSE, self.onClose)

        # CouchPotato thread (starts itself on construction)
        self.worker = WorkerThread(self)

        return True

    def onAppLoad(self):
        # Called once the server is up: enable the tray menu entries.
        self.frame.tbicon.enable()

    def setSettings(self, settings = None):
        # Default was a shared mutable {} (classic Python pitfall); use a
        # None sentinel instead. Passing no argument behaves as before.
        self.settings = settings if settings is not None else {}

    def getSetting(self, name):
        # Returns None for unknown settings.
        return self.settings.get(name)

    def addEvents(self, events = None):
        # Default was a shared mutable {}; use a None sentinel instead.
        # Iterating the dict directly replaces the Python-2-only iterkeys().
        for name in (events or {}):
            self.events[name] = events[name]

    def onClose(self, event):
        if not self.closing:
            # First close request: tear down the tray icon / frame once.
            self.closing = True
            self.frame.tbicon.onTaskBarClose(event)

        # Fire the registered 'onClose' callback exactly once, if any.
        onClose = self.events.get('onClose')
        if onClose and not self.triggered_onClose:
            self.triggered_onClose = True
            onClose(event)

    def afterShutdown(self, restart = False):
        # Called after shutdown completes; remember whether to relaunch
        # (checked by the __main__ block after MainLoop returns).
        self.frame.Destroy()
        self.restart = restart
        self.ExitMainLoop()
if __name__ == '__main__':
    app = CouchPotatoApp(redirect = False)
    app.MainLoop()

    # Give background threads a moment to wind down before relaunching.
    time.sleep(1)
    if app.restart:

        def appexe_from_executable(exepath):
            # Map the running executable to the app's main executable path
            # (esky layout); on OS X descend into Contents/MacOS when present.
            appdir = appdir_from_executable(exepath)
            exename = os.path.basename(exepath)

            if sys.platform == "darwin":
                if os.path.isdir(os.path.join(appdir, "Contents", "MacOS")):
                    return os.path.join(appdir, "Contents", "MacOS", exename)
            return os.path.join(appdir, exename)

        exe = appexe_from_executable(sys.executable)
        os.chdir(os.path.dirname(exe))
        # Replace the current process with a fresh instance (restart).
        os.execv(exe, [exe] + sys.argv[1:])
+7 -13
View File
@@ -29,25 +29,19 @@ OS X:
* Then do `python CouchPotatoServer/CouchPotato.py` * Then do `python CouchPotatoServer/CouchPotato.py`
* Your browser should open up, but if it doesn't go to `http://localhost:5050/` * Your browser should open up, but if it doesn't go to `http://localhost:5050/`
Linux: Linux (Ubuntu / Debian):
* (Ubuntu / Debian) Install [GIT](http://git-scm.com/) with `apt-get install git-core` * Install [GIT](http://git-scm.com/) with `apt-get install git-core`
* (Fedora / CentOS) Install [GIT](http://git-scm.com/) with `yum install git`
* 'cd' to the folder of your choosing. * 'cd' to the folder of your choosing.
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git` * Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
* Then do `python CouchPotatoServer/CouchPotato.py` to start * Then do `python CouchPotatoServer/CouchPotato.py` to start
* (Ubuntu / Debian) To run on boot copy the init script `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato` * To run on boot copy the init script `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
* (Ubuntu / Debian) Copy the default paths file `sudo cp CouchPotatoServer/init/ubuntu.default /etc/default/couchpotato` * Copy the default paths file `sudo cp CouchPotatoServer/init/ubuntu.default /etc/default/couchpotato`
* (Ubuntu / Debian) Change the paths inside the default file `sudo nano /etc/default/couchpotato` * Change the paths inside the default file `sudo nano /etc/default/couchpotato`
* (Ubuntu / Debian) Make it executable `sudo chmod +x /etc/init.d/couchpotato` * Make it executable `sudo chmod +x /etc/init.d/couchpotato`
* (Ubuntu / Debian) Add it to defaults `sudo update-rc.d couchpotato defaults` * Add it to defaults `sudo update-rc.d couchpotato defaults`
* (systemd) To run on boot copy the systemd config `sudo cp CouchPotatoServer/init/couchpotato.fedora.service /etc/systemd/system/couchpotato.service`
* (systemd) Update the systemd config file with your user and path to CouchPotato.py
* (systemd) Enable it at boot with `sudo systemctl enable couchpotato`
* Open your browser and go to `http://localhost:5050/` * Open your browser and go to `http://localhost:5050/`
Docker:
* You can use [razorgirl's Dockerfile](https://github.com/razorgirl/docker-couchpotato) to quickly build your own isolated app container. It's based on the Linux instructions above. For more info about Docker check out the [official website](https://www.docker.com).
FreeBSD : FreeBSD :
+1 -5
View File
@@ -13,8 +13,6 @@ Lastly, for anything related to CouchPotato, feel free to stop by the [forum](ht
## Issues ## Issues
Issues are intended for reporting bugs and weird behaviour or suggesting improvements to CouchPotatoServer. Issues are intended for reporting bugs and weird behaviour or suggesting improvements to CouchPotatoServer.
Before you submit an issue, please go through the following checklist: Before you submit an issue, please go through the following checklist:
* **FILL IN ALL THE FIELDS ASKED FOR**
* **POST MORE THAN A SINGLE LINE LOG**, if you do, you'd better have a easy reproducable bug
* Search through existing issues (*including closed issues!*) first: you might be able to get your answer there. * Search through existing issues (*including closed issues!*) first: you might be able to get your answer there.
* Double check your issue manually, because it could be an external issue. * Double check your issue manually, because it could be an external issue.
* Post logs with your issue: Without seeing what is going on, the developers can't reproduce the error. * Post logs with your issue: Without seeing what is going on, the developers can't reproduce the error.
@@ -27,14 +25,12 @@ Before you submit an issue, please go through the following checklist:
* What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed than when you use CP on OS X or Windows. * What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed than when you use CP on OS X or Windows.
* Your issue might be marked with the "can't reproduce" tag. Don't ask why your issue was closed if it says so in the tag. * Your issue might be marked with the "can't reproduce" tag. Don't ask why your issue was closed if it says so in the tag.
* If you're running on a NAS (QNAP, Austor, Synology etc.) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else! * If you're running on a NAS (QNAP, Austor, Synology etc.) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!
* Do not "bump" issues with "Any updates on this" or whatever. Yes I've seen it, you don't have to remind me of it. There will be an update when the code is done or I need information. If you feel the need to do so, you'd better have more info on the issue.
The more relevant information you provide, the more likely that your issue will be resolved. The more relevant information you provide, the more likely that your issue will be resolved.
If you don't follow any of the checks above, I'll close the issue. If you are wondering why (and ask) I'll block you from posting new issues and the repo.
## Pull Requests ## Pull Requests
Pull requests are intended for contributing code or documentation to the project. Before you submit a pull request, consider the following: Pull requests are intended for contributing code or documentation to the project. Before you submit a pull request, consider the following:
* Make sure your pull request is made for the *develop* branch (or relevant feature branch). * Make sure your pull request is made for the *develop* branch (or relevant feature branch).
* Have you tested your PR? If not, why? * Have you tested your PR? If not, why?
* Does your PR have any limitations I should know of? * Does your PR have any limitations we should know of?
* Is your PR up-to-date with the branch you're trying to push into? * Is your PR up-to-date with the branch you're trying to push into?
-9
View File
@@ -40,8 +40,6 @@ class WebHandler(BaseHandler):
return return
try: try:
if route == 'robots.txt':
self.set_header('Content-Type', 'text/plain')
self.write(views[route]()) self.write(views[route]())
except: except:
log.error("Failed doing web request '%s': %s", (route, traceback.format_exc())) log.error("Failed doing web request '%s': %s", (route, traceback.format_exc()))
@@ -62,13 +60,6 @@ def index():
addView('', index) addView('', index)
# Web view
def robots():
return 'User-agent: * \n' \
'Disallow: /'
addView('robots.txt', robots)
# API docs # API docs
def apiDocs(): def apiDocs():
routes = list(api.keys()) routes = list(api.keys())
+22 -28
View File
@@ -7,7 +7,6 @@ import urllib
from couchpotato.core.helpers.request import getParams from couchpotato.core.helpers.request import getParams
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, asynchronous from tornado.web import RequestHandler, asynchronous
@@ -51,22 +50,24 @@ class NonBlockHandler(RequestHandler):
start, stop = api_nonblock[route] start, stop = api_nonblock[route]
self.stopper = stop self.stopper = stop
start(self.sendData, last_id = self.get_argument('last_id', None)) start(self.onNewMessage, last_id = self.get_argument('last_id', None))
def sendData(self, response): def onNewMessage(self, response):
if not self.request.connection.stream.closed(): if self.request.connection.stream.closed():
try: self.on_connection_close()
self.finish(response) return
except:
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
self.removeStopper() try:
self.finish(response)
except:
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
def on_connection_close(self):
def removeStopper(self):
if self.stopper: if self.stopper:
self.stopper(self.sendData) self.stopper(self.onNewMessage)
self.stopper = None self.stopper = None
@@ -82,11 +83,10 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
# Blocking API handler # Blocking API handler
class ApiHandler(RequestHandler): class ApiHandler(RequestHandler):
route = None
@asynchronous @asynchronous
def get(self, route, *args, **kwargs): def get(self, route, *args, **kwargs):
self.route = route = route.strip('/') route = route.strip('/')
if not api.get(route): if not api.get(route):
self.write('API call doesn\'t seem to exist') self.write('API call doesn\'t seem to exist')
self.finish() self.finish()
@@ -123,15 +123,11 @@ class ApiHandler(RequestHandler):
except: except:
log.error('Failed write error "%s": %s', (route, traceback.format_exc())) log.error('Failed write error "%s": %s', (route, traceback.format_exc()))
self.unlock() api_locks[route].release()
post = get post = get
def taskFinished(self, result, route): def taskFinished(self, result, route):
IOLoop.current().add_callback(self.sendData, result, route)
self.unlock()
def sendData(self, result, route):
if not self.request.connection.stream.closed(): if not self.request.connection.stream.closed():
try: try:
@@ -139,22 +135,20 @@ class ApiHandler(RequestHandler):
jsonp_callback = self.get_argument('callback_func', default = None) jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback: if jsonp_callback:
self.set_header('Content-Type', 'text/javascript') self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.finish(str(jsonp_callback) + '(' + json.dumps(result) + ')') self.set_header("Content-Type", "text/javascript")
self.finish()
elif isinstance(result, tuple) and result[0] == 'redirect': elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1]) self.redirect(result[1])
else: else:
self.finish(result) self.write(result)
except UnicodeDecodeError: self.finish()
log.error('Failed proper encode: %s', traceback.format_exc())
except: except:
log.debug('Failed doing request, probably already closed: %s', (traceback.format_exc())) log.debug('Failed doing request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'}) try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass except: pass
def unlock(self): api_locks[route].release()
try: api_locks[self.route].release()
except: pass
def addApiView(route, func, static = False, docs = None, **kwargs): def addApiView(route, func, static = False, docs = None, **kwargs):
+5 -5
View File
@@ -181,13 +181,13 @@ class Core(Plugin):
return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key')) return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key'))
def version(self): def version(self):
ver = fireEvent('updater.info', single = True) or {'version': {}} ver = fireEvent('updater.info', single = True)
if os.name == 'nt': platf = 'windows' if os.name == 'nt': platf = 'windows'
elif 'Darwin' in platform.platform(): platf = 'osx' elif 'Darwin' in platform.platform(): platf = 'osx'
else: platf = 'linux' else: platf = 'linux'
return '%s - %s-%s - v2' % (platf, ver.get('version').get('type') or 'unknown', ver.get('version').get('hash') or 'unknown') return '%s - %s-%s - v2' % (platf, ver.get('version')['type'], ver.get('version')['hash'])
def versionView(self, **kwargs): def versionView(self, **kwargs):
return { return {
@@ -286,13 +286,13 @@ config = [{
'name': 'permission_folder', 'name': 'permission_folder',
'default': '0755', 'default': '0755',
'label': 'Folder CHMOD', 'label': 'Folder CHMOD',
'description': 'Can be either decimal (493) or octal (leading zero: 0755). <a target="_blank" href="http://permissions-calculator.org/">Calculate the correct value</a>', 'description': 'Can be either decimal (493) or octal (leading zero: 0755)',
}, },
{ {
'name': 'permission_file', 'name': 'permission_file',
'default': '0644', 'default': '0755',
'label': 'File CHMOD', 'label': 'File CHMOD',
'description': 'See Folder CHMOD description, but for files', 'description': 'Same as Folder CHMOD but for files',
}, },
], ],
}, },
+8 -17
View File
@@ -205,28 +205,19 @@ class GitUpdater(BaseUpdater):
def getVersion(self): def getVersion(self):
if not self.version: if not self.version:
hash = None
date = None
branch = self.branch
try: try:
output = self.repo.getHead() # Yes, please output = self.repo.getHead() # Yes, please
log.debug('Git version output: %s', output.hash) log.debug('Git version output: %s', output.hash)
self.version = {
hash = output.hash[:8] 'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, self.repo.getCurrentBranch().name or self.branch, output.hash[:8], datetime.fromtimestamp(output.getDate())),
date = output.getDate() 'hash': output.hash[:8],
branch = self.repo.getCurrentBranch().name 'date': output.getDate(),
'type': 'git',
'branch': self.repo.getCurrentBranch().name
}
except Exception as e: except Exception as e:
log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e) log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e)
return 'No GIT'
self.version = {
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, branch, hash or 'unknown_hash', datetime.fromtimestamp(date) if date else 'unknown_date'),
'hash': hash,
'date': date,
'type': 'git',
'branch': branch
}
return self.version return self.version
+285 -317
View File
@@ -2,7 +2,6 @@ import json
import os import os
import time import time
import traceback import traceback
from sqlite3 import OperationalError
from CodernityDB.database import RecordNotFound from CodernityDB.database import RecordNotFound
from CodernityDB.index import IndexException, IndexNotFoundException, IndexConflict from CodernityDB.index import IndexException, IndexNotFoundException, IndexConflict
@@ -10,7 +9,7 @@ from couchpotato import CPLog
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode, sp from couchpotato.core.helpers.encoding import toUnicode, sp
from couchpotato.core.helpers.variable import getImdb, tryInt, randomString from couchpotato.core.helpers.variable import getImdb, tryInt
log = CPLog(__name__) log = CPLog(__name__)
@@ -33,7 +32,6 @@ class Database(object):
addEvent('database.setup.after', self.startup_compact) addEvent('database.setup.after', self.startup_compact)
addEvent('database.setup_index', self.setupIndex) addEvent('database.setup_index', self.setupIndex)
addEvent('database.delete_corrupted', self.deleteCorrupted)
addEvent('app.migrate', self.migrate) addEvent('app.migrate', self.migrate)
addEvent('app.after_shutdown', self.close) addEvent('app.after_shutdown', self.close)
@@ -149,17 +147,6 @@ class Database(object):
return results return results
def deleteCorrupted(self, _id, traceback_error = ''):
db = self.getDB()
try:
log.debug('Deleted corrupted document "%s": %s', (_id, traceback_error))
corrupted = db.get('id', _id, with_storage = False)
db._delete_id_index(corrupted.get('_id'), corrupted.get('_rev'), None)
except:
log.debug('Failed deleting corrupted: %s', traceback.format_exc())
def reindex(self, **kwargs): def reindex(self, **kwargs):
success = True success = True
@@ -312,328 +299,309 @@ class Database(object):
} }
migrate_data = {} migrate_data = {}
rename_old = False
try: c = conn.cursor()
c = conn.cursor() for ml in migrate_list:
migrate_data[ml] = {}
rows = migrate_list[ml]
for ml in migrate_list: try:
migrate_data[ml] = {} c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml))
rows = migrate_list[ml] except:
# ignore faulty destination_id database
try: if ml == 'category':
c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml)) migrate_data[ml] = {}
except:
# ignore faulty destination_id database
if ml == 'category':
migrate_data[ml] = {}
else:
rename_old = True
raise
for p in c.fetchall():
columns = {}
for row in migrate_list[ml]:
columns[row] = p[rows.index(row)]
if not migrate_data[ml].get(p[0]):
migrate_data[ml][p[0]] = columns
else:
if not isinstance(migrate_data[ml][p[0]], list):
migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
migrate_data[ml][p[0]].append(columns)
conn.close()
log.info('Getting data took %s', time.time() - migrate_start)
db = self.getDB()
if not db.opened:
return
# Use properties
properties = migrate_data['properties']
log.info('Importing %s properties', len(properties))
for x in properties:
property = properties[x]
Env.prop(property.get('identifier'), property.get('value'))
# Categories
categories = migrate_data.get('category', [])
log.info('Importing %s categories', len(categories))
category_link = {}
for x in categories:
c = categories[x]
new_c = db.insert({
'_t': 'category',
'order': c.get('order', 999),
'label': toUnicode(c.get('label', '')),
'ignored': toUnicode(c.get('ignored', '')),
'preferred': toUnicode(c.get('preferred', '')),
'required': toUnicode(c.get('required', '')),
'destination': toUnicode(c.get('destination', '')),
})
category_link[x] = new_c.get('_id')
# Profiles
log.info('Importing profiles')
new_profiles = db.all('profile', with_doc = True)
new_profiles_by_label = {}
for x in new_profiles:
# Remove default non core profiles
if not x['doc'].get('core'):
db.delete(x['doc'])
else: else:
new_profiles_by_label[x['doc']['label']] = x['_id'] raise
profiles = migrate_data['profile'] for p in c.fetchall():
profile_link = {} columns = {}
for x in profiles: for row in migrate_list[ml]:
p = profiles[x] columns[row] = p[rows.index(row)]
exists = new_profiles_by_label.get(p.get('label')) if not migrate_data[ml].get(p[0]):
migrate_data[ml][p[0]] = columns
# Update existing with order only
if exists and p.get('core'):
profile = db.get('id', exists)
profile['order'] = tryInt(p.get('order'))
profile['hide'] = p.get('hide') in [1, True, 'true', 'True']
db.update(profile)
profile_link[x] = profile.get('_id')
else: else:
if not isinstance(migrate_data[ml][p[0]], list):
migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
migrate_data[ml][p[0]].append(columns)
new_profile = { conn.close()
'_t': 'profile',
'label': p.get('label'), log.info('Getting data took %s', time.time() - migrate_start)
'order': int(p.get('order', 999)),
'core': p.get('core', False), db = self.getDB()
'qualities': [], if not db.opened:
'wait_for': [], return
'finish': []
# Use properties
properties = migrate_data['properties']
log.info('Importing %s properties', len(properties))
for x in properties:
property = properties[x]
Env.prop(property.get('identifier'), property.get('value'))
# Categories
categories = migrate_data.get('category', [])
log.info('Importing %s categories', len(categories))
category_link = {}
for x in categories:
c = categories[x]
new_c = db.insert({
'_t': 'category',
'order': c.get('order', 999),
'label': toUnicode(c.get('label', '')),
'ignored': toUnicode(c.get('ignored', '')),
'preferred': toUnicode(c.get('preferred', '')),
'required': toUnicode(c.get('required', '')),
'destination': toUnicode(c.get('destination', '')),
})
category_link[x] = new_c.get('_id')
# Profiles
log.info('Importing profiles')
new_profiles = db.all('profile', with_doc = True)
new_profiles_by_label = {}
for x in new_profiles:
# Remove default non core profiles
if not x['doc'].get('core'):
db.delete(x['doc'])
else:
new_profiles_by_label[x['doc']['label']] = x['_id']
profiles = migrate_data['profile']
profile_link = {}
for x in profiles:
p = profiles[x]
exists = new_profiles_by_label.get(p.get('label'))
# Update existing with order only
if exists and p.get('core'):
profile = db.get('id', exists)
profile['order'] = tryInt(p.get('order'))
profile['hide'] = p.get('hide') in [1, True, 'true', 'True']
db.update(profile)
profile_link[x] = profile.get('_id')
else:
new_profile = {
'_t': 'profile',
'label': p.get('label'),
'order': int(p.get('order', 999)),
'core': p.get('core', False),
'qualities': [],
'wait_for': [],
'finish': []
}
types = migrate_data['profiletype']
for profile_type in types:
p_type = types[profile_type]
if types[profile_type]['profile_id'] == p['id']:
if p_type['quality_id']:
new_profile['finish'].append(p_type['finish'])
new_profile['wait_for'].append(p_type['wait_for'])
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
if len(new_profile['qualities']) > 0:
new_profile.update(db.insert(new_profile))
profile_link[x] = new_profile.get('_id')
else:
log.error('Corrupt profile list for "%s", using default.', p.get('label'))
# Qualities
log.info('Importing quality sizes')
new_qualities = db.all('quality', with_doc = True)
new_qualities_by_identifier = {}
for x in new_qualities:
new_qualities_by_identifier[x['doc']['identifier']] = x['_id']
qualities = migrate_data['quality']
quality_link = {}
for x in qualities:
q = qualities[x]
q_id = new_qualities_by_identifier[q.get('identifier')]
quality = db.get('id', q_id)
quality['order'] = q.get('order')
quality['size_min'] = tryInt(q.get('size_min'))
quality['size_max'] = tryInt(q.get('size_max'))
db.update(quality)
quality_link[x] = quality
# Titles
titles = migrate_data['librarytitle']
titles_by_library = {}
for x in titles:
title = titles[x]
if title.get('default'):
titles_by_library[title.get('libraries_id')] = title.get('title')
# Releases
releaseinfos = migrate_data['releaseinfo']
for x in releaseinfos:
info = releaseinfos[x]
# Skip if release doesn't exist for this info
if not migrate_data['release'].get(info.get('release_id')):
continue
if not migrate_data['release'][info.get('release_id')].get('info'):
migrate_data['release'][info.get('release_id')]['info'] = {}
migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value')
releases = migrate_data['release']
releases_by_media = {}
for x in releases:
release = releases[x]
if not releases_by_media.get(release.get('movie_id')):
releases_by_media[release.get('movie_id')] = []
releases_by_media[release.get('movie_id')].append(release)
# Type ids
types = migrate_data['filetype']
type_by_id = {}
for t in types:
type = types[t]
type_by_id[type.get('id')] = type
# Media
log.info('Importing %s media items', len(migrate_data['movie']))
statuses = migrate_data['status']
libraries = migrate_data['library']
library_files = migrate_data['library_files__file_library']
releases_files = migrate_data['release_files__file_release']
all_files = migrate_data['file']
poster_type = migrate_data['filetype']['poster']
medias = migrate_data['movie']
for x in medias:
m = medias[x]
status = statuses.get(m['status_id']).get('identifier')
l = libraries.get(m['library_id'])
# Only migrate wanted movies, Skip if no identifier present
if not l or not getImdb(l.get('identifier')): continue
profile_id = profile_link.get(m['profile_id'])
category_id = category_link.get(m['category_id'])
title = titles_by_library.get(m['library_id'])
releases = releases_by_media.get(x, [])
info = json.loads(l.get('info', ''))
files = library_files.get(m['library_id'], [])
if not isinstance(files, list):
files = [files]
added_media = fireEvent('movie.add', {
'info': info,
'identifier': l.get('identifier'),
'profile_id': profile_id,
'category_id': category_id,
'title': title
}, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True)
if not added_media:
log.error('Failed adding media %s: %s', (l.get('identifier'), info))
continue
added_media['files'] = added_media.get('files', {})
for f in files:
ffile = all_files[f.get('file_id')]
# Only migrate posters
if ffile.get('type_id') == poster_type.get('id'):
if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')):
added_media['files']['image_poster'] = [ffile.get('path')]
break
if 'image_poster' in added_media['files']:
db.update(added_media)
for rel in releases:
empty_info = False
if not rel.get('info'):
empty_info = True
rel['info'] = {}
quality = quality_link.get(rel.get('quality_id'))
if not quality:
continue
release_status = statuses.get(rel.get('status_id')).get('identifier')
if rel['info'].get('download_id'):
status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True']
rel['info']['download_info'] = {
'id': rel['info'].get('download_id'),
'downloader': rel['info'].get('download_downloader'),
'status_support': status_support,
} }
types = migrate_data['profiletype'] # Add status to keys
for profile_type in types: rel['info']['status'] = release_status
p_type = types[profile_type] if not empty_info:
if types[profile_type]['profile_id'] == p['id']: fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True)
if p_type['quality_id']: else:
new_profile['finish'].append(p_type['finish']) release = {
new_profile['wait_for'].append(p_type['wait_for']) '_t': 'release',
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier']) 'identifier': rel.get('identifier'),
'media_id': added_media.get('_id'),
'quality': quality.get('identifier'),
'status': release_status,
'last_edit': int(time.time()),
'files': {}
}
if len(new_profile['qualities']) > 0: # Add downloader info if provided
new_profile.update(db.insert(new_profile)) try:
profile_link[x] = new_profile.get('_id') release['download_info'] = rel['info']['download_info']
else: del rel['download_info']
log.error('Corrupt profile list for "%s", using default.', p.get('label')) except:
pass
# Qualities # Add files
log.info('Importing quality sizes') release_files = releases_files.get(rel.get('id'), [])
new_qualities = db.all('quality', with_doc = True) if not isinstance(release_files, list):
new_qualities_by_identifier = {} release_files = [release_files]
for x in new_qualities:
new_qualities_by_identifier[x['doc']['identifier']] = x['_id']
qualities = migrate_data['quality'] if len(release_files) == 0:
quality_link = {}
for x in qualities:
q = qualities[x]
q_id = new_qualities_by_identifier[q.get('identifier')]
quality = db.get('id', q_id)
quality['order'] = q.get('order')
quality['size_min'] = tryInt(q.get('size_min'))
quality['size_max'] = tryInt(q.get('size_max'))
db.update(quality)
quality_link[x] = quality
# Titles
titles = migrate_data['librarytitle']
titles_by_library = {}
for x in titles:
title = titles[x]
if title.get('default'):
titles_by_library[title.get('libraries_id')] = title.get('title')
# Releases
releaseinfos = migrate_data['releaseinfo']
for x in releaseinfos:
info = releaseinfos[x]
# Skip if release doesn't exist for this info
if not migrate_data['release'].get(info.get('release_id')):
continue
if not migrate_data['release'][info.get('release_id')].get('info'):
migrate_data['release'][info.get('release_id')]['info'] = {}
migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value')
releases = migrate_data['release']
releases_by_media = {}
for x in releases:
release = releases[x]
if not releases_by_media.get(release.get('movie_id')):
releases_by_media[release.get('movie_id')] = []
releases_by_media[release.get('movie_id')].append(release)
# Type ids
types = migrate_data['filetype']
type_by_id = {}
for t in types:
type = types[t]
type_by_id[type.get('id')] = type
# Media
log.info('Importing %s media items', len(migrate_data['movie']))
statuses = migrate_data['status']
libraries = migrate_data['library']
library_files = migrate_data['library_files__file_library']
releases_files = migrate_data['release_files__file_release']
all_files = migrate_data['file']
poster_type = migrate_data['filetype']['poster']
medias = migrate_data['movie']
for x in medias:
m = medias[x]
status = statuses.get(m['status_id']).get('identifier')
l = libraries.get(m['library_id'])
# Only migrate wanted movies, Skip if no identifier present
if not l or not getImdb(l.get('identifier')): continue
profile_id = profile_link.get(m['profile_id'])
category_id = category_link.get(m['category_id'])
title = titles_by_library.get(m['library_id'])
releases = releases_by_media.get(x, [])
info = json.loads(l.get('info', ''))
files = library_files.get(m['library_id'], [])
if not isinstance(files, list):
files = [files]
added_media = fireEvent('movie.add', {
'info': info,
'identifier': l.get('identifier'),
'profile_id': profile_id,
'category_id': category_id,
'title': title
}, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True)
if not added_media:
log.error('Failed adding media %s: %s', (l.get('identifier'), info))
continue
added_media['files'] = added_media.get('files', {})
for f in files:
ffile = all_files[f.get('file_id')]
# Only migrate posters
if ffile.get('type_id') == poster_type.get('id'):
if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')):
added_media['files']['image_poster'] = [ffile.get('path')]
break
if 'image_poster' in added_media['files']:
db.update(added_media)
for rel in releases:
empty_info = False
if not rel.get('info'):
empty_info = True
rel['info'] = {}
quality = quality_link.get(rel.get('quality_id'))
if not quality:
continue continue
release_status = statuses.get(rel.get('status_id')).get('identifier') for f in release_files:
rfile = all_files[f.get('file_id')]
file_type = type_by_id.get(rfile.get('type_id')).get('identifier')
if rel['info'].get('download_id'): if not release['files'].get(file_type):
status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True'] release['files'][file_type] = []
rel['info']['download_info'] = {
'id': rel['info'].get('download_id'),
'downloader': rel['info'].get('download_downloader'),
'status_support': status_support,
}
# Add status to keys release['files'][file_type].append(rfile.get('path'))
rel['info']['status'] = release_status
if not empty_info:
fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True)
else:
release = {
'_t': 'release',
'identifier': rel.get('identifier'),
'media_id': added_media.get('_id'),
'quality': quality.get('identifier'),
'status': release_status,
'last_edit': int(time.time()),
'files': {}
}
# Add downloader info if provided try:
try: rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc']
release['download_info'] = rel['info']['download_info'] rls.update(release)
del rel['download_info'] db.update(rls)
except: except:
pass db.insert(release)
# Add files
release_files = releases_files.get(rel.get('id'), [])
if not isinstance(release_files, list):
release_files = [release_files]
if len(release_files) == 0:
continue
for f in release_files:
rfile = all_files.get(f.get('file_id'))
if not rfile:
continue
file_type = type_by_id.get(rfile.get('type_id')).get('identifier')
if not release['files'].get(file_type):
release['files'][file_type] = []
release['files'][file_type].append(rfile.get('path'))
try:
rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc']
rls.update(release)
db.update(rls)
except:
db.insert(release)
log.info('Total migration took %s', time.time() - migrate_start)
log.info('=' * 30)
rename_old = True
except OperationalError:
log.error('Migrating from faulty database, probably a (too) old version: %s', traceback.format_exc())
rename_old = True
except:
log.error('Migration failed: %s', traceback.format_exc())
log.info('Total migration took %s', time.time() - migrate_start)
log.info('=' * 30)
# rename old database # rename old database
if rename_old: log.info('Renaming old database to %s ', old_db + '.old')
random = randomString() os.rename(old_db, old_db + '.old')
log.info('Renaming old database to %s ', '%s.%s_old' % (old_db, random))
os.rename(old_db, '%s.%s_old' % (old_db, random))
if os.path.isfile(old_db + '-wal'): if os.path.isfile(old_db + '-wal'):
os.rename(old_db + '-wal', '%s-wal.%s_old' % (old_db, random)) os.rename(old_db + '-wal', old_db + '-wal.old')
if os.path.isfile(old_db + '-shm'): if os.path.isfile(old_db + '-shm'):
os.rename(old_db + '-shm', '%s-shm.%s_old' % (old_db, random)) os.rename(old_db + '-shm', old_db + '-shm.old')
-36
View File
@@ -20,31 +20,14 @@ class Blackhole(DownloaderBase):
status_support = False status_support = False
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
directory = self.conf('directory') directory = self.conf('directory')
# The folder needs to exist
if not directory or not os.path.isdir(directory): if not directory or not os.path.isdir(directory):
log.error('No directory set for blackhole %s download.', data.get('protocol')) log.error('No directory set for blackhole %s download.', data.get('protocol'))
else: else:
try: try:
# Filedata can be empty, which probably means it a magnet link
if not filedata or len(filedata) < 50: if not filedata or len(filedata) < 50:
try: try:
if data.get('protocol') == 'torrent_magnet': if data.get('protocol') == 'torrent_magnet':
@@ -53,16 +36,13 @@ class Blackhole(DownloaderBase):
except: except:
log.error('Failed download torrent via magnet url: %s', traceback.format_exc()) log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
# If it's still empty, don't know what to do!
if not filedata or len(filedata) < 50: if not filedata or len(filedata) < 50:
log.error('No nzb/torrent available: %s', data.get('url')) log.error('No nzb/torrent available: %s', data.get('url'))
return False return False
# Create filename with imdb id and other nice stuff
file_name = self.createFileName(data, filedata, media) file_name = self.createFileName(data, filedata, media)
full_path = os.path.join(directory, file_name) full_path = os.path.join(directory, file_name)
# People want thinks nice and tidy, create a subdir
if self.conf('create_subdir'): if self.conf('create_subdir'):
try: try:
new_path = os.path.splitext(full_path)[0] new_path = os.path.splitext(full_path)[0]
@@ -73,8 +53,6 @@ class Blackhole(DownloaderBase):
log.error('Couldnt create sub dir, reverting to old one: %s', full_path) log.error('Couldnt create sub dir, reverting to old one: %s', full_path)
try: try:
# Make sure the file doesn't exist yet, no need in overwriting it
if not os.path.isfile(full_path): if not os.path.isfile(full_path):
log.info('Downloading %s to %s.', (data.get('protocol'), full_path)) log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
with open(full_path, 'wb') as f: with open(full_path, 'wb') as f:
@@ -96,10 +74,6 @@ class Blackhole(DownloaderBase):
return False return False
def test(self): def test(self):
""" Test and see if the directory is writable
:return: boolean
"""
directory = self.conf('directory') directory = self.conf('directory')
if directory and os.path.isdir(directory): if directory and os.path.isdir(directory):
@@ -114,10 +88,6 @@ class Blackhole(DownloaderBase):
return False return False
def getEnabledProtocol(self): def getEnabledProtocol(self):
""" What protocols is this downloaded used for
:return: list with protocols
"""
if self.conf('use_for') == 'both': if self.conf('use_for') == 'both':
return super(Blackhole, self).getEnabledProtocol() return super(Blackhole, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent': elif self.conf('use_for') == 'torrent':
@@ -126,12 +96,6 @@ class Blackhole(DownloaderBase):
return ['nzb'] return ['nzb']
def isEnabled(self, manual = False, data = None): def isEnabled(self, manual = False, data = None):
""" Check if protocol is used (and enabled)
:param manual: The user has clicked to download a link through the webUI
:param data: dict returned from provider
Contains the release information
:return: boolean
"""
if not data: data = {} if not data: data = {}
for_protocol = ['both'] for_protocol = ['both']
if data and 'torrent' in data.get('protocol'): if data and 'torrent' in data.get('protocol'):
-34
View File
@@ -25,18 +25,8 @@ class Deluge(DownloaderBase):
drpc = None drpc = None
def connect(self, reconnect = False): def connect(self, reconnect = False):
""" Connect to the delugeRPC, re-use connection when already available
:param reconnect: force reconnect
:return: DelugeRPC instance
"""
# Load host from config and split out port. # Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':') host = cleanHost(self.conf('host'), protocol = False).split(':')
# Force host assignment
if len(host) == 1:
host.append(80)
if not isInt(host[1]): if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.') log.error('Config properties are not filled in correctly, port is missing.')
return False return False
@@ -47,20 +37,6 @@ class Deluge(DownloaderBase):
return self.drpc return self.drpc
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -115,21 +91,11 @@ class Deluge(DownloaderBase):
return self.downloadReturnId(remote_torrent) return self.downloadReturnId(remote_torrent)
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True) and self.drpc.test(): if self.connect(True) and self.drpc.test():
return True return True
return False return False
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Deluge download status.') log.debug('Checking Deluge download status.')
-427
View File
@@ -1,427 +0,0 @@
from base64 import b16encode, b32decode, b64encode
from distutils.version import LooseVersion
from hashlib import sha1
import httplib
import json
import os
import re
import urllib2
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from bencode import bencode as benc, bdecode
log = CPLog(__name__)
autoload = 'Hadouken'
class Hadouken(DownloaderBase):
protocol = ['torrent', 'torrent_magnet']
hadouken_api = None
def connect(self):
# Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if not self.conf('api_key'):
log.error('Config properties are not filled in correctly, API key is missing.')
return False
self.hadouken_api = HadoukenAPI(host[0], port = host[1], api_key = self.conf('api_key'))
return True
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
log.debug("Sending '%s' (%s) to Hadouken.", (data.get('name'), data.get('protocol')))
if not self.connect():
return False
torrent_params = {}
if self.conf('label'):
torrent_params['label'] = self.conf('label')
torrent_filename = self.createFileName(data, filedata, media)
if data.get('protocol') == 'torrent_magnet':
torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
torrent_params['trackers'] = self.torrent_trackers
torrent_params['name'] = torrent_filename
else:
info = bdecode(filedata)['info']
torrent_hash = sha1(benc(info)).hexdigest().upper()
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
# Send request to Hadouken
if data.get('protocol') == 'torrent_magnet':
self.hadouken_api.add_magnet_link(data.get('url'), torrent_params)
else:
self.hadouken_api.add_file(filedata, torrent_params)
return self.downloadReturnId(torrent_hash)
def test(self):
""" Tests the given host:port and API key """
if not self.connect():
return False
version = self.hadouken_api.get_version()
if not version:
log.error('Could not get Hadouken version.')
return False
# The minimum required version of Hadouken is 4.5.6.
if LooseVersion(version) >= LooseVersion('4.5.6'):
return True
log.error('Hadouken v4.5.6 (or newer) required. Found v%s', version)
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Hadouken download status.')
if not self.connect():
return []
release_downloads = ReleaseDownloadList(self)
queue = self.hadouken_api.get_by_hash_list(ids)
if not queue:
return []
for torrent in queue:
if torrent is None:
continue
torrent_filelist = self.hadouken_api.get_files_by_hash(torrent['InfoHash'])
torrent_files = []
save_path = torrent['SavePath']
# The 'Path' key for each file_item contains
# the full path to the single file relative to the
# torrents save path.
# For a single file torrent the result would be,
# - Save path: "C:\Downloads"
# - file_item['Path'] = "file1.iso"
# Resulting path: "C:\Downloads\file1.iso"
# For a multi file torrent the result would be,
# - Save path: "C:\Downloads"
# - file_item['Path'] = "dirname/file1.iso"
# Resulting path: "C:\Downloads\dirname/file1.iso"
for file_item in torrent_filelist:
torrent_files.append(sp(os.path.join(save_path, file_item['Path'])))
release_downloads.append({
'id': torrent['InfoHash'].upper(),
'name': torrent['Name'],
'status': self.get_torrent_status(torrent),
'seed_ratio': self.get_seed_ratio(torrent),
'original_status': torrent['State'],
'timeleft': -1,
'folder': sp(save_path if len(torrent_files == 1) else os.path.join(save_path, torrent['Name'])),
'files': torrent_files
})
return release_downloads
def get_seed_ratio(self, torrent):
""" Returns the seed ratio for a given torrent.
Keyword arguments:
torrent -- The torrent to calculate seed ratio for.
"""
up = torrent['TotalUploadedBytes']
down = torrent['TotalDownloadedBytes']
if up > 0 and down > 0:
return up / down
return 0
def get_torrent_status(self, torrent):
""" Returns the CouchPotato status for a given torrent.
Keyword arguments:
torrent -- The torrent to translate status for.
"""
if torrent['IsSeeding'] and torrent['IsFinished'] and torrent['Paused']:
return 'completed'
if torrent['IsSeeding']:
return 'seeding'
return 'busy'
def pause(self, release_download, pause = True):
    """ Pauses or resumes the torrent specified by the ID field
    in release_download.

    Keyword arguments:
    release_download -- The CouchPotato release_download to pause/resume.
    pause -- Boolean indicating whether to pause or resume.
    """

    if self.connect():
        return self.hadouken_api.pause(release_download['id'], pause)

    return False
def removeFailed(self, release_download):
    """ Removes a failed torrent and also remove the data associated with it.

    Keyword arguments:
    release_download -- The CouchPotato release_download to remove.
    """

    log.info('%s failed downloading, deleting...', release_download['name'])

    if self.connect():
        # Failed downloads never keep their data around.
        return self.hadouken_api.remove(release_download['id'], remove_data = True)

    return False
def processComplete(self, release_download, delete_files = False):
    """ Removes the completed torrent from Hadouken and optionally removes the data
    associated with it.

    Keyword arguments:
    release_download -- The CouchPotato release_download to remove.
    delete_files: Boolean indicating whether to remove the associated data.
    """

    log.debug('Requesting Hadouken to remove the torrent %s%s.',
              (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))

    if self.connect():
        return self.hadouken_api.remove(release_download['id'], remove_data = delete_files)

    return False
class HadoukenAPI(object):
    """ Minimal JSON-RPC 2.0 client for the Hadouken torrent daemon.

    Wraps the handful of `torrents.*` / `core.*` RPC methods the downloader
    needs. All calls go through _request, which returns the JSON-RPC result
    on success and False on any error.
    """

    def __init__(self, host = 'localhost', port = 7890, api_key = None):
        self.url = 'http://' + str(host) + ':' + str(port)
        self.api_key = api_key
        # Incremented per call; used as the JSON-RPC request id.
        self.requestId = 0

        self.opener = urllib2.build_opener()
        self.opener.addheaders = [('User-agent', 'couchpotato-hadouken-client/1.0'), ('Accept', 'application/json')]

        if not api_key:
            # Requests will still be attempted, but Hadouken will reject them (401).
            log.error('API key missing.')

    def add_file(self, filedata, torrent_params):
        """ Add a file to Hadouken with the specified parameters.

        Keyword arguments:
        filedata -- The binary torrent data.
        torrent_params -- Additional parameters for the file.
        """
        data = {
            'method': 'torrents.addFile',
            # Binary payloads travel base64-encoded inside the JSON body.
            'params': [b64encode(filedata), torrent_params]
        }

        return self._request(data)

    def add_magnet_link(self, magnetLink, torrent_params):
        """ Add a magnet link to Hadouken with the specified parameters.

        Keyword arguments:
        magnetLink -- The magnet link to send.
        torrent_params -- Additional parameters for the magnet link.
        """
        data = {
            'method': 'torrents.addUrl',
            'params': [magnetLink, torrent_params]
        }

        return self._request(data)

    def get_by_hash_list(self, infoHashList):
        """ Gets a list of torrents filtered by the given info hash list.

        Keyword arguments:
        infoHashList -- A list of info hashes.
        """
        data = {
            'method': 'torrents.getByInfoHashList',
            'params': [infoHashList]
        }

        return self._request(data)

    def get_files_by_hash(self, infoHash):
        """ Gets a list of files for the torrent identified by the
        given info hash.

        Keyword arguments:
        infoHash -- The info hash of the torrent to return files for.
        """
        data = {
            'method': 'torrents.getFiles',
            'params': [infoHash]
        }

        return self._request(data)

    def get_version(self):
        """ Gets the version, commitish and build date of Hadouken.

        :return: version string, or False when the request failed.
        """
        data = {
            'method': 'core.getVersion',
            'params': None
        }

        result = self._request(data)

        if not result:
            return False

        return result['Version']

    def pause(self, infoHash, pause):
        """ Pauses/unpauses the torrent identified by the given info hash.

        Keyword arguments:
        infoHash -- The info hash of the torrent to operate on.
        pause -- If true, pauses the torrent. Otherwise resumes.
        """
        data = {
            'method': 'torrents.pause',
            'params': [infoHash]
        }

        # Same params either way; only the RPC method name differs.
        if not pause:
            data['method'] = 'torrents.resume'

        return self._request(data)

    def remove(self, infoHash, remove_data = False):
        """ Removes the torrent identified by the given info hash and
        optionally removes the data as well.

        Keyword arguments:
        infoHash -- The info hash of the torrent to remove.
        remove_data -- If true, removes the data associated with the torrent.
        """
        data = {
            'method': 'torrents.remove',
            'params': [infoHash, remove_data]
        }

        return self._request(data)

    def _request(self, data):
        """ Send a JSON-RPC request and return its result.

        Keyword arguments:
        data -- dict with 'method' and 'params'; the JSON-RPC envelope
                ('jsonrpc', 'id') is filled in here.

        :return: the JSON-RPC 'result' member on success, False on any error.
        """
        self.requestId += 1

        data['jsonrpc'] = '2.0'
        data['id'] = self.requestId

        request = urllib2.Request(self.url + '/jsonrpc', data = json.dumps(data))
        request.add_header('Authorization', 'Token ' + self.api_key)
        request.add_header('Content-Type', 'application/json')

        try:
            f = self.opener.open(request)
            response = f.read()
            f.close()

            obj = json.loads(response)

            if 'error' not in obj:
                return obj['result']

            # CPLog calls elsewhere in this file pass substitutions as a single
            # tuple; two positional args would not match that convention.
            log.error('JSONRPC error, %s: %s', (obj['error']['code'], obj['error']['message']))
        except httplib.InvalidURL as err:
            log.error('Invalid Hadouken host, check your config %s', err)
        except urllib2.HTTPError as err:
            if err.code == 401:
                log.error('Invalid Hadouken API key, check your config')
            else:
                log.error('Hadouken HTTPError: %s', err)
        except urllib2.URLError as err:
            log.error('Unable to connect to Hadouken %s', err)

        return False
# Settings-UI definition for the Hadouken downloader: one 'downloaders' tab
# group with enabler, host, API key and label options. Consumed by the
# CouchPotato settings framework at plugin load time.
config = [{
    'name': 'hadouken',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'hadouken',
            'label': 'Hadouken',
            'description': 'Use <a href="http://www.hdkn.net">Hadouken</a> (>= v4.5.6) to download torrents.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'torrent'
                },
                {
                    'name': 'host',
                    'default': 'localhost:7890'
                },
                {
                    'name': 'api_key',
                    'label': 'API key',
                    'type': 'password'
                },
                {
                    'name': 'label',
                    'description': 'Label to add torrent as.'
                }
            ]
        }
    ]
}]
+3 -28
View File
@@ -23,20 +23,6 @@ class NZBGet(DownloaderBase):
rpc = 'xmlrpc' rpc = 'xmlrpc'
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One failure returns false, but the downloader should log its own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -85,10 +71,6 @@ class NZBGet(DownloaderBase):
return False return False
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
rpc = self.getRPC() rpc = self.getRPC()
try: try:
@@ -109,13 +91,6 @@ class NZBGet(DownloaderBase):
return True return True
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking NZBGet download status.') log.debug('Checking NZBGet download status.')
@@ -188,12 +163,12 @@ class NZBGet(DownloaderBase):
nzb_id = nzb['NZBID'] nzb_id = nzb['NZBID']
if nzb_id in ids: if nzb_id in ids:
log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log'])) log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
release_downloads.append({ release_downloads.append({
'id': nzb_id, 'id': nzb_id,
'name': nzb['NZBFilename'], 'name': nzb['NZBFilename'],
'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed', 'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': nzb['Status'], 'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)), 'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir']) 'folder': sp(nzb['DestDir'])
}) })
+64 -64
View File
@@ -1,10 +1,16 @@
from base64 import b64encode from base64 import b64encode
import os from urllib2 import URLError
from uuid import uuid4 from uuid import uuid4
import hashlib import hashlib
import httplib
import json
import os
import socket
import ssl
import sys
import time
import traceback import traceback
import urllib2
from requests import HTTPError
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, sp from couchpotato.core.helpers.encoding import tryUrlencode, sp
@@ -24,45 +30,23 @@ class NZBVortex(DownloaderBase):
session_id = None session_id = None
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
# Send the nzb # Send the nzb
try: try:
nzb_filename = self.createFileName(data, filedata, media, unique_tag = True) nzb_filename = self.createFileName(data, filedata, media)
response = self.call('nzb/add', files = {'file': (nzb_filename, filedata, 'application/octet-stream')}, parameters = { self.call('nzb/add', files = {'file': (nzb_filename, filedata)})
'name': nzb_filename,
'groupname': self.conf('group')
})
if response and response.get('result', '').lower() == 'ok': time.sleep(10)
return self.downloadReturnId(nzb_filename) raw_statuses = self.call('nzb')
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if os.path.basename(nzb['nzbFileName']) == nzb_filename][0]
log.error('Something went wrong sending the NZB file. Response: %s', response) return self.downloadReturnId(nzb_id)
return False
except: except:
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc()) log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
return False return False
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
try: try:
login_result = self.login() login_result = self.login()
except: except:
@@ -71,20 +55,12 @@ class NZBVortex(DownloaderBase):
return login_result return login_result
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
raw_statuses = self.call('nzb') raw_statuses = self.call('nzb')
release_downloads = ReleaseDownloadList(self) release_downloads = ReleaseDownloadList(self)
for nzb in raw_statuses.get('nzbs', []): for nzb in raw_statuses.get('nzbs', []):
nzb_id = os.path.basename(nzb['nzbFileName']) if nzb['id'] in ids:
if nzb_id in ids:
# Check status # Check status
status = 'busy' status = 'busy'
@@ -94,8 +70,7 @@ class NZBVortex(DownloaderBase):
status = 'failed' status = 'failed'
release_downloads.append({ release_downloads.append({
'temp_id': nzb['id'], 'id': nzb['id'],
'id': nzb_id,
'name': nzb['uiTitle'], 'name': nzb['uiTitle'],
'status': status, 'status': status,
'original_status': nzb['state'], 'original_status': nzb['state'],
@@ -110,7 +85,7 @@ class NZBVortex(DownloaderBase):
log.info('%s failed downloading, deleting...', release_download['name']) log.info('%s failed downloading, deleting...', release_download['name'])
try: try:
self.call('nzb/%s/cancel' % release_download['temp_id']) self.call('nzb/%s/cancel' % release_download['id'])
except: except:
log.error('Failed deleting: %s', traceback.format_exc(0)) log.error('Failed deleting: %s', traceback.format_exc(0))
return False return False
@@ -139,7 +114,7 @@ class NZBVortex(DownloaderBase):
log.error('Login failed, please check you api-key') log.error('Login failed, please check you api-key')
return False return False
def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs): def call(self, call, parameters = None, repeat = False, auth = True, *args, **kwargs):
# Login first # Login first
if not parameters: parameters = {} if not parameters: parameters = {}
@@ -152,20 +127,19 @@ class NZBVortex(DownloaderBase):
params = tryUrlencode(parameters) params = tryUrlencode(parameters)
url = cleanHost(self.conf('host')) + 'api/' + call url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api/' + call
try: try:
data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs) data = self.urlopen('%s?%s' % (url, params), *args, **kwargs)
if data: if data:
return data return json.loads(data)
except HTTPError as e: except URLError as e:
sc = e.response.status_code if hasattr(e, 'code') and e.code == 403:
if sc == 403:
# Try login and do again # Try login and do again
if not is_repeat: if not repeat:
self.login() self.login()
return self.call(call, parameters = parameters, is_repeat = True, **kwargs) return self.call(call, parameters = parameters, repeat = True, **kwargs)
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
except: except:
@@ -177,12 +151,13 @@ class NZBVortex(DownloaderBase):
if not self.api_level: if not self.api_level:
url = cleanHost(self.conf('host')) + 'api/app/apilevel'
try: try:
data = self.call('app/apilevel', auth = False) data = self.urlopen(url, show_error = False)
self.api_level = float(data.get('apilevel')) self.api_level = float(json.loads(data).get('apilevel'))
except HTTPError as e: except URLError as e:
sc = e.response.status_code if hasattr(e, 'code') and e.code == 403:
if sc == 403:
log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher') log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher')
else: else:
log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1)) log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1))
@@ -194,6 +169,29 @@ class NZBVortex(DownloaderBase):
return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel() return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()
class HTTPSConnection(httplib.HTTPSConnection):
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if sys.version_info < (2, 6, 7):
if hasattr(self, '_tunnel_host'):
self.sock = sock
self._tunnel()
else:
if self._tunnel_host:
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version = ssl.PROTOCOL_TLSv1)
class HTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return self.do_open(HTTPSConnection, req)
config = [{ config = [{
'name': 'nzbvortex', 'name': 'nzbvortex',
'groups': [ 'groups': [
@@ -213,18 +211,20 @@ config = [{
}, },
{ {
'name': 'host', 'name': 'host',
'default': 'https://localhost:4321', 'default': 'localhost:4321',
'description': 'Hostname with port. Usually <strong>https://localhost:4321</strong>', 'description': 'Hostname with port. Usually <strong>localhost:4321</strong>',
},
{
'name': 'ssl',
'default': 1,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
}, },
{ {
'name': 'api_key', 'name': 'api_key',
'label': 'Api Key', 'label': 'Api Key',
}, },
{
'name': 'group',
'label': 'Group',
'description': 'The group CP places the nzb in. Make sure to create it in NZBVortex.',
},
{ {
'name': 'manual', 'name': 'manual',
'default': False, 'default': False,
-18
View File
@@ -19,20 +19,6 @@ class Pneumatic(DownloaderBase):
status_support = False status_support = False
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -77,10 +63,6 @@ class Pneumatic(DownloaderBase):
return False return False
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
directory = self.conf('directory') directory = self.conf('directory')
if directory and os.path.isdir(directory): if directory and os.path.isdir(directory):
@@ -1,68 +0,0 @@
from .main import PutIO
def autoload():
return PutIO()
config = [{
'name': 'putio',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'putio',
'label': 'put.io',
'description': 'This will start a torrent download on <a href="http://put.io">Put.io</a>.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'torrent',
},
{
'name': 'oauth_token',
'label': 'oauth_token',
'description': 'This is the OAUTH_TOKEN from your putio API',
'advanced': True,
},
{
'name': 'folder',
'description': ('The folder on putio where you want the upload to go','Will find the first folder that matches this name'),
'default': 0,
},
{
'name': 'callback_host',
'description': 'External reachable url to CP so put.io can do it\'s thing',
},
{
'name': 'download',
'description': 'Set this to have CouchPotato download the file from Put.io',
'type': 'bool',
'default': 0,
},
{
'name': 'delete_file',
'description': ('Set this to remove the file from putio after sucessful download','Does nothing if you don\'t select download'),
'type': 'bool',
'default': 0,
},
{
'name': 'download_dir',
'type': 'directory',
'label': 'Download Directory',
'description': 'The Directory to download files to, does nothing if you don\'t select download',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
],
}
],
}]
-181
View File
@@ -1,181 +0,0 @@
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEventAsync
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from pio import api as pio
import datetime
log = CPLog(__name__)
autoload = 'Putiodownload'
class PutIO(DownloaderBase):
protocol = ['torrent', 'torrent_magnet']
downloading_list = []
oauth_authenticate = 'https://api.couchpota.to/authorize/putio/'
def __init__(self):
addApiView('downloader.putio.getfrom', self.getFromPutio, docs = {
'desc': 'Allows you to download file from prom Put.io',
})
addApiView('downloader.putio.auth_url', self.getAuthorizationUrl)
addApiView('downloader.putio.credentials', self.getCredentials)
addEvent('putio.download', self.putioDownloader)
return super(PutIO, self).__init__()
# This is a recusive function to check for the folders
def recursionFolder(self, client, folder = 0, tfolder = ''):
files = client.File.list(folder)
for f in files:
if f.content_type == 'application/x-directory':
if f.name == tfolder:
return f.id
else:
result = self.recursionFolder(client, f.id, tfolder)
if result != 0:
return result
return 0
# This will check the root for the folder, and kick of recusively checking sub folder
def convertFolder(self, client, folder):
if folder == 0:
return 0
else:
return self.recursionFolder(client, 0, folder)
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" to put.io', data.get('name'))
url = data.get('url')
client = pio.Client(self.conf('oauth_token'))
putioFolder = self.convertFolder(client, self.conf('folder'))
log.debug('putioFolder ID is %s', putioFolder)
# It might be possible to call getFromPutio from the renamer if we can then we don't need to do this.
# Note callback_host is NOT our address, it's the internet host that putio can call too
callbackurl = None
if self.conf('download'):
callbackurl = 'http://' + self.conf('callback_host') + '%sdownloader.putio.getfrom/' %Env.get('api_base'.strip('/'))
resp = client.Transfer.add_url(url, callback_url = callbackurl, parent_id = putioFolder)
log.debug('resp is %s', resp.id);
return self.downloadReturnId(resp.id)
def test(self):
try:
client = pio.Client(self.conf('oauth_token'))
if client.File.list():
return True
except:
log.info('Failed to get file listing, check OAUTH_TOKEN')
return False
def getAuthorizationUrl(self, host = None, **kwargs):
callback_url = cleanHost(host) + '%sdownloader.putio.credentials/' % (Env.get('api_base').lstrip('/'))
log.debug('callback_url is %s', callback_url)
target_url = self.oauth_authenticate + "?target=" + callback_url
log.debug('target_url is %s', target_url)
return {
'success': True,
'url': target_url,
}
def getCredentials(self, **kwargs):
try:
oauth_token = kwargs.get('oauth')
except:
return 'redirect', Env.get('web_base') + 'settings/downloaders/'
log.debug('oauth_token is: %s', oauth_token)
self.conf('oauth_token', value = oauth_token);
return 'redirect', Env.get('web_base') + 'settings/downloaders/'
def getAllDownloadStatus(self, ids):
log.debug('Checking putio download status.')
client = pio.Client(self.conf('oauth_token'))
transfers = client.Transfer.list()
log.debug(transfers);
release_downloads = ReleaseDownloadList(self)
for t in transfers:
if t.id in ids:
log.debug('downloading list is %s', self.downloading_list)
if t.status == "COMPLETED" and self.conf('download') == False :
status = 'completed'
# So check if we are trying to download something
elif t.status == "COMPLETED" and self.conf('download') == True:
# Assume we are done
status = 'completed'
if not self.downloading_list:
now = datetime.datetime.utcnow()
date_time = datetime.datetime.strptime(t.finished_at,"%Y-%m-%dT%H:%M:%S")
# We need to make sure a race condition didn't happen
if (now - date_time) < datetime.timedelta(minutes=5):
# 5 minutes haven't passed so we wait
status = 'busy'
else:
# If we have the file_id in the downloading_list mark it as busy
if str(t.file_id) in self.downloading_list:
status = 'busy'
else:
status = 'busy'
release_downloads.append({
'id' : t.id,
'name': t.name,
'status': status,
'timeleft': t.estimated_time,
})
return release_downloads
def putioDownloader(self, fid):
log.info('Put.io Real downloader called with file_id: %s',fid)
client = pio.Client(self.conf('oauth_token'))
log.debug('About to get file List')
putioFolder = self.convertFolder(client, self.conf('folder'))
log.debug('PutioFolderID is %s', putioFolder)
files = client.File.list(parent_id=putioFolder)
downloaddir = self.conf('download_dir')
for f in files:
if str(f.id) == str(fid):
client.File.download(f, dest = downloaddir, delete_after_download = self.conf('delete_file'))
# Once the download is complete we need to remove it from the running list.
self.downloading_list.remove(fid)
return True
def getFromPutio(self, **kwargs):
try:
file_id = str(kwargs.get('file_id'))
except:
return {
'success' : False,
}
log.info('Put.io Download has been called file_id is %s', file_id)
if file_id not in self.downloading_list:
self.downloading_list.append(file_id)
fireEventAsync('putio.download',fid = file_id)
return {
'success': True,
}
return {
'success': False,
}
@@ -1,68 +0,0 @@
var PutIODownloader = new Class({
initialize: function(){
var self = this;
App.addEvent('loadSettings', self.addRegisterButton.bind(self));
},
addRegisterButton: function(){
var self = this;
var setting_page = App.getPage('Settings');
setting_page.addEvent('create', function(){
var fieldset = setting_page.tabs.downloaders.groups.putio,
l = window.location;
var putio_set = 0;
fieldset.getElements('input[type=text]').each(function(el){
putio_set += +(el.get('value') != '');
});
new Element('.ctrlHolder').adopt(
// Unregister button
(putio_set > 0) ?
[
self.unregister = new Element('a.button.red', {
'text': 'Unregister "'+fieldset.getElement('input[name*=oauth_token]').get('value')+'"',
'events': {
'click': function(){
fieldset.getElements('input[name*=oauth_token]').set('value', '').fireEvent('change');
self.unregister.destroy();
self.unregister_or.destroy();
}
}
}),
self.unregister_or = new Element('span[text=or]')
]
: null,
// Register button
new Element('a.button', {
'text': putio_set > 0 ? 'Register a different account' : 'Register your put.io account',
'events': {
'click': function(){
Api.request('downloader.putio.auth_url', {
'data': {
'host': l.protocol + '//' + l.hostname + (l.port ? ':' + l.port : '')
},
'onComplete': function(json){
window.location = json.url;
}
});
}
}
})
).inject(fieldset.getElement('.test_button'), 'before');
})
}
});
window.addEvent('domready', function(){
new PutIODownloader();
});
@@ -41,30 +41,12 @@ class qBittorrent(DownloaderBase):
return self.qb return self.qb
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(): if self.connect():
return True return True
return False return False
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -113,14 +95,6 @@ class qBittorrent(DownloaderBase):
return 'busy' return 'busy'
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking qBittorrent download status.') log.debug('Checking qBittorrent download status.')
if not self.connect(): if not self.connect():
-26
View File
@@ -84,10 +84,6 @@ class rTorrent(DownloaderBase):
return self.rt return self.rt
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True): if self.connect(True):
return True return True
@@ -98,20 +94,6 @@ class rTorrent(DownloaderBase):
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -179,14 +161,6 @@ class rTorrent(DownloaderBase):
return 'completed' return 'completed'
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking rTorrent download status.') log.debug('Checking rTorrent download status.')
if not self.connect(): if not self.connect():
-27
View File
@@ -21,21 +21,6 @@ class Sabnzbd(DownloaderBase):
protocol = ['nzb'] protocol = ['nzb']
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -84,11 +69,6 @@ class Sabnzbd(DownloaderBase):
return False return False
def test(self): def test(self):
""" Check if connection works
Return message if an old version of SAB is used
:return: bool
"""
try: try:
sab_data = self.call({ sab_data = self.call({
'mode': 'version', 'mode': 'version',
@@ -109,13 +89,6 @@ class Sabnzbd(DownloaderBase):
return True return True
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking SABnzbd download status.') log.debug('Checking SABnzbd download status.')
+1 -20
View File
@@ -19,21 +19,6 @@ class Synology(DownloaderBase):
status_support = False status_support = False
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -65,10 +50,6 @@ class Synology(DownloaderBase):
return self.downloadReturnId('') if response else False return self.downloadReturnId('') if response else False
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
host = cleanHost(self.conf('host'), protocol = False).split(':') host = cleanHost(self.conf('host'), protocol = False).split(':')
try: try:
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password')) srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
@@ -137,7 +118,7 @@ class SynologyRPC(object):
def _req(self, url, args, files = None): def _req(self, url, args, files = None):
response = {'success': False} response = {'success': False}
try: try:
req = requests.post(url, data = args, files = files, verify = False) req = requests.post(url, data = args, files = files)
req.raise_for_status() req.raise_for_status()
response = json.loads(req.text) response = json.loads(req.text)
if response['success']: if response['success']:
+12 -40
View File
@@ -23,32 +23,19 @@ class Transmission(DownloaderBase):
log = CPLog(__name__) log = CPLog(__name__)
trpc = None trpc = None
def connect(self): def connect(self, reconnect = False):
# Load host from config and split out port. # Load host from config and split out port.
host = cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1) host = cleanHost(self.conf('host'), protocol = False).split(':')
if not isInt(host[1]): if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.') log.error('Config properties are not filled in correctly, port is missing.')
return False return False
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password')) if not self.trpc or reconnect:
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password'))
return self.trpc return self.trpc
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -93,32 +80,19 @@ class Transmission(DownloaderBase):
log.error('Failed sending torrent to Transmission') log.error('Failed sending torrent to Transmission')
return False return False
data = remote_torrent.get('torrent-added') or remote_torrent.get('torrent-duplicate')
# Change settings of added torrents # Change settings of added torrents
if torrent_params: if torrent_params:
self.trpc.set_torrent(data['hashString'], torrent_params) self.trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
log.info('Torrent sent to Transmission successfully.') log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(data['hashString']) return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
def test(self): def test(self):
""" Check if connection works if self.connect(True) and self.trpc.get_session():
:return: bool
"""
if self.connect() and self.trpc.get_session():
return True return True
return False return False
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Transmission download status.') log.debug('Checking Transmission download status.')
@@ -147,8 +121,6 @@ class Transmission(DownloaderBase):
status = 'failed' status = 'failed'
elif torrent['status'] == 0 and torrent['percentDone'] == 1: elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed' status = 'completed'
elif torrent['status'] == 16 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] in [5, 6]: elif torrent['status'] in [5, 6]:
status = 'seeding' status = 'seeding'
@@ -192,11 +164,11 @@ class Transmission(DownloaderBase):
class TransmissionRPC(object): class TransmissionRPC(object):
"""TransmissionRPC lite library""" """TransmissionRPC lite library"""
def __init__(self, host = 'http://localhost', port = 9091, rpc_url = 'transmission', username = None, password = None): def __init__(self, host = 'localhost', port = 9091, rpc_url = 'transmission', username = None, password = None):
super(TransmissionRPC, self).__init__() super(TransmissionRPC, self).__init__()
self.url = host + ':' + str(port) + '/' + rpc_url + '/rpc' self.url = 'http://' + host + ':' + str(port) + '/' + rpc_url + '/rpc'
self.tag = 0 self.tag = 0
self.session_id = 0 self.session_id = 0
self.session = {} self.session = {}
@@ -304,8 +276,8 @@ config = [{
}, },
{ {
'name': 'host', 'name': 'host',
'default': 'http://localhost:9091', 'default': 'localhost:9091',
'description': 'Hostname with port. Usually <strong>http://localhost:9091</strong>', 'description': 'Hostname with port. Usually <strong>localhost:9091</strong>',
}, },
{ {
'name': 'rpc_url', 'name': 'rpc_url',
-26
View File
@@ -51,21 +51,6 @@ class uTorrent(DownloaderBase):
return self.utorrent_api return self.utorrent_api
def download(self, data = None, media = None, filedata = None): def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and send to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {} if not media: media = {}
if not data: data = {} if not data: data = {}
@@ -135,10 +120,6 @@ class uTorrent(DownloaderBase):
return self.downloadReturnId(torrent_hash) return self.downloadReturnId(torrent_hash)
def test(self): def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(): if self.connect():
build_version = self.utorrent_api.get_build() build_version = self.utorrent_api.get_build()
if not build_version: if not build_version:
@@ -150,13 +131,6 @@ class uTorrent(DownloaderBase):
return False return False
def getAllDownloadStatus(self, ids): def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking uTorrent download status.') log.debug('Checking uTorrent download status.')
+1 -1
View File
@@ -90,7 +90,7 @@ def fireEvent(name, *args, **kwargs):
else: else:
e = Event(name = name, threads = 10, exc_info = True, traceback = True) e = Event(name = name, threads = 10, exc_info = True, traceback = True, lock = threading.RLock())
for event in events[name]: for event in events[name]:
e.handle(event['handler'], priority = event['priority']) e.handle(event['handler'], priority = event['priority'])
+4 -14
View File
@@ -5,7 +5,6 @@ import re
import traceback import traceback
import unicodedata import unicodedata
from chardet import detect
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
import six import six
@@ -36,19 +35,13 @@ def toUnicode(original, *args):
return six.text_type(original, *args) return six.text_type(original, *args)
except: except:
try: try:
detected = detect(original)
try:
if detected.get('confidence') > 0.8:
return original.decode(detected.get('encoding'))
except:
pass
return ek(original, *args) return ek(original, *args)
except: except:
raise raise
except: except:
log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc())) log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc()))
return 'ERROR DECODING STRING' ascii_text = str(original).encode('string_escape')
return toUnicode(ascii_text)
def ss(original, *args): def ss(original, *args):
@@ -59,10 +52,7 @@ def ss(original, *args):
return u_original.encode(Env.get('encoding')) return u_original.encode(Env.get('encoding'))
except Exception as e: except Exception as e:
log.debug('Failed ss encoding char, force UTF8: %s', e) log.debug('Failed ss encoding char, force UTF8: %s', e)
try: return u_original.encode('UTF-8')
return u_original.encode(Env.get('encoding'), 'replace')
except:
return u_original.encode('utf-8', 'replace')
def sp(path, *args): def sp(path, *args):
@@ -95,7 +85,7 @@ def ek(original, *args):
if isinstance(original, (str, unicode)): if isinstance(original, (str, unicode)):
try: try:
from couchpotato.environment import Env from couchpotato.environment import Env
return original.decode(Env.get('encoding'), 'ignore') return original.decode(Env.get('encoding'))
except UnicodeDecodeError: except UnicodeDecodeError:
raise raise
+2 -32
View File
@@ -41,11 +41,11 @@ def symlink(src, dst):
def getUserDir(): def getUserDir():
try: try:
import pwd import pwd
os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir) os.environ['HOME'] = pwd.getpwuid(os.geteuid()).pw_dir
except: except:
pass pass
return sp(os.path.expanduser('~')) return os.path.expanduser('~')
def getDownloadDir(): def getDownloadDir():
@@ -380,33 +380,3 @@ def getFreeSpace(directories):
free_space[folder] = size free_space[folder] = size
return free_space return free_space
def getSize(paths):
single = not isinstance(paths, (tuple, list))
if single:
paths = [paths]
total_size = 0
for path in paths:
path = sp(path)
if os.path.isdir(path):
total_size = 0
for dirpath, _, filenames in os.walk(path):
for f in filenames:
total_size += os.path.getsize(sp(os.path.join(dirpath, f)))
elif os.path.isfile(path):
total_size += os.path.getsize(path)
return total_size / 1048576 # MB
def find(func, iterable):
for item in iterable:
if func(item):
return item
return None
+9 -8
View File
@@ -59,14 +59,15 @@ class CPLog(object):
msg = ss(msg) msg = ss(msg)
try: try:
if isinstance(replace_tuple, tuple): msg = msg % replace_tuple
msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)]) except:
elif isinstance(replace_tuple, dict): try:
msg = msg % dict((k, ss(v) if not isinstance(v, (int, float)) else v) for k, v in replace_tuple.iteritems()) if isinstance(replace_tuple, tuple):
else: msg = msg % tuple([ss(x) for x in list(replace_tuple)])
msg = msg % ss(replace_tuple) else:
except Exception as e: msg = msg % ss(replace_tuple)
self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e)) except Exception as e:
self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))
self.setup() self.setup()
if not self.is_develop: if not self.is_develop:
+12 -24
View File
@@ -1,10 +1,9 @@
import os import os
import traceback import traceback
from couchpotato import CPLog, md5 from couchpotato import CPLog
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getExt
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
import six import six
@@ -27,9 +26,9 @@ class MediaBase(Plugin):
def onComplete(): def onComplete():
try: try:
media = fireEvent('media.get', media_id, single = True) media = fireEvent('media.get', media_id, single = True)
if media: event_name = '%s.searcher.single' % media.get('type')
event_name = '%s.searcher.single' % media.get('type')
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True) fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True)
except: except:
log.error('Failed creating onComplete: %s', traceback.format_exc()) log.error('Failed creating onComplete: %s', traceback.format_exc())
@@ -40,9 +39,9 @@ class MediaBase(Plugin):
def notifyFront(): def notifyFront():
try: try:
media = fireEvent('media.get', media_id, single = True) media = fireEvent('media.get', media_id, single = True)
if media: event_name = '%s.update' % media.get('type')
event_name = '%s.update' % media.get('type')
fireEvent('notify.frontend', type = event_name, data = media) fireEvent('notify.frontend', type = event_name, data = media)
except: except:
log.error('Failed creating onComplete: %s', traceback.format_exc()) log.error('Failed creating onComplete: %s', traceback.format_exc())
@@ -66,13 +65,10 @@ class MediaBase(Plugin):
return def_title or 'UNKNOWN' return def_title or 'UNKNOWN'
def getPoster(self, media, image_urls): def getPoster(self, image_urls, existing_files):
if 'files' not in media:
media['files'] = {}
existing_files = media['files']
image_type = 'poster' image_type = 'poster'
# Remove non-existing files
file_type = 'image_%s' % image_type file_type = 'image_%s' % image_type
# Make existing unique # Make existing unique
@@ -93,18 +89,10 @@ class MediaBase(Plugin):
if not isinstance(image, (str, unicode)): if not isinstance(image, (str, unicode)):
continue continue
# Check if it has top image if file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
filename = '%s.%s' % (md5(image), getExt(image))
existing = existing_files.get(file_type, [])
has_latest = False
for x in existing:
if filename in x:
has_latest = True
if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
file_path = fireEvent('file.download', url = image, single = True) file_path = fireEvent('file.download', url = image, single = True)
if file_path: if file_path:
existing_files[file_type] = [toUnicode(file_path)] existing_files[file_type] = [file_path]
break break
else: else:
break break
-110
View File
@@ -1,47 +1,10 @@
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.library.base import LibraryBase from couchpotato.core.media._base.library.base import LibraryBase
log = CPLog(__name__)
class Library(LibraryBase): class Library(LibraryBase):
def __init__(self): def __init__(self):
addEvent('library.title', self.title) addEvent('library.title', self.title)
addEvent('library.related', self.related)
addEvent('library.tree', self.tree)
addEvent('library.root', self.root)
addApiView('library.query', self.queryView)
addApiView('library.related', self.relatedView)
addApiView('library.tree', self.treeView)
def queryView(self, media_id, **kwargs):
db = get_db()
media = db.get('id', media_id)
return {
'result': fireEvent('library.query', media, single = True)
}
def relatedView(self, media_id, **kwargs):
db = get_db()
media = db.get('id', media_id)
return {
'result': fireEvent('library.related', media, single = True)
}
def treeView(self, media_id, **kwargs):
db = get_db()
media = db.get('id', media_id)
return {
'result': fireEvent('library.tree', media, single = True)
}
def title(self, library): def title(self, library):
return fireEvent( return fireEvent(
@@ -53,76 +16,3 @@ class Library(LibraryBase):
include_identifier = False, include_identifier = False,
single = True single = True
) )
def related(self, media):
result = {self.key(media['type']): media}
db = get_db()
cur = media
while cur and cur.get('parent_id'):
cur = db.get('id', cur['parent_id'])
result[self.key(cur['type'])] = cur
children = db.get_many('media_children', media['_id'], with_doc = True)
for item in children:
key = self.key(item['doc']['type']) + 's'
if key not in result:
result[key] = []
result[key].append(item['doc'])
return result
def root(self, media):
db = get_db()
cur = media
while cur and cur.get('parent_id'):
cur = db.get('id', cur['parent_id'])
return cur
def tree(self, media = None, media_id = None):
db = get_db()
if media:
result = media
elif media_id:
result = db.get('id', media_id, with_doc = True)
else:
return None
# Find children
items = db.get_many('media_children', result['_id'], with_doc = True)
keys = []
# Build children arrays
for item in items:
key = self.key(item['doc']['type']) + 's'
if key not in result:
result[key] = {}
elif type(result[key]) is not dict:
result[key] = {}
if key not in keys:
keys.append(key)
result[key][item['_id']] = fireEvent('library.tree', item['doc'], single = True)
# Unique children
for key in keys:
result[key] = result[key].values()
# Include releases
result['releases'] = fireEvent('release.for_media', result['_id'], single = True)
return result
def key(self, media_type):
parts = media_type.split('.')
return parts[-1]
+3 -3
View File
@@ -40,7 +40,7 @@ class Matcher(MatcherBase):
return False return False
def correctTitle(self, chain, media): def correctTitle(self, chain, media):
root = fireEvent('library.root', media, single = True) root_library = media['library']['root_library']
if 'show_name' not in chain.info or not len(chain.info['show_name']): if 'show_name' not in chain.info or not len(chain.info['show_name']):
log.info('Wrong: missing show name in parsed result') log.info('Wrong: missing show name in parsed result')
@@ -50,10 +50,10 @@ class Matcher(MatcherBase):
chain_words = [x.lower() for x in chain.info['show_name']] chain_words = [x.lower() for x in chain.info['show_name']]
# Build a list of possible titles of the media we are searching for # Build a list of possible titles of the media we are searching for
titles = root['info']['titles'] titles = root_library['info']['titles']
# Add year suffix titles (will result in ['<name_one>', '<name_one> <suffix_one>', '<name_two>', ...]) # Add year suffix titles (will result in ['<name_one>', '<name_one> <suffix_one>', '<name_two>', ...])
suffixes = [None, root['info']['year']] suffixes = [None, root_library['info']['year']]
titles = [ titles = [
title + ((' %s' % suffix) if suffix else '') title + ((' %s' % suffix) if suffix else '')
+33 -84
View File
@@ -1,9 +1,10 @@
from datetime import timedelta from datetime import timedelta
from operator import itemgetter
import time import time
import traceback import traceback
from string import ascii_lowercase from string import ascii_lowercase
from CodernityDB.database import RecordNotFound, RecordDeleted from CodernityDB.database import RecordNotFound
from couchpotato import tryInt, get_db from couchpotato import tryInt, get_db
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
@@ -43,15 +44,15 @@ class MediaPlugin(MediaBase):
'desc': 'List media', 'desc': 'List media',
'params': { 'params': {
'type': {'type': 'string', 'desc': 'Media type to filter on.'}, 'type': {'type': 'string', 'desc': 'Media type to filter on.'},
'status': {'type': 'array or csv', 'desc': 'Filter media by status. Example:"active,done"'}, 'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter media by status of its releases. Example:"snatched,available"'}, 'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the media list. Examples: "50" or "50,30"'}, 'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all media starting with the letter "a"'}, 'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
'search': {'desc': 'Search media title'}, 'search': {'desc': 'Search movie title'},
}, },
'return': {'type': 'object', 'example': """{ 'return': {'type': 'object', 'example': """{
'success': True, 'success': True,
'empty': bool, any media returned or not, 'empty': bool, any movies returned or not,
'media': array, media found, 'media': array, media found,
}"""} }"""}
}) })
@@ -77,7 +78,6 @@ class MediaPlugin(MediaBase):
addEvent('app.load', self.addSingleListView, priority = 100) addEvent('app.load', self.addSingleListView, priority = 100)
addEvent('app.load', self.addSingleCharView, priority = 100) addEvent('app.load', self.addSingleCharView, priority = 100)
addEvent('app.load', self.addSingleDeleteView, priority = 100) addEvent('app.load', self.addSingleDeleteView, priority = 100)
addEvent('app.load', self.cleanupFaults)
addEvent('media.get', self.get) addEvent('media.get', self.get)
addEvent('media.with_status', self.withStatus) addEvent('media.with_status', self.withStatus)
@@ -88,18 +88,6 @@ class MediaPlugin(MediaBase):
addEvent('media.tag', self.tag) addEvent('media.tag', self.tag)
addEvent('media.untag', self.unTag) addEvent('media.untag', self.unTag)
# Wrongly tagged media files
def cleanupFaults(self):
medias = fireEvent('media.with_status', 'ignored', single = True) or []
db = get_db()
for media in medias:
try:
media['status'] = 'done'
db.update(media)
except:
pass
def refresh(self, id = '', **kwargs): def refresh(self, id = '', **kwargs):
handlers = [] handlers = []
ids = splitString(id) ids = splitString(id)
@@ -121,7 +109,7 @@ class MediaPlugin(MediaBase):
try: try:
media = get_db().get('id', media_id) media = get_db().get('id', media_id)
event = '%s.update' % media.get('type') event = '%s.update_info' % media.get('type')
def handler(): def handler():
fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id)) fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
@@ -158,7 +146,7 @@ class MediaPlugin(MediaBase):
return media return media
except (RecordNotFound, RecordDeleted): except RecordNotFound:
log.error('Media with id "%s" not found', media_id) log.error('Media with id "%s" not found', media_id)
except: except:
raise raise
@@ -172,13 +160,10 @@ class MediaPlugin(MediaBase):
'media': media, 'media': media,
} }
def withStatus(self, status, types = None, with_doc = True): def withStatus(self, status, with_doc = True):
db = get_db() db = get_db()
if types and not isinstance(types, (list, tuple)):
types = [types]
status = list(status if isinstance(status, (list, tuple)) else [status]) status = list(status if isinstance(status, (list, tuple)) else [status])
for s in status: for s in status:
@@ -186,29 +171,24 @@ class MediaPlugin(MediaBase):
if with_doc: if with_doc:
try: try:
doc = db.get('id', ms['_id']) doc = db.get('id', ms['_id'])
if types and doc.get('type') not in types:
continue
yield doc yield doc
except (RecordDeleted, RecordNotFound): except RecordNotFound:
log.debug('Record not found, skipping: %s', ms['_id']) log.debug('Record not found, skipping: %s', ms['_id'])
except (ValueError, EOFError):
fireEvent('database.delete_corrupted', ms.get('_id'), traceback_error = traceback.format_exc(0))
else: else:
yield ms yield ms
def withIdentifiers(self, identifiers, with_doc = False): def withIdentifiers(self, identifiers, with_doc = False):
db = get_db() db = get_db()
for x in identifiers: for x in identifiers:
try: try:
return db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc) media = db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
return media
except: except:
pass pass
log.debug('No media found with identifiers: %s', identifiers) log.debug('No media found with identifiers: %s', identifiers)
return False
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None): def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None):
@@ -295,10 +275,6 @@ class MediaPlugin(MediaBase):
media = fireEvent('media.get', media_id, single = True) media = fireEvent('media.get', media_id, single = True)
# Skip if no media has been found
if not media:
continue
# Merge releases with movie dict # Merge releases with movie dict
medias.append(media) medias.append(media)
@@ -331,22 +307,9 @@ class MediaPlugin(MediaBase):
def addSingleListView(self): def addSingleListView(self):
for media_type in fireEvent('media.types', merge = True): for media_type in fireEvent('media.types', merge = True):
tempList = lambda *args, **kwargs : self.listView(type = media_type, **kwargs) def tempList(*args, **kwargs):
addApiView('%s.list' % media_type, tempList, docs = { return self.listView(types = media_type, **kwargs)
'desc': 'List media', addApiView('%s.list' % media_type, tempList)
'params': {
'status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the ' + media_type + ' list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all ' + media_type + 's starting with the letter "a"'},
'search': {'desc': 'Search ' + media_type + ' title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any """ + media_type + """s returned or not,
'media': array, media found,
}"""}
})
def availableChars(self, types = None, status = None, release_status = None): def availableChars(self, types = None, status = None, release_status = None):
@@ -392,7 +355,7 @@ class MediaPlugin(MediaBase):
if x['_id'] in media_ids: if x['_id'] in media_ids:
chars.add(x['key']) chars.add(x['key'])
if len(chars) == 27: if len(chars) == 25:
break break
return list(chars) return list(chars)
@@ -413,7 +376,8 @@ class MediaPlugin(MediaBase):
def addSingleCharView(self): def addSingleCharView(self):
for media_type in fireEvent('media.types', merge = True): for media_type in fireEvent('media.types', merge = True):
tempChar = lambda *args, **kwargs : self.charView(type = media_type, **kwargs) def tempChar(*args, **kwargs):
return self.charView(types = media_type, **kwargs)
addApiView('%s.available_chars' % media_type, tempChar) addApiView('%s.available_chars' % media_type, tempChar)
def delete(self, media_id, delete_from = None): def delete(self, media_id, delete_from = None):
@@ -451,16 +415,11 @@ class MediaPlugin(MediaBase):
db.delete(release) db.delete(release)
total_deleted += 1 total_deleted += 1
if (total_releases == total_deleted) or (total_releases == 0 and not new_media_status) or (not new_media_status and delete_from == 'late'): if (total_releases == total_deleted and media['status'] != 'active') or (total_releases == 0 and not new_media_status) or (not new_media_status and delete_from == 'late'):
db.delete(media) db.delete(media)
deleted = True deleted = True
elif new_media_status: elif new_media_status:
media['status'] = new_media_status media['status'] = new_media_status
# Remove profile (no use for in manage)
if new_media_status == 'done':
media['profile_id'] = None
db.update(media) db.update(media)
fireEvent('media.untag', media['_id'], 'recent', single = True) fireEvent('media.untag', media['_id'], 'recent', single = True)
@@ -487,16 +446,11 @@ class MediaPlugin(MediaBase):
def addSingleDeleteView(self): def addSingleDeleteView(self):
for media_type in fireEvent('media.types', merge = True): for media_type in fireEvent('media.types', merge = True):
tempDelete = lambda *args, **kwargs : self.deleteView(type = media_type, **kwargs) def tempDelete(*args, **kwargs):
addApiView('%s.delete' % media_type, tempDelete, docs = { return self.deleteView(types = media_type, *args, **kwargs)
'desc': 'Delete a ' + media_type + ' from the wanted list', addApiView('%s.delete' % media_type, tempDelete)
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete ' + media_type + ' from this page', 'type': 'string: all (default), wanted, manage'},
}
})
def restatus(self, media_id, tag_recent = True, allowed_restatus = None): def restatus(self, media_id):
try: try:
db = get_db() db = get_db()
@@ -516,13 +470,12 @@ class MediaPlugin(MediaBase):
done_releases = [release for release in media_releases if release.get('status') == 'done'] done_releases = [release for release in media_releases if release.get('status') == 'done']
if done_releases: if done_releases:
# Only look at latest added release
release = sorted(done_releases, key = itemgetter('last_edit'), reverse = True)[0]
# Check if we are finished with the media # Check if we are finished with the media
for release in done_releases: if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, timedelta(seconds = time.time() - release['last_edit']).days, single = True):
if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, timedelta(seconds = time.time() - release['last_edit']).days, single = True): m['status'] = 'done'
m['status'] = 'done'
break
elif previous_status == 'done': elif previous_status == 'done':
m['status'] = 'done' m['status'] = 'done'
@@ -531,26 +484,22 @@ class MediaPlugin(MediaBase):
m['status'] = previous_status m['status'] = previous_status
# Only update when status has changed # Only update when status has changed
if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus): if previous_status != m['status']:
db.update(m) db.update(m)
# Tag media as recent # Tag media as recent
if tag_recent: self.tag(media_id, 'recent')
self.tag(media_id, 'recent', update_edited = True)
return m['status'] return m['status']
except: except:
log.error('Failed restatus: %s', traceback.format_exc()) log.error('Failed restatus: %s', traceback.format_exc())
def tag(self, media_id, tag, update_edited = False): def tag(self, media_id, tag):
try: try:
db = get_db() db = get_db()
m = db.get('id', media_id) m = db.get('id', media_id)
if update_edited:
m['last_edit'] = int(time.time())
tags = m.get('tags') or [] tags = m.get('tags') or []
if tag not in tags: if tag not in tags:
tags.append(tag) tags.append(tag)
@@ -5,11 +5,6 @@ import time
import traceback import traceback
import xml.etree.ElementTree as XMLTree import xml.etree.ElementTree as XMLTree
try:
from xml.etree.ElementTree import ParseError as XmlParseError
except ImportError:
from xml.parsers.expat import ExpatError as XmlParseError
from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import ss from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \ from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
@@ -99,8 +94,6 @@ class Provider(Plugin):
try: try:
data = XMLTree.fromstring(ss(data)) data = XMLTree.fromstring(ss(data))
return self.getElements(data, item_path) return self.getElements(data, item_path)
except XmlParseError:
log.error('Invalid XML returned, check "%s" manually for issues', url)
except: except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
@@ -45,7 +45,7 @@ class Base(NZBProvider, RSS):
def _searchOnHost(self, host, media, quality, results): def _searchOnHost(self, host, media, quality, results):
query = self.buildUrl(media, host) query = self.buildUrl(media, host)
url = '%s%s' % (self.getUrl(host['host']), query) url = '%s&%s' % (self.getUrl(host['host']), query)
nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()}) nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
for nzb in nzbs: for nzb in nzbs:
@@ -68,12 +68,8 @@ class Base(NZBProvider, RSS):
if not date: if not date:
date = self.getTextElement(nzb, 'pubDate') date = self.getTextElement(nzb, 'pubDate')
nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
name = self.getTextElement(nzb, 'title') name = self.getTextElement(nzb, 'title')
detail_url = self.getTextElement(nzb, 'guid')
nzb_id = detail_url.split('/')[-1:].pop()
if '://' not in detail_url:
detail_url = (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id)
if not name: if not name:
continue continue
@@ -87,7 +83,7 @@ class Base(NZBProvider, RSS):
try: try:
# Get details for extended description to retrieve passwords # Get details for extended description to retrieve passwords
query = self.buildDetailsUrl(nzb_id, host['api_key']) query = self.buildDetailsUrl(nzb_id, host['api_key'])
url = '%s%s' % (self.getUrl(host['host']), query) url = '%s&%s' % (self.getUrl(host['host']), query)
nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0] nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0]
description = self.getTextElement(nzb_details, 'description') description = self.getTextElement(nzb_details, 'description')
@@ -107,7 +103,7 @@ class Base(NZBProvider, RSS):
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024, 'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host), 'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
'detail_url': detail_url, 'detail_url': (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id),
'content': self.getTextElement(nzb, 'description'), 'content': self.getTextElement(nzb, 'description'),
'description': description, 'description': description,
'score': host['extra_score'], 'score': host['extra_score'],
@@ -187,16 +183,15 @@ class Base(NZBProvider, RSS):
return 'try_next' return 'try_next'
try: try:
data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()}) data = self.urlopen(url, show_error = False)
self.limits_reached[host] = False self.limits_reached[host] = False
return data return data
except HTTPError as e: except HTTPError as e:
sc = e.response.status_code if e.code == 503:
if sc in [503, 429]:
response = e.read().lower() response = e.read().lower()
if sc == 429 or 'maximum api' in response or 'download limit' in response: if 'maximum api' in response or 'download limit' in response:
if not self.limits_reached.get(host): if not self.limits_reached.get(host):
log.error('Limit reached / to many requests for newznab provider: %s', host) log.error('Limit reached for newznab provider: %s', host)
self.limits_reached[host] = time.time() self.limits_reached[host] = time.time()
return 'try_next' return 'try_next'
@@ -225,7 +220,7 @@ config = [{
'description': 'Enable <a href="http://newznab.com/" target="_blank">NewzNab</a> such as <a href="https://nzb.su" target="_blank">NZB.su</a>, \ 'description': 'Enable <a href="http://newznab.com/" target="_blank">NewzNab</a> such as <a href="https://nzb.su" target="_blank">NZB.su</a>, \
<a href="https://nzbs.org" target="_blank">NZBs.org</a>, <a href="http://dognzb.cr/" target="_blank">DOGnzb.cr</a>, \ <a href="https://nzbs.org" target="_blank">NZBs.org</a>, <a href="http://dognzb.cr/" target="_blank">DOGnzb.cr</a>, \
<a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a>, <a href="https://nzbgeek.info/" target="_blank">NZBGeek</a>, \ <a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a>, <a href="https://nzbgeek.info/" target="_blank">NZBGeek</a>, \
<a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>', <a href="https://smackdownonyou.com" target="_blank">SmackDown</a>, <a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>',
'wizard': True, 'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=', 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=',
'options': [ 'options': [
@@ -236,30 +231,30 @@ config = [{
}, },
{ {
'name': 'use', 'name': 'use',
'default': '0,0,0,0,0' 'default': '0,0,0,0,0,0'
}, },
{ {
'name': 'host', 'name': 'host',
'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://api.nzbgeek.info,https://www.nzbfinder.ws', 'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://index.nzbgeek.info, https://smackdownonyou.com, https://www.nzbfinder.ws',
'description': 'The hostname of your newznab provider', 'description': 'The hostname of your newznab provider',
}, },
{ {
'name': 'extra_score', 'name': 'extra_score',
'advanced': True, 'advanced': True,
'label': 'Extra Score', 'label': 'Extra Score',
'default': '0,0,0,0,0', 'default': '0,0,0,0,0,0',
'description': 'Starting score for each release found via this provider.', 'description': 'Starting score for each release found via this provider.',
}, },
{ {
'name': 'custom_tag', 'name': 'custom_tag',
'advanced': True, 'advanced': True,
'label': 'Custom tag', 'label': 'Custom tag',
'default': ',,,,', 'default': ',,,,,',
'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org', 'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org',
}, },
{ {
'name': 'api_key', 'name': 'api_key',
'default': ',,,,', 'default': ',,,,,',
'label': 'Api Key', 'label': 'Api Key',
'description': 'Can be found on your profile page', 'description': 'Can be found on your profile page',
'type': 'combined', 'type': 'combined',
@@ -0,0 +1,126 @@
import re
import time
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from dateutil.parser import parse
log = CPLog(__name__)
class Base(NZBProvider, RSS):
urls = {
'download': 'https://www.nzbindex.com/download/',
'search': 'https://www.nzbindex.com/rss/?%s',
}
http_time_between_calls = 1 # Seconds
def _search(self, media, quality, results):
nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media, quality))
for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])
title = self.getTextElement(nzb, "title")
match = fireEvent('matcher.parse', title, parser='usenet', single = True)
if not match.chains:
log.info('Unable to parse release with title "%s"', title)
continue
# TODO should we consider other lower-weight chains here?
info = fireEvent('matcher.flatten_info', match.chains[0].info, single = True)
release_name = fireEvent('matcher.construct_from_raw', info.get('release_name'), single = True)
file_name = info.get('detail', {}).get('file_name')
file_name = file_name[0] if file_name else None
title = release_name or file_name
# Strip extension from parsed title (if one exists)
ext_pos = title.rfind('.')
# Assume extension if smaller than 4 characters
# TODO this should probably be done a better way
if len(title[ext_pos + 1:]) <= 4:
title = title[:ext_pos]
if not title:
log.info('Unable to find release name from match')
continue
try:
description = self.getTextElement(nzb, "description")
except:
description = ''
def extra_check(item):
if '#c20000' in item['description'].lower():
log.info('Wrong: Seems to be passworded: %s', item['name'])
return False
return True
results.append({
'id': nzbindex_id,
'name': title,
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'],
'detail_url': enclosure['url'].replace('/download/', '/release/'),
'description': description,
'get_more_info': self.getMoreInfo,
'extra_check': extra_check,
})
def getMoreInfo(self, item):
try:
if '/nfo/' in item['description'].lower():
nfo_url = re.search('href=\"(?P<nfo>.+)\" ', item['description']).group('nfo')
full_description = self.getCache('nzbindex.%s' % item['id'], url = nfo_url, cache_timeout = 25920000)
html = BeautifulSoup(full_description)
item['description'] = toUnicode(html.find('pre', attrs = {'id': 'nfo0'}).text)
except:
pass
config = [{
'name': 'nzbindex',
'groups': [
{
'tab': 'searcher',
'list': 'nzb_providers',
'name': 'nzbindex',
'description': 'Free provider, less accurate. See <a href="https://www.nzbindex.com/">NZBIndex</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAo0lEQVR42t2SQQ2AMBAEcUCwUAv94QMLfHliAQtYqIVawEItYAG6yZFMLkUANNlk79Kbbtp2P1j9uKxVV9VWFeStl+Wh3fWK9hNwEoADZkJtMD49AqS5AUjWGx6A+m+ARICGrM5W+wSTB0gETKzdHZwCEZAJ8PGZQN4AiQAmkR9s06EBAugJiBoAAPFfAQcBgZcIHzwA6TYP4JsXeSg3P9L31w3eksbH3zMb/wAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': True,
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
@@ -1,9 +1,13 @@
from urlparse import urlparse, parse_qs
import time
from couchpotato.core.event import fireEvent from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.nzb.base import NZBProvider from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from dateutil.parser import parse
log = CPLog(__name__) log = CPLog(__name__)
@@ -12,19 +16,27 @@ log = CPLog(__name__)
class Base(NZBProvider, RSS): class Base(NZBProvider, RSS):
urls = { urls = {
'search': 'https://api.omgwtfnzbs.org/json/?%s', 'search': 'https://rss.omgwtfnzbs.org/rss-search.php?%s',
'detail_url': 'https://omgwtfnzbs.org/details.php?id=%s',
} }
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
cat_ids = [ cat_ids = [
([15], ['dvdrip', 'scr', 'r5', 'tc', 'ts', 'cam']), ([15], ['dvdrip']),
([15, 16], ['brrip']), ([15, 16], ['brrip']),
([16], ['720p', '1080p', 'bd50']), ([16], ['720p', '1080p', 'bd50']),
([17], ['dvdr']), ([17], ['dvdr']),
] ]
cat_backup_id = 'movie' cat_backup_id = 'movie'
def search(self, movie, quality):
if quality['identifier'] in fireEvent('quality.pre_releases', single = True):
return []
return super(Base, self).search(movie, quality)
def _searchOnTitle(self, title, movie, quality, results): def _searchOnTitle(self, title, movie, quality, results):
q = '%s %s' % (title, movie['info']['year']) q = '%s %s' % (title, movie['info']['year'])
@@ -35,20 +47,22 @@ class Base(NZBProvider, RSS):
'api': self.conf('api_key', default = ''), 'api': self.conf('api_key', default = ''),
}) })
nzbs = self.getJsonData(self.urls['search'] % params) nzbs = self.getRSSData(self.urls['search'] % params)
if isinstance(nzbs, list): for nzb in nzbs:
for nzb in nzbs:
results.append({ enclosure = self.getElement(nzb, 'enclosure').attrib
'id': nzb.get('nzbid'), nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0]
'name': toUnicode(nzb.get('release')),
'age': self.calculateAge(tryInt(nzb.get('usenetage'))), results.append({
'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024, 'id': nzb_id,
'url': nzb.get('getnzb'), 'name': toUnicode(self.getTextElement(nzb, 'title')),
'detail_url': nzb.get('details'), 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))),
'description': nzb.get('weblink') 'size': tryInt(enclosure['length']) / 1024 / 1024,
}) 'url': enclosure['url'],
'detail_url': self.urls['detail_url'] % nzb_id,
'description': self.getTextElement(nzb, 'description')
})
config = [{ config = [{
@@ -61,7 +61,7 @@ class Base(TorrentProvider):
'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)), 'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')), 'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
'detail_url': self.urls['detail'] % torrent_id, 'detail_url': self.urls['detail'] % torrent_id,
'size': tryInt(entry.find('size').get_text()) / 1048576, 'size': self.parseSize(entry.find('size').get_text()),
'seeders': tryInt(entry.find('seeders').get_text()), 'seeders': tryInt(entry.find('seeders').get_text()),
'leechers': tryInt(entry.find('leechers').get_text()), 'leechers': tryInt(entry.find('leechers').get_text()),
'score': torrentscore 'score': torrentscore
@@ -13,11 +13,11 @@ log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentProvider):
urls = { urls = {
'test': 'https://www.bit-hdtv.com/', 'test': 'http://www.bit-hdtv.com/',
'login': 'https://www.bit-hdtv.com/takelogin.php', 'login': 'http://www.bit-hdtv.com/takelogin.php',
'login_check': 'https://www.bit-hdtv.com/messages.php', 'login_check': 'http://www.bit-hdtv.com/messages.php',
'detail': 'https://www.bit-hdtv.com/details.php?id=%s', 'detail': 'http://www.bit-hdtv.com/details.php?id=%s',
'search': 'https://www.bit-hdtv.com/torrents.php?', 'search': 'http://www.bit-hdtv.com/torrents.php?',
} }
# Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
@@ -93,7 +93,7 @@ config = [{
'tab': 'searcher', 'tab': 'searcher',
'list': 'torrent_providers', 'list': 'torrent_providers',
'name': 'BiT-HDTV', 'name': 'BiT-HDTV',
'description': '<a href="https://bit-hdtv.com">BiT-HDTV</a>', 'description': '<a href="http://bit-hdtv.com">BiT-HDTV</a>',
'wizard': True, 'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC', 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC',
'options': [ 'options': [
@@ -22,9 +22,6 @@ class Base(TorrentProvider):
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
only_tables_tags = SoupStrainer('table') only_tables_tags = SoupStrainer('table')
torrent_name_cell = 1
torrent_download_cell = 2
def _searchOnTitle(self, title, movie, quality, results): def _searchOnTitle(self, title, movie, quality, results):
url = self.urls['search'] % self.buildUrl(title, movie, quality) url = self.urls['search'] % self.buildUrl(title, movie, quality)
@@ -43,8 +40,8 @@ class Base(TorrentProvider):
all_cells = result.find_all('td') all_cells = result.find_all('td')
torrent = all_cells[self.torrent_name_cell].find('a') torrent = all_cells[1].find('a')
download = all_cells[self.torrent_download_cell].find('a') download = all_cells[3].find('a')
torrent_id = torrent['href'] torrent_id = torrent['href']
torrent_id = torrent_id.replace('details.php?id=', '') torrent_id = torrent_id.replace('details.php?id=', '')
@@ -52,9 +49,9 @@ class Base(TorrentProvider):
torrent_name = torrent.getText() torrent_name = torrent.getText()
torrent_size = self.parseSize(all_cells[8].getText()) torrent_size = self.parseSize(all_cells[7].getText())
torrent_seeders = tryInt(all_cells[10].getText()) torrent_seeders = tryInt(all_cells[9].getText())
torrent_leechers = tryInt(all_cells[11].getText()) torrent_leechers = tryInt(all_cells[10].getText())
torrent_url = self.urls['baseurl'] % download['href'] torrent_url = self.urls['baseurl'] % download['href']
torrent_detail_url = self.urls['baseurl'] % torrent['href'] torrent_detail_url = self.urls['baseurl'] % torrent['href']
@@ -1,130 +0,0 @@
import re
import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://hdaccess.net/',
'detail': 'https://hdaccess.net/details.php?id=%s',
'search': 'https://hdaccess.net/searchapi.php?apikey=%s&username=%s&imdbid=%s&internal=%s',
'download': 'https://hdaccess.net/grab.php?torrent=%s&apikey=%s',
}
http_time_between_calls = 1 # Seconds
def _search(self, movie, quality, results):
data = self.getJsonData(self.urls['search'] % (self.conf('apikey'), self.conf('username'), getIdentifier(movie), self.conf('internal_only')))
if data:
try:
#for result in data[]:
for key, result in data.iteritems():
if tryInt(result['total_results']) == 0:
return
torrentscore = self.conf('extra_score')
releasegroup = result['releasegroup']
resolution = result['resolution']
encoding = result['encoding']
freeleech = tryInt(result['freeleech'])
seeders = tryInt(result['seeders'])
torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)
if freeleech > 0 and self.conf('prefer_internal'):
torrent_desc += '/ Internal'
torrentscore += 200
if seeders == 0:
torrentscore = 0
name = result['release_name']
year = tryInt(result['year'])
results.append({
'id': tryInt(result['torrentid']),
'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
'url': self.urls['download'] % (result['torrentid'], self.conf('apikey')),
'detail_url': self.urls['detail'] % result['torrentid'],
'size': tryInt(result['size']),
'seeders': tryInt(result['seeders']),
'leechers': tryInt(result['leechers']),
'age': tryInt(result['age']),
'score': torrentscore
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
config = [{
'name': 'hdaccess',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'HDAccess',
'wizard': True,
'description': '<a href="https://hdaccess.net">HDAccess</a>',
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAADuUlEQVQ4yz3T209bdQAH8O/vnNNzWno5FIpAKZdSLi23gWMDtumWuSXOyzJj9M1kyIOPS1xiYuKe9GUPezZZnGIiMTqTxS1bdIuYkG2MWKBAKYVszOgKFkrbA+259HfO+fli/PwPHzI+Pg5CCEAI2VcUlEsl1tHdU7P5bGOkWChEaaUCwvHpmkD93POn6bwgCMQGAMYYYwyCruuQnE7SPzjIstvb8l+bm5fXkokJSmlQEkUQAIpSRH5vd0tyum7I/sA1Z5VH2ctmiGWZjHw4McE1NAZtQ9fD25kXt1VN7es7dNjuGRjiJFeVpWo6slsZPhF/Ys/PPeIs2056ff7zIOS5rpU5/viJEwwEnu3Mi18dojjw0aWP6amz57h9RSE/35zinq2nuGjvIQwOj7K2SKeZWkk0auXSSZ+/ZopSy+CbW1pQKpWu6Jr2/qVPPqWRjm6HWi6Tm999g3RyGbndLCqGgVBrO3F7fHykK0YX47NNtGLYlBq/c+H2iD+3k704dHQUDcFmQVXLyP6zhfTqCl45fQYjx17FemoJunoAk1bQFGoVhkdPwNC0ix2dMT+3llodM02rKdo7gN3dHAEhuH/vNgDg3Pl3cPaNt2GZJpYX5lBbFwClBukfGobL5WrayW6NccVCISY4HIQxYts2Q3J5CXOPHuLlo6NoCoXQ2hbG0JFRpJYWcVDIQ5ZlyL5qW5b9hNlWjKsYBgzDgKppMCoGHty7A0orOHbyNNweL+obGnDm9TdhWSYS8Vn4a2shOZ0QJRGSKIHjeGGtWNhjqqpyG+k04k8eozPai9ZwByavf4kfpyZxZGwMfYOHsbwQx34hB5dL4syKweRq/xpXHwzNapqWSSYWMDszzYqFPEaOn4KiKJiZfoCZ6d8Am+GtC++iXCpjaf4P9vefT8HzfKarp3eWRKMxCILwuWXSz977YIK2RTodDoGH1+OG1+tDlbsKkuiAJEngeWBjNUUnv7rucIiOLyzTvMKJTgnVtbVXLctK3L31g+NAUajL5bEptaDpOnTdgGkzVHl9drms0ju3fnJIkphoaQtfbQiFwAcCAY5wnCE5Xff3i8XX4o9nGksH+8zl9hAGZlWMCivkc9z0L3fZ999+LTCGZKi55YJTFHfye3sc6e/vB88LpK6+iWlqSS4WcpcNXZtwOp3B6mo/REmCSSkEgd+qq3vpRkt75Fp9Y1BZWZwnhq4zEovF/u/MATAti4U7umvyu9kR27aikihC9vvTnV2xufVUMu/2uIksy/9tZvgX49fLmAMx3bsAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
'description': 'Enter your site username.',
},
{
'name': 'apikey',
'default': '',
'label': 'API Key',
'description': 'Enter your site api key. This can be find on <a href="https://hdaccess.net/usercp.php?action=security">Profile Security</a>',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 0,
'description': 'Will not be (re)moved until this seed ratio is met. HDAccess minimum is 1:1.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 0,
'description': 'Will not be (re)moved until this seed time (in hours) is met. HDAccess minimum is 48 hours.',
},
{
'name': 'prefer_internal',
'advanced': True,
'type': 'bool',
'default': 1,
'description': 'Favors internal releases over non-internal releases.',
},
{
'name': 'internal_only',
'advanced': True,
'label': 'Internal Only',
'type': 'bool',
'default': False,
'description': 'Only download releases marked as HDAccess internal',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
@@ -29,9 +29,6 @@ class Base(TorrentProvider):
} }
post_data.update(params) post_data.update(params)
if self.conf('internal_only'):
post_data.update({'origin': [1]})
try: try:
result = self.getJsonData(self.urls['api'], data = json.dumps(post_data)) result = self.getJsonData(self.urls['api'], data = json.dumps(post_data))
@@ -113,14 +110,6 @@ config = [{
'default': 0, 'default': 0,
'description': 'Starting score for each release found via this provider.', 'description': 'Starting score for each release found via this provider.',
}, },
{
'name': 'internal_only',
'advanced': True,
'label': 'Internal Only',
'type': 'bool',
'default': False,
'description': 'Only download releases marked as HDBits internal'
}
], ],
}, },
], ],
@@ -14,11 +14,11 @@ log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentProvider):
urls = { urls = {
'test': 'https://iptorrents.eu/', 'test': 'https://www.iptorrents.com/',
'base_url': 'https://iptorrents.eu', 'base_url': 'https://www.iptorrents.com',
'login': 'https://iptorrents.eu/torrents/', 'login': 'https://www.iptorrents.com/torrents/',
'login_check': 'https://iptorrents.eu/inbox.php', 'login_check': 'https://www.iptorrents.com/inbox.php',
'search': 'https://iptorrents.eu/torrents/?%s%%s&q=%s&qf=ti&p=%%d', 'search': 'https://www.iptorrents.com/torrents/?%s%%s&q=%s&qf=ti&p=%%d',
} }
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
@@ -120,7 +120,7 @@ config = [{
'tab': 'searcher', 'tab': 'searcher',
'list': 'torrent_providers', 'list': 'torrent_providers',
'name': 'IPTorrents', 'name': 'IPTorrents',
'description': '<a href="https://iptorrents.eu">IPTorrents</a>', 'description': '<a href="http://www.iptorrents.com">IPTorrents</a>',
'wizard': True, 'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=', 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=',
'options': [ 'options': [
@@ -34,7 +34,8 @@ class Base(TorrentMagnetProvider):
'http://kickass.pw', 'http://kickass.pw',
'http://kickassto.come.in', 'http://kickassto.come.in',
'http://katproxy.ws', 'http://katproxy.ws',
'http://kickass.bitproxy.eu', 'http://www.kickassunblock.info',
'http://www.kickassproxy.info',
'http://katph.eu', 'http://katph.eu',
'http://kickassto.come.in', 'http://kickassto.come.in',
] ]
@@ -64,10 +64,6 @@ class Base(TorrentProvider):
torrentdesc += ' HQ' torrentdesc += ' HQ'
if self.conf('prefer_golden'): if self.conf('prefer_golden'):
torrentscore += 5000 torrentscore += 5000
if 'FreeleechType' in torrent:
torrentdesc += ' Freeleech'
if self.conf('prefer_freeleech'):
torrentscore += 7000
if 'Scene' in torrent and torrent['Scene']: if 'Scene' in torrent and torrent['Scene']:
torrentdesc += ' Scene' torrentdesc += ' Scene'
if self.conf('prefer_scene'): if self.conf('prefer_scene'):
@@ -227,14 +223,6 @@ config = [{
'default': 1, 'default': 1,
'description': 'Favors Golden Popcorn-releases over all other releases.' 'description': 'Favors Golden Popcorn-releases over all other releases.'
}, },
{
'name': 'prefer_freeleech',
'advanced': True,
'type': 'bool',
'label': 'Prefer Freeleech',
'default': 1,
'description': 'Favors torrents marked as freeleech over all other releases.'
},
{ {
'name': 'prefer_scene', 'name': 'prefer_scene',
'advanced': True, 'advanced': True,
@@ -42,7 +42,6 @@ class Base(TorrentProvider):
link = result.find('td', attrs = {'class': 'ttr_name'}).find('a') link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
url = result.find('td', attrs = {'class': 'td_dl'}).find('a') url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a') leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
torrent_id = link['href'].replace('details?id=', '') torrent_id = link['href'].replace('details?id=', '')
@@ -52,7 +51,7 @@ class Base(TorrentProvider):
'url': self.urls['download'] % url['href'], 'url': self.urls['download'] % url['href'],
'detail_url': self.urls['detail'] % torrent_id, 'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]), 'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
'seeders': tryInt(seeders.string) if seeders else 0, 'seeders': tryInt(result.find('td', attrs = {'class': 'ttr_seeders'}).find('a').string),
'leechers': tryInt(leechers.string) if leechers else 0, 'leechers': tryInt(leechers.string) if leechers else 0,
'get_more_info': self.getMoreInfo, 'get_more_info': self.getMoreInfo,
}) })
@@ -24,16 +24,16 @@ class Base(TorrentMagnetProvider):
http_time_between_calls = 0 http_time_between_calls = 0
proxy_list = [ proxy_list = [
'https://dieroschtibay.org', 'https://nobay.net',
'https://thebay.al', 'https://thebay.al',
'https://thepiratebay.se', 'https://thepiratebay.se',
'http://thepiratebay.se.net', 'http://thepiratebay.cd',
'http://thebootlegbay.com', 'http://thebootlegbay.com',
'http://tpb.ninja.so', 'http://www.tpb.gr',
'http://proxybay.fr', 'http://tpbproxy.co.uk',
'http://pirateproxy.in', 'http://pirateproxy.in',
'http://piratebay.skey.sk', 'http://www.getpirate.com',
'http://pirateproxy.be', 'http://piratebay.io',
'http://bayproxy.li', 'http://bayproxy.li',
'http://proxybay.pw', 'http://proxybay.pw',
] ]
@@ -1,7 +1,7 @@
import traceback import traceback
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -56,12 +56,11 @@ class Base(TorrentProvider):
full_id = link['href'].replace('details.php?id=', '') full_id = link['href'].replace('details.php?id=', '')
torrent_id = full_id[:6] torrent_id = full_id[:6]
name = toUnicode(link.get('title', link.contents[0]).encode('ISO-8859-1')).strip()
results.append({ results.append({
'id': torrent_id, 'id': torrent_id,
'name': name, 'name': link.contents[0],
'url': self.urls['download'] % (torrent_id, name), 'url': self.urls['download'] % (torrent_id, link.contents[0]),
'detail_url': self.urls['detail'] % torrent_id, 'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]), 'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
'seeders': tryInt(cells[8].find('span').contents[0]), 'seeders': tryInt(cells[8].find('span').contents[0]),
@@ -1,4 +1,3 @@
import re
from couchpotato.core.helpers.variable import tryInt from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@@ -9,12 +8,12 @@ log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentProvider):
urls = { urls = {
'test': 'https://torrentday.eu/', 'test': 'http://www.td.af/',
'login': 'https://torrentday.eu/torrents/', 'login': 'http://www.td.af/torrents/',
'login_check': 'https://torrentday.eu/userdetails.php', 'login_check': 'http://www.torrentday.com/userdetails.php',
'detail': 'https://torrentday.eu/details.php?id=%s', 'detail': 'http://www.td.af/details.php?id=%s',
'search': 'https://torrentday.eu/V3/API/API.php', 'search': 'http://www.td.af/V3/API/API.php',
'download': 'https://torrentday.eu/download.php/%s/%s', 'download': 'http://www.td.af/download.php/%s/%s',
} }
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
@@ -56,10 +55,6 @@ class Base(TorrentProvider):
} }
def loginSuccess(self, output): def loginSuccess(self, output):
often = re.search('You tried too often, please wait .*</div>', output)
if often:
raise Exception(often.group(0)[:-6].strip())
return 'Password not correct' not in output return 'Password not correct' not in output
def loginCheckSuccess(self, output): def loginCheckSuccess(self, output):
@@ -73,7 +68,7 @@ config = [{
'tab': 'searcher', 'tab': 'searcher',
'list': 'torrent_providers', 'list': 'torrent_providers',
'name': 'TorrentDay', 'name': 'TorrentDay',
'description': '<a href="https://torrentday.eu/">TorrentDay</a>', 'description': '<a href="http://www.td.af/">TorrentDay</a>',
'wizard': True, 'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=', 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=',
'options': [ 'options': [
@@ -13,12 +13,12 @@ log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentProvider):
urls = { urls = {
'test': 'https://www.torrentleech.org/', 'test': 'http://www.torrentleech.org/',
'login': 'https://www.torrentleech.org/user/account/login/', 'login': 'http://www.torrentleech.org/user/account/login/',
'login_check': 'https://torrentleech.org/user/messages', 'login_check': 'http://torrentleech.org/user/messages',
'detail': 'https://www.torrentleech.org/torrent/%s', 'detail': 'http://www.torrentleech.org/torrent/%s',
'search': 'https://www.torrentleech.org/torrents/browse/index/query/%s/categories/%s', 'search': 'http://www.torrentleech.org/torrents/browse/index/query/%s/categories/%d',
'download': 'https://www.torrentleech.org%s', 'download': 'http://www.torrentleech.org%s',
} }
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
@@ -13,12 +13,12 @@ log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentProvider):
urls = { urls = {
'test': 'https://torrentshack.me/', 'test': 'https://torrentshack.net/',
'login': 'https://torrentshack.me/login.php', 'login': 'https://torrentshack.net/login.php',
'login_check': 'https://torrentshack.me/inbox.php', 'login_check': 'https://torrentshack.net/inbox.php',
'detail': 'https://torrentshack.me/torrent/%s', 'detail': 'https://torrentshack.net/torrent/%s',
'search': 'https://torrentshack.me/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1', 'search': 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
'download': 'https://torrentshack.me/%s', 'download': 'https://torrentshack.net/%s',
} }
http_time_between_calls = 1 # Seconds http_time_between_calls = 1 # Seconds
@@ -42,17 +42,15 @@ class Base(TorrentProvider):
link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent
url = result.find('td', attrs = {'class': 'torrent_td'}).find('a') url = result.find('td', attrs = {'class': 'torrent_td'}).find('a')
size = result.find('td', attrs = {'class': 'size'}).contents[0].strip('\n ')
tds = result.find_all('td')
results.append({ results.append({
'id': link['href'].replace('torrents.php?torrentid=', ''), 'id': link['href'].replace('torrents.php?torrentid=', ''),
'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}), 'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
'url': self.urls['download'] % url['href'], 'url': self.urls['download'] % url['href'],
'detail_url': self.urls['download'] % link['href'], 'detail_url': self.urls['download'] % link['href'],
'size': self.parseSize(size), 'size': self.parseSize(result.find_all('td')[5].string),
'seeders': tryInt(tds[len(tds)-2].string), 'seeders': tryInt(result.find_all('td')[7].string),
'leechers': tryInt(tds[len(tds)-1].string), 'leechers': tryInt(result.find_all('td')[8].string),
}) })
except: except:
@@ -82,7 +80,7 @@ config = [{
'tab': 'searcher', 'tab': 'searcher',
'list': 'torrent_providers', 'list': 'torrent_providers',
'name': 'TorrentShack', 'name': 'TorrentShack',
'description': '<a href="https://torrentshack.me/">TorrentShack</a>', 'description': '<a href="https://www.torrentshack.net/">TorrentShack</a>',
'wizard': True, 'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC', 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC',
'options': [ 'options': [
@@ -22,12 +22,12 @@ class Base(TorrentMagnetProvider, RSS):
http_time_between_calls = 0 http_time_between_calls = 0
def _searchOnTitle(self, title, media, quality, results): def _search(self, media, quality, results):
search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search'] search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search']
# Create search parameters # Create search parameters
search_params = self.buildUrl(title, media, quality) search_params = self.buildUrl(media)
smin = quality.get('size_min') smin = quality.get('size_min')
smax = quality.get('size_max') smax = quality.get('size_max')
@@ -2,25 +2,28 @@ import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
log = CPLog(__name__) log = CPLog(__name__)
class Base(TorrentProvider): class Base(TorrentMagnetProvider):
urls = { urls = {
'test': '%s/api/v2', 'test': '%s/api',
'search': '%s/api/v2/list_movies.json?limit=50&query_term=%s' 'search': '%s/api/list.json?keywords=%s&quality=%s',
'detail': '%s/api/movie.json?id=%s'
} }
http_time_between_calls = 1 # seconds http_time_between_calls = 1 # seconds
proxy_list = [ proxy_list = [
'https://yts.re', 'http://yify.unlocktorrent.com',
'https://yts.wf', 'http://yify-torrents.com.come.in',
'https://yts.im', 'http://yts.re',
'http://yts.im'
'http://yify-torrents.im',
] ]
def search(self, movie, quality): def search(self, movie, quality):
@@ -36,31 +39,28 @@ class Base(TorrentProvider):
if not domain: if not domain:
return return
search_url = self.urls['search'] % (domain, getIdentifier(movie)) search_url = self.urls['search'] % (domain, getIdentifier(movie), quality['identifier'])
data = self.getJsonData(search_url) or {} data = self.getJsonData(search_url)
data = data.get('data')
if isinstance(data, dict) and data.get('movies'): if data and data.get('MovieList'):
try: try:
for result in data.get('movies'): for result in data.get('MovieList'):
for release in result.get('torrents', []): if result['Quality'] and result['Quality'] not in result['MovieTitle']:
title = result['MovieTitle'] + ' BrRip ' + result['Quality']
else:
title = result['MovieTitle'] + ' BrRip'
if release['quality'] and release['quality'] not in result['title_long']: results.append({
title = result['title_long'] + ' BRRip ' + release['quality'] 'id': result['MovieID'],
else: 'name': title,
title = result['title_long'] + ' BRRip' 'url': result['TorrentMagnetUrl'],
'detail_url': self.urls['detail'] % (domain, result['MovieID']),
results.append({ 'size': self.parseSize(result['Size']),
'id': release['hash'], 'seeders': tryInt(result['TorrentSeeds']),
'name': title, 'leechers': tryInt(result['TorrentPeers']),
'url': release['url'], })
'detail_url': result['url'],
'size': self.parseSize(release['size']),
'seeders': tryInt(release['seeds']),
'leechers': tryInt(release['peers']),
})
except: except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
@@ -73,24 +73,4 @@ config = [{
], ],
}, },
], ],
}, {
'name': 'torrent',
'groups': [
{
'tab': 'searcher',
'name': 'searcher',
'wizard': True,
'options': [
{
'name': 'minimum_seeders',
'advanced': True,
'label': 'Minimum seeders',
'description': 'Ignore torrents with seeders below this number',
'default': 1,
'type': 'int',
'unit': 'seeders'
},
],
},
],
}] }]
+23 -28
View File
@@ -129,11 +129,7 @@ class Searcher(SearcherBase):
# Try guessing via quality tags # Try guessing via quality tags
guess = fireEvent('quality.guess', [nzb.get('name')], single = True) guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
if guess: return threed == guess.get('is_3d')
return threed == guess.get('is_3d')
# If no quality guess, assume not 3d
else:
return threed == False
def correctYear(self, haystack, year, year_range): def correctYear(self, haystack, year, year_range):
@@ -178,25 +174,6 @@ class Searcher(SearcherBase):
return False return False
def containsWords(self, rel_name, rel_words, conf, media):
# Make sure it has required words
words = splitString(self.conf('%s_words' % conf, section = 'searcher').lower())
try: words = removeDuplicate(words + splitString(media['category'][conf].lower()))
except: pass
req_match = 0
for req_set in words:
if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
if re.search(req_set[1:-1], rel_name):
log.debug('Regex match: %s', req_set[1:-1])
req_match += 1
else:
req = splitString(req_set, '&')
req_match += len(list(set(rel_words) & set(req))) == len(req)
return words, req_match > 0
def correctWords(self, rel_name, media): def correctWords(self, rel_name, media):
media_title = fireEvent('searcher.get_search_title', media, single = True) media_title = fireEvent('searcher.get_search_title', media, single = True)
media_words = re.split('\W+', simplifyString(media_title)) media_words = re.split('\W+', simplifyString(media_title))
@@ -204,13 +181,31 @@ class Searcher(SearcherBase):
rel_name = simplifyString(rel_name) rel_name = simplifyString(rel_name)
rel_words = re.split('\W+', rel_name) rel_words = re.split('\W+', rel_name)
required_words, contains_required = self.containsWords(rel_name, rel_words, 'required', media) # Make sure it has required words
if len(required_words) > 0 and not contains_required: required_words = splitString(self.conf('required_words', section = 'searcher').lower())
try: required_words = removeDuplicate(required_words + splitString(media['category']['required'].lower()))
except: pass
req_match = 0
for req_set in required_words:
req = splitString(req_set, '&')
req_match += len(list(set(rel_words) & set(req))) == len(req)
if len(required_words) > 0 and req_match == 0:
log.info2('Wrong: Required word missing: %s', rel_name) log.info2('Wrong: Required word missing: %s', rel_name)
return False return False
ignored_words, contains_ignored = self.containsWords(rel_name, rel_words, 'ignored', media) # Ignore releases
if len(ignored_words) > 0 and contains_ignored: ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
try: ignored_words = removeDuplicate(ignored_words + splitString(media['category']['ignored'].lower()))
except: pass
ignored_match = 0
for ignored_set in ignored_words:
ignored = splitString(ignored_set, '&')
ignored_match += len(list(set(rel_words) & set(ignored))) == len(ignored)
if len(ignored_words) > 0 and ignored_match:
log.info2("Wrong: '%s' contains 'ignored words'", rel_name) log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
return False return False
+38 -21
View File
@@ -1,3 +1,4 @@
import os
import traceback import traceback
import time import time
@@ -27,10 +28,6 @@ class MovieBase(MovieTypeBase):
addApiView('movie.add', self.addView, docs = { addApiView('movie.add', self.addView, docs = {
'desc': 'Add new movie to the wanted list', 'desc': 'Add new movie to the wanted list',
'return': {'type': 'object', 'example': """{
'success': True,
'movie': object
}"""},
'params': { 'params': {
'identifier': {'desc': 'IMDB id of the movie your want to add.'}, 'identifier': {'desc': 'IMDB id of the movie your want to add.'},
'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'}, 'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
@@ -49,7 +46,7 @@ class MovieBase(MovieTypeBase):
}) })
addEvent('movie.add', self.add) addEvent('movie.add', self.add)
addEvent('movie.update', self.update) addEvent('movie.update_info', self.updateInfo)
addEvent('movie.update_release_dates', self.updateReleaseDate) addEvent('movie.update_release_dates', self.updateReleaseDate)
def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None): def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None):
@@ -65,7 +62,7 @@ class MovieBase(MovieTypeBase):
return False return False
elif not params.get('info'): elif not params.get('info'):
try: try:
is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True) is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), single = True)
if not is_movie: if not is_movie:
msg = 'Can\'t add movie, seems to be a TV show.' msg = 'Can\'t add movie, seems to be a TV show.'
log.error(msg) log.error(msg)
@@ -154,7 +151,8 @@ class MovieBase(MovieTypeBase):
for release in fireEvent('release.for_media', m['_id'], single = True): for release in fireEvent('release.for_media', m['_id'], single = True):
if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']: if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
if params.get('ignore_previous', False): if params.get('ignore_previous', False):
fireEvent('release.update_status', release['_id'], status = 'ignored') release['status'] = 'ignored'
db.update(release)
else: else:
fireEvent('release.delete', release['_id'], single = True) fireEvent('release.delete', release['_id'], single = True)
@@ -174,7 +172,7 @@ class MovieBase(MovieTypeBase):
# Trigger update info # Trigger update info
if added and update_after: if added and update_after:
# Do full update to get images etc # Do full update to get images etc
fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete) fireEventAsync('movie.update_info', m['_id'], default_title = params.get('title'), on_complete = onComplete)
# Remove releases # Remove releases
for rel in fireEvent('release.for_media', m['_id'], single = True): for rel in fireEvent('release.for_media', m['_id'], single = True):
@@ -182,9 +180,6 @@ class MovieBase(MovieTypeBase):
db.delete(rel) db.delete(rel)
movie_dict = fireEvent('media.get', m['_id'], single = True) movie_dict = fireEvent('media.get', m['_id'], single = True)
if not movie_dict:
log.debug('Failed adding media, can\'t find it anymore')
return False
if do_search and search_after: if do_search and search_after:
onComplete = self.createOnComplete(m['_id']) onComplete = self.createOnComplete(m['_id'])
@@ -261,7 +256,7 @@ class MovieBase(MovieTypeBase):
'success': False, 'success': False,
} }
def update(self, media_id = None, identifier = None, default_title = None, extended = False): def updateInfo(self, media_id = None, identifier = None, default_title = None, extended = False):
""" """
Update movie information inside media['doc']['info'] Update movie information inside media['doc']['info']
@@ -274,10 +269,6 @@ class MovieBase(MovieTypeBase):
if self.shuttingDown(): if self.shuttingDown():
return return
lock_key = 'media.get.%s' % media_id if media_id else identifier
self.acquireLock(lock_key)
media = {}
try: try:
db = get_db() db = get_db()
@@ -321,16 +312,42 @@ class MovieBase(MovieTypeBase):
media['title'] = def_title media['title'] = def_title
# Files # Files
image_urls = info.get('images', []) images = info.get('images', [])
media['files'] = media.get('files', {})
for image_type in ['poster']:
self.getPoster(media, image_urls) # Remove non-existing files
file_type = 'image_%s' % image_type
existing_files = list(set(media['files'].get(file_type, [])))
for ef in media['files'].get(file_type, []):
if not os.path.isfile(ef):
existing_files.remove(ef)
# Replace new files list
media['files'][file_type] = existing_files
if len(existing_files) == 0:
del media['files'][file_type]
# Loop over type
for image in images.get(image_type, []):
if not isinstance(image, (str, unicode)):
continue
if file_type not in media['files'] or len(media['files'].get(file_type, [])) == 0:
file_path = fireEvent('file.download', url = image, single = True)
if file_path:
media['files'][file_type] = [file_path]
break
else:
break
db.update(media) db.update(media)
return media
except: except:
log.error('Failed update media: %s', traceback.format_exc()) log.error('Failed update media: %s', traceback.format_exc())
self.releaseLock(lock_key) return {}
return media
def updateReleaseDate(self, media_id): def updateReleaseDate(self, media_id):
""" """
@@ -346,7 +363,7 @@ class MovieBase(MovieTypeBase):
media = db.get('id', media_id) media = db.get('id', media_id)
if not media.get('info'): if not media.get('info'):
media = self.update(media_id) media = self.updateInfo(media_id)
dates = media.get('info', {}).get('release_date') dates = media.get('info', {}).get('release_date')
else: else:
dates = media.get('info').get('release_date') dates = media.get('info').get('release_date')
@@ -115,15 +115,8 @@ MA.Release = new Class({
self.releases = null; self.releases = null;
if(self.options_container){ if(self.options_container){
// Releases are currently displayed self.options_container.destroy();
if(self.options_container.isDisplayed()){ self.options_container = null;
self.options_container.destroy();
self.createReleases();
}
else {
self.options_container.destroy();
self.options_container = null;
}
} }
}); });
@@ -138,10 +131,10 @@ MA.Release = new Class({
}, },
createReleases: function(refresh){ createReleases: function(){
var self = this; var self = this;
if(!self.options_container || refresh){ if(!self.options_container){
self.options_container = new Element('div.options').grab( self.options_container = new Element('div.options').grab(
self.release_container = new Element('div.releases.table') self.release_container = new Element('div.releases.table')
); );
@@ -696,7 +689,7 @@ MA.Readd = new Class({
if(movie_done || snatched && snatched > 0) if(movie_done || snatched && snatched > 0)
self.el = new Element('a.readd', { self.el = new Element('a.readd', {
'title': 'Re-add the movie and mark all previous snatched/downloaded as ignored', 'title': 'Readd the movie and mark all previous snatched/downloaded as ignored',
'events': { 'events': {
'click': self.doReadd.bind(self) 'click': self.doReadd.bind(self)
} }
@@ -54,21 +54,13 @@ var Movie = new Class({
// Reload when releases have updated // Reload when releases have updated
self.global_events['release.update_status'] = function(notification){ self.global_events['release.update_status'] = function(notification){
var data = notification.data; var data = notification.data;
if(data && self.data._id == data.media_id){ if(data && self.data._id == data.movie_id){
if(!self.data.releases) if(!self.data.releases)
self.data.releases = []; self.data.releases = [];
var updated = false; self.data.releases.push({'quality': data.quality, 'status': data.status});
self.data.releases.each(function(release){ self.updateReleases();
if(release._id == data._id){
release['status'] = data.status;
updated = true;
}
});
if(updated)
self.updateReleases();
} }
}; };
@@ -167,7 +159,7 @@ var Movie = new Class({
} }
} }
}), }),
self.thumbnail = (self.data.files && self.data.files.image_poster && self.data.files.image_poster.length > 0) ? new Element('img', { self.thumbnail = (self.data.files && self.data.files.image_poster) ? new Element('img', {
'class': 'type_image poster', 'class': 'type_image poster',
'src': Api.createUrl('file.cache') + self.data.files.image_poster[0].split(Api.getOption('path_sep')).pop() 'src': Api.createUrl('file.cache') + self.data.files.image_poster[0].split(Api.getOption('path_sep')).pop()
}): null, }): null,
@@ -21,6 +21,13 @@ config = [{
'type': 'int', 'type': 'int',
'description': 'Maximum number of items displayed from each chart.', 'description': 'Maximum number of items displayed from each chart.',
}, },
{
'name': 'update_interval',
'default': 12,
'type': 'int',
'advanced': True,
'description': '(hours)',
},
{ {
'name': 'hide_wanted', 'name': 'hide_wanted',
'default': False, 'default': False,
+3 -3
View File
@@ -1,5 +1,6 @@
import time import time
from couchpotato import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.event import addEvent,fireEvent from couchpotato.core.event import addEvent,fireEvent
@@ -12,14 +13,13 @@ log = CPLog(__name__)
class Charts(Plugin): class Charts(Plugin):
update_in_progress = False update_in_progress = False
update_interval = 72 # hours
def __init__(self): def __init__(self):
addApiView('charts.view', self.automationView) addApiView('charts.view', self.automationView)
addEvent('app.load', self.setCrons) addEvent('app.load', self.setCrons)
def setCrons(self): def setCrons(self):
fireEvent('schedule.interval', 'charts.update_cache', self.updateViewCache, hours = self.update_interval) fireEvent('schedule.interval', 'charts.update_cache', self.updateViewCache, hours = self.conf('update_interval', default = 12))
def automationView(self, force_update = False, **kwargs): def automationView(self, force_update = False, **kwargs):
@@ -52,7 +52,7 @@ class Charts(Plugin):
for chart in charts: for chart in charts:
chart['hide_wanted'] = self.conf('hide_wanted') chart['hide_wanted'] = self.conf('hide_wanted')
chart['hide_library'] = self.conf('hide_library') chart['hide_library'] = self.conf('hide_library')
self.setCache('charts_cached', charts, timeout = self.update_interval * 3600) self.setCache('charts_cached', charts, timeout = 7200 * tryInt(self.conf('update_interval', default = 12)))
except: except:
log.error('Failed refreshing charts') log.error('Failed refreshing charts')
@@ -264,11 +264,3 @@
height: 40px; height: 40px;
} }
@media all and (max-width: 480px) {
.toggle_menu h2 {
font-size: 16px;
text-align: center;
height: 30px;
}
}
@@ -2,8 +2,6 @@ var Charts = new Class({
Implements: [Options, Events], Implements: [Options, Events],
shown_once: false,
initialize: function(options){ initialize: function(options){
var self = this; var self = this;
self.setOptions(options); self.setOptions(options);
@@ -42,12 +40,15 @@ var Charts = new Class({
) )
); );
if( Cookie.read('suggestions_charts_menu_selected') === 'charts'){ if( Cookie.read('suggestions_charts_menu_selected') === 'charts')
self.show(); self.el.show();
}
else else
self.el.hide(); self.el.hide();
self.api_request = Api.request('charts.view', {
'onComplete': self.fill.bind(self)
});
self.fireEvent.delay(0, self, 'created'); self.fireEvent.delay(0, self, 'created');
}, },
@@ -156,24 +157,6 @@ var Charts = new Class({
}, },
show: function(){
var self = this;
self.el.show();
if(!self.shown_once){
self.api_request = Api.request('charts.view', {
'onComplete': self.fill.bind(self)
});
self.shown_once = true;
}
},
hide: function(){
this.el.hide();
},
afterAdded: function(m){ afterAdded: function(m){
$(m).getElement('div.chart_number') $(m).getElement('div.chart_number')
@@ -1,5 +1,3 @@
import traceback
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from couchpotato import fireEvent from couchpotato import fireEvent
from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.rss import RSS
@@ -7,7 +5,6 @@ from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__) log = CPLog(__name__)
autoload = 'Bluray' autoload = 'Bluray'
@@ -37,49 +34,27 @@ class Bluray(Automation, RSS):
try: try:
# Stop if the release year is before the minimal year # Stop if the release year is before the minimal year
brk = False page_year = soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].h3.get_text().split(', ')[1]
h3s = soup.body.find_all('h3') if tryInt(page_year) < self.getMinimal('year'):
for h3 in h3s:
if h3.parent.name != 'a':
try:
page_year = tryInt(h3.get_text()[-4:])
if page_year > 0 and page_year < self.getMinimal('year'):
brk = True
except:
log.error('Failed determining page year: %s', traceback.format_exc())
brk = True
break
if brk:
break break
for h3 in h3s: for table in soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].find_all('table')[1:20]:
try: name = table.h3.get_text().lower().split('blu-ray')[0].strip()
if h3.parent.name == 'a': year = table.small.get_text().split('|')[1].strip()
name = h3.get_text().lower().split('blu-ray')[0].strip()
if not name.find('/') == -1: # make sure it is not a double movie release if not name.find('/') == -1: # make sure it is not a double movie release
continue continue
if not h3.parent.parent.small: # ignore non-movie tables if tryInt(year) < self.getMinimal('year'):
continue continue
year = h3.parent.parent.small.get_text().split('|')[1].strip() imdb = self.search(name, year)
if tryInt(year) < self.getMinimal('year'): if imdb:
continue if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
imdb = self.search(name, year)
if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
except:
log.debug('Error parsing movie html: %s', traceback.format_exc())
break
except: except:
log.debug('Error loading page %s: %s', (page, traceback.format_exc())) log.debug('Error loading page: %s', page)
break break
self.conf('backlog', value = False) self.conf('backlog', value = False)
@@ -159,7 +134,7 @@ config = [{
{ {
'name': 'backlog', 'name': 'backlog',
'advanced': True, 'advanced': True,
'description': ('Parses the history until the minimum movie year is reached. (Takes a while)', 'Will be disabled once it has completed'), 'description': 'Parses the history until the minimum movie year is reached. (Will be disabled once it has completed)',
'default': False, 'default': False,
'type': 'bool', 'type': 'bool',
}, },
@@ -1,89 +0,0 @@
import re
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'CrowdAI'
class CrowdAI(Automation, RSS):
interval = 1800
def getIMDBids(self):
movies = []
urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))
for url in urls:
if not urls[url]:
continue
rss_movies = self.getRSSData(url)
for movie in rss_movies:
description = self.getTextElement(movie, 'description')
grabs = 0
for item in movie:
if item.attrib.get('name') == 'grabs':
grabs = item.attrib.get('value')
break
if int(grabs) > tryInt(self.conf('number_grabs')):
title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
year = re.match(r'.*Year: (\d{4}).*', description).group(1)
imdb = self.search(title, year)
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies
config = [{
'name': 'crowdai',
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'crowdai_automation',
'label': 'CrowdAI',
'description': 'Imports from any newznab powered NZB providers RSS feed depending on the number of grabs per movie. Go to your newznab site and find the RSS section. Then copy the copy paste the link under "Movies > x264 feed" here.',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_urls_use',
'label': 'Use',
'default': '1',
},
{
'name': 'automation_urls',
'label': 'url',
'type': 'combined',
'combine': ['automation_urls_use', 'automation_urls'],
'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100',
},
{
'name': 'number_grabs',
'default': '500',
'label': 'Grab threshold',
'description': 'Number of grabs required',
},
],
},
],
}]
@@ -48,12 +48,11 @@ class Letterboxd(Automation):
soup = BeautifulSoup(self.getHTMLData(self.url % username)) soup = BeautifulSoup(self.getHTMLData(self.url % username))
for movie in soup.find_all('li', attrs = {'class': 'poster-container'}): for movie in soup.find_all('a', attrs = {'class': 'frame'}):
img = movie.find('img', movie) match = removeEmpty(self.pattern.split(movie['title']))
title = img.get('alt')
movies.append({ movies.append({
'title': title 'title': match[0],
'year': match[1]
}) })
return movies return movies
@@ -39,14 +39,15 @@ class Rottentomatoes(Automation, RSS):
if result: if result:
log.info2('Something smells...')
rating = tryInt(self.getTextElement(movie, rating_tag)) rating = tryInt(self.getTextElement(movie, rating_tag))
name = result.group(0) name = result.group(0)
print rating, tryInt(self.conf('tomatometer_percent'))
if rating < tryInt(self.conf('tomatometer_percent')): if rating < tryInt(self.conf('tomatometer_percent')):
log.info2('%s seems to be rotten...', name) log.info2('%s seems to be rotten...', name)
else: else:
log.info2('Found %s with fresh rating %s', (name, rating))
log.info2('Found %s fresh enough movies, enqueuing: %s', (rating, name))
year = datetime.datetime.now().strftime("%Y") year = datetime.datetime.now().strftime("%Y")
imdb = self.search(name, year) imdb = self.search(name, year)
@@ -2,7 +2,7 @@ import base64
import time import time
from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode, ss from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.core.media.movie.providers.base import MovieProvider
from couchpotato.environment import Env from couchpotato.environment import Env
@@ -66,18 +66,15 @@ class CouchPotatoApi(MovieProvider):
if not name: if not name:
return return
name_enc = base64.b64encode(ss(name)) name_enc = base64.b64encode(name)
return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders()) return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders())
def isMovie(self, identifier = None, adding = False): def isMovie(self, identifier = None):
if not identifier: if not identifier:
return return
url = self.urls['is_movie'] % identifier data = self.getJsonData(self.urls['is_movie'] % identifier, headers = self.getRequestHeaders())
url += '?adding=1' if adding else ''
data = self.getJsonData(url, headers = self.getRequestHeaders())
if data: if data:
return data.get('is_movie', True) return data.get('is_movie', True)
@@ -4,7 +4,6 @@ from couchpotato import tryInt
from couchpotato.core.event import addEvent from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.core.media.movie.providers.base import MovieProvider
from requests import HTTPError
log = CPLog(__name__) log = CPLog(__name__)
@@ -24,23 +23,22 @@ class FanartTV(MovieProvider):
def __init__(self): def __init__(self):
addEvent('movie.info', self.getArt, priority = 1) addEvent('movie.info', self.getArt, priority = 1)
def getArt(self, identifier = None, extended = True, **kwargs): def getArt(self, identifier = None, **kwargs):
if not identifier or not extended: log.debug("Getting Extra Artwork from Fanart.tv...")
if not identifier:
return {} return {}
images = {} images = {}
try: try:
url = self.urls['api'] % identifier url = self.urls['api'] % identifier
fanart_data = self.getJsonData(url, show_error = False) fanart_data = self.getJsonData(url)
if fanart_data: if fanart_data:
log.debug('Found images for %s', fanart_data.get('name')) log.debug('Found images for %s', fanart_data.get('name'))
images = self._parseMovie(fanart_data) images = self._parseMovie(fanart_data)
except HTTPError as e:
log.debug('Failed getting extra art for %s: %s',
(identifier, e))
except: except:
log.error('Failed getting extra art for %s: %s', log.error('Failed getting extra art for %s: %s',
(identifier, traceback.format_exc())) (identifier, traceback.format_exc()))
@@ -2,7 +2,6 @@ import json
import re import re
import traceback import traceback
from couchpotato import Env
from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString
@@ -18,8 +17,8 @@ autoload = 'OMDBAPI'
class OMDBAPI(MovieProvider): class OMDBAPI(MovieProvider):
urls = { urls = {
'search': 'http://www.omdbapi.com/?type=movie&%s', 'search': 'http://www.omdbapi.com/?%s',
'info': 'http://www.omdbapi.com/?type=movie&i=%s', 'info': 'http://www.omdbapi.com/?i=%s',
} }
http_time_between_calls = 0 http_time_between_calls = 0
@@ -39,8 +38,7 @@ class OMDBAPI(MovieProvider):
} }
cache_key = 'omdbapi.cache.%s' % q cache_key = 'omdbapi.cache.%s' % q
url = self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}) cached = self.getCache(cache_key, self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}), timeout = 3)
cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
if cached: if cached:
result = self.parseMovie(cached) result = self.parseMovie(cached)
@@ -58,7 +56,7 @@ class OMDBAPI(MovieProvider):
return {} return {}
cache_key = 'omdbapi.cache.%s' % identifier cache_key = 'omdbapi.cache.%s' % identifier
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3, headers = {'User-Agent': Env.getIdentifier()}) cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3)
if cached: if cached:
result = self.parseMovie(cached) result = self.parseMovie(cached)
@@ -1,10 +1,11 @@
import traceback import traceback
from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode, ss, tryUrlencode from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss
from couchpotato.core.helpers.variable import tryInt from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.core.media.movie.providers.base import MovieProvider
import tmdb3
log = CPLog(__name__) log = CPLog(__name__)
@@ -12,66 +13,54 @@ autoload = 'TheMovieDb'
class TheMovieDb(MovieProvider): class TheMovieDb(MovieProvider):
MAX_EXTRATHUMBS = 4
http_time_between_calls = .35
configuration = {
'images': {
'secure_base_url': 'https://image.tmdb.org/t/p/',
},
}
def __init__(self): def __init__(self):
addEvent('info.search', self.search, priority = 3)
addEvent('movie.search', self.search, priority = 3)
addEvent('movie.info', self.getInfo, priority = 3) addEvent('movie.info', self.getInfo, priority = 3)
addEvent('movie.info_by_tmdb', self.getInfo) addEvent('movie.info_by_tmdb', self.getInfo)
addEvent('app.load', self.config)
def config(self): # Configure TMDB settings
configuration = self.request('configuration') tmdb3.set_key(self.conf('api_key'))
if configuration: tmdb3.set_cache('null')
self.configuration = configuration
def search(self, q, limit = 3): def search(self, q, limit = 12):
""" Find movie by name """ """ Find movie by name """
if self.isDisabled(): if self.isDisabled():
return False return False
log.debug('Searching for movie: %s', q) search_string = simplifyString(q)
cache_key = 'tmdb.cache.%s.%s' % (search_string, limit)
results = self.getCache(cache_key)
raw = None if not results:
try: log.debug('Searching for movie: %s', q)
name_year = fireEvent('scanner.name_year', q, single = True)
raw = self.request('search/movie', {
'query': name_year.get('name', q),
'year': name_year.get('year'),
'search_type': 'ngram' if limit > 1 else 'phrase'
}, return_key = 'results')
except:
log.error('Failed searching TMDB for "%s": %s', (q, traceback.format_exc()))
results = [] raw = None
if raw:
try: try:
nr = 0 raw = tmdb3.searchMovie(search_string)
except:
log.error('Failed searching TMDB for "%s": %s', (search_string, traceback.format_exc()))
for movie in raw: results = []
parsed_movie = self.parseMovie(movie, extended = False) if raw:
if parsed_movie: try:
results.append(parsed_movie) nr = 0
nr += 1 for movie in raw:
if nr == limit: results.append(self.parseMovie(movie, extended = False))
break
log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results]) nr += 1
if nr == limit:
break
return results log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
except SyntaxError as e:
log.error('Failed to parse XML response: %s', e) self.setCache(cache_key, results)
return False return results
except SyntaxError as e:
log.error('Failed to parse XML response: %s', e)
return False
return results return results
@@ -80,91 +69,101 @@ class TheMovieDb(MovieProvider):
if not identifier: if not identifier:
return {} return {}
result = self.parseMovie({ cache_key = 'tmdb.cache.%s%s' % (identifier, '.ex' if extended else '')
'id': identifier result = self.getCache(cache_key)
}, extended = extended)
return result or {} if not result:
try:
log.debug('Getting info: %s', cache_key)
# noinspection PyArgumentList
movie = tmdb3.Movie(identifier)
try: exists = movie.title is not None
except: exists = False
if exists:
result = self.parseMovie(movie, extended = extended)
self.setCache(cache_key, result)
else:
result = {}
except:
log.error('Failed getting info for %s: %s', (identifier, traceback.format_exc()))
return result
def parseMovie(self, movie, extended = True): def parseMovie(self, movie, extended = True):
# Do request, append other items cache_key = 'tmdb.cache.%s%s' % (movie.id, '.ex' if extended else '')
movie = self.request('movie/%s' % movie.get('id'), { movie_data = self.getCache(cache_key)
'append_to_response': 'alternative_titles' + (',images,casts' if extended else '')
})
if not movie:
return
# Images if not movie_data:
poster = self.getImage(movie, type = 'poster', size = 'w154')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original') if extended else []
images = { # Images
'poster': [poster] if poster else [], poster = self.getImage(movie, type = 'poster', size = 'w154')
#'backdrop': [backdrop] if backdrop else [], poster_original = self.getImage(movie, type = 'poster', size = 'original')
'poster_original': [poster_original] if poster_original else [], backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
'backdrop_original': [backdrop_original] if backdrop_original else [], extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original', n = self.MAX_EXTRATHUMBS, skipfirst = True)
'actors': {},
'extra_thumbs': extra_thumbs
}
# Genres images = {
try: 'poster': [poster] if poster else [],
genres = [genre.get('name') for genre in movie.get('genres', [])] #'backdrop': [backdrop] if backdrop else [],
except: 'poster_original': [poster_original] if poster_original else [],
genres = [] 'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {},
'extra_thumbs': extra_thumbs
}
# 1900 is the same as None # Genres
year = str(movie.get('release_date') or '')[:4] try:
if not movie.get('release_date') or year == '1900' or year.lower() == 'none': genres = [genre.name for genre in movie.genres]
year = None except:
genres = []
# Gather actors data # 1900 is the same as None
actors = {} year = str(movie.releasedate or '')[:4]
if extended: if not movie.releasedate or year == '1900' or year.lower() == 'none':
year = None
# Full data # Gather actors data
cast = movie.get('casts', {}).get('cast', []) actors = {}
if extended:
for cast_item in movie.cast:
try:
actors[toUnicode(cast_item.name)] = toUnicode(cast_item.character)
images['actors'][toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original')
except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
for cast_item in cast: movie_data = {
try: 'type': 'movie',
actors[toUnicode(cast_item.get('name'))] = toUnicode(cast_item.get('character')) 'via_tmdb': True,
images['actors'][toUnicode(cast_item.get('name'))] = self.getImage(cast_item, type = 'profile', size = 'original') 'tmdb_id': movie.id,
except: 'titles': [toUnicode(movie.title)],
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc())) 'original_title': movie.originaltitle,
'images': images,
'imdb': movie.imdb,
'runtime': movie.runtime,
'released': str(movie.releasedate),
'year': tryInt(year, None),
'plot': movie.overview,
'genres': genres,
'collection': getattr(movie.collection, 'name', None),
'actor_roles': actors
}
movie_data = { movie_data = dict((k, v) for k, v in movie_data.items() if v)
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.get('id'),
'titles': [toUnicode(movie.get('title'))],
'original_title': movie.get('original_title'),
'images': images,
'imdb': movie.get('imdb_id'),
'runtime': movie.get('runtime'),
'released': str(movie.get('release_date')),
'year': tryInt(year, None),
'plot': movie.get('overview'),
'genres': genres,
'collection': getattr(movie.get('belongs_to_collection'), 'name', None),
'actor_roles': actors
}
movie_data = dict((k, v) for k, v in movie_data.items() if v) # Add alternative names
if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']:
movie_data['titles'].append(movie_data['original_title'])
# Add alternative names if extended:
if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']: for alt in movie.alternate_titles:
movie_data['titles'].append(movie_data['original_title']) alt_name = alt.title
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
movie_data['titles'].append(alt_name)
# Add alternative titles # Cache movie parsed
alternate_titles = movie.get('alternative_titles', {}).get('titles', []) self.setCache(cache_key, movie_data)
for alt in alternate_titles:
alt_name = alt.get('title')
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
movie_data['titles'].append(alt_name)
return movie_data return movie_data
@@ -172,41 +171,36 @@ class TheMovieDb(MovieProvider):
image_url = '' image_url = ''
try: try:
path = movie.get('%s_path' % type) image_url = getattr(movie, type).geturl(size = size)
image_url = '%s%s%s' % (self.configuration['images']['secure_base_url'], size, path)
except: except:
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie)))) log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie))))
return image_url return image_url
def getMultImages(self, movie, type = 'backdrops', size = 'original'): def getMultImages(self, movie, type = 'backdrops', size = 'original', n = -1, skipfirst = False):
"""
If n < 0, return all images. Otherwise return n images.
If n > len(getattr(movie, type)), then return all images.
If skipfirst is True, then it will skip getattr(movie, type)[0]. This
is because backdrops[0] is typically backdrop.
"""
image_urls = [] image_urls = []
try: try:
for image in movie.get('images', {}).get(type, [])[1:5]: images = getattr(movie, type)
image_urls.append(self.getImage(image, 'file', size)) if n < 0 or n > len(images):
num_images = len(images)
else:
num_images = n
for i in range(int(skipfirst), num_images + int(skipfirst)):
image_urls.append(images[i].geturl(size = size))
except: except:
log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie)))) log.debug('Failed getting %i %s.%s for "%s"', (n, type, size, ss(str(movie))))
return image_urls return image_urls
def request(self, call = '', params = {}, return_key = None):
params = dict((k, v) for k, v in params.items() if v)
params = tryUrlencode(params)
try:
url = 'http://api.themoviedb.org/3/%s?api_key=%s%s' % (call, self.conf('api_key'), '&%s' % params if params else '')
data = self.getJsonData(url, show_error = False)
except:
log.debug('Movie not found: %s, %s', (call, params))
data = None
if data and return_key and return_key in data:
data = data.get(return_key)
return data
def isDisabled(self): def isDisabled(self):
if self.conf('api_key') == '': if self.conf('api_key') == '':
log.error('No API key provided.') log.error('No API key provided.')
+1 -1
View File
@@ -28,7 +28,7 @@ class MovieMetaData(MetaDataBase):
# Update library to get latest info # Update library to get latest info
try: try:
group['media'] = fireEvent('movie.update', group['media'].get('_id'), identifier = getIdentifier(group['media']), extended = True, single = True) group['media'] = fireEvent('movie.update_info', group['media'].get('_id'), identifier = getIdentifier(group['media']), extended = True, single = True)
except: except:
log.error('Failed to update movie, before creating metadata: %s', traceback.format_exc()) log.error('Failed to update movie, before creating metadata: %s', traceback.format_exc())
@@ -0,0 +1,30 @@
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.nzb.nzbindex import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
from couchpotato.environment import Env
log = CPLog(__name__)
autoload = 'NzbIndex'
class NzbIndex(MovieProvider, Base):
def buildUrl(self, media, quality):
title = fireEvent('library.query', media, include_year = False, single = True)
year = media['info']['year']
query = tryUrlencode({
'q': '"%s %s" | "%s (%s)"' % (title, year, title, year),
'age': Env.setting('retention', 'nzb'),
'sort': 'agedesc',
'minsize': quality.get('size_min'),
'maxsize': quality.get('size_max'),
'rating': 1,
'max': 250,
'more': 1,
'complete': 1,
})
return query
@@ -11,7 +11,7 @@ autoload = 'Bitsoup'
class Bitsoup(MovieProvider, Base): class Bitsoup(MovieProvider, Base):
cat_ids = [ cat_ids = [
([17], ['3d']), ([17], ['3d']),
([80], ['720p', '1080p']), ([41], ['720p', '1080p']),
([20], ['dvdr']), ([20], ['dvdr']),
([19], ['brrip', 'dvdrip']), ([19], ['brrip', 'dvdrip']),
] ]
@@ -1,11 +0,0 @@
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.hdaccess import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
log = CPLog(__name__)
autoload = 'HDAccess'
class HDAccess(MovieProvider, Base):
pass
@@ -13,7 +13,7 @@ class IPTorrents(MovieProvider, Base):
([87], ['3d']), ([87], ['3d']),
([48], ['720p', '1080p', 'bd50']), ([48], ['720p', '1080p', 'bd50']),
([72], ['cam', 'ts', 'tc', 'r5', 'scr']), ([72], ['cam', 'ts', 'tc', 'r5', 'scr']),
([7, 48, 20], ['dvdrip', 'brrip']), ([7], ['dvdrip', 'brrip']),
([6], ['dvdr']), ([6], ['dvdr']),
] ]
@@ -13,7 +13,7 @@ class PassThePopcorn(MovieProvider, Base):
'bd50': {'media': 'Blu-ray', 'format': 'BD50'}, 'bd50': {'media': 'Blu-ray', 'format': 'BD50'},
'1080p': {'resolution': '1080p'}, '1080p': {'resolution': '1080p'},
'720p': {'resolution': '720p'}, '720p': {'resolution': '720p'},
'brrip': {'resolution': 'anyhd'}, 'brrip': {'media': 'Blu-ray'},
'dvdr': {'resolution': 'anysd'}, 'dvdr': {'resolution': 'anysd'},
'dvdrip': {'media': 'DVD'}, 'dvdrip': {'media': 'DVD'},
'scr': {'media': 'DVD-Screener'}, 'scr': {'media': 'DVD-Screener'},
@@ -27,7 +27,7 @@ class PassThePopcorn(MovieProvider, Base):
'bd50': {'Codec': ['BD50']}, 'bd50': {'Codec': ['BD50']},
'1080p': {'Resolution': ['1080p']}, '1080p': {'Resolution': ['1080p']},
'720p': {'Resolution': ['720p']}, '720p': {'Resolution': ['720p']},
'brrip': {'Quality': ['High Definition'], 'Container': ['!ISO']}, 'brrip': {'Source': ['Blu-ray'], 'Quality': ['High Definition'], 'Container': ['!ISO']},
'dvdr': {'Codec': ['DVD5', 'DVD9']}, 'dvdr': {'Codec': ['DVD5', 'DVD9']},
'dvdrip': {'Source': ['DVD'], 'Codec': ['!DVD5', '!DVD9']}, 'dvdrip': {'Source': ['DVD'], 'Codec': ['!DVD5', '!DVD9']},
'scr': {'Source': ['DVD-Screener']}, 'scr': {'Source': ['DVD-Screener']},
@@ -11,17 +11,17 @@ autoload = 'TorrentLeech'
class TorrentLeech(MovieProvider, Base): class TorrentLeech(MovieProvider, Base):
cat_ids = [ cat_ids = [
([13], ['720p', '1080p', 'bd50']), ([13], ['720p', '1080p']),
([8], ['cam']), ([8], ['cam']),
([9], ['ts', 'tc']), ([9], ['ts', 'tc']),
([10], ['r5', 'scr']), ([10], ['r5', 'scr']),
([11], ['dvdrip']), ([11], ['dvdrip']),
([13, 14], ['brrip']), ([14], ['brrip']),
([12], ['dvdr']), ([12], ['dvdr']),
] ]
def buildUrl(self, title, media, quality): def buildUrl(self, title, media, quality):
return ( return (
tryUrlencode(title.replace(':', '')), tryUrlencode(title.replace(':', '')),
','.join([str(x) for x in self.getCatId(quality)]) self.getCatId(quality)[0]
) )
@@ -22,8 +22,8 @@ class TorrentShack(MovieProvider, Base):
# Movies-SD Pack - 983 (not included) # Movies-SD Pack - 983 (not included)
cat_ids = [ cat_ids = [
([970, 320], ['bd50']), ([970], ['bd50']),
([300, 320], ['720p', '1080p']), ([300], ['720p', '1080p']),
([350], ['dvdr']), ([350], ['dvdr']),
([400], ['brrip', 'dvdrip']), ([400], ['brrip', 'dvdrip']),
] ]
@@ -1,5 +1,6 @@
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.torrent.torrentz import Base from couchpotato.core.media._base.providers.torrent.torrentz import Base
from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.core.media.movie.providers.base import MovieProvider
@@ -10,5 +11,5 @@ autoload = 'Torrentz'
class Torrentz(MovieProvider, Base): class Torrentz(MovieProvider, Base):
def buildUrl(self, title, media, quality): def buildUrl(self, media):
return tryUrlencode('"%s %s"' % (title, media['info']['year'])) return tryUrlencode('"%s"' % fireEvent('library.query', media, single = True))
@@ -3,7 +3,7 @@ import re
from bs4 import SoupStrainer, BeautifulSoup from bs4 import SoupStrainer, BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import mergeDicts, getTitle, getIdentifier from couchpotato.core.helpers.variable import mergeDicts, getTitle
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.trailer.base import TrailerProvider from couchpotato.core.media.movie.providers.trailer.base import TrailerProvider
from requests import HTTPError from requests import HTTPError
@@ -29,7 +29,7 @@ class HDTrailers(TrailerProvider):
url = self.urls['api'] % self.movieUrlName(movie_name) url = self.urls['api'] % self.movieUrlName(movie_name)
try: try:
data = self.getCache('hdtrailers.%s' % getIdentifier(group), url, show_error = False) data = self.getCache('hdtrailers.%s' % group['identifier'], url, show_error = False)
except HTTPError: except HTTPError:
log.debug('No page found for: %s', movie_name) log.debug('No page found for: %s', movie_name)
data = None data = None
@@ -59,7 +59,7 @@ class HDTrailers(TrailerProvider):
url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s':movie_name})) url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s':movie_name}))
try: try:
data = self.getCache('hdtrailers.alt.%s' % getIdentifier(group), url, show_error = False) data = self.getCache('hdtrailers.alt.%s' % group['identifier'], url, show_error = False)
except HTTPError: except HTTPError:
log.debug('No alternative page found for: %s', movie_name) log.debug('No alternative page found for: %s', movie_name)
data = None data = None
@@ -68,7 +68,7 @@ class HDTrailers(TrailerProvider):
return results return results
try: try:
html = BeautifulSoup(data, parse_only = self.only_tables_tags) html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags)
result_table = html.find_all('h2', text = re.compile(movie_name)) result_table = html.find_all('h2', text = re.compile(movie_name))
for h2 in result_table: for h2 in result_table:
@@ -90,7 +90,7 @@ class HDTrailers(TrailerProvider):
results = {'480p':[], '720p':[], '1080p':[]} results = {'480p':[], '720p':[], '1080p':[]}
try: try:
html = BeautifulSoup(data, parse_only = self.only_tables_tags) html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags)
result_table = html.find('table', attrs = {'class':'bottomTable'}) result_table = html.find('table', attrs = {'class':'bottomTable'})
for tr in result_table.find_all('tr'): for tr in result_table.find_all('tr'):
@@ -25,6 +25,6 @@ class Filmstarts(UserscriptBase):
name = html.find("meta", {"property":"og:title"})['content'] name = html.find("meta", {"property":"og:title"})['content']
# Year of production is not available in the meta data, so get it from the table # Year of production is not available in the meta data, so get it from the table
year = table.find(text="Produktionsjahr").parent.parent.next_sibling.text year = table.find("tr", text="Produktionsjahr").parent.parent.parent.td.text
return self.search(name, year) return self.search(name, year)
@@ -12,7 +12,7 @@ autoload = 'RottenTomatoes'
class RottenTomatoes(UserscriptBase): class RottenTomatoes(UserscriptBase):
includes = ['*://www.rottentomatoes.com/m/*'] includes = ['*://www.rottentomatoes.com/m/*/']
excludes = ['*://www.rottentomatoes.com/m/*/*/'] excludes = ['*://www.rottentomatoes.com/m/*/*/']
version = 2 version = 2
+31 -34
View File
@@ -74,7 +74,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
self.in_progress = True self.in_progress = True
fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started') fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started')
medias = [x['_id'] for x in fireEvent('media.with_status', 'active', types = 'movie', with_doc = False, single = True)] medias = [x['_id'] for x in fireEvent('media.with_status', 'active', with_doc = False, single = True)]
random.shuffle(medias) random.shuffle(medias)
total = len(medias) total = len(medias)
@@ -89,13 +89,12 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
for media_id in medias: for media_id in medias:
media = fireEvent('media.get', media_id, single = True) media = fireEvent('media.get', media_id, single = True)
if not media: continue
try: try:
self.single(media, search_protocols, manual = manual) self.single(media, search_protocols, manual = manual)
except IndexError: except IndexError:
log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc())) log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc()))
fireEvent('movie.update', media_id) fireEvent('movie.update_info', media_id)
except: except:
log.error('Search failed for %s: %s', (getIdentifier(media), traceback.format_exc())) log.error('Search failed for %s: %s', (getIdentifier(media), traceback.format_exc()))
@@ -141,17 +140,17 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
previous_releases = movie.get('releases', []) previous_releases = movie.get('releases', [])
too_early_to_search = [] too_early_to_search = []
outside_eta_results = 0 outside_eta_results = 0
always_search = self.conf('always_search') alway_search = self.conf('always_search')
ignore_eta = manual ignore_eta = manual
total_result_count = 0 total_result_count = 0
fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title) fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title)
# Ignore eta once every 7 days # Ignore eta once every 7 days
if not always_search: if not alway_search:
prop_name = 'last_ignored_eta.%s' % movie['_id'] prop_name = 'last_ignored_eta.%s' % movie['_id']
last_ignored_eta = float(Env.prop(prop_name, default = 0)) last_ignored_eta = float(Env.prop(prop_name, default = 0))
if last_ignored_eta < time.time() - 604800: if last_ignored_eta > time.time() - 604800:
ignore_eta = True ignore_eta = True
Env.prop(prop_name, value = time.time()) Env.prop(prop_name, value = time.time())
@@ -166,12 +165,11 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
'quality': q_identifier, 'quality': q_identifier,
'finish': profile['finish'][index], 'finish': profile['finish'][index],
'wait_for': tryInt(profile['wait_for'][index]), 'wait_for': tryInt(profile['wait_for'][index]),
'3d': profile['3d'][index] if profile.get('3d') else False, '3d': profile['3d'][index] if profile.get('3d') else False
'minimum_score': profile.get('minimum_score', 1),
} }
could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year']) could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year'])
if not always_search and could_not_be_released: if not alway_search and could_not_be_released:
too_early_to_search.append(q_identifier) too_early_to_search.append(q_identifier)
# Skip release, if ETA isn't ignored # Skip release, if ETA isn't ignored
@@ -197,20 +195,13 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
break break
quality = fireEvent('quality.single', identifier = q_identifier, single = True) quality = fireEvent('quality.single', identifier = q_identifier, single = True)
log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if always_search or ignore_eta else '')) log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if alway_search or ignore_eta else ''))
# Extend quality with profile customs # Extend quality with profile customs
quality['custom'] = quality_custom quality['custom'] = quality_custom
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or [] results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
results_count = len(results)
# Check if movie isn't deleted while searching
if not fireEvent('media.get', movie.get('_id'), single = True):
break
# Add them to this movie releases list
found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
results_count = len(found_releases)
total_result_count += results_count total_result_count += results_count
if results_count == 0: if results_count == 0:
log.debug('Nothing found for %s in %s', (default_title, quality['label'])) log.debug('Nothing found for %s in %s', (default_title, quality['label']))
@@ -218,12 +209,20 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
# Keep track of releases found outside ETA window # Keep track of releases found outside ETA window
outside_eta_results += results_count if could_not_be_released else 0 outside_eta_results += results_count if could_not_be_released else 0
# Check if movie isn't deleted while searching
if not fireEvent('media.get', movie.get('_id'), single = True):
break
# Add them to this movie releases list
found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
# Don't trigger download, but notify user of available releases # Don't trigger download, but notify user of available releases
if could_not_be_released and results_count > 0: if could_not_be_released:
log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title)) if results_count > 0:
log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title))
# Try find a valid result and download it # Try find a valid result and download it
if (force_download or not could_not_be_released or always_search) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True): if (force_download or not could_not_be_released or alway_search) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True):
ret = True ret = True
# Remove releases that aren't found anymore # Remove releases that aren't found anymore
@@ -241,7 +240,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
break break
if total_result_count > 0: if total_result_count > 0:
fireEvent('media.tag', movie['_id'], 'recent', update_edited = True, single = True) fireEvent('media.tag', movie['_id'], 'recent', single = True)
if len(too_early_to_search) > 0: if len(too_early_to_search) > 0:
log.info2('Too early to search for %s, %s', (too_early_to_search, default_title)) log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
@@ -278,7 +277,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
# Contains lower quality string # Contains lower quality string
contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True) contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True)
if contains_other and isinstance(contains_other, dict): if contains_other != False:
log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality')) log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality'))
return False return False
@@ -382,21 +381,19 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
def tryNextRelease(self, media_id, manual = False, force_download = False): def tryNextRelease(self, media_id, manual = False, force_download = False):
try: try:
db = get_db()
rels = fireEvent('release.for_media', media_id, single = True) rels = fireEvent('media.with_status', ['snatched', 'done'], single = True)
for rel in rels: for rel in rels:
if rel.get('status') in ['snatched', 'done']: rel['status'] = 'ignored'
fireEvent('release.update_status', rel.get('_id'), status = 'ignored') db.update(rel)
media = fireEvent('media.get', media_id, single = True) movie_dict = fireEvent('media.get', media_id, single = True)
if media: log.info('Trying next release for: %s', getTitle(movie_dict))
log.info('Trying next release for: %s', getTitle(media)) self.single(movie_dict, manual = manual, force_download = force_download)
self.single(media, manual = manual, force_download = force_download)
return True
return True
return False
except: except:
log.error('Failed searching for next release: %s', traceback.format_exc()) log.error('Failed searching for next release: %s', traceback.format_exc())
return False return False
+1 -1
View File
@@ -27,7 +27,7 @@ class Suggestion(Plugin):
else: else:
if not movies or len(movies) == 0: if not movies or len(movies) == 0:
active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True) active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)
movies = [getIdentifier(x) for x in active_movies] movies = [getIdentifier(x) for x in active_movies]
if not ignored or len(ignored) == 0: if not ignored or len(ignored) == 0:
@@ -2,8 +2,6 @@ var SuggestList = new Class({
Implements: [Options, Events], Implements: [Options, Events],
shown_once: false,
initialize: function(options){ initialize: function(options){
var self = this; var self = this;
self.setOptions(options); self.setOptions(options);
@@ -46,13 +44,12 @@ var SuggestList = new Class({
} }
}); });
var cookie_menu_select = Cookie.read('suggestions_charts_menu_selected') || 'suggestions'; var cookie_menu_select = Cookie.read('suggestions_charts_menu_selected');
if( cookie_menu_select === 'suggestions') if( cookie_menu_select === 'suggestions' || cookie_menu_select === null ) self.el.show(); else self.el.hide();
self.show();
else self.api_request = Api.request('suggestion.view', {
self.hide(); 'onComplete': self.fill.bind(self)
});
self.fireEvent.delay(0, self, 'created');
}, },
@@ -148,24 +145,6 @@ var SuggestList = new Class({
}, },
show: function(){
var self = this;
self.el.show();
if(!self.shown_once){
self.api_request = Api.request('suggestion.view', {
'onComplete': self.fill.bind(self)
});
self.shown_once = true;
}
},
hide: function(){
this.el.hide();
},
toElement: function(){ toElement: function(){
return this.el; return this.el;
} }
+17 -30
View File
@@ -3,7 +3,6 @@ import threading
import time import time
import traceback import traceback
import uuid import uuid
from CodernityDB.database import RecordDeleted
from couchpotato import get_db from couchpotato import get_db
from couchpotato.api import addApiView, addNonBlockApiView from couchpotato.api import addApiView, addNonBlockApiView
@@ -14,7 +13,6 @@ from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification from couchpotato.core.notifications.base import Notification
from .index import NotificationIndex, NotificationUnreadIndex from .index import NotificationIndex, NotificationUnreadIndex
from couchpotato.environment import Env from couchpotato.environment import Env
from tornado.ioloop import IOLoop
log = CPLog(__name__) log = CPLog(__name__)
@@ -68,9 +66,7 @@ class CoreNotifier(Notification):
fireEvent('schedule.interval', 'core.clean_messages', self.cleanMessages, seconds = 15, single = True) fireEvent('schedule.interval', 'core.clean_messages', self.cleanMessages, seconds = 15, single = True)
addEvent('app.load', self.clean) addEvent('app.load', self.clean)
addEvent('app.load', self.checkMessages)
if not Env.get('dev'):
addEvent('app.load', self.checkMessages)
self.messages = [] self.messages = []
self.listeners = [] self.listeners = []
@@ -111,11 +107,11 @@ class CoreNotifier(Notification):
if limit_offset: if limit_offset:
splt = splitString(limit_offset) splt = splitString(limit_offset)
limit = tryInt(splt[0]) limit = splt[0]
offset = tryInt(0 if len(splt) is 1 else splt[1]) offset = 0 if len(splt) is 1 else splt[1]
results = db.all('notification', limit = limit, offset = offset, with_doc = True) results = db.get_many('notification', limit = limit, offset = offset, with_doc = True)
else: else:
results = db.all('notification', limit = 200, with_doc = True) results = db.get_many('notification', limit = 200, with_doc = True)
notifications = [] notifications = []
for n in results: for n in results:
@@ -149,28 +145,24 @@ class CoreNotifier(Notification):
def notify(self, message = '', data = None, listener = None): def notify(self, message = '', data = None, listener = None):
if not data: data = {} if not data: data = {}
n = {
'_t': 'notification',
'time': int(time.time()),
}
try: try:
db = get_db() db = get_db()
n['message'] = toUnicode(message) data['notification_type'] = listener if listener else 'unknown'
if data.get('sticky'):
n['sticky'] = True
if data.get('important'):
n['important'] = True
n = {
'_t': 'notification',
'time': int(time.time()),
'message': toUnicode(message),
'data': data
}
db.insert(n) db.insert(n)
self.frontend(type = listener, data = n) self.frontend(type = listener, data = n)
return True return True
except: except:
log.error('Failed notify "%s": %s', (n, traceback.format_exc())) log.error('Failed notify: %s', traceback.format_exc())
def frontend(self, type = 'notification', data = None, message = None): def frontend(self, type = 'notification', data = None, message = None):
if not data: data = {} if not data: data = {}
@@ -190,7 +182,7 @@ class CoreNotifier(Notification):
while len(self.listeners) > 0 and not self.shuttingDown(): while len(self.listeners) > 0 and not self.shuttingDown():
try: try:
listener, last_id = self.listeners.pop() listener, last_id = self.listeners.pop()
IOLoop.current().add_callback(listener, { listener({
'success': True, 'success': True,
'result': [notification], 'result': [notification],
}) })
@@ -271,16 +263,11 @@ class CoreNotifier(Notification):
if init: if init:
db = get_db() db = get_db()
notifications = db.all('notification') notifications = db.all('notification', with_doc = True)
for n in notifications: for n in notifications:
if n['doc'].get('time') > (time.time() - 604800):
try: messages.append(n['doc'])
doc = db.get('id', n.get('_id'))
if doc.get('time') > (time.time() - 604800):
messages.append(doc)
except RecordDeleted:
pass
return { return {
'success': True, 'success': True,
@@ -50,7 +50,7 @@ var NotificationBase = new Class({
, 'top'); , 'top');
self.notifications.include(result); self.notifications.include(result);
if((result.important !== undefined || result.sticky !== undefined) && !result.read){ if((result.data.important !== undefined || result.data.sticky !== undefined) && !result.read){
var sticky = true; var sticky = true;
App.trigger('message', [result.message, sticky, result]) App.trigger('message', [result.message, sticky, result])
} }
@@ -72,7 +72,7 @@ var NotificationBase = new Class({
if(!force_ids) { if(!force_ids) {
var rn = self.notifications.filter(function(n){ var rn = self.notifications.filter(function(n){
return !n.read && n.important === undefined return !n.read && n.data.important === undefined
}); });
var ids = []; var ids = [];
+1 -1
View File
@@ -42,7 +42,7 @@ class Email(Notification):
# Open the SMTP connection, via SSL if requested # Open the SMTP connection, via SSL if requested
log.debug("Connecting to host %s on port %s" % (smtp_server, smtp_port)) log.debug("Connecting to host %s on port %s" % (smtp_server, smtp_port))
log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled")) log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled"))
mailserver = smtplib.SMTP_SSL(smtp_server, smtp_port) if ssl == 1 else smtplib.SMTP(smtp_server, smtp_port) mailserver = smtplib.SMTP_SSL(smtp_server) if ssl == 1 else smtplib.SMTP(smtp_server)
if starttls: if starttls:
log.debug("Using StartTLS to initiate the connection with the SMTP server") log.debug("Using StartTLS to initiate the connection with the SMTP server")
+4 -4
View File
@@ -34,9 +34,9 @@ class Growl(Notification):
self.growl = notifier.GrowlNotifier( self.growl = notifier.GrowlNotifier(
applicationName = Env.get('appname'), applicationName = Env.get('appname'),
notifications = ['Updates'], notifications = ["Updates"],
defaultNotifications = ['Updates'], defaultNotifications = ["Updates"],
applicationIcon = self.getNotificationImage('medium'), applicationIcon = '%s/static/images/couch.png' % fireEvent('app.api_url', single = True),
hostname = hostname if hostname else 'localhost', hostname = hostname if hostname else 'localhost',
password = password if password else None, password = password if password else None,
port = port if port else 23053 port = port if port else 23053
@@ -56,7 +56,7 @@ class Growl(Notification):
try: try:
self.growl.notify( self.growl.notify(
noteType = 'Updates', noteType = "Updates",
title = self.default_title, title = self.default_title,
description = message, description = message,
sticky = False, sticky = False,
@@ -0,0 +1,68 @@
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from pynmwp import PyNMWP
import six
log = CPLog(__name__)
autoload = 'NotifyMyWP'
class NotifyMyWP(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
keys = splitString(self.conf('api_key'))
p = PyNMWP(keys, self.conf('dev_key'))
response = p.push(application = self.default_title, event = message, description = message, priority = self.conf('priority'), batch_mode = len(keys) > 1)
for key in keys:
if not response[key]['Code'] == six.u('200'):
log.error('Could not send notification to NotifyMyWindowsPhone (%s). %s', (key, response[key]['message']))
return False
return response
config = [{
'name': 'notifymywp',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'notifymywp',
'label': 'Windows Phone',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'api_key',
'description': 'Multiple keys seperated by a comma. Maximum of 5.'
},
{
'name': 'dev_key',
'advanced': True,
},
{
'name': 'priority',
'default': 0,
'type': 'dropdown',
'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)],
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
@@ -23,26 +23,6 @@ config = [{
'default': 'localhost', 'default': 'localhost',
'description': 'Hostname/IP, default localhost' 'description': 'Hostname/IP, default localhost'
}, },
{
'name': 'username',
'label': 'Username',
'default': '',
'description': 'Required for myPlex'
},
{
'name': 'password',
'label': 'Password',
'default': '',
'type': 'password',
'description': 'Required for myPlex'
},
{
'name': 'auth_token',
'label': 'Auth Token',
'default': '',
'advanced': True,
'description': 'Required for myPlex'
},
{ {
'name': 'clients', 'name': 'clients',
'default': '', 'default': '',
+4 -39
View File
@@ -35,46 +35,11 @@ class PlexServer(object):
if path.startswith('/'): if path.startswith('/'):
path = path[1:] path = path[1:]
#Maintain support for older Plex installations without myPlex data = self.plex.urlopen('%s/%s' % (
if not self.plex.conf('auth_token') and not self.plex.conf('username') and not self.plex.conf('password'): self.createHost(self.plex.conf('media_server'), port = 32400),
data = self.plex.urlopen('%s/%s' % ( path
self.createHost(self.plex.conf('media_server'), port = 32400), ))
path
))
else:
#Fetch X-Plex-Token if it doesn't exist but a username/password do
if not self.plex.conf('auth_token') and (self.plex.conf('username') and self.plex.conf('password')):
import urllib2, base64
log.info("Fetching a new X-Plex-Token from plex.tv")
username = self.plex.conf('username')
password = self.plex.conf('password')
req = urllib2.Request("https://plex.tv/users/sign_in.xml", data="")
authheader = "Basic %s" % base64.encodestring('%s:%s' % (username, password))[:-1]
req.add_header("Authorization", authheader)
req.add_header("X-Plex-Product", "Couchpotato Notifier")
req.add_header("X-Plex-Client-Identifier", "b3a6b24dcab2224bdb101fc6aa08ea5e2f3147d6")
req.add_header("X-Plex-Version", "1.0")
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log.info("Error fetching token from plex.tv")
try:
auth_tree = etree.parse(response)
token = auth_tree.findall(".//authentication-token")[0].text
self.plex.conf('auth_token', token)
except (ValueError, IndexError) as e:
log.info("Error parsing plex.tv response: " + ex(e))
#Add X-Plex-Token header for myPlex support workaround
data = self.plex.urlopen('%s/%s?X-Plex-Token=%s' % (
self.createHost(self.plex.conf('media_server'), port = 32400),
path,
self.plex.conf('auth_token')
))
if data_type == 'xml': if data_type == 'xml':
return etree.fromstring(data) return etree.fromstring(data)
else: else:
+1 -2
View File
@@ -84,8 +84,7 @@ config = [{
}, },
{ {
'name': 'api_key', 'name': 'api_key',
'label': 'Access Token', 'label': 'User API Key'
'description': 'Can be found on <a href="https://www.pushbullet.com/account" target="_blank">Account Settings</a>',
}, },
{ {
'name': 'devices', 'name': 'devices',
+3 -3
View File
@@ -1,7 +1,7 @@
from httplib import HTTPSConnection from httplib import HTTPSConnection
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import getTitle, getIdentifier from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification from couchpotato.core.notifications.base import Notification
@@ -27,9 +27,9 @@ class Pushover(Notification):
'sound': self.conf('sound'), 'sound': self.conf('sound'),
} }
if data and getIdentifier(data): if data and data.get('identifier'):
api_data.update({ api_data.update({
'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)), 'url': toUnicode('http://www.imdb.com/title/%s/' % data['identifier']),
'url_title': toUnicode('%s on IMDb' % getTitle(data)), 'url_title': toUnicode('%s on IMDb' % getTitle(data)),
}) })
+3 -4
View File
@@ -1,4 +1,4 @@
from couchpotato.core.helpers.variable import getTitle, getIdentifier from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification from couchpotato.core.notifications.base import Notification
@@ -16,8 +16,7 @@ class Trakt(Notification):
'test': 'account/test/%s', 'test': 'account/test/%s',
} }
listen_to = ['movie.snatched'] listen_to = ['movie.downloaded']
enabled_option = 'notification_enabled'
def notify(self, message = '', data = None, listener = None): def notify(self, message = '', data = None, listener = None):
if not data: data = {} if not data: data = {}
@@ -39,7 +38,7 @@ class Trakt(Notification):
'username': self.conf('automation_username'), 'username': self.conf('automation_username'),
'password': self.conf('automation_password'), 'password': self.conf('automation_password'),
'movies': [{ 'movies': [{
'imdb_id': getIdentifier(data), 'imdb_id': data['identifier'],
'title': getTitle(data), 'title': getTitle(data),
'year': data['info']['year'] 'year': data['info']['year']
}] if data else [] }] if data else []
-68
View File
@@ -1,68 +0,0 @@
import traceback
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Webhook'
class Webhook(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
post_data = {
'message': toUnicode(message)
}
if getIdentifier(data):
post_data.update({
'imdb_id': getIdentifier(data)
})
headers = {
'Content-type': 'application/x-www-form-urlencoded'
}
try:
self.urlopen(self.conf('url'), headers = headers, data = post_data, show_error = False)
return True
except:
log.error('Webhook notification failed: %s', traceback.format_exc())
return False
config = [{
'name': 'webhook',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'webhook',
'label': 'Webhook',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'url',
'description': 'The URL to send notification data to when '
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
}
]
}
]
}]
+4 -4
View File
@@ -7,8 +7,8 @@ import urllib
from couchpotato.core.helpers.variable import splitString, getTitle from couchpotato.core.helpers.variable import splitString, getTitle
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification from couchpotato.core.notifications.base import Notification
from requests.exceptions import ConnectionError, Timeout import requests
from requests.packages.urllib3.exceptions import MaxRetryError from requests.packages.urllib3.exceptions import MaxRetryError, ConnectionError
log = CPLog(__name__) log = CPLog(__name__)
@@ -172,7 +172,7 @@ class XBMC(Notification):
# manually fake expected response array # manually fake expected response array
return [{'result': 'Error'}] return [{'result': 'Error'}]
except (MaxRetryError, Timeout, ConnectionError): except (MaxRetryError, requests.exceptions.Timeout, ConnectionError):
log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off') log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off')
return [{'result': 'Error'}] return [{'result': 'Error'}]
except: except:
@@ -208,7 +208,7 @@ class XBMC(Notification):
log.debug('Returned from request %s: %s', (host, response)) log.debug('Returned from request %s: %s', (host, response))
return response return response
except (MaxRetryError, Timeout, ConnectionError): except (MaxRetryError, requests.exceptions.Timeout):
log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off') log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off')
return [] return []
except: except:
+1 -2
View File
@@ -46,8 +46,7 @@ class Automation(Plugin):
break break
movie_dict = fireEvent('media.get', movie_id, single = True) movie_dict = fireEvent('media.get', movie_id, single = True)
if movie_dict: fireEvent('movie.searcher.single', movie_dict)
fireEvent('movie.searcher.single', movie_dict)
return True return True
+37 -86
View File
@@ -1,4 +1,3 @@
import threading
from urllib import quote from urllib import quote
from urlparse import urlparse from urlparse import urlparse
import glob import glob
@@ -11,8 +10,7 @@ import traceback
from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toSafeString, \ from couchpotato.core.helpers.encoding import ss, toSafeString, \
toUnicode, sp toUnicode, sp
from couchpotato.core.helpers.variable import getExt, md5, isLocalIP, scanForPassword, tryInt, getIdentifier, \ from couchpotato.core.helpers.variable import getExt, md5, isLocalIP, scanForPassword, tryInt, getIdentifier
randomString
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.environment import Env from couchpotato.environment import Env
import requests import requests
@@ -37,9 +35,7 @@ class Plugin(object):
_needs_shutdown = False _needs_shutdown = False
_running = None _running = None
_locks = {} user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20130519 Firefox/24.0'
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'
http_last_use = {} http_last_use = {}
http_time_between_calls = 0 http_time_between_calls = 0
http_failed_request = {} http_failed_request = {}
@@ -122,31 +118,15 @@ class Plugin(object):
if os.path.exists(path): if os.path.exists(path):
log.debug('%s already exists, overwriting file with new version', path) log.debug('%s already exists, overwriting file with new version', path)
write_type = 'w+' if not binary else 'w+b' try:
f = open(path, 'w+' if not binary else 'w+b')
# Stream file using response object f.write(content)
if isinstance(content, requests.models.Response): f.close()
os.chmod(path, Env.getPermission('file'))
# Write file to temp except:
with open('%s.tmp' % path, write_type) as f: log.error('Unable writing to file "%s": %s', (path, traceback.format_exc()))
for chunk in content.iter_content(chunk_size = 1048576): if os.path.isfile(path):
if chunk: # filter out keep-alive new chunks os.remove(path)
f.write(chunk)
f.flush()
# Rename to destination
os.rename('%s.tmp' % path, path)
else:
try:
f = open(path, write_type)
f.write(content)
f.close()
os.chmod(path, Env.getPermission('file'))
except:
log.error('Unable writing to file "%s": %s', (path, traceback.format_exc()))
if os.path.isfile(path):
os.remove(path)
def makeDir(self, path): def makeDir(self, path):
path = sp(path) path = sp(path)
@@ -163,17 +143,21 @@ class Plugin(object):
folder = sp(folder) folder = sp(folder)
for item in os.listdir(folder): for item in os.listdir(folder):
full_folder = sp(os.path.join(folder, item)) full_folder = os.path.join(folder, item)
if not only_clean or (item in only_clean and os.path.isdir(full_folder)): if not only_clean or (item in only_clean and os.path.isdir(full_folder)):
for subfolder, dirs, files in os.walk(full_folder, topdown = False): for root, dirs, files in os.walk(full_folder):
try: for dir_name in dirs:
os.rmdir(subfolder) full_path = os.path.join(root, dir_name)
except:
if show_error: if len(os.listdir(full_path)) == 0:
log.info2('Couldn\'t remove directory %s: %s', (subfolder, traceback.format_exc())) try:
os.rmdir(full_path)
except:
if show_error:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
try: try:
os.rmdir(folder) os.rmdir(folder)
@@ -182,7 +166,7 @@ class Plugin(object):
log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc())) log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))
# http request # http request
def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, stream = False): def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True):
url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]") url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
if not headers: headers = {} if not headers: headers = {}
@@ -193,7 +177,7 @@ class Plugin(object):
host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else '')) host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else ''))
headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host)) headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host))
headers['Host'] = headers.get('Host', None) headers['Host'] = headers.get('Host', host)
headers['User-Agent'] = headers.get('User-Agent', self.user_agent) headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip') headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
headers['Connection'] = headers.get('Connection', 'keep-alive') headers['Connection'] = headers.get('Connection', 'keep-alive')
@@ -206,7 +190,7 @@ class Plugin(object):
if self.http_failed_disabled[host] > (time.time() - 900): if self.http_failed_disabled[host] > (time.time() - 900):
log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host) log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
if not show_error: if not show_error:
raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host) raise Exception('Disabled calls to %s for 15 minutes because so many failed requests')
else: else:
return '' return ''
else: else:
@@ -214,7 +198,6 @@ class Plugin(object):
del self.http_failed_disabled[host] del self.http_failed_disabled[host]
self.wait(host) self.wait(host)
status_code = None
try: try:
kwargs = { kwargs = {
@@ -223,16 +206,14 @@ class Plugin(object):
'timeout': timeout, 'timeout': timeout,
'files': files, 'files': files,
'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates.. 'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates..
'stream': stream,
} }
method = 'post' if len(data) > 0 or files else 'get' method = 'post' if len(data) > 0 or files else 'get'
log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data')) log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
response = r.request(method, url, **kwargs) response = r.request(method, url, **kwargs)
status_code = response.status_code
if response.status_code == requests.codes.ok: if response.status_code == requests.codes.ok:
data = response if stream else response.content data = response.content
else: else:
response.raise_for_status() response.raise_for_status()
@@ -243,12 +224,6 @@ class Plugin(object):
# Save failed requests by hosts # Save failed requests by hosts
try: try:
# To many requests
if status_code in [429]:
self.http_failed_request[host] = 1
self.http_failed_disabled[host] = time.time()
if not self.http_failed_request.get(host): if not self.http_failed_request.get(host):
self.http_failed_request[host] = 1 self.http_failed_request[host] = 1
else: else:
@@ -279,8 +254,8 @@ class Plugin(object):
wait = (last_use - now) + self.http_time_between_calls wait = (last_use - now) + self.http_time_between_calls
if wait > 0: if wait > 0:
log.debug('Waiting for %s, %d seconds', (self.getName(), max(1, wait))) log.debug('Waiting for %s, %d seconds', (self.getName(), wait))
time.sleep(min(wait, 30)) time.sleep(wait)
def beforeCall(self, handler): def beforeCall(self, handler):
self.isRunning('%s.%s' % (self.getName(), handler.__name__)) self.isRunning('%s.%s' % (self.getName(), handler.__name__))
@@ -347,9 +322,9 @@ class Plugin(object):
Env.get('cache').set(cache_key_md5, value, timeout) Env.get('cache').set(cache_key_md5, value, timeout)
return value return value
def createNzbName(self, data, media, unique_tag = False): def createNzbName(self, data, media):
release_name = data.get('name') release_name = data.get('name')
tag = self.cpTag(media, unique_tag = unique_tag) tag = self.cpTag(media)
# Check if password is filename # Check if password is filename
name_password = scanForPassword(data.get('name')) name_password = scanForPassword(data.get('name'))
@@ -362,26 +337,18 @@ class Plugin(object):
max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames
return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag) return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag)
def createFileName(self, data, filedata, media, unique_tag = False): def createFileName(self, data, filedata, media):
name = self.createNzbName(data, media, unique_tag = unique_tag) name = self.createNzbName(data, media)
if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata: if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
return '%s.%s' % (name, 'rar') return '%s.%s' % (name, 'rar')
return '%s.%s' % (name, data.get('protocol')) return '%s.%s' % (name, data.get('protocol'))
def cpTag(self, media, unique_tag = False): def cpTag(self, media):
if Env.setting('enabled', 'renamer'):
identifier = getIdentifier(media)
return '.cp(' + identifier + ')' if identifier else ''
tag = '' return ''
if Env.setting('enabled', 'renamer') or unique_tag:
identifier = getIdentifier(media) or ''
unique_tag = ', ' + randomString() if unique_tag else ''
tag = '.cp('
tag += identifier
tag += ', ' if unique_tag and identifier else ''
tag += randomString() if unique_tag else ''
tag += ')'
return tag if len(tag) > 7 else ''
def checkFilesChanged(self, files, unchanged_for = 60): def checkFilesChanged(self, files, unchanged_for = 60):
now = time.time() now = time.time()
@@ -426,19 +393,3 @@ class Plugin(object):
def isEnabled(self): def isEnabled(self):
return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None
def acquireLock(self, key):
lock = self._locks.get(key)
if not lock:
self._locks[key] = threading.RLock()
log.debug('Acquiring lock: %s', key)
self._locks.get(key).acquire()
def releaseLock(self, key):
lock = self._locks.get(key)
if lock:
log.debug('Releasing lock: %s', key)
self._locks.get(key).release()
+9 -21
View File
@@ -1,18 +1,12 @@
import ctypes import ctypes
import os import os
import string import string
import traceback
import time
from couchpotato import CPLog
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.encoding import sp, ss, toUnicode
from couchpotato.core.helpers.variable import getUserDir from couchpotato.core.helpers.variable import getUserDir
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
import six
log = CPLog(__name__)
if os.name == 'nt': if os.name == 'nt':
@@ -59,9 +53,9 @@ class FileBrowser(Plugin):
dirs = [] dirs = []
path = sp(path) path = sp(path)
for f in os.listdir(path): for f in os.listdir(path):
p = sp(os.path.join(path, f)) p = os.path.join(path, f)
if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)): if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)):
dirs.append(toUnicode('%s%s' % (p, os.path.sep))) dirs.append(p + os.path.sep)
return sorted(dirs) return sorted(dirs)
@@ -72,8 +66,8 @@ class FileBrowser(Plugin):
driveletters = [] driveletters = []
for drive in string.ascii_uppercase: for drive in string.ascii_uppercase:
if win32file.GetDriveType(drive + ':') in [win32file.DRIVE_FIXED, win32file.DRIVE_REMOTE, win32file.DRIVE_RAMDISK, win32file.DRIVE_REMOVABLE]: if win32file.GetDriveType(drive + ":") in [win32file.DRIVE_FIXED, win32file.DRIVE_REMOTE, win32file.DRIVE_RAMDISK, win32file.DRIVE_REMOVABLE]:
driveletters.append(drive + ':\\') driveletters.append(drive + ":\\")
return driveletters return driveletters
@@ -87,7 +81,6 @@ class FileBrowser(Plugin):
try: try:
dirs = self.getDirectories(path = path, show_hidden = show_hidden) dirs = self.getDirectories(path = path, show_hidden = show_hidden)
except: except:
log.error('Failed getting directory "%s" : %s', (path, traceback.format_exc()))
dirs = [] dirs = []
parent = os.path.dirname(path.rstrip(os.path.sep)) parent = os.path.dirname(path.rstrip(os.path.sep))
@@ -107,19 +100,14 @@ class FileBrowser(Plugin):
def is_hidden(self, filepath): def is_hidden(self, filepath):
name = ss(os.path.basename(os.path.abspath(filepath))) name = os.path.basename(os.path.abspath(filepath))
return name.startswith('.') or self.has_hidden_attribute(filepath) return name.startswith('.') or self.has_hidden_attribute(filepath)
def has_hidden_attribute(self, filepath): def has_hidden_attribute(self, filepath):
result = False
try: try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(sp(filepath)) #@UndefinedVariable attrs = ctypes.windll.kernel32.GetFileAttributesW(six.text_type(filepath)) #@UndefinedVariable
assert attrs != -1 assert attrs != -1
result = bool(attrs & 2) result = bool(attrs & 2)
except (AttributeError, AssertionError): except (AttributeError, AssertionError):
pass result = False
except:
log.error('Failed getting hidden attribute: %s', traceback.format_exc())
return result return result
+1 -1
View File
@@ -27,7 +27,7 @@ class CategoryPlugin(Plugin):
'desc': 'List all available categories', 'desc': 'List all available categories',
'return': {'type': 'object', 'example': """{ 'return': {'type': 'object', 'example': """{
'success': True, 'success': True,
'categories': array, categories 'list': array, categories
}"""} }"""}
}) })

Some files were not shown because too many files have changed in this diff Show More