Compare commits: build/2.6...tv_old

235 commits
| Author | SHA1 | Date |
|---|---|---|
| | 796aff4514 | |
| | 2a2fe448e7 | |
| | 23967d11dd | |
| | 7a3251f649 | |
| | 9ba8910281 | |
| | e83a3cf263 | |
| | b3c2945d9b | |
| | fc3cf08675 | |
| | 8162cd31b7 | |
| | 1ea6fdc9a7 | |
| | 1b0c9f40cc | |
| | c0111a467b | |
| | 64175151f8 | |
| | 586957e840 | |
| | f2fc775963 | |
| | b8bce948c8 | |
| | 0a996857dd | |
| | 26509f614c | |
| | 3e28d5a936 | |
| | 95ff427873 | |
| | 8ed10037df | |
| | 7a090dd4a2 | |
| | 49f34cb48d | |
| | 2a76de50dd | |
| | 8adf7fc600 | |
| | f4c053f56f | |
| | 5cb5a1677d | |
| | 9fb9f0ef5b | |
| | 242d69a981 | |
| | eb151a4c5d | |
| | 2520b19798 | |
| | 319c9e979a | |
| | 93aa5b1920 | |
| | f648af66a6 | |
| | 7c4185e1fa | |
| | 0fb06a3fd3 | |
| | 1e39d643a8 | |
| | 69d58663ef | |
| | e59b53fab2 | |
| | a66f6f0166 | |
| | 1344f03b16 | |
| | a23c409939 | |
| | a6b1cc833f | |
| | d2c7e3ef56 | |
| | 6c87008d7b | |
| | 6b3af21e45 | |
| | 5a5cc0005c | |
| | d65117c0e3 | |
| | d8884bb655 | |
| | afe9aed2eb | |
| | 01e64e989e | |
| | 9496df9e9d | |
| | 8b4c67b977 | |
| | f77a8f5573 | |
| | de8aefebb7 | |
| | 8f0d22a6f2 | |
| | 721190028b | |
| | 50e565142e | |
| | bead3e2b07 | |
| | 71aa0cbb9a | |
| | 8de19cbd52 | |
| | 8573832ff7 | |
| | 7c1d3f8762 | |
| | 9cd1adcdee | |
| | f017ac9dca | |
| | 907704e45f | |
| | b17f937389 | |
| | f591c56dd4 | |
| | 2fd54901e7 | |
| | 1bf6c5a82e | |
| | 45484461b5 | |
| | aa394f59ae | |
| | 717111f5d2 | |
| | e3461dc35f | |
| | 9b834f62a9 | |
| | 935938474c | |
| | 6573196186 | |
| | 9a07f2ed65 | |
| | 613ff3b729 | |
| | def62fc865 | |
| | 037c355836 | |
| | 180b2bbffe | |
| | 143dcad4f3 | |
| | b0e352ab6d | |
| | 5ea7dc5920 | |
| | 3c675b5b8a | |
| | 11ea9b4e91 | |
| | e8a2139ecf | |
| | dc57d7b6d1 | |
| | 0925f1312d | |
| | efc02f66f5 | |
| | 9ce8ffc14b | |
| | bab07a05e7 | |
| | 1df9f7c83f | |
| | efdf77ef6c | |
| | a989c93505 | |
| | d122bd1b43 | |
| | ab81824f4c | |
| | 4eb73e3609 | |
| | 6bcb279f0e | |
| | f446c8ed33 | |
| | 10a34f2b69 | |
| | cc3ebd79e8 | |
| | 3e035f84b1 | |
| | 611c159373 | |
| | db65980ba4 | |
| | 180576f2b7 | |
| | 46d4d34da7 | |
| | 3fa21560be | |
| | b902186389 | |
| | da87e68fad | |
| | f23412ea7e | |
| | 07abf7c83d | |
| | 6259684487 | |
| | 0a0935d635 | |
| | fb5b17005f | |
| | e3745b5d74 | |
| | 8d24d96804 | |
| | 529b535d9f | |
| | 0793668e5c | |
| | 8d368ecf29 | |
| | 2d2b0c9048 | |
| | fb0719d677 | |
| | 7ffa5dc7b6 | |
| | 32c289fd3d | |
| | ff63b8a1c5 | |
| | 60d8934444 | |
| | e0aba01866 | |
| | 1ae498e3c8 | |
| | d1db099f71 | |
| | f4ef64290d | |
| | 026151d1a1 | |
| | 70dada8ef6 | |
| | 9ef752f8a3 | |
| | d265a5bddd | |
| | b2b6e3eb33 | |
| | 2b6c7a8f94 | |
| | 6070209d33 | |
| | fa78d18890 | |
| | 40eaf2a96b | |
| | 73dd0916c0 | |
| | 77d32fe16b | |
| | 7def0944a6 | |
| | 8782cd77d5 | |
| | 1b59fd9af0 | |
| | 9dca8a03be | |
| | 132f4882e5 | |
| | 9e32a38288 | |
| | c1b13cd076 | |
| | 820588aa5f | |
| | 8fbf050510 | |
| | dd5ae3c4ee | |
| | ab51707607 | |
| | 8acdc56df1 | |
| | d345a05b3c | |
| | 5f427ec6ea | |
| | a95c030885 | |
| | bef6a74dfe | |
| | 01da470c21 | |
| | 5fdf4d9085 | |
| | bc51e263e1 | |
| | 4c527f0931 | |
| | e9fc528a0f | |
| | c9ba3c804e | |
| | ee9fe347c7 | |
| | 515aafe112 | |
| | 314016e1fa | |
| | 906a54ef09 | |
| | ec2facd056 | |
| | ddbfef575f | |
| | 8dd7a4771c | |
| | 49ba1f1acd | |
| | c4d661535c | |
| | bd52ab7ab1 | |
| | cce0a8ec62 | |
| | d02e62f89f | |
| | e180addc3c | |
| | a37a4a8cd4 | |
| | 8328c18728 | |
| | 7ae07d6c15 | |
| | 770bcf5bc6 | |
| | 7bd6a295d8 | |
| | 4063761313 | |
| | d62b346a74 | |
| | 155732ab1a | |
| | b3713b7ae5 | |
| | 19d026756c | |
| | 3cddd29425 | |
| | 23bde0b866 | |
| | 6f895c1805 | |
| | 96089074ce | |
| | 2ed53df008 | |
| | 060859483a | |
| | eced476eaf | |
| | 8d5b55a753 | |
| | 7296dc54d0 | |
| | e5e9cf7d5f | |
| | b106229a78 | |
| | 73efd5549f | |
| | 8139016636 | |
| | 59c0d0416e | |
| | cd559ece04 | |
| | 120a4ad1ed | |
| | 3363e164fd | |
| | 6d6d5caeb6 | |
| | 21030e7cb4 | |
| | 9b238ba712 | |
| | b3d2d5349b | |
| | f9bad281de | |
| | 72ce919989 | |
| | ff782669f6 | |
| | 36950993f1 | |
| | 7df93dc1b4 | |
| | a45913eee7 | |
| | a25eac6c4e | |
| | dd0fcf0bc1 | |
| | 2267235eca | |
| | 029cf9ecac | |
| | f4217ecd3d | |
| | 31cd993506 | |
| | fb579561de | |
| | 37eb424827 | |
| | 4348451692 | |
| | e93e55a0f7 | |
| | bc11f90529 | |
| | 8fcc246f25 | |
| | 4d7c38d6db | |
| | c8d79cde21 | |
| | f4d792079b | |
| | 78ab419cd8 | |
| | 3e93983f6e | |
| | 6a4822cc26 | |
| | 92b08bb5d5 | |
| | e270e09969 | |
| | 40cd5218db | |
@@ -170,7 +170,7 @@ def natcmp(a, b):
     return cmp(natsortKey(a), natsortKey(b))

 def toIterable(value):
-    if isinstance(value, collections.Iterable):
+    if type(value) in [list, tuple]:
         return value
     return [value]

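The `toIterable` change above is subtle: `collections.Iterable` also matches strings, dicts, and generators, so the old check passed them through unwrapped. A minimal sketch of the behavioral difference (the `_old`/`_new` names are illustrative, not from the codebase):

```python
import collections

def toIterable_old(value):
    # Old check: anything iterable is returned as-is, including strings,
    # so a caller looping over the result walks it character by character.
    if isinstance(value, collections.Iterable):
        return value
    return [value]

def toIterable_new(value):
    # New check: only list and tuple pass through; everything else is
    # wrapped, so a bare string becomes a one-element list.
    if type(value) in [list, tuple]:
        return value
    return [value]

print(list(toIterable_old('movie')))  # ['m', 'o', 'v', 'i', 'e']
print(list(toIterable_new('movie')))  # ['movie']
```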
@@ -1,5 +1,6 @@
 from couchpotato import get_session
 from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
+from couchpotato.core.helpers.variable import mergeDicts
 from couchpotato.core.plugins.base import Plugin
 from couchpotato.core.settings.model import Media

@@ -17,6 +18,13 @@ class MediaBase(Plugin):
         'category': {},
     }

+    search_dict = mergeDicts({
+        'library': {
+            'related_libraries': {},
+            'root_library': {}
+        },
+    }, default_dict)
+
     def initType(self):
         addEvent('media.types', self.getType)

@@ -28,7 +36,7 @@ class MediaBase(Plugin):
         def onComplete():
             db = get_session()
             media = db.query(Media).filter_by(id = id).first()
-            fireEventAsync('%s.searcher.single' % media.type, media.to_dict(self.default_dict), on_complete = self.createNotifyFront(id))
+            fireEventAsync('%s.searcher.single' % media.type, media.to_dict(self.search_dict), on_complete = self.createNotifyFront(id))
             db.expire_all()

         return onComplete
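`search_dict` exists so that searcher events receive a media dict whose `library` entry also carries the related and root libraries (the show matcher further down reads `media['library']['root_library']`). A rough sketch of the merge, with a stand-in `mergeDicts` and a hypothetical subset of `default_dict`:

```python
def mergeDicts(a, b):
    # Stand-in for couchpotato.core.helpers.variable.mergeDicts, assumed
    # here to recursively merge the two dicts (a's keys added into b's).
    result = dict(b)
    for key, value in a.items():
        if key in result and isinstance(result[key], dict) and isinstance(value, dict):
            result[key] = mergeDicts(value, result[key])
        else:
            result[key] = value
    return result

default_dict = {'library': {'titles': {}}, 'category': {}}  # hypothetical subset

search_dict = mergeDicts({
    'library': {
        'related_libraries': {},
        'root_library': {}
    },
}, default_dict)

print(search_dict)
# {'library': {'titles': {}, 'related_libraries': {}, 'root_library': {}},
#  'category': {}}
# i.e. to_dict() is now asked to serialize the related/root library
# relations as well, on top of everything default_dict already requested.
```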
@@ -1,13 +1,6 @@
-from couchpotato.core.event import addEvent
-from couchpotato.core.plugins.base import Plugin
+from .main import Library

+def start():
+    return Library()
+
-class LibraryBase(Plugin):
-
-    _type = None
-
-    def initType(self):
-        addEvent('library.types', self.getType)
-
-    def getType(self):
-        return self._type
 config = []
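The net effect of this hunk and the two new files that follow: `LibraryBase` moves out of the package `__init__` into its own module, so plugins now import it from the new location (the movie library hunk further down makes the same switch):

```python
from couchpotato.core.media._base.library import LibraryBase       # old location
from couchpotato.core.media._base.library.base import LibraryBase  # new location
```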
13  couchpotato/core/media/_base/library/base.py  (new file)
@@ -0,0 +1,13 @@
+from couchpotato.core.event import addEvent
+from couchpotato.core.plugins.base import Plugin
+
+
+class LibraryBase(Plugin):
+
+    _type = None
+
+    def initType(self):
+        addEvent('library.types', self.getType)
+
+    def getType(self):
+        return self._type
18  couchpotato/core/media/_base/library/main.py  (new file)
@@ -0,0 +1,18 @@
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.media._base.library.base import LibraryBase
+
+
+class Library(LibraryBase):
+    def __init__(self):
+        addEvent('library.title', self.title)
+
+    def title(self, library):
+        return fireEvent(
+            'library.query',
+            library,
+
+            condense = False,
+            include_year = False,
+            include_identifier = False,
+            single = True
+        )
6  couchpotato/core/media/_base/matcher/__init__.py  (new file)
@@ -0,0 +1,6 @@
+from .main import Matcher
+
+def start():
+    return Matcher()
+
+config = []
84  couchpotato/core/media/_base/matcher/base.py  (new file)
@@ -0,0 +1,84 @@
+from couchpotato.core.event import addEvent
+from couchpotato.core.helpers.encoding import simplifyString
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+
+log = CPLog(__name__)
+
+
+class MatcherBase(Plugin):
+    type = None
+
+    def __init__(self):
+        if self.type:
+            addEvent('%s.matcher.correct' % self.type, self.correct)
+
+    def correct(self, chain, release, media, quality):
+        raise NotImplementedError()
+
+    def flattenInfo(self, info):
+        # Flatten dictionary of matches (chain info)
+        if isinstance(info, dict):
+            return dict([(key, self.flattenInfo(value)) for key, value in info.items()])
+
+        # Flatten matches
+        result = None
+
+        for match in info:
+            if isinstance(match, dict):
+                if result is None:
+                    result = {}
+
+                for key, value in match.items():
+                    if key not in result:
+                        result[key] = []
+
+                    result[key].append(value)
+            else:
+                if result is None:
+                    result = []
+
+                result.append(match)
+
+        return result
+
+    def constructFromRaw(self, match):
+        if not match:
+            return None
+
+        parts = [
+            ''.join([
+                y for y in x[1:] if y
+            ]) for x in match
+        ]

+        return ''.join(parts)[:-1].strip()
+
+    def simplifyValue(self, value):
+        if not value:
+            return value
+
+        if isinstance(value, basestring):
+            return simplifyString(value)
+
+        if isinstance(value, list):
+            return [self.simplifyValue(x) for x in value]
+
+        raise ValueError("Unsupported value type")
+
+    def chainMatch(self, chain, group, tags):
+        info = self.flattenInfo(chain.info[group])
+
+        found_tags = []
+        for tag, accepted in tags.items():
+            values = [self.simplifyValue(x) for x in info.get(tag, [None])]
+
+            if any([val in accepted for val in values]):
+                found_tags.append(tag)
+
+        log.debug('tags found: %s, required: %s' % (found_tags, tags.keys()))
+
+        if set(tags.keys()) == set(found_tags):
+            return True
+
+        return all([key in found_tags for key, value in tags.items()])
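`flattenInfo` is the bridge between Caper's per-fragment match dicts and the tag lists `chainMatch` consumes. A standalone re-run of its logic on a hand-built structure (the input only mimics the shape of a Caper `chain.info` group; it is not real Caper output):

```python
def flattenInfo(info):
    # Same recursion as MatcherBase.flattenInfo, module-level for the demo.
    if isinstance(info, dict):
        return dict([(key, flattenInfo(value)) for key, value in info.items()])

    result = None
    for match in info:
        if isinstance(match, dict):
            if result is None:
                result = {}
            for key, value in match.items():
                # collect every fragment's value for this key into a list
                result.setdefault(key, []).append(value)
        else:
            if result is None:
                result = []
            result.append(match)
    return result

info = {
    'video': [
        {'resolution': '720p', 'source': 'hdtv'},  # one parsed fragment
        {'codec': 'x264'},                         # another fragment
    ],
    'show_name': ['Haven'],
}
print(flattenInfo(info))
# {'video': {'resolution': ['720p'], 'source': ['hdtv'], 'codec': ['x264']},
#  'show_name': ['Haven']}
```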
88  couchpotato/core/media/_base/matcher/main.py  (new file)
@@ -0,0 +1,88 @@
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.helpers.variable import possibleTitles
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.matcher.base import MatcherBase
+from caper import Caper
+
+log = CPLog(__name__)
+
+
+class Matcher(MatcherBase):
+    def __init__(self):
+        super(Matcher, self).__init__()
+
+        self.caper = Caper()
+
+        addEvent('matcher.parse', self.parse)
+        addEvent('matcher.match', self.match)
+
+        addEvent('matcher.flatten_info', self.flattenInfo)
+        addEvent('matcher.construct_from_raw', self.constructFromRaw)
+
+        addEvent('matcher.correct_title', self.correctTitle)
+        addEvent('matcher.correct_quality', self.correctQuality)
+
+    def parse(self, name, parser='scene'):
+        return self.caper.parse(name, parser)
+
+    def match(self, release, media, quality):
+        match = fireEvent('matcher.parse', release['name'], single = True)
+
+        if len(match.chains) < 1:
+            log.info2('Wrong: %s, unable to parse release name (no chains)', release['name'])
+            return False
+
+        for chain in match.chains:
+            if fireEvent('%s.matcher.correct' % media['type'], chain, release, media, quality, single = True):
+                return chain
+
+        return False
+
+    def correctTitle(self, chain, media):
+        root_library = media['library']['root_library']
+
+        if 'show_name' not in chain.info or not len(chain.info['show_name']):
+            log.info('Wrong: missing show name in parsed result')
+            return False
+
+        # Get the lower-case parsed show name from the chain
+        chain_words = [x.lower() for x in chain.info['show_name']]
+
+        # Build a list of possible titles of the media we are searching for
+        titles = root_library['info']['titles']
+
+        # Add year suffix titles (will result in ['<name_one>', '<name_one> <suffix_one>', '<name_two>', ...])
+        suffixes = [None, root_library['info']['year']]
+
+        titles = [
+            title + ((' %s' % suffix) if suffix else '')
+            for title in titles
+            for suffix in suffixes
+        ]
+
+        # Check show titles match
+        # TODO check xem names
+        for title in titles:
+            for valid_words in [x.split(' ') for x in possibleTitles(title)]:
+
+                if valid_words == chain_words:
+                    return True
+
+        return False
+
+    def correctQuality(self, chain, quality, quality_map):
+        if quality['identifier'] not in quality_map:
+            log.info2('Wrong: unknown preferred quality %s', quality['identifier'])
+            return False
+
+        if 'video' not in chain.info:
+            log.info2('Wrong: no video tags found')
+            return False
+
+        video_tags = quality_map[quality['identifier']]
+
+        if not self.chainMatch(chain, 'video', video_tags):
+            log.info2('Wrong: %s tags not in chain', video_tags)
+            return False
+
+        return True
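`correctQuality` reduces to: flatten the chain's `video` group, then require every tag in the quality map entry to have an accepted value. A hand-run of that check with illustrative tag data (not CouchPotato's real quality definitions), approximating `simplifyValue` with a lower-case:

```python
def chainMatch(flat_info, tags):
    # Every required tag must have at least one accepted value present.
    found_tags = []
    for tag, accepted in tags.items():
        values = [v.lower() for v in flat_info.get(tag, []) if v]
        if any(val in accepted for val in values):
            found_tags.append(tag)
    return set(tags.keys()) == set(found_tags)

# Hypothetical quality_map entry for a 720p HDTV profile:
video_tags = {'resolution': ['720p'], 'source': ['hdtv']}

# Flattened chain info for a parsed release name:
flat = {'resolution': ['720p'], 'source': ['HDTV'], 'codec': ['x264']}

print(chainMatch(flat, video_tags))  # True: both required tags matched
```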
@@ -203,14 +203,14 @@ class MediaPlugin(MediaBase):

         # List release statuses
         releases = db.query(Release) \
-            .filter(Release.movie_id.in_(media_ids)) \
+            .filter(Release.media_id.in_(media_ids)) \
             .all()

         release_statuses = dict((m, set()) for m in media_ids)
         releases_count = dict((m, 0) for m in media_ids)
         for release in releases:
-            release_statuses[release.movie_id].add('%d,%d' % (release.status_id, release.quality_id))
-            releases_count[release.movie_id] += 1
+            release_statuses[release.media_id].add('%d,%d' % (release.status_id, release.quality_id))
+            releases_count[release.media_id] += 1

         # Get main movie data
         q2 = db.query(Media) \
@@ -1,11 +1,17 @@
 from couchpotato import get_session
 from couchpotato.api import addApiView
 from couchpotato.core.event import addEvent, fireEvent
-from couchpotato.core.helpers.encoding import simplifyString
-from couchpotato.core.helpers.variable import splitString
+from couchpotato.core.helpers.encoding import simplifyString, toUnicode
+from couchpotato.core.helpers.variable import md5, getTitle, splitString
 from couchpotato.core.logger import CPLog
 from couchpotato.core.media._base.searcher.base import SearcherBase
 from couchpotato.core.settings.model import Media, Release, ReleaseInfo
 from couchpotato.environment import Env
+from inspect import ismethod, isfunction
+import datetime
+import re
+import time
+import traceback

 log = CPLog(__name__)

@@ -165,7 +171,7 @@ class Searcher(SearcherBase):
             return False

     def correctWords(self, rel_name, media):
-        media_title = fireEvent('searcher.get_search_title', media, single = True)
+        media_title = fireEvent('library.title', media['library'], single = True)
         media_words = re.split('\W+', simplifyString(media_title))

         rel_name = simplifyString(rel_name)
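The `correctWords` change swaps the movie-only `searcher.get_search_title` event for the new `library.title` event, which routes through `library.query` and lets each media type build its own search title. A toy event bus showing the indirection (the bus itself is a stand-in for `couchpotato.core.event`, reduced to just what the flow needs):

```python
handlers = {}

def addEvent(name, handler):
    handlers.setdefault(name, []).append(handler)

def fireEvent(name, *args, **kwargs):
    kwargs.pop('single', None)
    for handler in handlers.get(name, []):
        result = handler(*args, **kwargs)
        if result is not None:  # first handler that claims the library wins
            return result

def movie_query(library, first = True, include_year = True, **kwargs):
    # Mirrors the new MovieLibraryPlugin.query in the hunk below.
    if library.get('type') != 'movie':
        return  # not ours; let another media type answer
    titles = [t['title'] for t in library['titles']]
    if include_year:
        titles = [t + ' %s' % library['year'] for t in titles]
    return titles[0] if first else titles

def title(library):
    # Mirrors Library.title from the new _base/library/main.py above.
    return fireEvent('library.query', library, condense = False,
                     include_year = False, include_identifier = False, single = True)

addEvent('library.query', movie_query)
addEvent('library.title', title)

library = {'type': 'movie', 'titles': [{'title': 'Haven'}], 'year': 2010}
print(fireEvent('library.title', library, single = True))  # 'Haven'
```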
@@ -176,7 +176,7 @@ class MovieBase(MovieTypeBase):

         fireEvent('media.restatus', m.id)

-        movie_dict = m.to_dict(self.default_dict)
+        movie_dict = m.to_dict(self.search_dict)
         fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id))

         db.expire_all()
@@ -54,7 +54,7 @@ var Movie = new Class({
 		// Reload when releases have updated
 		self.global_events['release.update_status'] = function(notification){
 			var data = notification.data
-			if(data && self.data.id == data.movie_id){
+			if(data && self.data.id == data.media_id){

 				if(!self.data.releases)
 					self.data.releases = [];
@@ -181,7 +181,7 @@ Block.Search.MovieItem = new Class({
 			if(categories.length == 0)
 				self.category_select.hide();
 			else {
-				self.category_select.show();
+				self.category_select.movie();
 				categories.each(function(category){
 					new Element('option', {
 						'value': category.data.id,
@@ -2,7 +2,7 @@ from couchpotato import get_session
 from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
 from couchpotato.core.helpers.encoding import toUnicode, simplifyString
 from couchpotato.core.logger import CPLog
-from couchpotato.core.media._base.library import LibraryBase
+from couchpotato.core.media._base.library.base import LibraryBase
 from couchpotato.core.settings.model import Library, LibraryTitle, File
 from string import ascii_letters
 import time

@@ -16,27 +16,46 @@ class MovieLibraryPlugin(LibraryBase):
     default_dict = {'titles': {}, 'files':{}}

     def __init__(self):
+        addEvent('library.query', self.query)
         addEvent('library.add.movie', self.add)
         addEvent('library.update.movie', self.update)
         addEvent('library.update.movie.release_date', self.updateReleaseDate)

-    def add(self, attrs = None, update_after = True):
-        if not attrs: attrs = {}
+    def query(self, library, first = True, include_year = True, **kwargs):
+        if library.get('type') != 'movie':
+            return
+
+        titles = [title['title'] for title in library['titles']]
+
+        # Add year identifier to titles
+        if include_year:
+            titles = [title + (' %s' % str(library['year'])) for title in titles]
+
+        if first:
+            return titles[0] if titles else None
+
+        return titles
+
+    def add(self, attrs = {}, update_after = True):
         # movies don't yet contain these, so lets make sure to set defaults
         type = attrs.get('type', 'movie')
         primary_provider = attrs.get('primary_provider', 'imdb')

         db = get_session()

-        l = db.query(Library).filter_by(identifier = attrs.get('identifier')).first()
+        l = db.query(Library).filter_by(type = type, identifier = attrs.get('identifier')).first()
         if not l:
             status = fireEvent('status.get', 'needs_update', single = True)
             l = Library(
                 type = type,
                 primary_provider = primary_provider,
                 year = attrs.get('year'),
                 identifier = attrs.get('identifier'),
                 plot = toUnicode(attrs.get('plot')),
                 tagline = toUnicode(attrs.get('tagline')),
                 status_id = status.get('id'),
-                info = {}
+                info = {},
+                parent = None
             )

             title = LibraryTitle(
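Worked example of what the new `MovieLibraryPlugin.query` returns, using a hand-built library dict (field subset and titles are illustrative):

```python
library = {
    'type': 'movie',
    'year': 2010,
    'titles': [{'title': 'Inception'}, {'title': 'Origen'}],
}

# Same steps as query() with include_year = True:
titles = [title['title'] for title in library['titles']]
titles = [title + (' %s' % str(library['year'])) for title in titles]

print(titles[0])  # 'Inception 2010' -> the default (first = True) search title
print(titles)     # ['Inception 2010', 'Origen 2010'] with first = False
```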
@@ -30,7 +30,6 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
         addEvent('movie.searcher.try_next_release', self.tryNextRelease)
         addEvent('movie.searcher.could_be_released', self.couldBeReleased)
         addEvent('searcher.correct_release', self.correctRelease)
-        addEvent('searcher.get_search_title', self.getSearchTitle)

         addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = {
             'desc': 'Marks the snatched results as ignored and try the next best release',

@@ -210,7 +209,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):

         if media.get('type') != 'movie': return

-        media_title = fireEvent('searcher.get_search_title', media, single = True)
+        media_title = fireEvent('library.title', media['library'], single = True)

         imdb_results = kwargs.get('imdb_results', False)
         retention = Env.setting('retention', section = 'nzb')

@@ -329,7 +328,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
         try:
             db = get_session()
             rels = db.query(Release) \
-                .filter_by(movie_id = media_id) \
+                .filter_by(media_id = media_id) \
                 .filter(Release.status_id.in_([snatched_status.get('id'), done_status.get('id')])) \
                 .all()

@@ -347,9 +346,5 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
             log.error('Failed searching for next release: %s', traceback.format_exc())
             return False

-    def getSearchTitle(self, media):
-        if media['type'] == 'movie':
-            return getTitle(media['library'])
-
 class SearchSetupError(Exception):
     pass
0  couchpotato/core/media/show/__init__.py  (new file)

6  couchpotato/core/media/show/_base/__init__.py  (new file)
@@ -0,0 +1,6 @@
+from .main import ShowBase
+
+def start():
+    return ShowBase()
+
+config = []
239  couchpotato/core/media/show/_base/main.py  (new file)
@@ -0,0 +1,239 @@
+from couchpotato import get_session
+from couchpotato.api import addApiView
+from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media import MediaBase
+from couchpotato.core.settings.model import Media
+import time
+
+log = CPLog(__name__)
+
+
+class ShowBase(MediaBase):
+
+    _type = 'show'
+
+    def __init__(self):
+        super(ShowBase, self).__init__()
+
+        addApiView('show.add', self.addView, docs = {
+            'desc': 'Add new movie to the wanted list',
+            'params': {
+                'identifier': {'desc': 'IMDB id of the movie your want to add.'},
+                'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
+                'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
+            }
+        })
+
+        addEvent('show.add', self.add)
+
+    def addView(self, **kwargs):
+        add_dict = self.add(params = kwargs)
+
+        return {
+            'success': True if add_dict else False,
+            'show': add_dict,
+        }
+
+    def add(self, params = {}, force_readd = True, search_after = True, update_library = False, status_id = None):
+        """
+        params
+        {'category_id': u'-1',
+         'identifier': u'tt1519931',
+         'profile_id': u'12',
+         'thetvdb_id': u'158661',
+         'title': u'Haven'}
+        """
+        log.debug("show.add")
+
+        # Add show parent to db first; need to update library so maps will be in place (if any)
+        parent = self.addToDatabase(params = params, update_library = True, type = 'show')
+
+        # TODO: add by airdate
+
+        # Add by Season/Episode numbers
+        self.addBySeasonEpisode(parent,
+            params = params,
+            force_readd = force_readd,
+            search_after = search_after,
+            update_library = update_library,
+            status_id = status_id
+        )
+
+    def addBySeasonEpisode(self, parent, params = {}, force_readd = True, search_after = True, update_library = False, status_id = None):
+        identifier = params.get('id')
+        # 'tvdb' will always be the master for our purpose. All mapped data can be mapped
+        # to another source for downloading, but it will always be remapped back to tvdb numbering
+        # when renamed so media can be used in media players that use tvdb for info provider
+        #
+        # This currently means the episode must actually exist in tvdb in order to be found but
+        # the numbering can be different
+
+        #master = 'tvdb'
+        #destination = 'scene'
+        #destination = 'anidb'
+        #destination = 'rage'
+        #destination = 'trakt'
+        # TODO: auto mode. if anime exists use it. if scene exists use it else use tvdb
+
+        # XXX: We should abort adding show, etc if either tvdb or xem is down or we will have incorrent mappings
+        # I think if tvdb gets error we wont have anydata anyway, but we must make sure XEM returns!!!!
+
+        # Only the master should return results here; all other info providers should just return False
+        # since we are just interested in the structure at this point.
+        seasons = fireEvent('season.info', merge = True, identifier = identifier)
+        if seasons is not None:
+            for season in seasons:
+                # Make sure we are only dealing with 'tvdb' responses at this point
+                if season.get('primary_provider', None) != 'thetvdb':
+                    continue
+                season_id = season.get('id', None)
+                if season_id is None: continue
+
+                season_params = {'season_identifier': season_id}
+                # Calling all info providers; merge your info now for individual season
+                single_season = fireEvent('season.info', merge = True, identifier = identifier, params = season_params)
+                single_season['category_id'] = params.get('category_id')
+                single_season['profile_id'] = params.get('profile_id')
+                single_season['title'] = single_season.get('original_title', None)
+                single_season['identifier'] = season_id
+                single_season['parent_identifier'] = identifier
+                log.info("Adding Season %s" % season_id)
+                s = self.addToDatabase(params = single_season, type = "season")
+
+                episode_params = {'season_identifier': season_id}
+                episodes = fireEvent('episode.info', merge = True, identifier = identifier, params = episode_params)
+                if episodes is not None:
+                    for episode in episodes:
+                        # Make sure we are only dealing with 'tvdb' responses at this point
+                        if episode.get('primary_provider', None) != 'thetvdb':
+                            continue
+                        episode_id = episode.get('id', None)
+                        if episode_id is None: continue
+                        try:
+                            episode_number = int(episode.get('episodenumber', None))
+                        except (ValueError, TypeError):
+                            continue
+                        try:
+                            absolute_number = int(episode.get('absolute_number', None))
+                        except (ValueError, TypeError):
+                            absolute_number = None
+
+                        episode_params = {'season_identifier': season_id,
+                                          'episode_identifier': episode_id,
+                                          'episode': episode_number}
+                        if absolute_number:
+                            episode_params['absolute'] = absolute_number
+                        # Calling all info providers; merge your info now for individual episode
+                        single_episode = fireEvent('episode.info', merge = True, identifier = identifier, params = episode_params)
+                        single_episode['category_id'] = params.get('category_id')
+                        single_episode['profile_id'] = params.get('profile_id')
+                        single_episode['title'] = single_episode.get('original_title', None)
+                        single_episode['identifier'] = episode_id
+                        single_episode['parent_identifier'] = single_season['identifier']
+                        log.info("Adding [%sx%s] %s - %s" % (season_id,
+                                                             episode_number,
+                                                             params['title'],
+                                                             single_episode.get('original_title', '')))
+                        e = self.addToDatabase(params = single_episode, type = "episode")
+
+        # Start searching now that all the media has been added
+        if search_after:
+            onComplete = self.createOnComplete(parent['id'])
+            onComplete()
+
+        return parent
+
+    def addToDatabase(self, params = {}, type = "show", force_readd = True, search_after = False, update_library = False, status_id = None):
+        log.debug("show.addToDatabase")
+
+        if not params.get('identifier'):
+            msg = 'Can\'t add show without imdb identifier.'
+            log.error(msg)
+            fireEvent('notify.frontend', type = 'show.is_tvshow', message = msg)
+            return False
+        #else:
+            #try:
+                #is_show = fireEvent('movie.is_show', identifier = params.get('identifier'), single = True)
+                #if not is_show:
+                    #msg = 'Can\'t add show, seems to be a TV show.'
+                    #log.error(msg)
+                    #fireEvent('notify.frontend', type = 'show.is_tvshow', message = msg)
+                    #return False
+            #except:
+                #pass
+
+        library = fireEvent('library.add.%s' % type, single = True, attrs = params, update_after = update_library)
+        if not library:
+            return False
+
+        # Status
+        status_active, snatched_status, ignored_status, done_status, downloaded_status = \
+            fireEvent('status.get', ['active', 'snatched', 'ignored', 'done', 'downloaded'], single = True)
+
+        default_profile = fireEvent('profile.default', single = True)
+        cat_id = params.get('category_id', None)
+
+        db = get_session()
+        m = db.query(Media).filter_by(library_id = library.get('id')).first()
+        added = True
+        do_search = False
+        if not m:
+            m = Media(
+                type = type,
+                library_id = library.get('id'),
+                profile_id = params.get('profile_id', default_profile.get('id')),
+                status_id = status_id if status_id else status_active.get('id'),
+                category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None,
+            )
+            db.add(m)
+            db.commit()
+
+            onComplete = None
+            if search_after:
+                onComplete = self.createOnComplete(m.id)
+
+            fireEventAsync('library.update.%s' % type, params.get('identifier'), default_title = params.get('title', ''), on_complete = onComplete)
+            search_after = False
+        elif force_readd:
+
+            # Clean snatched history
+            for release in m.releases:
+                if release.status_id in [downloaded_status.get('id'), snatched_status.get('id'), done_status.get('id')]:
+                    if params.get('ignore_previous', False):
+                        release.status_id = ignored_status.get('id')
+                    else:
+                        fireEvent('release.delete', release.id, single = True)
+
+            m.profile_id = params.get('profile_id', default_profile.get('id'))
+            m.category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None
+        else:
+            log.debug('Show already exists, not updating: %s', params)
+            added = False
+
+        if force_readd:
+            m.status_id = status_id if status_id else status_active.get('id')
+            m.last_edit = int(time.time())
+            do_search = True
+
+        db.commit()
+
+        # Remove releases
+        available_status = fireEvent('status.get', 'available', single = True)
+        for rel in m.releases:
+            if rel.status_id is available_status.get('id'):
+                db.delete(rel)
+                db.commit()
+
+        show_dict = m.to_dict(self.default_dict)
+
+        if do_search and search_after:
+            onComplete = self.createOnComplete(m.id)
+            onComplete()
+
+        if added:
+            fireEvent('notify.frontend', type = 'show.added', data = show_dict, message = 'Successfully added "%s" to your wanted list.' % params.get('title', ''))
+
+        db.expire_all()
+        return show_dict
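`add` / `addBySeasonEpisode` persist a three-level tree, with each level linked to its parent through `parent_identifier`. The shape, with made-up identifier values (the field subset is illustrative):

```python
# What one pass over the season.info / episode.info events produces:
show = {'identifier': '158661', 'type': 'show'}           # thetvdb show id

season = {
    'identifier': 'season-1-id',          # season.get('id') from season.info
    'parent_identifier': '158661',        # links the season to the show
    'type': 'season',
}

episode = {
    'identifier': 'episode-1-id',         # episode.get('id') from episode.info
    'parent_identifier': 'season-1-id',   # links the episode to its season
    'episode': 1,                         # int(episode['episodenumber'])
    'type': 'episode',
}
```

Episodes whose `episodenumber` cannot be cast to `int` are skipped entirely, while a missing `absolute_number` only drops the optional `absolute` key.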
232  couchpotato/core/media/show/_base/static/search.js  (new file)
@@ -0,0 +1,232 @@
+Block.Search.ShowItem = new Class({
+
+	Implements: [Options, Events],
+
+	initialize: function(info, options){
+		var self = this;
+		self.setOptions(options);
+
+		self.info = info;
+		self.alternative_titles = [];
+
+		self.create();
+	},
+
+	create: function(){
+		var self = this,
+			info = self.info;
+
+		self.el = new Element('div.media_result', {
+			'id': info.id
+		}).adopt(
+			self.thumbnail = info.images && info.images.poster.length > 0 ? new Element('img.thumbnail', {
+				'src': info.images.poster[0],
+				'height': null,
+				'width': null
+			}) : null,
+			self.options_el = new Element('div.options.inlay'),
+			self.data_container = new Element('div.data', {
+				'events': {
+					'click': self.showOptions.bind(self)
+				}
+			}).adopt(
+				self.info_container = new Element('div.info').adopt(
+					new Element('h2').adopt(
+						self.title = new Element('span.title', {
+							'text': info.titles && info.titles.length > 0 ? info.titles[0] : 'Unknown'
+						}),
+						self.year = info.year ? new Element('span.year', {
+							'text': info.year
+						}) : null
+					)
+				)
+			)
+		)
+
+		if(info.titles)
+			info.titles.each(function(title){
+				self.alternativeTitle({
+					'title': title
+				});
+			})
+	},
+
+	alternativeTitle: function(alternative){
+		var self = this;
+
+		self.alternative_titles.include(alternative);
+	},
+
+	getTitle: function(){
+		var self = this;
+		try {
+			return self.info.original_title ? self.info.original_title : self.info.titles[0];
+		}
+		catch(e){
+			return 'Unknown';
+		}
+	},
+
+	get: function(key){
+		return this.info[key]
+	},
+
+	showOptions: function(){
+		var self = this;
+
+		self.createOptions();
+
+		self.data_container.addClass('open');
+		self.el.addEvent('outerClick', self.closeOptions.bind(self))
+
+	},
+
+	closeOptions: function(){
+		var self = this;
+
+		self.data_container.removeClass('open');
+		self.el.removeEvents('outerClick')
+	},
+
+	add: function(e){
+		var self = this;
+
+		if(e)
+			(e).preventDefault();
+
+		self.loadingMask();
+
+		Api.request('show.add', {
+			'data': {
+				'identifier': self.info.id,
+				'id': self.info.id,
+				'type': self.info.type,
+				'primary_provider': self.info.primary_provider,
+				'title': self.title_select.get('value'),
+				'profile_id': self.profile_select.get('value'),
+				'category_id': self.category_select.get('value')
+			},
+			'onComplete': function(json){
+				self.options_el.empty();
+				self.options_el.adopt(
+					new Element('div.message', {
+						'text': json.added ? 'Show successfully added.' : 'Show didn\'t add properly. Check logs'
+					})
+				);
+				self.mask.fade('out');
+
+				self.fireEvent('added');
+			},
+			'onFailure': function(){
+				self.options_el.empty();
+				self.options_el.adopt(
+					new Element('div.message', {
+						'text': 'Something went wrong, check the logs for more info.'
+					})
+				);
+				self.mask.fade('out');
+			}
+		});
+	},
+
+	createOptions: function(){
+		var self = this,
+			info = self.info;
+
+		if(!self.options_el.hasClass('set')){
+
+			if(self.info.in_library){
+				var in_library = [];
+				self.info.in_library.releases.each(function(release){
+					in_library.include(release.quality.label)
+				});
+			}
+
+			self.options_el.grab(
+				new Element('div', {
+					'class': self.info.in_wanted && self.info.in_wanted.profile_id || in_library ? 'in_library_wanted' : ''
+				}).adopt(
+					self.info.in_wanted && self.info.in_wanted.profile_id ? new Element('span.in_wanted', {
+						'text': 'Already in wanted list: ' + Quality.getProfile(self.info.in_wanted.profile_id).get('label')
+					}) : (in_library ? new Element('span.in_library', {
+						'text': 'Already in library: ' + in_library.join(', ')
+					}) : null),
+					self.title_select = new Element('select', {
+						'name': 'title'
+					}),
+					self.profile_select = new Element('select', {
+						'name': 'profile'
+					}),
+					self.category_select = new Element('select', {
+						'name': 'category'
+					}).grab(
+						new Element('option', {'value': -1, 'text': 'None'})
+					),
+					self.add_button = new Element('a.button', {
+						'text': 'Add',
+						'events': {
+							'click': self.add.bind(self)
+						}
+					})
+				)
+			);
+
+			Array.each(self.alternative_titles, function(alt){
+				new Element('option', {
+					'text': alt.title
+				}).inject(self.title_select)
+			})
+
+
+			// Fill categories
+			var categories = CategoryList.getAll();
+
+			if(categories.length == 0)
+				self.category_select.hide();
+			else {
+				self.category_select.show();
+				categories.each(function(category){
+					new Element('option', {
+						'value': category.data.id,
+						'text': category.data.label
+					}).inject(self.category_select);
+				});
+			}
+
+			// Fill profiles
+			var profiles = Quality.getActiveProfiles();
+			if(profiles.length == 1)
+				self.profile_select.hide();
+
+			profiles.each(function(profile){
+				new Element('option', {
+					'value': profile.id ? profile.id : profile.data.id,
+					'text': profile.label ? profile.label : profile.data.label
+				}).inject(self.profile_select)
+			});
+
+			self.options_el.addClass('set');
+
+			if(categories.length == 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
+				!(self.info.in_wanted && self.info.in_wanted.profile_id || in_library))
+				self.add();
+
+		}
+
+	},
+
+	loadingMask: function(){
+		var self = this;
+
+		self.mask = new Element('div.mask').inject(self.el).fade('hide')
+
+		createSpinner(self.mask)
+		self.mask.fade('in')
+
+	},
+
+	toElement: function(){
+		return this.el
+	}
+
+});
0  couchpotato/core/media/show/library/__init__.py  (new file)

6  couchpotato/core/media/show/library/episode/__init__.py  (new file)
@@ -0,0 +1,6 @@
+from .main import EpisodeLibraryPlugin
+
+def start():
+    return EpisodeLibraryPlugin()
+
+config = []
266  couchpotato/core/media/show/library/episode/main.py  (new file)
@@ -0,0 +1,266 @@
+from couchpotato import get_session
+from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
+from couchpotato.core.helpers.encoding import toUnicode, simplifyString
+from couchpotato.core.logger import CPLog
+from couchpotato.core.settings.model import EpisodeLibrary, SeasonLibrary, LibraryTitle, File
+from couchpotato.core.media._base.library.base import LibraryBase
+from couchpotato.core.helpers.variable import tryInt
+from string import ascii_letters
+import time
+import traceback
+
+log = CPLog(__name__)
+
+
+class EpisodeLibraryPlugin(LibraryBase):
+
+    default_dict = {'titles': {}, 'files':{}}
+
+    def __init__(self):
+        addEvent('library.query', self.query)
+        addEvent('library.identifier', self.identifier)
+        addEvent('library.add.episode', self.add)
+        addEvent('library.update.episode', self.update)
+        addEvent('library.update.episode_release_date', self.updateReleaseDate)
+
+    def query(self, library, first = True, condense = True, include_identifier = True, **kwargs):
+        if library is list or library.get('type') != 'episode':
+            return
+
+        # Get the titles of the season
+        if not library.get('related_libraries', {}).get('season', []):
+            log.warning('Invalid library, unable to determine title.')
+            return
+
+        titles = fireEvent(
+            'library.query',
+            library['related_libraries']['season'][0],
+            first=False,
+            include_identifier=include_identifier,
+            condense=condense,

+            single=True
+        )
+
+        identifier = fireEvent('library.identifier', library, single = True)
+
+        # Add episode identifier to titles
+        if include_identifier and identifier.get('episode'):
+            titles = [title + ('E%02d' % identifier['episode']) for title in titles]
+
+
+        if first:
+            return titles[0] if titles else None
+
+        return titles
+
+
+    def identifier(self, library):
+        if library.get('type') != 'episode':
+            return
+
+        identifier = {
+            'season': None,
+            'episode': None
+        }
+
+        scene_map = library['info'].get('map_episode', {}).get('scene')
+
+        if scene_map:
+            # Use scene mappings if they are available
+            identifier['season'] = scene_map.get('season')
+            identifier['episode'] = scene_map.get('episode')
+        else:
+            # Fallback to normal season/episode numbers
+            identifier['season'] = library.get('season_number')
+            identifier['episode'] = library.get('episode_number')
+
+
+        # Cast identifiers to integers
+        # TODO this will need changing to support identifiers with trailing 'a', 'b' characters
+        identifier['season'] = tryInt(identifier['season'], None)
+        identifier['episode'] = tryInt(identifier['episode'], None)
+
+        return identifier
+
+    def add(self, attrs = {}, update_after = True):
+        type = attrs.get('type', 'episode')
+        primary_provider = attrs.get('primary_provider', 'thetvdb')
+
+        db = get_session()
+        parent_identifier = attrs.get('parent_identifier', None)
+
+        parent = None
+        if parent_identifier:
+            parent = db.query(SeasonLibrary).filter_by(primary_provider = primary_provider, identifier = attrs.get('parent_identifier')).first()
+
+        l = db.query(EpisodeLibrary).filter_by(type = type, identifier = attrs.get('identifier')).first()
+        if not l:
+            status = fireEvent('status.get', 'needs_update', single = True)
+            l = EpisodeLibrary(
+                type = type,
+                primary_provider = primary_provider,
+                year = attrs.get('year'),
+                identifier = attrs.get('identifier'),
+                plot = toUnicode(attrs.get('plot')),
+                tagline = toUnicode(attrs.get('tagline')),
+                status_id = status.get('id'),
+                info = {},
+                parent = parent,
+                season_number = tryInt(attrs.get('seasonnumber', None)),
+                episode_number = tryInt(attrs.get('episodenumber', None)),
+                absolute_number = tryInt(attrs.get('absolute_number', None))
+            )
+
+            title = LibraryTitle(
+                title = toUnicode(attrs.get('title')),
+                simple_title = self.simplifyTitle(attrs.get('title')),
+            )
+
+            l.titles.append(title)
+
+            db.add(l)
+            db.commit()
+
+        # Update library info
+        if update_after is not False:
+            handle = fireEventAsync if update_after is 'async' else fireEvent
+            handle('library.update.episode', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))
+
+        library_dict = l.to_dict(self.default_dict)
+
+        db.expire_all()
+        return library_dict
+
+    def update(self, identifier, default_title = '', force = False):
+
+        if self.shuttingDown():
+            return
+
+        db = get_session()
+        library = db.query(EpisodeLibrary).filter_by(identifier = identifier).first()
+        done_status = fireEvent('status.get', 'done', single = True)
+
+        if library:
+            library_dict = library.to_dict(self.default_dict)
+
+        do_update = True
+
+        parent_identifier = None
+        if library.parent is not None:
+            parent_identifier = library.parent.identifier
+
+        if library.status_id == done_status.get('id') and not force:
+            do_update = False
+
+        episode_params = {'season_identifier': parent_identifier,
+                          'episode_identifier': identifier,
+                          'episode': library.episode_number,
+                          'absolute': library.absolute_number,}
+        info = fireEvent('episode.info', merge = True, params = episode_params)
+
+        # Don't need those here
+        try: del info['in_wanted']
+        except: pass
+        try: del info['in_library']
+        except: pass
+
+        if not info or len(info) == 0:
+            log.error('Could not update, no movie info to work with: %s', identifier)
+            return False
+
+        # Main info
+        if do_update:
+            library.plot = toUnicode(info.get('plot', ''))
+            library.tagline = toUnicode(info.get('tagline', ''))
+            library.year = info.get('year', 0)
+            library.status_id = done_status.get('id')
+            library.season_number = tryInt(info.get('seasonnumber', None))
+            library.episode_number = tryInt(info.get('episodenumber', None))
+            library.absolute_number = tryInt(info.get('absolute_number', None))
+            try:
+                library.last_updated = int(info.get('lastupdated'))
+            except:
+                library.last_updated = int(time.time())
+            library.info.update(info)
+            db.commit()
+
+        # Titles
+        [db.delete(title) for title in library.titles]
+        db.commit()
+
+        titles = info.get('titles', [])
+        log.debug('Adding titles: %s', titles)
+        counter = 0
+        for title in titles:
+            if not title:
+                continue
+            title = toUnicode(title)
+            t = LibraryTitle(
+                title = title,
+                simple_title = self.simplifyTitle(title),
+                default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
+            )
+            library.titles.append(t)
+            counter += 1
+
+        db.commit()
+
+        # Files
+        images = info.get('images', [])
+        for image_type in ['poster']:
+            for image in images.get(image_type, []):
+                if not isinstance(image, (str, unicode)):
+                    continue
+
+                file_path = fireEvent('file.download', url = image, single = True)
+                if file_path:
+                    file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
+                    try:
+                        file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
+                        library.files.append(file_obj)
+                        db.commit()
+
+                        break
+                    except:
+                        log.debug('Failed to attach to library: %s', traceback.format_exc())
+
+        library_dict = library.to_dict(self.default_dict)
+        db.expire_all()
+        return library_dict
+
+    def updateReleaseDate(self, identifier):
+        '''XXX: Not sure what this is for yet in relation to an episode'''
+        pass
+        #db = get_session()
+        #library = db.query(EpisodeLibrary).filter_by(identifier = identifier).first()
+
+        #if not library.info:
+            #library_dict = self.update(identifier, force = True)
+            #dates = library_dict.get('info', {}).get('release_date')
+        #else:
+            #dates = library.info.get('release_date')
+
+        #if dates and dates.get('expires', 0) < time.time() or not dates:
+            #dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
+            #library.info.update({'release_date': dates })
+            #db.commit()
+
+        #db.expire_all()
+        #return dates
+
+
+    #TODO: Add to base class
+    def simplifyTitle(self, title):
+
+        title = toUnicode(title)
+
+        nr_prefix = '' if title[0] in ascii_letters else '#'
+        title = simplifyString(title)
+
+        for prefix in ['the ']:
+            if prefix == title[:len(prefix)]:
+                title = title[len(prefix):]
+                break
+
+        return nr_prefix + title
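`identifier` is where scene numbering wins over tvdb numbering. A standalone re-run with hand-built library dicts and a stand-in `tryInt`:

```python
def tryInt(value, default = 0):
    # Stand-in for couchpotato.core.helpers.variable.tryInt.
    try:
        return int(value)
    except (ValueError, TypeError):
        return default

def identifier(library):
    # Same decision order as EpisodeLibraryPlugin.identifier.
    ident = {'season': None, 'episode': None}
    scene_map = library['info'].get('map_episode', {}).get('scene')
    if scene_map:
        # Scene mappings take priority when present
        ident['season'] = scene_map.get('season')
        ident['episode'] = scene_map.get('episode')
    else:
        # Fall back to plain tvdb season/episode numbers
        ident['season'] = library.get('season_number')
        ident['episode'] = library.get('episode_number')
    ident['season'] = tryInt(ident['season'], None)
    ident['episode'] = tryInt(ident['episode'], None)
    return ident

# tvdb says S02E03, but scene releases number it S02E04:
mapped = {'info': {'map_episode': {'scene': {'season': '2', 'episode': '4'}}},
          'season_number': 2, 'episode_number': 3}
plain = {'info': {}, 'season_number': 2, 'episode_number': 3}

print(identifier(mapped))  # {'season': 2, 'episode': 4}
print(identifier(plain))   # {'season': 2, 'episode': 3}
```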
6  couchpotato/core/media/show/library/season/__init__.py  (new file)
@@ -0,0 +1,6 @@
+from .main import SeasonLibraryPlugin
+
+def start():
+    return SeasonLibraryPlugin()
+
+config = []
242  couchpotato/core/media/show/library/season/main.py  (new file)
@@ -0,0 +1,242 @@
+from couchpotato import get_session
+from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
+from couchpotato.core.helpers.encoding import toUnicode, simplifyString
+from couchpotato.core.logger import CPLog
+from couchpotato.core.settings.model import SeasonLibrary, ShowLibrary, LibraryTitle, File
+from couchpotato.core.media._base.library.base import LibraryBase
+from couchpotato.core.helpers.variable import tryInt
+from string import ascii_letters
+import time
+import traceback
+
+log = CPLog(__name__)
+
+
+class SeasonLibraryPlugin(LibraryBase):
+
+    default_dict = {'titles': {}, 'files':{}}
+
+    def __init__(self):
+        addEvent('library.query', self.query)
+        addEvent('library.identifier', self.identifier)
+        addEvent('library.add.season', self.add)
+        addEvent('library.update.season', self.update)
+        addEvent('library.update.season_release_date', self.updateReleaseDate)
+
+    def query(self, library, first = True, condense = True, include_identifier = True, **kwargs):
+        if library is list or library.get('type') != 'season':
+            return
+
+        # Get the titles of the show
+        if not library.get('related_libraries', {}).get('show', []):
+            log.warning('Invalid library, unable to determine title.')
+            return
+
+        titles = fireEvent(
+            'library.query',
+            library['related_libraries']['show'][0],
+            first=False,
+            condense=condense,

+            single=True
+        )
+
+        # Add season map_names if they exist
+        if 'map_names' in library['info']:
+            season_names = library['info']['map_names'].get(str(library['season_number']), {})
+
+            # Add titles from all locations
+            # TODO only add name maps from a specific location
+            for location, names in season_names.items():
+                titles += [name for name in names if name and name not in titles]
+
+
+        identifier = fireEvent('library.identifier', library, single = True)
+
+        # Add season identifier to titles
+        if include_identifier and identifier.get('season') is not None:
+            titles = [title + (' S%02d' % identifier['season']) for title in titles]
+
+
+        if first:
+            return titles[0] if titles else None
+
+        return titles
+
+    def identifier(self, library):
+        if library.get('type') != 'season':
+            return
+
+        return {
+            'season': tryInt(library['season_number'], None)
+        }
+
+    def add(self, attrs = {}, update_after = True):
+        type = attrs.get('type', 'season')
+        primary_provider = attrs.get('primary_provider', 'thetvdb')
+
+        db = get_session()
+        parent_identifier = attrs.get('parent_identifier', None)
+
+        parent = None
+        if parent_identifier:
+            parent = db.query(ShowLibrary).filter_by(primary_provider = primary_provider, identifier = attrs.get('parent_identifier')).first()
+
+        l = db.query(SeasonLibrary).filter_by(type = type, identifier = attrs.get('identifier')).first()
+        if not l:
+            status = fireEvent('status.get', 'needs_update', single = True)
+            l = SeasonLibrary(
+                type = type,
+                primary_provider = primary_provider,
+                year = attrs.get('year'),
+                identifier = attrs.get('identifier'),
+                plot = toUnicode(attrs.get('plot')),
+                tagline = toUnicode(attrs.get('tagline')),
+                status_id = status.get('id'),
+                info = {},
+                parent = parent,
+            )
+
+            title = LibraryTitle(
+                title = toUnicode(attrs.get('title')),
+                simple_title = self.simplifyTitle(attrs.get('title')),
+            )
+
+            l.titles.append(title)
+
+            db.add(l)
+            db.commit()
+
+        # Update library info
+        if update_after is not False:
+            handle = fireEventAsync if update_after is 'async' else fireEvent
+            handle('library.update.season', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))
+
+        library_dict = l.to_dict(self.default_dict)
+        db.expire_all()
+        return library_dict
+
+    def update(self, identifier, default_title = '', force = False):
+
+        if self.shuttingDown():
+            return
+
+        db = get_session()
+        library = db.query(SeasonLibrary).filter_by(identifier = identifier).first()
+        done_status = fireEvent('status.get', 'done', single = True)
+
+        if library:
+            library_dict = library.to_dict(self.default_dict)
+
+        do_update = True
+
+        parent_identifier = None
+        if library.parent is not None:
+            parent_identifier = library.parent.identifier
+
+        if library.status_id == done_status.get('id') and not force:
+            do_update = False
+
+        season_params = {'season_identifier': identifier}
+        info = fireEvent('season.info', merge = True, identifier = parent_identifier, params = season_params)
+
+        # Don't need those here
+        try: del info['in_wanted']
+        except: pass
+        try: del info['in_library']
+        except: pass
+
+        if not info or len(info) == 0:
+            log.error('Could not update, no movie info to work with: %s', identifier)
+            return False
+
+        # Main info
+        if do_update:
+            library.plot = toUnicode(info.get('plot', ''))
+            library.tagline = toUnicode(info.get('tagline', ''))
+            library.year = info.get('year', 0)
+            library.status_id = done_status.get('id')
+            library.season_number = tryInt(info.get('seasonnumber', None))
+            library.info.update(info)
+            db.commit()
+
+        # Titles
+        [db.delete(title) for title in library.titles]
+        db.commit()
+
+        titles = info.get('titles', [])
+        log.debug('Adding titles: %s', titles)
+        counter = 0
+        for title in titles:
+            if not title:
+                continue
+            title = toUnicode(title)
+            t = LibraryTitle(
+                title = title,
+                simple_title = self.simplifyTitle(title),
+                # XXX: default was None; so added a quick hack since we don't really need titiles for seasons anyway
+                #default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
+                default = True,
+            )
+            library.titles.append(t)
+            counter += 1
+
+        db.commit()
+
+        # Files
+        images = info.get('images', [])
+        for image_type in ['poster']:
+            for image in images.get(image_type, []):
+                if not isinstance(image, (str, unicode)):
+                    continue
+
+                file_path = fireEvent('file.download', url = image, single = True)
+                if file_path:
+                    file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
+                    try:
+                        file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
+                        library.files.append(file_obj)
+                        db.commit()
+                        break
+                    except:
+                        log.debug('Failed to attach to library: %s', traceback.format_exc())
+
+        library_dict = library.to_dict(self.default_dict)
+        db.expire_all()
+        return library_dict
+
+    def updateReleaseDate(self, identifier):
+        '''XXX: Not sure what this is for yet in relation to a tvshow'''
+        pass
+        #db = get_session()
+        #library = db.query(SeasonLibrary).filter_by(identifier = identifier).first()
+
+        #if not library.info:
+            #library_dict = self.update(identifier, force = True)
+            #dates = library_dict.get('info', {}).get('release_date')
+        #else:
+            #dates = library.info.get('release_date')
+
+        #if dates and dates.get('expires', 0) < time.time() or not dates:
+            #dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
+            #library.info.update({'release_date': dates })
+            #db.commit()
+
+        #db.expire_all()
+        #return dates
+
+
+    #TODO: Add to base class
+    def simplifyTitle(self, title):
+
+        title = toUnicode(title)
+
+        nr_prefix = '' if title[0] in ascii_letters else '#'
+        title = simplifyString(title)
+
+        for prefix in ['the ']:
+            if prefix == title[:len(prefix)]:
+                title = title[len(prefix):]
+                break
+
+        return nr_prefix + title
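Taken together, the three `library.query` handlers compose episode search titles by chaining: show titles first, then `' S%02d'` appended by the season handler, then `'E%02d'` (no space) appended by the episode handler. Illustrative values:

```python
show_titles = ['Haven']                                      # from the show handler
season_titles = [t + (' S%02d' % 2) for t in show_titles]    # season appends ' S02'
episode_titles = [t + ('E%02d' % 3) for t in season_titles]  # episode appends 'E03'

print(episode_titles)  # ['Haven S02E03']
```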
6  couchpotato/core/media/show/library/show/__init__.py  (new file)
@@ -0,0 +1,6 @@
+from .main import ShowLibraryPlugin
+
+def start():
+    return ShowLibraryPlugin()
+
+config = []
229 couchpotato/core/media/show/library/show/main.py Normal file
@@ -0,0 +1,229 @@
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.settings.model import ShowLibrary, LibraryTitle, File
from couchpotato.core.media._base.library.base import LibraryBase
from qcond.helpers import simplify
from qcond import QueryCondenser
from string import ascii_letters
import time
import traceback

log = CPLog(__name__)


class ShowLibraryPlugin(LibraryBase):

    default_dict = {'titles': {}, 'files': {}}

    def __init__(self):
        self.query_condenser = QueryCondenser()

        addEvent('library.query', self.query)
        addEvent('library.add.show', self.add)
        addEvent('library.update.show', self.update)
        addEvent('library.update.show_release_date', self.updateReleaseDate)

    def query(self, library, first = True, condense = True, **kwargs):
        if isinstance(library, list) or library.get('type') != 'show':
            return

        titles = [title['title'] for title in library['titles']]

        if condense:
            # Use QueryCondenser to build a list of optimal search titles
            condensed_titles = self.query_condenser.distinct(titles)

            if condensed_titles:
                # Use condensed titles if we got a valid result
                titles = condensed_titles
            else:
                # Fallback to simplifying titles
                titles = [simplify(title) for title in titles]

        if first:
            return titles[0] if titles else None

        return titles

    def add(self, attrs = {}, update_after = True):
        type = attrs.get('type', 'show')
        primary_provider = attrs.get('primary_provider', 'thetvdb')

        db = get_session()

        l = db.query(ShowLibrary).filter_by(type = type, identifier = attrs.get('identifier')).first()
        if not l:
            status = fireEvent('status.get', 'needs_update', single = True)
            l = ShowLibrary(
                type = type,
                primary_provider = primary_provider,
                year = attrs.get('year'),
                identifier = attrs.get('identifier'),
                plot = toUnicode(attrs.get('plot')),
                tagline = toUnicode(attrs.get('tagline')),
                status_id = status.get('id'),
                info = {},
                parent = None,
            )

            title = LibraryTitle(
                title = toUnicode(attrs.get('title')),
                simple_title = self.simplifyTitle(attrs.get('title')),
            )

            l.titles.append(title)

            db.add(l)
            db.commit()

        # Update library info
        if update_after is not False:
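            # 'async' defers the update through fireEventAsync; any other truthy value runs it inline before add() returns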
            handle = fireEventAsync if update_after == 'async' else fireEvent
            handle('library.update.show', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))

        library_dict = l.to_dict(self.default_dict)
        db.expire_all()
        return library_dict

    def update(self, identifier, default_title = '', force = False):

        if self.shuttingDown():
            return

        db = get_session()
        library = db.query(ShowLibrary).filter_by(identifier = identifier).first()
        done_status = fireEvent('status.get', 'done', single = True)

        if library:
            library_dict = library.to_dict(self.default_dict)

        do_update = True

        info = fireEvent('show.info', merge = True, identifier = identifier)

        # Don't need those here
        try: del info['in_wanted']
        except: pass
        try: del info['in_library']
        except: pass

        if not info or len(info) == 0:
            log.error('Could not update, no show info to work with: %s', identifier)
            return False

        # Main info
        if do_update:
            library.plot = toUnicode(info.get('plot', ''))
            library.tagline = toUnicode(info.get('tagline', ''))
            library.year = info.get('year', 0)
            library.status_id = done_status.get('id')
            library.show_status = toUnicode(info.get('status', '').lower())
            library.airs_time = info.get('airs_time', None)

            # Bits
            days_of_week_map = {
                u'Monday': 1,
                u'Tuesday': 2,
                u'Wednesday': 4,
                u'Thursday': 8,
                u'Friday': 16,
                u'Saturday': 32,
                u'Sunday': 64,
                u'Daily': 127,
            }
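            # These values are bit flags, so combined schedules can be OR'd together:
            # e.g. Monday|Friday = 1|16 = 17, and u'Daily' = 127 sets all seven bits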
            try:
                library.airs_dayofweek = days_of_week_map.get(info.get('airs_dayofweek'))
            except:
                library.airs_dayofweek = 0

            try:
                library.last_updated = int(info.get('lastupdated'))
            except:
                library.last_updated = int(time.time())

            library.info.update(info)

            db.commit()

            # Titles
            [db.delete(title) for title in library.titles]
            db.commit()

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)
            counter = 0
            for title in titles:
                if not title:
                    continue
                title = toUnicode(title)
                t = LibraryTitle(
                    title = title,
                    simple_title = self.simplifyTitle(title),
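                    # Default when it's the first title with no default given, the only title, or an exact match for default_title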
                    default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
                )
                library.titles.append(t)
                counter += 1

            db.commit()

            # Files
            images = info.get('images', [])
            for image_type in ['poster']:
                for image in images.get(image_type, []):
                    if not isinstance(image, (str, unicode)):
                        continue

                    file_path = fireEvent('file.download', url = image, single = True)
                    if file_path:
                        file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
                        try:
                            file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
                            library.files.append(file_obj)
                            db.commit()

                            break
                        except:
                            log.debug('Failed to attach to library: %s', traceback.format_exc())

        library_dict = library.to_dict(self.default_dict)

        db.expire_all()
        return library_dict

    def updateReleaseDate(self, identifier):
        '''XXX: Not sure what this is for yet in relation to a show'''
        pass
        #db = get_session()
        #library = db.query(ShowLibrary).filter_by(identifier = identifier).first()

        #if not library.info:
            #library_dict = self.update(identifier, force = True)
            #dates = library_dict.get('info', {}).get('release_date')
        #else:
            #dates = library.info.get('release_date')

        #if dates and dates.get('expires', 0) < time.time() or not dates:
            #dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
            #library.info.update({'release_date': dates })
            #db.commit()

        #db.expire_all()
        #return dates

    #TODO: Add to base class
    def simplifyTitle(self, title):

        title = toUnicode(title)

        nr_prefix = '' if title[0] in ascii_letters else '#'
        title = simplifyString(title)

        for prefix in ['the ']:
            if prefix == title[:len(prefix)]:
                title = title[len(prefix):]
                break

        return nr_prefix + title
6 couchpotato/core/media/show/matcher/__init__.py Normal file
@@ -0,0 +1,6 @@
from .main import ShowMatcher


def start():
    return ShowMatcher()

config = []
127 couchpotato/core/media/show/matcher/main.py Normal file
@@ -0,0 +1,127 @@
from couchpotato import CPLog
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import dictIsSubset, tryInt, toIterable
from couchpotato.core.media._base.matcher.base import MatcherBase
from couchpotato.core.providers.base import MultiProvider

log = CPLog(__name__)


class ShowMatcher(MultiProvider):

    def getTypes(self):
        return [Season, Episode]


class Base(MatcherBase):
    # TODO come back to this later, think this could be handled better, this is starting to get out of hand....
    quality_map = {
        'bluray_1080p': {'resolution': ['1080p'], 'source': ['bluray']},
        'bluray_720p': {'resolution': ['720p'], 'source': ['bluray']},

        'bdrip_1080p': {'resolution': ['1080p'], 'source': ['BDRip']},
        'bdrip_720p': {'resolution': ['720p'], 'source': ['BDRip']},

        'brrip_1080p': {'resolution': ['1080p'], 'source': ['BRRip']},
        'brrip_720p': {'resolution': ['720p'], 'source': ['BRRip']},

        'webdl_1080p': {'resolution': ['1080p'], 'source': ['webdl', ['web', 'dl']]},
        'webdl_720p': {'resolution': ['720p'], 'source': ['webdl', ['web', 'dl']]},
        'webdl_480p': {'resolution': ['480p'], 'source': ['webdl', ['web', 'dl']]},

        'hdtv_720p': {'resolution': ['720p'], 'source': ['hdtv']},
        'hdtv_sd': {'resolution': ['480p', None], 'source': ['hdtv']},
    }
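    # Each entry presumably lists the parsed values accepted for that quality; a nested
    # list such as ['web', 'dl'] likely requires all fragments to appear together (an
    # assumption - the actual check happens in the 'matcher.correct_quality' handler)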

    def __init__(self):
        super(Base, self).__init__()

        addEvent('%s.matcher.correct_identifier' % self.type, self.correctIdentifier)

    def correct(self, chain, release, media, quality):
        log.info("Checking if '%s' is valid", release['name'])
        log.info2('Release parsed as: %s', chain.info)

        if not fireEvent('matcher.correct_quality', chain, quality, self.quality_map, single = True):
            log.info('Wrong: %s, quality does not match', release['name'])
            return False

        if not fireEvent('%s.matcher.correct_identifier' % self.type, chain, media):
            log.info('Wrong: %s, identifier does not match', release['name'])
            return False

        if not fireEvent('matcher.correct_title', chain, media):
            log.info("Wrong: '%s', undetermined naming.", (' '.join(chain.info['show_name'])))
            return False

        return True

    def correctIdentifier(self, chain, media):
        raise NotImplementedError()

    def getChainIdentifier(self, chain):
        if 'identifier' not in chain.info:
            return None

        identifier = self.flattenInfo(chain.info['identifier'])
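        # flattenInfo presumably collapses the parser's nested capture groups into a flat
        # dict of lists, e.g. {'season': [1], 'episode': [5]} (an assumption, inferred
        # from the list handling below)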

        # Try cast values to integers
        for key, value in identifier.items():
            if isinstance(value, list):
                if len(value) <= 1:
                    value = value[0]
                else:
                    log.warning('Wrong: identifier contains multiple season or episode values, unsupported')
                    return None

            identifier[key] = tryInt(value, value)

        return identifier


class Episode(Base):
    type = 'episode'

    def correctIdentifier(self, chain, media):
        identifier = self.getChainIdentifier(chain)
        if not identifier:
            log.info2('Wrong: release identifier is not valid (unsupported or missing identifier)')
            return False

        # TODO - Parse episode ranges from identifier to determine if they are multi-part episodes
        if any([x in identifier for x in ['episode_from', 'episode_to']]):
            log.info2('Wrong: releases with identifier ranges are not supported yet')
            return False

        required = fireEvent('library.identifier', media['library'], single = True)

        # TODO - Support air by date episodes
        # TODO - Support episode parts

        if identifier != required:
            log.info2('Wrong: required identifier (%s) does not match release identifier (%s)', (required, identifier))
            return False

        return True


class Season(Base):
    type = 'season'

    def correctIdentifier(self, chain, media):
        identifier = self.getChainIdentifier(chain)
        if not identifier:
            log.info2('Wrong: release identifier is not valid (unsupported or missing identifier)')
            return False

        # TODO - Parse episode ranges from identifier to determine if they are season packs
        if any([x in identifier for x in ['episode_from', 'episode_to']]):
            log.info2('Wrong: releases with identifier ranges are not supported yet')
            return False

        required = fireEvent('library.identifier', media['library'], single = True)

        if identifier != required:
            log.info2('Wrong: required identifier (%s) does not match release identifier (%s)', (required, identifier))
            return False

        return True
7 couchpotato/core/media/show/searcher/__init__.py Normal file
@@ -0,0 +1,7 @@
from .main import ShowSearcher
import random


def start():
    return ShowSearcher()

config = []
||||
189
couchpotato/core/media/show/searcher/main.py
Normal file
189
couchpotato/core/media/show/searcher/main.py
Normal file
@@ -0,0 +1,189 @@
from couchpotato import Env, get_session
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import getTitle, toIterable
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.main import SearchSetupError
from couchpotato.core.media.show._base import ShowBase
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Media
from qcond import QueryCondenser
from qcond.helpers import simplify

log = CPLog(__name__)


class ShowSearcher(Plugin):

    type = ['show', 'season', 'episode']

    in_progress = False

    def __init__(self):
        super(ShowSearcher, self).__init__()

        self.query_condenser = QueryCondenser()

        for type in toIterable(self.type):
            addEvent('%s.searcher.single' % type, self.single)

        addEvent('searcher.correct_release', self.correctRelease)

    def single(self, media, search_protocols = None, manual = False):
        show, season, episode = self.getLibraries(media['library'])

        db = get_session()

        if media['type'] == 'show':
            for library in season:
                # TODO ideally we shouldn't need to fetch the media for each season library here
                m = db.query(Media).filter_by(library_id = library['library_id']).first()

                fireEvent('season.searcher.single', m.to_dict(ShowBase.search_dict))

            return

        # Find out search type
        try:
            if not search_protocols:
                search_protocols = fireEvent('searcher.protocols', single = True)
        except SearchSetupError:
            return

        done_status, available_status, ignored_status, failed_status = fireEvent('status.get', ['done', 'available', 'ignored', 'failed'], single = True)

        if not media['profile'] or media['status_id'] == done_status.get('id'):
            log.debug('Episode doesn\'t have a profile or is already done, assuming it is in the manage tab.')
            return

        #pre_releases = fireEvent('quality.pre_releases', single = True)

        found_releases = []
        too_early_to_search = []

        default_title = fireEvent('library.query', media['library'], condense = False, single = True)
        if not default_title:
            log.error('No proper info found for episode, removing it from the library to keep it from causing more issues.')
            #fireEvent('episode.delete', episode['id'], single = True)
            return

        if not show or not season:
            log.error('Unable to find show or season library in database, missing required data for searching')
            return

        fireEvent('notify.frontend', type = 'show.searcher.started.%s' % media['id'], data = True, message = 'Searching for "%s"' % default_title)

        ret = False
        has_better_quality = None

        for quality_type in media['profile']['types']:
            # TODO check air date?
            #if not self.conf('always_search') and not self.couldBeReleased(quality_type['quality']['identifier'] in pre_releases, release_dates, movie['library']['year']):
            #    too_early_to_search.append(quality_type['quality']['identifier'])
            #    continue

            has_better_quality = 0
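            # Counts releases already snatched/done at this quality or better; releases
            # that are merely 'available', ignored or failed don't block a new search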

            # See if better quality is available
            for release in media['releases']:
                if release['quality']['order'] <= quality_type['quality']['order'] and release['status_id'] not in [available_status.get('id'), ignored_status.get('id'), failed_status.get('id')]:
                    has_better_quality += 1

            # Don't search for quality lower than already available.
            if has_better_quality == 0:

                log.info('Search for %s S%02d%s in %s', (
                    getTitle(show),
                    season['season_number'],
                    "E%02d" % episode['episode_number'] if episode and len(episode) == 1 else "",
                    quality_type['quality']['label'])
                )
                quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)

                results = fireEvent('searcher.search', search_protocols, media, quality, single = True)
                if len(results) == 0:
                    log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))

                # Check that the media wasn't deleted while searching
                if not db.query(Media).filter_by(id = media.get('id')).first():
                    break

                # Add them to this media's releases list
                found_releases += fireEvent('release.create_from_search', results, media, quality_type, single = True)

                # Try find a valid result and download it
                if fireEvent('release.try_download_result', results, media, quality_type, manual, single = True):
                    ret = True

                # Remove releases that aren't found anymore
                for release in media.get('releases', []):
                    if release.get('status_id') == available_status.get('id') and release.get('identifier') not in found_releases:
                        fireEvent('release.delete', release.get('id'), single = True)
            else:
                log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title))
                fireEvent('media.restatus', media['id'])
                break

            # Break if CP wants to shut down
            if self.shuttingDown() or ret:
                break

        if len(too_early_to_search) > 0:
            log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
        elif media['type'] == 'season' and not ret and has_better_quality == 0:
            # If nothing was found, start searching for episodes individually
            log.info('No season pack found, starting individual episode search')

            for library in episode:
                # TODO ideally we shouldn't need to fetch the media for each episode library here
                m = db.query(Media).filter_by(library_id = library['library_id']).first()

                fireEvent('episode.searcher.single', m.to_dict(ShowBase.search_dict))

        fireEvent('notify.frontend', type = 'show.searcher.ended.%s' % media['id'], data = True)

        return ret

    def correctRelease(self, release = None, media = None, quality = None, **kwargs):

        if media.get('type') not in ['season', 'episode']: return

        retention = Env.setting('retention', section = 'nzb')

        if release.get('seeders') is None and 0 < retention < release.get('age', 0):
            log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (release['age'], retention, release['name']))
            return False

        # Check for required and ignored words
        if not fireEvent('searcher.correct_words', release['name'], media, single = True):
            return False

        # TODO Matching is quite costly, maybe we should be caching release matches somehow? (also look at caper optimizations)
        match = fireEvent('matcher.match', release, media, quality, single = True)
        if match:
            return match.weight

        return False

    def getLibraries(self, library):
        if 'related_libraries' not in library:
            log.warning("'related_libraries' missing from media library, unable to continue searching")
            return None, None, None

        libraries = library['related_libraries']
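        # related_libraries presumably maps each library type to a list of library dicts,
        # e.g. {'show': [...], 'season': [...], 'episode': [...]} (an assumption, inferred
        # from the lookups below)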

        # Show always collapses as there can never be any multiples
        show = libraries.get('show', [])
        show = show[0] if len(show) else None

        # Season collapses if the subject is a season or episode
        season = libraries.get('season', [])
        if library['type'] in ['season', 'episode']:
            season = season[0] if len(season) else None

        # Episode collapses if the subject is an episode
        episode = libraries.get('episode', [])
        if library['type'] == 'episode':
            episode = episode[0] if len(episode) else None

        return show, season, episode
@@ -26,7 +26,25 @@ class QualityPlugin(Plugin):
         {'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':[]},
         {'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':[]},
         {'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':[]},
-        {'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':[]}
+        {'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':[]},
+
+        # TODO come back to this later, think this could be handled better, this is starting to get out of hand....
+        # BluRay
+        {'identifier': 'bluray_1080p', 'hd': True, 'size': (800, 5000), 'label': 'BluRay - 1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        {'identifier': 'bluray_720p', 'hd': True, 'size': (800, 5000), 'label': 'BluRay - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        # BDRip
+        {'identifier': 'bdrip_1080p', 'hd': True, 'size': (800, 5000), 'label': 'BDRip - 1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        {'identifier': 'bdrip_720p', 'hd': True, 'size': (800, 5000), 'label': 'BDRip - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        # BRRip
+        {'identifier': 'brrip_1080p', 'hd': True, 'size': (800, 5000), 'label': 'BRRip - 1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        {'identifier': 'brrip_720p', 'hd': True, 'size': (800, 5000), 'label': 'BRRip - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        # WEB-DL
+        {'identifier': 'webdl_1080p', 'hd': True, 'size': (800, 5000), 'label': 'WEB-DL - 1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        {'identifier': 'webdl_720p', 'hd': True, 'size': (800, 5000), 'label': 'WEB-DL - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        {'identifier': 'webdl_480p', 'hd': True, 'size': (100, 5000), 'label': 'WEB-DL - 480p', 'width': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        # HDTV
+        {'identifier': 'hdtv_720p', 'hd': True, 'size': (800, 5000), 'label': 'HDTV - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
+        {'identifier': 'hdtv_sd', 'hd': False, 'size': (100, 1000), 'label': 'HDTV - SD', 'width': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'mp4', 'avi']},
     ]
     pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
@@ -120,7 +120,7 @@ class Release(Plugin):
         if not rel:
             rel = Relea(
                 identifier = identifier,
-                movie = media,
+                media = media,
                 quality_id = group['meta_data']['quality'].get('id'),
                 status_id = done_status.get('id')
             )
@@ -231,6 +231,12 @@ class Release(Plugin):
             item['protocol'] = item['type']
             item['type'] = 'movie'
 
+            success = self.download(data = item, media = rel.media.to_dict({
+                'profile': {'types': {'quality': {}}},
+                'releases': {'status': {}, 'quality': {}},
+                'library': {'titles': {}, 'files': {}},
+                'files': {}
+            }), manual = True)
 
             if item.get('protocol') != 'torrent_magnet':
                 item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
@@ -376,8 +382,7 @@ class Release(Plugin):
         if not rls:
             rls = Relea(
                 identifier = rel_identifier,
-                movie_id = media.get('id'),
-                #media_id = media.get('id'),
+                media_id = media.get('id'),
                 quality_id = quality_type.get('quality_id'),
                 status_id = available_status.get('id')
             )
@@ -414,7 +419,7 @@ class Release(Plugin):
         releases_raw = db.query(Relea) \
             .options(joinedload_all('info')) \
             .options(joinedload_all('files')) \
-            .filter(Relea.movie_id == id) \
+            .filter(Relea.media_id == id) \
             .all()
 
         releases = [r.to_dict({'info':{}, 'files':{}}) for r in releases_raw]
@@ -240,7 +240,7 @@ class Renamer(Plugin):
             # Overwrite destination when set in category
             destination = to_folder
             category_label = ''
-            for movie in library_ent.movies:
+            for movie in library_ent.media:
 
                 if movie.category and movie.category.label:
                     category_label = movie.category.label
@@ -414,13 +414,13 @@ class Renamer(Plugin):
             remove_leftovers = True
 
             # Add it to the wanted list before we continue
-            if len(library_ent.movies) == 0:
+            if len(library_ent.media) == 0:
                 profile = db.query(Profile).filter_by(core = True, label = group['meta_data']['quality']['label']).first()
                 fireEvent('movie.add', params = {'identifier': group['library']['identifier'], 'profile_id': profile.id}, search_after = False)
                 db.expire_all()
                 library_ent = db.query(Library).filter_by(identifier = group['library']['identifier']).first()
 
-            for movie in library_ent.movies:
+            for movie in library_ent.media:
 
                 # Mark movie "done" once it's found the quality with the finish check
                 try:
@@ -867,7 +867,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
         try:
             for rel in rels:
                 rel_dict = rel.to_dict({'info': {}})
-                movie_dict = fireEvent('media.get', media_id = rel.movie_id, single = True)
+                movie_dict = fireEvent('media.get', media_id = rel.media_id, single = True)
 
                 if not isinstance(rel_dict['info'], (dict)):
                     log.error('Faulty release found without any info, ignoring.')
@@ -950,7 +950,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
                     fireEvent('download.remove_failed', release_download, single = True)
 
                 if self.conf('next_on_failed'):
-                    fireEvent('movie.searcher.try_next_release', media_id = rel.movie_id)
+                    fireEvent('movie.searcher.try_next_release', media_id = rel.media_id)
 
             elif release_download['status'] == 'completed':
                 log.info('Download of %s completed!', release_download['name'])
@@ -35,8 +35,8 @@ class Score(Plugin):
         # Torrents only
         if nzb.get('seeders'):
             try:
-                score += nzb.get('seeders') * 100 / 15
-                score += nzb.get('leechers') * 100 / 30
+                score += nzb.get('seeders') / 5
+                score += nzb.get('leechers') / 10
             except:
                 pass
@@ -27,7 +27,7 @@ class Subtitle(Plugin):
         library = db.query(Library).all()
         done_status = fireEvent('status.get', 'done', single = True)
 
-        for movie in library.movies:
+        for movie in library.media:
 
             for release in movie.releases:
@@ -1,6 +1,6 @@
 from couchpotato.core.event import addEvent, fireEvent
 from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
-    possibleTitles, getTitle
+    possibleTitles, toIterable
 from couchpotato.core.logger import CPLog
 from couchpotato.core.plugins.base import Plugin
 from couchpotato.environment import Env
@@ -114,7 +114,9 @@ class YarrProvider(Provider):
     def __init__(self):
         addEvent('provider.enabled_protocols', self.getEnabledProtocol)
         addEvent('provider.belongs_to', self.belongsTo)
-        addEvent('provider.search.%s.%s' % (self.protocol, self.type), self.search)
+
+        for type in toIterable(self.type):
+            addEvent('provider.search.%s.%s' % (self.protocol, type), self.search)
 
     def getEnabledProtocol(self):
         if self.isEnabled():
@@ -178,7 +180,7 @@ class YarrProvider(Provider):
 
         return 'try_next'
 
-    def search(self, movie, quality):
+    def search(self, media, quality):
 
         if self.isDisabled():
             return []
@@ -190,15 +192,17 @@ class YarrProvider(Provider):
 
         # Create result container
         imdb_results = hasattr(self, '_search')
-        results = ResultList(self, movie, quality, imdb_results = imdb_results)
+        results = ResultList(self, media, quality, imdb_results = imdb_results)
 
         # Do search based on imdb id
         if imdb_results:
-            self._search(movie, quality, results)
+            self._search(media, quality, results)
         # Search possible titles
         else:
-            for title in possibleTitles(getTitle(movie['library'])):
-                self._searchOnTitle(title, movie, quality, results)
+            media_title = fireEvent('library.query', media['library'], single = True)
+
+            for title in possibleTitles(media_title):
+                self._searchOnTitle(title, media, quality, results)
 
         return results
 
@@ -241,8 +245,7 @@ class YarrProvider(Provider):
 
     def getCatId(self, identifier):
 
-        for cats in self.cat_ids:
-            ids, qualities = cats
+        for ids, qualities in self.cat_ids:
             if identifier in qualities:
                 return ids
@@ -1,7 +1,7 @@
-from .main import MovieResultModifier
+from .main import InfoResultModifier
 
 
 def start():
-    return MovieResultModifier()
+    return InfoResultModifier()
 
 config = []
@@ -3,6 +3,7 @@ from couchpotato.core.event import addEvent, fireEvent
 from couchpotato.core.helpers.variable import mergeDicts, randomString
 from couchpotato.core.logger import CPLog
 from couchpotato.core.plugins.base import Plugin
+from couchpotato.core.providers.base import MultiProvider
 from couchpotato.core.settings.model import Library
 import copy
 import traceback
@@ -10,7 +11,17 @@ import traceback
 log = CPLog(__name__)
 
 
-class MovieResultModifier(Plugin):
+class InfoResultModifier(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Show]
+
+
+class ModifierBase(Plugin):
+    pass
+
+
+class Movie(ModifierBase):
 
     default_info = {
         'tmdb_id': 0,
@@ -94,7 +105,7 @@ class MovieResultModifier(Plugin):
         # Statuses
         active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)
 
-        for movie in l.movies:
+        for movie in l.media:
             if movie.status_id == active_status['id']:
                 temp['in_wanted'] = fireEvent('media.get', movie.id, single = True)
 
@@ -113,3 +124,7 @@ class MovieResultModifier(Plugin):
         if result and result.get('imdb'):
             return mergeDicts(result, self.getLibraryTags(result['imdb']))
         return result
+
+
+class Show(ModifierBase):
+    pass
@@ -3,3 +3,15 @@ from couchpotato.core.providers.base import Provider
 
 class MovieProvider(Provider):
     type = 'movie'
+
+
+class ShowProvider(Provider):
+    type = 'show'
+
+
+class SeasonProvider(Provider):
+    type = 'season'
+
+
+class EpisodeProvider(Provider):
+    type = 'episode'
@@ -82,6 +82,10 @@ class OMDBAPI(MovieProvider):
             if tmp_movie.get(key).lower() == 'n/a':
                 del movie[key]
 
+        # Ignore series from omdbapi for now, should we use this in the future?
+        if movie.get('Type') == "series":
+            return
+
         year = tryInt(movie.get('Year', ''))
 
         movie_data = {
24 couchpotato/core/providers/info/thetvdb/__init__.py Normal file
@@ -0,0 +1,24 @@
from .main import TheTVDb


def start():
    return TheTVDb()

config = [{
    'name': 'thetvdb',
    'groups': [
        {
            'tab': 'providers',
            'name': 'thetvdb',
            'label': 'TheTVDB',
            'hidden': True,
            'description': 'Used for all calls to TheTVDB.',
            'options': [
                {
                    'name': 'api_key',
                    'default': '7966C02F860586D2',
                    'label': 'Api Key',
                },
            ],
        },
    ],
}]
468 couchpotato/core/providers/info/thetvdb/main.py Normal file
@@ -0,0 +1,468 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.info.base import ShowProvider
from couchpotato.environment import Env
from tvdb_api import tvdb_api, tvdb_exceptions
from datetime import datetime
import traceback
import os

log = CPLog(__name__)

# TODO: Consider grabbing zips to put less strain on tvdb
# TODO: Unicode stuff (check)
# TODO: Notify frontend on error (tvdb down at the moment)
# TODO: Expose apikey in setting so it can be changed by user

class TheTVDb(ShowProvider):

    def __init__(self):
        addEvent('info.search', self.search, priority = 1)
        addEvent('show.search', self.search, priority = 1)
        addEvent('show.info', self.getShowInfo, priority = 1)
        addEvent('season.info', self.getSeasonInfo, priority = 1)
        addEvent('episode.info', self.getEpisodeInfo, priority = 1)

        self.tvdb_api_parms = {
            'apikey': self.conf('api_key'),
            'banners': True,
            'language': 'en',
            'cache': os.path.join(Env.get('cache_dir'), 'thetvdb_api'),
        }
        self._setup()

    def _setup(self):
        self.tvdb = tvdb_api.Tvdb(**self.tvdb_api_parms)
        self.valid_languages = self.tvdb.config['valid_languages']

    def search(self, q, limit = 12, language = 'en'):
        ''' Find show by name
        show = { 'id': 74713,
                 'language': 'en',
                 'lid': 7,
                 'seriesid': '74713',
                 'seriesname': u'Breaking Bad',}
        '''

        if self.isDisabled():
            return False

        if language != self.tvdb_api_parms['language'] and language in self.valid_languages:
            self.tvdb_api_parms['language'] = language
            self._setup()

        search_string = simplifyString(q)
        cache_key = 'thetvdb.cache.%s.%s' % (search_string, limit)
        results = self.getCache(cache_key)

        if not results:
            log.debug('Searching for show: %s', q)
            raw = None
            try:
                raw = self.tvdb.search(search_string)
            except (tvdb_exceptions.tvdb_error, IOError), e:
                log.error('Failed searching TheTVDB for "%s": %s', (search_string, traceback.format_exc()))
                return False

            results = []
            if raw:
                try:
                    nr = 0
                    for show_info in raw:
                        show = self.tvdb[int(show_info['id'])]
                        results.append(self._parseShow(show))
                        nr += 1
                        if nr == limit:
                            break
                    log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results])
                    self.setCache(cache_key, results)
                    return results
                except (tvdb_exceptions.tvdb_error, IOError), e:
                    log.error('Failed parsing TheTVDB for "%s": %s', (show, traceback.format_exc()))
                    return False
        return results

    def getShow(self, identifier = None):
        show = None
        try:
            log.debug('Getting show: %s', identifier)
            show = self.tvdb[int(identifier)]
        except (tvdb_exceptions.tvdb_error, IOError), e:
            log.error('Failed to getShowInfo for show id "%s": %s', (identifier, traceback.format_exc()))
            return None

        return show

    def getShowInfo(self, identifier = None):
        if not identifier:
            return None

        cache_key = 'thetvdb.cache.%s' % identifier
        log.debug('Getting showInfo: %s', cache_key)
        result = self.getCache(cache_key) or {}
        if result:
            return result

        show = self.getShow(identifier = identifier)
        if show:
            result = self._parseShow(show)
            self.setCache(cache_key, result)

        return result

    def getSeasonInfo(self, identifier = None, params = {}):
        """Either return a list of all seasons or a single season by number.
        identifier is the show 'id'
        """
        if not identifier:
            return False

        season_identifier = params.get('season_identifier', None)

        # season_identifier must contain the 'show id : season number' since there is no tvdb id
        # for season and we need a reference to both the show id and season number
        if season_identifier:
            try: season_identifier = int(season_identifier.split(':')[1])
            except: return False

        cache_key = 'thetvdb.cache.%s.%s' % (identifier, season_identifier)
        log.debug('Getting SeasonInfo: %s', cache_key)
        result = self.getCache(cache_key) or {}
        if result:
            return result

        try:
            show = self.tvdb[int(identifier)]
        except (tvdb_exceptions.tvdb_error, IOError), e:
            log.error('Failed parsing TheTVDB SeasonInfo for id "%s": %s', (identifier, traceback.format_exc()))
            return False

        result = []
        for number, season in show.items():
            if season_identifier is not None and number == season_identifier:
                result = self._parseSeason(show, (number, season))
                self.setCache(cache_key, result)
                return result
            else:
                result.append(self._parseSeason(show, (number, season)))

        self.setCache(cache_key, result)
        return result

    def getEpisodeInfo(self, identifier = None, params = {}):
        """Either return a list of all episodes or a single episode.
        If episode_identifier is given, only the episode with that id is returned.
        """
        season_identifier = params.get('season_identifier', None)
        episode_identifier = params.get('episode_identifier', None)

        if not identifier and season_identifier is None:
            return False

        # season_identifier must contain the 'show id : season number' since there is no tvdb id
        # for season and we need a reference to both the show id and season number
        if season_identifier:
            try:
                identifier, season_identifier = season_identifier.split(':')
                season_identifier = int(season_identifier)
            except: return None

        cache_key = 'thetvdb.cache.%s.%s.%s' % (identifier, episode_identifier, season_identifier)
        log.debug('Getting EpisodeInfo: %s', cache_key)
        result = self.getCache(cache_key) or {}
        if result:
            return result

        try:
            show = self.tvdb[int(identifier)]
        except (tvdb_exceptions.tvdb_error, IOError), e:
            log.error('Failed parsing TheTVDB EpisodeInfo for id "%s": %s', (identifier, traceback.format_exc()))
            return False

        result = []
        for number, season in show.items():
            if season_identifier is not None and number != season_identifier:
                continue

            for episode in season.values():
                if episode_identifier is not None and episode['id'] == toUnicode(episode_identifier):
                    result = self._parseEpisode(show, episode)
                    self.setCache(cache_key, result)
                    return result
                else:
                    result.append(self._parseEpisode(show, episode))

        self.setCache(cache_key, result)
        return result

    def _parseShow(self, show):
        """
        'actors': u'|Bryan Cranston|Aaron Paul|Dean Norris|RJ Mitte|Betsy Brandt|Anna Gunn|Laura Fraser|Jesse Plemons|Christopher Cousins|Steven Michael Quezada|Jonathan Banks|Giancarlo Esposito|Bob Odenkirk|',
        'added': None,
        'addedby': None,
        'airs_dayofweek': u'Sunday',
        'airs_time': u'9:00 PM',
        'banner': u'http://thetvdb.com/banners/graphical/81189-g13.jpg',
        'contentrating': u'TV-MA',
        'fanart': u'http://thetvdb.com/banners/fanart/original/81189-28.jpg',
        'firstaired': u'2008-01-20',
        'genre': u'|Crime|Drama|Suspense|',
        'id': u'81189',
        'imdb_id': u'tt0903747',
        'language': u'en',
        'lastupdated': u'1376620212',
        'network': u'AMC',
        'networkid': None,
        'overview': u"Walter White, a struggling high school chemistry teacher is diagnosed with advanced lung cancer. He turns to a life of crime, producing and selling methamphetamine accompanied by a former student, Jesse Pinkman with the aim of securing his family's financial future before he dies.",
        'poster': u'http://thetvdb.com/banners/posters/81189-22.jpg',
        'rating': u'9.3',
        'ratingcount': u'473',
        'runtime': u'60',
        'seriesid': u'74713',
        'seriesname': u'Breaking Bad',
        'status': u'Continuing',
        'zap2it_id': u'SH01009396'
        """

        #
        # NOTE: show object only allows direct access via
        # show['id'], not show.get('id')
        #

        # TODO: Make sure we have a valid show id, not '' or None
        #if len(show['id']) is 0:
        #    return None

        ## Images
        poster = show['poster'] or None
        backdrop = show['fanart'] or None

        genres = [] if show['genre'] is None else show['genre'].strip('|').split('|')
        if show['firstaired'] is not None:
            try: year = datetime.strptime(show['firstaired'], '%Y-%m-%d').year
            except: year = None
        else:
            year = None

        try:
            id = int(show['id'])
        except:
            id = None

        show_data = {
            'id': id,
            'type': 'show',
            'primary_provider': 'thetvdb',
            'titles': [show['seriesname'] or u'', ],
            'original_title': show['seriesname'] or u'',
            'images': {
                'poster': [poster] if poster else [],
                'backdrop': [backdrop] if backdrop else [],
                'poster_original': [],
                'backdrop_original': [],
            },
            'year': year,
            'genres': genres,
            'imdb': show['imdb_id'] or None,
            'zap2it_id': show['zap2it_id'] or None,
            'seriesid': show['seriesid'] or None,
            'network': show['network'] or None,
            'networkid': show['networkid'] or None,
            'airs_dayofweek': show['airs_dayofweek'] or None,
            'airs_time': show['airs_time'] or None,
            'firstaired': show['firstaired'] or None,
            'released': show['firstaired'] or None,
            'runtime': show['runtime'] or None,
            'contentrating': show['contentrating'] or None,
            'rating': show['rating'] or None,
            'ratingcount': show['ratingcount'] or None,
            'actors': show['actors'] or None,
            'lastupdated': show['lastupdated'] or None,
            'status': show['status'] or None,
            'language': show['language'] or None,
        }

        show_data = dict((k, v) for k, v in show_data.iteritems() if v)
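        # Strip falsy values, presumably so that merged results from other providers aren't overwritten with empty fields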

        # Add alternative titles
        try:
            raw = self.tvdb.search(show['seriesname'])
            if raw:
                for show_info in raw:
                    if show_info['id'] == show_data['id'] and show_info.get('aliasnames', None):
                        for alt_name in show_info['aliasnames'].split('|'):
                            show_data['titles'].append(toUnicode(alt_name))
        except (tvdb_exceptions.tvdb_error, IOError), e:
            log.error('Failed searching TheTVDB for "%s": %s', (show['seriesname'], traceback.format_exc()))

        return show_data

    def _parseSeason(self, show, season_tuple):
        """
        contains no data
        """

        number, season = season_tuple
        title = toUnicode('%s - Season %s' % (show['seriesname'] or u'', str(number)))
        poster = []
        try:
            for id, data in show.data['_banners']['season']['season'].items():
                if data.get('season', None) == str(number) and data['bannertype'] == 'season' and data['bannertype2'] == 'season':
                    poster.append(data.get('_bannerpath'))
                    break # Only really need one
        except:
            pass

        try:
            id = (show['id'] + ':' + str(number))
        except:
            id = None

        # XXX: work on title; added default_title to fix an error
        season_data = {
            'id': id,
            'type': 'season',
            'primary_provider': 'thetvdb',
            'titles': [title, ],
            'original_title': title,
            'via_thetvdb': True,
            'parent_identifier': show['id'] or None,
            'seasonnumber': str(number),
            'images': {
                'poster': poster,
                'backdrop': [],
                'poster_original': [],
                'backdrop_original': [],
            },
            'year': None,
            'genres': None,
            'imdb': None,
        }

        season_data = dict((k, v) for k, v in season_data.iteritems() if v)
        return season_data

    def _parseEpisode(self, show, episode):
        """
        ('episodenumber', u'1'),
        ('thumb_added', None),
        ('rating', u'7.7'),
        ('overview',
         u'Experienced waitress Max Black meets her new co-worker, former rich-girl Caroline Channing, and puts her skills to the test at an old but re-emerging Brooklyn diner. Despite her initial distaste for Caroline, Max eventually softens and the two team up for a new business venture.'),
        ('dvd_episodenumber', None),
        ('dvd_discid', None),
        ('combined_episodenumber', u'1'),
        ('epimgflag', u'7'),
        ('id', u'4099506'),
        ('seasonid', u'465948'),
        ('thumb_height', u'225'),
        ('tms_export', u'1374789754'),
        ('seasonnumber', u'1'),
        ('writer', u'|Michael Patrick King|Whitney Cummings|'),
        ('lastupdated', u'1371420338'),
        ('filename', u'http://thetvdb.com/banners/episodes/248741/4099506.jpg'),
        ('absolute_number', u'1'),
        ('ratingcount', u'102'),
        ('combined_season', u'1'),
        ('thumb_width', u'400'),
        ('imdb_id', u'tt1980319'),
        ('director', u'James Burrows'),
        ('dvd_chapter', None),
        ('dvd_season', None),
        ('gueststars',
         u'|Brooke Lyons|Noah Mills|Shoshana Bush|Cale Hartmann|Adam Korson|Alex Enriquez|Matt Cook|Bill Parks|Eugene Shaw|Sergey Brusilovsky|Greg Lewis|Cocoa Brown|Nick Jameson|'),
        ('seriesid', u'248741'),
        ('language', u'en'),
        ('productioncode', u'296793'),
        ('firstaired', u'2011-09-19'),
        ('episodename', u'Pilot')]
        """

        poster = episode.get('filename', [])
        backdrop = []
        genres = []
        plot = "%s - %sx%s - %s" % (show['seriesname'] or u'',
                                    episode.get('seasonnumber', u'?'),
                                    episode.get('episodenumber', u'?'),
                                    episode.get('overview', u''))
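        # e.g. u'Breaking Bad - 1x1 - <episode overview>' (illustrative)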
        if episode.get('firstaired', None) is not None:
            try: year = datetime.strptime(episode['firstaired'], '%Y-%m-%d').year
            except: year = None
        else:
            year = None

        try:
            id = int(episode['id'])
        except:
            id = None

        episode_data = {
            'id': id,
            'type': 'episode',
            'primary_provider': 'thetvdb',
            'via_thetvdb': True,
            'thetvdb_id': id,
            'titles': [episode.get('episodename', u''), ],
            'original_title': episode.get('episodename', u''),
            'images': {
                'poster': [poster] if poster else [],
                'backdrop': [backdrop] if backdrop else [],
                'poster_original': [],
                'backdrop_original': [],
            },
            'imdb': episode.get('imdb_id', None),
            'runtime': None,
            'released': episode.get('firstaired', None),
            'year': year,
            'plot': plot,
            'genres': genres,
            'parent_identifier': show['id'] or None,
            'seasonnumber': episode.get('seasonnumber', None),
            'episodenumber': episode.get('episodenumber', None),
            'combined_episodenumber': episode.get('combined_episodenumber', None),
            'absolute_number': episode.get('absolute_number', None),
            'combined_season': episode.get('combined_season', None),
            'productioncode': episode.get('productioncode', None),
            'seriesid': episode.get('seriesid', None),
            'seasonid': episode.get('seasonid', None),
            'firstaired': episode.get('firstaired', None),
            'thumb_added': episode.get('thumb_added', None),
            'thumb_height': episode.get('thumb_height', None),
            'thumb_width': episode.get('thumb_width', None),
            'rating': episode.get('rating', None),
            'ratingcount': episode.get('ratingcount', None),
            'epimgflag': episode.get('epimgflag', None),
            'dvd_episodenumber': episode.get('dvd_episodenumber', None),
            'dvd_discid': episode.get('dvd_discid', None),
            'dvd_chapter': episode.get('dvd_chapter', None),
            'dvd_season': episode.get('dvd_season', None),
            'tms_export': episode.get('tms_export', None),
            'writer': episode.get('writer', None),
            'director': episode.get('director', None),
            'gueststars': episode.get('gueststars', None),
            'lastupdated': episode.get('lastupdated', None),
            'language': episode.get('language', None),
        }

        episode_data = dict((k, v) for k, v in episode_data.iteritems() if v)
        return episode_data

    #def getImage(self, show, type = 'poster', size = 'cover'):
        #""""""
        ## XXX: Need to implement size
        #image_url = ''

        #for res, res_data in show['_banners'].get(type, {}).items():
            #for bid, banner_info in res_data.items():
                #image_url = banner_info.get('_bannerpath', '')
                #break

        #return image_url

    def isDisabled(self):
        if self.conf('api_key') == '':
            log.error('No API key provided.')
            return True
        else:
            return False
24 couchpotato/core/providers/info/xem/__init__.py Normal file
@@ -0,0 +1,24 @@
from .main import Xem


def start():
    return Xem()

config = [{
    'name': 'xem',
    'groups': [
        {
            'tab': 'providers',
            'name': 'xem',
            'label': 'TheXem',
            'hidden': True,
            'description': 'Used for all calls to TheXem.',
            'options': [
                {
                    'name': 'enabled',
                    'default': True,
                    'label': 'Enabled',
                },
            ],
        },
    ],
}]
184 couchpotato/core/providers/info/xem/main.py Normal file
@@ -0,0 +1,184 @@
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.info.base import ShowProvider
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
import traceback

log = CPLog(__name__)


class Xem(ShowProvider):
    '''
    Mapping Information
    ===================

    Single
    ------
    You will need the id / identifier of the show, e.g. the tvdb-id for American Dad! is 73141.
    The origin is the name of the site/entity the episode, season (and/or absolute) numbers are based on.

    http://thexem.de/map/single?id=&origin=&episode=&season=&absolute=

    episode, season and absolute are all optional, but it won't work unless you provide either episode and season
    OR absolute. In addition you can provide destination as the name of the wished destination; if not provided
    it will output all available destinations.

    When a destination has two or more addresses, another entry is added with an index suffix: for now the second
    address gets the index "2" (the first index is omitted), and so on.

    http://thexem.de/map/single?id=7529&origin=anidb&season=1&episode=2&destination=trakt
    {
        "result": "success",
        "data": {
            "trakt":   {"season": 1, "episode": 3, "absolute": 3},
            "trakt_2": {"season": 1, "episode": 4, "absolute": 4}
        },
        "message": "single mapping for 7529 on anidb."
    }

    All
    ---
    Basically the same as "single", just a little easier.
    The origin address is added into the output too!!

    http://thexem.de/map/all?id=7529&origin=anidb

    All Names
    ---------
    Get all names xem has to offer.
    Non-optional params: origin (an entity string like 'tvdb')
    Optional params: season, language
    - season: a season number, a list like 1,3,5, or a compare operator like ne,gt,ge,lt,le,eq plus a season
      number. Default returns all.
    - language: a language string like 'us' or 'jp'. Default is all.
    - defaultNames: 1 (yes) or 0 (no) - should the default names be added to the list? Default is 0 (no).

    http://thexem.de/map/allNames?origin=tvdb&season=le1

    {
        "result": "success",
        "data": {
            "248812": ["Dont Trust the Bitch in Apartment 23", "Don't Trust the Bitch in Apartment 23"],
            "257571": ["Nazo no Kanojo X"],
            "257875": ["Lupin III - Mine Fujiko to Iu Onna", "Lupin III Fujiko to Iu Onna", "Lupin the Third - Mine Fujiko to Iu Onna"]
        },
        "message": ""
    }
    '''

    def __init__(self):
        addEvent('show.info', self.getShowInfo, priority = 5)
        addEvent('episode.info', self.getEpisodeInfo, priority = 5)

        self.config = {}
        self.config['base_url'] = "http://thexem.de"
        self.config['url_single'] = u"%(base_url)s/map/single?" % self.config
        self.config['url_all'] = u"%(base_url)s/map/all?" % self.config
        self.config['url_names'] = u"%(base_url)s/map/names?" % self.config
        self.config['url_all_names'] = u"%(base_url)s/map/allNames?" % self.config

    # TODO: Also get show aliases (store as titles)
    def getShowInfo(self, identifier = None):
        if self.isDisabled():
            return {}

        cache_key = 'xem.cache.%s' % identifier
        log.debug('Getting showInfo: %s', cache_key)
        result = self.getCache(cache_key) or {}
        if result:
            return result

        # Create season/episode and absolute mappings
        url = self.config['url_all'] + "id=%s&origin=tvdb" % tryUrlencode(identifier)
        response = self.getJsonData(url)
        if response:
            if response.get('result') == 'success':
                data = response.get('data', None)
                result = self._parse(data)

        # Create name alias mappings
        url = self.config['url_names'] + "id=%s&origin=tvdb" % tryUrlencode(identifier)
        response = self.getJsonData(url)
        if response:
            if response.get('result') == 'success':
                data = response.get('data', None)
                result.update({'map_names': data})

        self.setCache(cache_key, result)
        return result

    def getEpisodeInfo(self, identifier = None, params = {}):
        episode = params.get('episode', None)
        if episode is None:
            return False

        season_identifier = params.get('season_identifier', None)
        if season_identifier is None:
            return False

        episode_identifier = params.get('episode_identifier', None)
        absolute = params.get('absolute', None)

        # season_identifier must contain the 'show id : season number' since there is no tvdb id
        # for season and we need a reference to both the show id and season number
        if season_identifier:
            try:
                identifier, season_identifier = season_identifier.split(':')
                season = int(season_identifier)
            except: return False

        result = self.getShowInfo(identifier)
        map = {}
        if result:
            map_episode = result.get('map_episode', {}).get(season, {}).get(episode, {})
            if map_episode:
                map.update({'map_episode': map_episode})

            if absolute:
                map_absolute = result.get('map_absolute', {}).get(absolute, {})
                if map_absolute:
                    map.update({'map_absolute': map_absolute})

            map_names = result.get('map_names', {}).get(toUnicode(season), {})
            if map_names:
                map.update({'map_names': map_names})

        return map


    def _parse(self, data, master = 'tvdb'):
        '''parses xem map and returns a custom formatted dict map

        To retrieve the map for scene numbering:
        if 'scene' in map['map_episode'][1][1]:
            print map['map_episode'][1][1]['scene']['season']
        '''
        if not isinstance(data, list):
            return {}

        map = {'map_episode': {}, 'map_absolute': {}}
|
||||
for maps in data:
|
||||
origin = maps.pop(master, None)
|
||||
if origin is None:
|
||||
continue # No master origin to map to
|
||||
map.get('map_episode').setdefault(origin['season'], {}).setdefault(origin['episode'], maps.copy())
|
||||
map.get('map_absolute').setdefault(origin['absolute'], maps.copy())
|
||||
|
||||
return map
|
||||
|
||||
def isDisabled(self):
|
||||
if __name__ == '__main__':
|
||||
return False
|
||||
if self.conf('enabled'):
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
#XXX: REMOVE, just for degugging
|
||||
def main():
|
||||
"""Simple example of using xem
|
||||
"""
|
||||
xem_instance = Xem()
|
||||
print xem_instance.getShowInfo(identifier=73141) # (American Dad)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
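For reference, a minimal standalone sketch of querying the allNames endpoint documented above, using only the standard library (the URL and response shape are taken from the docstring; the rest is illustrative, not part of this diff):

import json
import urllib2

# Ask XEM for every name it knows for TheTVDB shows, season 1 or lower.
url = 'http://thexem.de/map/allNames?origin=tvdb&season=le1'
response = json.load(urllib2.urlopen(url))

if response.get('result') == 'success':
    for show_id, names in response.get('data', {}).iteritems():
        print '%s: %s' % (show_id, ', '.join(names))

And a hypothetical sample of the dict _parse() builds from the /map/all payload, to make the lookups in getEpisodeInfo easier to follow (all values invented):

map = {
    'map_episode': {
        1: {  # tvdb season
            2: {  # tvdb episode -> the remaining numberings
                'scene': {'season': 1, 'episode': 2, 'absolute': 2},
                'anidb': {'season': 1, 'episode': 2, 'absolute': 2},
            },
        },
    },
    'map_absolute': {
        2: {  # tvdb absolute number -> the remaining numberings
            'scene': {'season': 1, 'episode': 2, 'absolute': 2},
        },
    },
}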
@@ -2,6 +2,9 @@ from bs4 import BeautifulSoup
 from couchpotato.core.helpers.encoding import tryUrlencode
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.nzb.base import NZBProvider
 from couchpotato.environment import Env
 import re
@@ -9,8 +12,12 @@ import traceback

 log = CPLog(__name__)

-class BinSearch(NZBProvider):
+class BinSearch(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+class Base(NZBProvider):

     urls = {
         'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s',
@@ -20,21 +27,9 @@ class BinSearch(NZBProvider):

     http_time_between_calls = 4 # Seconds

-    def _search(self, movie, quality, results):
+    def _search(self, media, quality, results):

-        arguments = tryUrlencode({
-            'q': movie['library']['identifier'],
-            'm': 'n',
-            'max': 400,
-            'adv_age': Env.setting('retention', 'nzb'),
-            'adv_sort': 'date',
-            'adv_col': 'on',
-            'adv_nfo': 'on',
-            'minsize': quality.get('size_min'),
-            'maxsize': quality.get('size_max'),
-        })
-
-        data = self.getHTMLData(self.urls['search'] % arguments)
+        data = self.getHTMLData(self.urls['search'] % self.buildUrl(media, quality))

         if data:
             try:
@@ -102,3 +97,50 @@ class BinSearch(NZBProvider):

         return 'try_next'

+class Movie(MovieProvider, Base):
+
+    def buildUrl(self, media, quality):
+        query = tryUrlencode({
+            'q': media['library']['identifier'], # TODO should this use library.title?
+            'm': 'n',
+            'max': 400,
+            'adv_age': Env.setting('retention', 'nzb'),
+            'adv_sort': 'date',
+            'adv_col': 'on',
+            'adv_nfo': 'on',
+            'minsize': quality.get('size_min'),
+            'maxsize': quality.get('size_max'),
+        })
+        return query
+
+class Season(SeasonProvider, Base):
+
+    def buildUrl(self, media, quality):
+        query = tryUrlencode({
+            'q': fireEvent('library.query', media['library'], single = True),
+            'm': 'n',
+            'max': 400,
+            'adv_age': Env.setting('retention', 'nzb'),
+            'adv_sort': 'date',
+            'adv_col': 'on',
+            'adv_nfo': 'on',
+            'minsize': quality.get('size_min'),
+            'maxsize': quality.get('size_max'),
+        })
+        return query
+
+class Episode(EpisodeProvider, Base):
+
+    def buildUrl(self, media, quality):
+        query = tryUrlencode({
+            'q': fireEvent('library.query', media['library'], single = True),
+            'm': 'n',
+            'max': 400,
+            'adv_age': Env.setting('retention', 'nzb'),
+            'adv_sort': 'date',
+            'adv_col': 'on',
+            'adv_nfo': 'on',
+            'minsize': quality.get('size_min'),
+            'maxsize': quality.get('size_max'),
+        })
+        return query
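As a sanity check on the refactor above, a short sketch of the query string a buildUrl like Movie's produces; urllib.urlencode stands in for CouchPotato's tryUrlencode, and the identifier, retention, and sizes are made up:

import urllib

# Roughly what Movie.buildUrl returns (parameter order may vary).
query = urllib.urlencode({
    'q': 'tt0113243',     # media['library']['identifier'] (hypothetical)
    'm': 'n',
    'max': 400,
    'adv_age': 1500,      # Env.setting('retention', 'nzb') (hypothetical)
    'adv_sort': 'date',
    'adv_col': 'on',
    'adv_nfo': 'on',
    'minsize': 700,
    'maxsize': 1500,
})
print query  # feeds self.urls['search'] % query in Base._search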
@@ -1,8 +1,10 @@
 from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
 from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.event import fireEvent
 from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt
 from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.base import ResultList
+from couchpotato.core.providers.base import MultiProvider, ResultList
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.nzb.base import NZBProvider
 from couchpotato.environment import Env
 from dateutil.parser import parse
@@ -14,40 +16,40 @@ import urllib2

 log = CPLog(__name__)

-class Newznab(NZBProvider, RSS):
+class Newznab(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+class Base(NZBProvider, RSS):

     urls = {
-        'download': 'get&id=%s',
-        'detail': 'details&id=%s',
-        'search': 'movie',
+        'download': 't=get&id=%s'
     }

     limits_reached = {}

     http_time_between_calls = 1 # Seconds

-    def search(self, movie, quality):
+    def search(self, media, quality):
         hosts = self.getHosts()

-        results = ResultList(self, movie, quality, imdb_results = True)
+        results = ResultList(self, media, quality, imdb_results = True)

         for host in hosts:
             if self.isDisabled(host):
                 continue

-            self._searchOnHost(host, movie, quality, results)
+            self._searchOnHost(host, media, quality, results)

         return results

-    def _searchOnHost(self, host, movie, quality, results):
+    def _searchOnHost(self, host, media, quality, results):

-        arguments = tryUrlencode({
-            'imdbid': movie['library']['identifier'].replace('tt', ''),
-            'apikey': host['api_key'],
-            'extended': 1
-        })
-        url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments)
+        query = self.buildUrl(media, host['api_key'])
+
+        url = '%s&%s' % (self.getUrl(host['host']), query)

         nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})

@@ -88,7 +90,7 @@ class Newznab(NZBProvider, RSS):
                 'name_extra': name_extra,
                 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                 'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
-                'url': (self.getUrl(host['host'], self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
+                'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
                'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)),
                 'content': self.getTextElement(nzb, 'description'),
                 'score': host['extra_score'],
@@ -128,11 +130,11 @@ class Newznab(NZBProvider, RSS):
             if result:
                 return result

-    def getUrl(self, host, type):
+    def getUrl(self, host):
         if '?page=newznabapi' in host:
-            return cleanHost(host)[:-1] + '&t=' + type
+            return cleanHost(host)[:-1] + '&'

-        return cleanHost(host) + 'api?t=' + type
+        return cleanHost(host) + 'api?'

     def isDisabled(self, host = None):
         return not self.isEnabled(host)
@@ -184,3 +186,45 @@ class Newznab(NZBProvider, RSS):
             log.error('Failed download from %s: %s', (host, traceback.format_exc()))

         return 'try_next'

+class Movie(MovieProvider, Base):
+
+    def buildUrl(self, media, api_key):
+        query = tryUrlencode({
+            't': 'movie',
+            'imdbid': media['library']['identifier'].replace('tt', ''),
+            'apikey': api_key,
+            'extended': 1
+        })
+        return query
+
+class Season(SeasonProvider, Base):
+
+    def buildUrl(self, media, api_key):
+        search_title = fireEvent('library.query', media['library'], include_identifier = False, single = True)
+        identifier = fireEvent('library.identifier', media['library'], single = True)
+
+        query = tryUrlencode({
+            't': 'tvsearch',
+            'q': search_title,
+            'season': identifier['season'],
+            'apikey': api_key,
+            'extended': 1
+        })
+        return query
+
+class Episode(EpisodeProvider, Base):
+
+    def buildUrl(self, media, api_key):
+        search_title = fireEvent('library.query', media['library'], include_identifier = False, single = True)
+        identifier = fireEvent('library.identifier', media['library'], single = True)
+
+        query = tryUrlencode({
+            't': 'tvsearch',
+            'q': search_title,
+            'season': identifier['season'],
+            'ep': identifier['episode'],
+            'apikey': api_key,
+            'extended': 1
+        })
+        return query
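To make the getUrl change concrete, a small sketch of how the final request URL is assembled for both host styles; clean_host is a simplified stand-in for CouchPotato's cleanHost, and the host name and API key are hypothetical:

def clean_host(host):
    # Simplified stand-in for cleanHost: ensure a trailing slash.
    return host if host.endswith('/') else host + '/'

def get_url(host):
    # Mirrors the new Base.getUrl: the 't=' parameter now lives in the query
    # built by Movie/Season/Episode.buildUrl instead of inside getUrl itself.
    if '?page=newznabapi' in host:
        return clean_host(host)[:-1] + '&'
    return clean_host(host) + 'api?'

query = 't=movie&imdbid=0113243&apikey=xxx&extended=1'  # what Movie.buildUrl might return
print '%s&%s' % (get_url('https://indexer.example'), query)
# -> https://indexer.example/api?&t=movie&imdbid=0113243&apikey=xxx&extended=1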
@@ -3,14 +3,22 @@ from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
 from couchpotato.core.helpers.rss import RSS
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.nzb.base import NZBProvider
 from dateutil.parser import parse
 import time

 log = CPLog(__name__)

-class NZBClub(NZBProvider, RSS):
+class NZBClub(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+
+class Base(NZBProvider, RSS):

     urls = {
         'search': 'http://www.nzbclub.com/nzbfeed.aspx?%s',
@@ -18,20 +26,9 @@ class NZBClub(NZBProvider, RSS):

     http_time_between_calls = 4 #seconds

-    def _searchOnTitle(self, title, movie, quality, results):
+    def _search(self, media, quality, results):

-        q = '"%s %s"' % (title, movie['library']['year'])
-
-        params = tryUrlencode({
-            'q': q,
-            'ig': 1,
-            'rpp': 200,
-            'st': 5,
-            'sp': 1,
-            'ns': 1,
-        })
-
-        nzbs = self.getRSSData(self.urls['search'] % params)
+        nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media))

         for nzb in nzbs:

@@ -78,3 +75,42 @@ class NZBClub(NZBProvider, RSS):
             return False

         return True

+class Movie(MovieProvider, Base):
+
+    def buildUrl(self, media):
+        query = tryUrlencode({
+            'q': '"%s"' % fireEvent('library.query', media['library'], single = True),
+            'ig': 1,
+            'rpp': 200,
+            'st': 5,
+            'sp': 1,
+            'ns': 1,
+        })
+        return query
+
+class Season(SeasonProvider, Base):
+
+    def buildUrl(self, media):
+        query = tryUrlencode({
+            'q': fireEvent('library.query', media['library'], single = True),
+            'ig': 1,
+            'rpp': 200,
+            'st': 5,
+            'sp': 1,
+            'ns': 1,
+        })
+        return query
+
+class Episode(EpisodeProvider, Base):
+
+    def buildUrl(self, media):
+        query = tryUrlencode({
+            'q': fireEvent('library.query', media['library'], single = True),
+            'ig': 1,
+            'rpp': 200,
+            'st': 5,
+            'sp': 1,
+            'ns': 1,
+        })
+        return query
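One subtlety in the classes above: Movie wraps the library query in literal double quotes (an exact-phrase search on NZBClub) while Season and Episode do not. A tiny sketch with invented titles, using urllib.urlencode in place of tryUrlencode:

import urllib

movie_q   = urllib.urlencode({'q': '"%s"' % 'The Big Lebowski 1998'})  # exact phrase
episode_q = urllib.urlencode({'q': 'American Dad S01E02'})             # loose match
print movie_q    # q=%22The+Big+Lebowski+1998%22
print episode_q  # q=American+Dad+S01E02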
@@ -3,6 +3,9 @@ from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
 from couchpotato.core.helpers.rss import RSS
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.nzb.base import NZBProvider
 from couchpotato.environment import Env
 from dateutil.parser import parse
@@ -11,8 +14,13 @@ import time

 log = CPLog(__name__)

-class NzbIndex(NZBProvider, RSS):
+class NzbIndex(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+
+class Base(NZBProvider, RSS):

     urls = {
         'download': 'https://www.nzbindex.com/download/',
@@ -21,28 +29,44 @@ class NzbIndex(NZBProvider, RSS):

     http_time_between_calls = 1 # Seconds

-    def _searchOnTitle(self, title, movie, quality, results):
+    def _search(self, media, quality, results):

-        q = '"%s %s" | "%s (%s)"' % (title, movie['library']['year'], title, movie['library']['year'])
-        arguments = tryUrlencode({
-            'q': q,
-            'age': Env.setting('retention', 'nzb'),
-            'sort': 'agedesc',
-            'minsize': quality.get('size_min'),
-            'maxsize': quality.get('size_max'),
-            'rating': 1,
-            'max': 250,
-            'more': 1,
-            'complete': 1,
-        })
-
-        nzbs = self.getRSSData(self.urls['search'] % arguments)
+        nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media, quality))

         for nzb in nzbs:

             enclosure = self.getElement(nzb, 'enclosure').attrib
             nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])

+            title = self.getTextElement(nzb, "title")
+
+            match = fireEvent('matcher.parse', title, parser='usenet', single = True)
+            if not match.chains:
+                log.info('Unable to parse release with title "%s"', title)
+                continue
+
+            # TODO should we consider other lower-weight chains here?
+            info = fireEvent('matcher.flatten_info', match.chains[0].info, single = True)
+
+            release_name = fireEvent('matcher.construct_from_raw', info.get('release_name'), single = True)
+
+            file_name = info.get('detail', {}).get('file_name')
+            file_name = file_name[0] if file_name else None
+
+            title = release_name or file_name
+
+            # Strip extension from parsed title (if one exists)
+            ext_pos = title.rfind('.')
+
+            # Assume extension if smaller than 4 characters
+            # TODO this should probably be done a better way
+            if len(title[ext_pos + 1:]) <= 4:
+                title = title[:ext_pos]
+
+            if not title:
+                log.info('Unable to find release name from match')
+                continue
+
             try:
                 description = self.getTextElement(nzb, "description")
             except:
@@ -57,7 +81,7 @@ class NzbIndex(NZBProvider, RSS):

             results.append({
                 'id': nzbindex_id,
-                'name': self.getTextElement(nzb, "title"),
+                'name': title,
                 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))),
                 'size': tryInt(enclosure['length']) / 1024 / 1024,
                 'url': enclosure['url'],
@@ -77,3 +101,53 @@ class NzbIndex(NZBProvider, RSS):
             except:
                 pass

+class Movie(MovieProvider, Base):
+
+    def buildUrl(self, media, quality):
+        title = fireEvent('library.query', media['library'], include_year = False, single = True)
+        year = media['library']['year']
+
+        query = tryUrlencode({
+            'q': '"%s %s" | "%s (%s)"' % (title, year, title, year),
+            'age': Env.setting('retention', 'nzb'),
+            'sort': 'agedesc',
+            'minsize': quality.get('size_min'),
+            'maxsize': quality.get('size_max'),
+            'rating': 1,
+            'max': 250,
+            'more': 1,
+            'complete': 1,
+        })
+        return query
+
+class Season(SeasonProvider, Base):
+
+    def buildUrl(self, media, quality):
+        query = tryUrlencode({
+            'q': fireEvent('library.query', media['library'], single = True),
+            'age': Env.setting('retention', 'nzb'),
+            'sort': 'agedesc',
+            'minsize': quality.get('size_min'),
+            'maxsize': quality.get('size_max'),
+            'rating': 1,
+            'max': 250,
+            'more': 1,
+            'complete': 1,
+        })
+        return query
+
+class Episode(EpisodeProvider, Base):
+
+    def buildUrl(self, media, quality):
+        query = tryUrlencode({
+            'q': fireEvent('library.query', media['library'], single = True),
+            'age': Env.setting('retention', 'nzb'),
+            'sort': 'agedesc',
+            'minsize': quality.get('size_min'),
+            'maxsize': quality.get('size_max'),
+            'rating': 1,
+            'max': 250,
+            'more': 1,
+            'complete': 1,
+        })
+        return query
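The extension-stripping heuristic added in _search above (treat whatever follows the last dot as an extension when it is 4 characters or fewer) is easy to trip at the edges, which is what the TODO hints at. A quick sketch of its behaviour on invented titles:

def strip_ext(title):
    # Same heuristic as NzbIndex._search: assume an extension if the part
    # after the last '.' is 4 characters or fewer.
    ext_pos = title.rfind('.')
    if len(title[ext_pos + 1:]) <= 4:
        return title[:ext_pos]
    return title

print strip_ext('Show.S01E01.720p.mkv')  # -> Show.S01E01.720p
print strip_ext('Show.S01E01.720p')      # -> Show.S01E01 ('720p' also looks like an extension!)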
@@ -2,12 +2,20 @@ from bs4 import BeautifulSoup
 from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.torrent.base import TorrentProvider
 import traceback

 log = CPLog(__name__)

-class BiTHDTV(TorrentProvider):
+class BiTHDTV(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+class Base(TorrentProvider):

     urls = {
         'test' : 'http://www.bit-hdtv.com/',
@@ -18,18 +26,13 @@ class BiTHDTV(TorrentProvider):
     }

-    # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
-    cat_id_movies = 7
-
     http_time_between_calls = 1 #seconds

-    def _searchOnTitle(self, title, movie, quality, results):
+    def _search(self, media, quality, results):

-        arguments = tryUrlencode({
-            'search': '%s %s' % (title.replace(':', ''), movie['library']['year']),
-            'cat': self.cat_id_movies
-        })
+        query = self.buildUrl(media)

-        url = "%s&%s" % (self.urls['search'], arguments)
+        url = "%s&%s" % (self.urls['search'], query)

         data = self.getHTMLData(url)

@@ -86,3 +89,31 @@ class BiTHDTV(TorrentProvider):
         return 'logout.php' in output.lower()

     loginCheckSuccess = loginSuccess

+# Only searches BiT-HDTV's main category, subcategory and resolution search filters appear to be broken
+class Movie(MovieProvider, Base):
+
+    def buildUrl(self, media):
+        query = tryUrlencode({
+            'search': fireEvent('library.query', media['library'], single = True),
+            'cat': 7 # Movie cat
+        })
+        return query
+
+class Season(SeasonProvider, Base):
+
+    def buildUrl(self, media):
+        query = tryUrlencode({
+            'search': fireEvent('library.query', media['library'], single = True),
+            'cat': 12 # Season cat
+        })
+        return query
+
+class Episode(EpisodeProvider, Base):
+
+    def buildUrl(self, media):
+        query = tryUrlencode({
+            'search': fireEvent('library.query', media['library'], single = True),
+            'cat': 10 # Episode cat
+        })
+        return query
@@ -2,36 +2,40 @@ from bs4 import BeautifulSoup
 from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import EpisodeProvider, SeasonProvider, MovieProvider
 from couchpotato.core.providers.torrent.base import TorrentProvider
 import traceback

 log = CPLog(__name__)


-class Bitsoup(TorrentProvider):
+class Bitsoup(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+class Base(TorrentProvider):

     urls = {
         'test': 'https://www.bitsoup.me/',
         'login' : 'https://www.bitsoup.me/takelogin.php',
         'login_check': 'https://www.bitsoup.me/my.php',
-        'search': 'https://www.bitsoup.me/browse.php?',
+        'search': 'https://www.bitsoup.me/browse.php?%s',
         'baseurl': 'https://www.bitsoup.me/%s',
     }


     http_time_between_calls = 1 #seconds

-    def _searchOnTitle(self, title, movie, quality, results):
-
-        q = '"%s" %s' % (simplifyString(title), movie['library']['year'])
-        arguments = tryUrlencode({
-            'search': q,
-        })
-        url = "%s&%s" % (self.urls['search'], arguments)
+    def _search(self, media, quality, results):
+
+        url = self.urls['search'] % self.buildUrl(media, quality)
         data = self.getHTMLData(url)

         if data:
-            html = BeautifulSoup(data)
+            html = BeautifulSoup(data, "html.parser")

             try:
                 result_table = html.find('table', attrs = {'class': 'koptekst'})
@@ -73,11 +77,11 @@ class Bitsoup(TorrentProvider):


     def getLoginParams(self):
-        return {
+        return tryUrlencode({
             'username': self.conf('username'),
             'password': self.conf('password'),
             'ssl': 'yes',
-        }
+        })


     def loginSuccess(self, output):
@@ -85,3 +89,57 @@ class Bitsoup(TorrentProvider):

     loginCheckSuccess = loginSuccess

+# Bitsoup Categories
+#     Movies
+#         Movies/3D - 17 (unused)
+#         Movies/DVD-R - 20
+#         Movies/Packs - 27 (unused)
+#         Movies/XviD - 19
+#         The site doesn't have HD Movie categories, they bundle HD under x264
+#         x264 - 41
+#     TV
+#         TV-HDx264 - 42
+#         TV-Packs - 45
+#         TV-SDx264 - 49
+#         TV-XVID - 7 (unused)
+
+class Movie(MovieProvider, Base):
+    cat_ids = [
+        ([41], ['720p', '1080p']),
+        ([20], ['dvdr']),
+        ([19], ['brrip', 'dvdrip']),
+    ]
+    cat_backup_id = 0
+
+    def buildUrl(self, media, quality):
+        query = tryUrlencode({
+            'search': '"%s" %s' % (
+                fireEvent('library.query', media['library'], include_year = False, single = True),
+                media['library']['year']
+            ),
+            'cat': self.getCatId(quality['identifier'])[0],
+        })
+        return query
+
+class Season(SeasonProvider, Base):
+    # For season bundles, bitsoup currently only has one category
+    def buildUrl(self, media, quality):
+        query = tryUrlencode({
+            'search': fireEvent('library.query', media['library'], single = True),
+            'cat': 45 # TV-Packs Category
+        })
+        return query
+
+class Episode(EpisodeProvider, Base):
+    cat_ids = [
+        ([42], ['hdtv_720p', 'webdl_720p', 'webdl_1080p', 'bdrip_1080p', 'bdrip_720p', 'brrip_1080p', 'brrip_720p']),
+        ([49], ['hdtv_sd', 'webdl_480p'])
+    ]
+    cat_backup_id = 0
+
+    def buildUrl(self, media, quality):
+        query = tryUrlencode({
+            'search': fireEvent('library.query', media['library'], single = True),
+            'cat': self.getCatId(quality['identifier'])[0],
+        })
+        return query
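The cat_ids tables above are consumed by the providers' getCatId helper, which lives in the provider base class and is not part of this diff. A minimal sketch of the presumed lookup, using Movie's table:

cat_ids = [
    ([41], ['720p', '1080p']),
    ([20], ['dvdr']),
    ([19], ['brrip', 'dvdrip']),
]
cat_backup_id = 0

def get_cat_id(quality_identifier):
    # Return the category id list whose quality list contains the identifier,
    # falling back to cat_backup_id (mirrors how getCatId is used in this diff).
    for ids, qualities in cat_ids:
        if quality_identifier in qualities:
            return ids
    return [cat_backup_id]

print get_cat_id('720p')  # -> [41]
print get_cat_id('cam')   # -> [0] (backup)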
@@ -2,42 +2,57 @@ from bs4 import BeautifulSoup
 from couchpotato.core.helpers.encoding import tryUrlencode
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.torrent.base import TorrentProvider
 import traceback

 log = CPLog(__name__)


-class IPTorrents(TorrentProvider):
+class IPTorrents(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+
+class Base(TorrentProvider):

     urls = {
         'test' : 'http://www.iptorrents.com/',
         'base_url' : 'http://www.iptorrents.com',
         'login' : 'http://www.iptorrents.com/torrents/',
         'login_check': 'http://www.iptorrents.com/inbox.php',
-        'search' : 'http://www.iptorrents.com/torrents/?l%d=1%s&q=%s&qf=ti&p=%d',
+        'search' : 'http://www.iptorrents.com/torrents/?%s%%s&q=%s&qf=ti&p=%%d',
     }

-    cat_ids = [
-        ([48], ['720p', '1080p', 'bd50']),
-        ([72], ['cam', 'ts', 'tc', 'r5', 'scr']),
-        ([7], ['dvdrip', 'brrip']),
-        ([6], ['dvdr']),
-    ]
-
     http_time_between_calls = 1 #seconds
     cat_backup_id = None

-    def _searchOnTitle(self, title, movie, quality, results):
+    def buildUrl(self, title, media, quality):
+        return self._buildUrl(title.replace(':', ''), quality['identifier'])
+
+    def _buildUrl(self, query, quality_identifier):
+
+        cat_ids = self.getCatId(quality_identifier)
+
+        if not cat_ids:
+            log.warning('Unable to find category ids for identifier "%s"', quality_identifier)
+            return None
+
+        return self.urls['search'] % ("&".join(("l%d=" % x) for x in cat_ids), tryUrlencode(query).replace('%', '%%'))
+
+    def _searchOnTitle(self, title, media, quality, results):

         freeleech = '' if not self.conf('freeleech') else '&free=on'

+        base_url = self.buildUrl(title, media, quality)
+        if not base_url: return
+
         pages = 1
         current_page = 1
         while current_page <= pages and not self.shuttingDown():

-            url = self.urls['search'] % (self.getCatId(quality['identifier'])[0], freeleech, tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), current_page)
-            data = self.getHTMLData(url)
+            data = self.getHTMLData(base_url % (freeleech, current_page))

             if data:
                 html = BeautifulSoup(data)
@@ -90,14 +105,52 @@ class IPTorrents(TorrentProvider):
             current_page += 1

     def getLoginParams(self):
-        return {
+        return tryUrlencode({
             'username': self.conf('username'),
             'password': self.conf('password'),
             'login': 'submit',
-        }
+        })

     def loginSuccess(self, output):
         return 'don\'t have an account' not in output.lower()

     def loginCheckSuccess(self, output):
         return '/logout.php' in output.lower()


+class Movie(MovieProvider, Base):
+
+    cat_ids = [
+        ([48], ['720p', '1080p', 'bd50']),
+        ([72], ['cam', 'ts', 'tc', 'r5', 'scr']),
+        ([7], ['dvdrip', 'brrip']),
+        ([6], ['dvdr']),
+    ]
+
+    def buildUrl(self, title, media, quality):
+        query = '%s %s' % (title.replace(':', ''), media['library']['year'])
+
+        return self._buildUrl(query, quality['identifier'])
+
+
+class Season(SeasonProvider, Base):
+
+    # TODO come back to this later, a better quality system needs to be created
+    cat_ids = [
+        ([65], [
+            'bluray_1080p', 'bluray_720p',
+            'bdrip_1080p', 'bdrip_720p',
+            'brrip_1080p', 'brrip_720p',
+            'webdl_1080p', 'webdl_720p', 'webdl_480p',
+            'hdtv_720p', 'hdtv_sd'
+        ]),
+    ]
+
+
+class Episode(EpisodeProvider, Base):
+
+    # TODO come back to this later, a better quality system needs to be created
+    cat_ids = [
+        ([5], ['hdtv_720p', 'webdl_720p', 'webdl_1080p']),
+        ([4, 78, 79], ['hdtv_sd'])
+    ]
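The double-percent escaping in the new search URL is the subtle part of this refactor: _buildUrl fills in the categories and the urlencoded query once, leaving %s and %d placeholders behind for the freeleech flag and the page number. A worked sketch with an invented title:

search = 'http://www.iptorrents.com/torrents/?%s%%s&q=%s&qf=ti&p=%%d'

cats = [48]
# Stage 1 (_buildUrl): categories plus the encoded query; any '%' in the
# encoded query is doubled so stage 2 cannot misread it as a placeholder.
base_url = search % ('&'.join(('l%d=' % x) for x in cats),
                     'The+Big+Lebowski+1998'.replace('%', '%%'))

# Stage 2 (_searchOnTitle): freeleech flag and current page.
print base_url % ('&free=on', 1)
# -> http://www.iptorrents.com/torrents/?l48=&free=on&q=The+Big+Lebowski+1998&qf=ti&p=1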
@@ -1,6 +1,8 @@
 from couchpotato.core.helpers.encoding import tryUrlencode
 from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts
 from couchpotato.core.logger import CPLog
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider
 from couchpotato.core.providers.torrent.base import TorrentProvider
 from dateutil.parser import parse
 import htmlentitydefs
@@ -11,8 +13,13 @@ import traceback

 log = CPLog(__name__)

-class PassThePopcorn(TorrentProvider):
+class PassThePopcorn(MultiProvider):
+
+    def getTypes(self):
+        return [Movie]
+
+
+class Base(TorrentProvider):

     urls = {
         'domain': 'https://tls.passthepopcorn.me',
@@ -25,43 +32,15 @@ class PassThePopcorn(TorrentProvider):

     http_time_between_calls = 2

-    quality_search_params = {
-        'bd50': {'media': 'Blu-ray', 'format': 'BD50'},
-        '1080p': {'resolution': '1080p'},
-        '720p': {'resolution': '720p'},
-        'brrip': {'media': 'Blu-ray'},
-        'dvdr': {'resolution': 'anysd'},
-        'dvdrip': {'media': 'DVD'},
-        'scr': {'media': 'DVD-Screener'},
-        'r5': {'media': 'R5'},
-        'tc': {'media': 'TC'},
-        'ts': {'media': 'TS'},
-        'cam': {'media': 'CAM'}
-    }
-
-    post_search_filters = {
-        'bd50': {'Codec': ['BD50']},
-        '1080p': {'Resolution': ['1080p']},
-        '720p': {'Resolution': ['720p']},
-        'brrip': {'Source': ['Blu-ray'], 'Quality': ['High Definition'], 'Container': ['!ISO']},
-        'dvdr': {'Codec': ['DVD5', 'DVD9']},
-        'dvdrip': {'Source': ['DVD'], 'Codec': ['!DVD5', '!DVD9']},
-        'scr': {'Source': ['DVD-Screener']},
-        'r5': {'Source': ['R5']},
-        'tc': {'Source': ['TC']},
-        'ts': {'Source': ['TS']},
-        'cam': {'Source': ['CAM']}
-    }
-
-    def _search(self, movie, quality, results):
+    def _search(self, media, quality, results):

-        movie_title = getTitle(movie['library'])
+        movie_title = getTitle(media['library'])
         quality_id = quality['identifier']

         params = mergeDicts(self.quality_search_params[quality_id].copy(), {
             'order_by': 'relevance',
             'order_way': 'descending',
-            'searchstr': movie['library']['identifier']
+            'searchstr': media['library']['identifier']
         })

         url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
@@ -203,3 +182,33 @@ class PassThePopcorn(TorrentProvider):
             return False

     loginCheckSuccess = loginSuccess

+class Movie(MovieProvider, Base):
+
+    quality_search_params = {
+        'bd50': {'media': 'Blu-ray', 'format': 'BD50'},
+        '1080p': {'resolution': '1080p'},
+        '720p': {'resolution': '720p'},
+        'brrip': {'media': 'Blu-ray'},
+        'dvdr': {'resolution': 'anysd'},
+        'dvdrip': {'media': 'DVD'},
+        'scr': {'media': 'DVD-Screener'},
+        'r5': {'media': 'R5'},
+        'tc': {'media': 'TC'},
+        'ts': {'media': 'TS'},
+        'cam': {'media': 'CAM'}
+    }
+
+    post_search_filters = {
+        'bd50': {'Codec': ['BD50']},
+        '1080p': {'Resolution': ['1080p']},
+        '720p': {'Resolution': ['720p']},
+        'brrip': {'Source': ['Blu-ray'], 'Quality': ['High Definition'], 'Container': ['!ISO']},
+        'dvdr': {'Codec': ['DVD5', 'DVD9']},
+        'dvdrip': {'Source': ['DVD'], 'Codec': ['!DVD5', '!DVD9']},
+        'scr': {'Source': ['DVD-Screener']},
+        'r5': {'Source': ['R5']},
+        'tc': {'Source': ['TC']},
+        'ts': {'Source': ['TS']},
+        'cam': {'Source': ['CAM']}
+    }
@@ -2,6 +2,9 @@ from bs4 import BeautifulSoup
 from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
 from urlparse import parse_qs
 import re
@@ -9,8 +12,12 @@ import traceback

 log = CPLog(__name__)

-class PublicHD(TorrentMagnetProvider):
+class PublicHD(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+class Base(TorrentMagnetProvider):

     urls = {
         'test': 'https://publichd.se',
@@ -24,13 +31,15 @@ class PublicHD(TorrentMagnetProvider):
         if not quality.get('hd', False):
             return []

-        return super(PublicHD, self).search(movie, quality)
+        return super(Base, self).search(movie, quality)

-    def _searchOnTitle(self, title, movie, quality, results):
+    def _search(self, media, quality, results):
+
+        query = self.buildUrl(media)

         params = tryUrlencode({
             'page':'torrents',
-            'search': '%s %s' % (title, movie['library']['year']),
+            'search': query,
             'active': 1,
         })

@@ -86,3 +95,18 @@ class PublicHD(TorrentMagnetProvider):

         item['description'] = description
         return item

+class Movie(MovieProvider, Base):
+
+    def buildUrl(self, media):
+        return fireEvent('library.query', media['library'], single = True)
+
+class Season(SeasonProvider, Base):
+
+    def buildUrl(self, media):
+        return fireEvent('library.query', media['library'], single = True)
+
+class Episode(EpisodeProvider, Base):
+
+    def buildUrl(self, media):
+        return fireEvent('library.query', media['library'], single = True)
@@ -1,50 +1,39 @@
 from bs4 import BeautifulSoup
 from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
 from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.event import fireEvent
 from couchpotato.core.logger import CPLog
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.torrent.base import TorrentProvider
 import traceback

 log = CPLog(__name__)


-class SceneAccess(TorrentProvider):
+class SceneAccess(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+
+class Base(TorrentProvider):

     urls = {
         'test': 'https://www.sceneaccess.eu/',
         'login': 'https://www.sceneaccess.eu/login',
         'login_check': 'https://www.sceneaccess.eu/inbox',
         'detail': 'https://www.sceneaccess.eu/details?id=%s',
-        'search': 'https://www.sceneaccess.eu/browse?method=2&c%d=%d',
+        'search': 'https://www.sceneaccess.eu/browse?c%d=%d',
+        'archive': 'https://www.sceneaccess.eu/archive?&c%d=%d',
         'download': 'https://www.sceneaccess.eu/%s',
     }

-    cat_ids = [
-        ([22], ['720p', '1080p']),
-        ([7], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
-        ([8], ['dvdr']),
-    ]
-
     http_time_between_calls = 1 #seconds

-    def _search(self, movie, quality, results):
-
-        cat = self.getCatId(quality['identifier'])
-        if not cat:
-            return
-
-        url = self.urls['search'] % (
-            cat[0],
-            cat[0]
-        )
-
-        arguments = tryUrlencode({
-            'search': movie['library']['identifier'],
-            'method': 1,
-        })
-        url = "%s&%s" % (url, arguments)
-
+    def _search(self, media, quality, results):
+
+        url = self.buildUrl(media, quality)
         data = self.getHTMLData(url)

         if data:
@@ -77,13 +66,6 @@ class SceneAccess(TorrentProvider):
         except:
             log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

-    def getLoginParams(self):
-        return {
-            'username': self.conf('username'),
-            'password': self.conf('password'),
-            'submit': 'come on in',
-        }
-
     def getMoreInfo(self, item):
         full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
         html = BeautifulSoup(full_description)
@@ -93,7 +75,79 @@ class SceneAccess(TorrentProvider):
         item['description'] = description
         return item

+    # Login
+    def getLoginParams(self):
+        return tryUrlencode({
+            'username': self.conf('username'),
+            'password': self.conf('password'),
+            'submit': 'come on in',
+        })
+
+    def loginSuccess(self, output):
+        return '/inbox' in output.lower()
+
+    loginCheckSuccess = loginSuccess
+
+
+class Movie(MovieProvider, Base):
+
+    cat_ids = [
+        ([22], ['720p', '1080p']),
+        ([7], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
+        ([8], ['dvdr']),
+    ]
+
+    def buildUrl(self, media, quality):
+        url = self.urls['search'] % (
+            self.getCatId(quality['identifier'])[0],
+            self.getCatId(quality['identifier'])[0]
+        )
+
+        arguments = tryUrlencode({
+            'search': fireEvent('library.query', media['library'], single = True),
+            'method': 2,
+        })
+        query = "%s&%s" % (url, arguments)
+
+        return query
+
+class Season(SeasonProvider, Base):
+
+    cat_ids = [
+        ([26], ['hdtv_sd', 'hdtv_720p', 'webdl_720p', 'webdl_1080p']),
+    ]
+
+    def buildUrl(self, media, quality):
+        url = self.urls['archive'] % (
+            self.getCatId(quality['identifier'])[0],
+            self.getCatId(quality['identifier'])[0]
+        )
+
+        arguments = tryUrlencode({
+            'search': fireEvent('library.query', media['library'], single = True),
+            'method': 2,
+        })
+        query = "%s&%s" % (url, arguments)
+
+        return query
+
+class Episode(EpisodeProvider, Base):
+
+    cat_ids = [
+        ([27], ['hdtv_720p', 'webdl_720p', 'webdl_1080p']),
+        ([17, 11], ['hdtv_sd'])
+    ]
+
+    def buildUrl(self, media, quality):
+        url = self.urls['search'] % (
+            self.getCatId(quality['identifier'])[0],
+            self.getCatId(quality['identifier'])[0]
+        )
+
+        arguments = tryUrlencode({
+            'search': fireEvent('library.query', media['library'], single = True),
+            'method': 2,
+        })
+        query = "%s&%s" % (url, arguments)
+
+        return query
@@ -2,27 +2,27 @@ from bs4 import BeautifulSoup
 from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
 import re
 import traceback

 log = CPLog(__name__)

-class ThePirateBay(TorrentMagnetProvider):
+class ThePirateBay(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+class Base(TorrentMagnetProvider):

     urls = {
         'detail': '%s/torrent/%s',
-        'search': '%s/search/%s/%s/7/%s'
+        'search': '%s/search/%%s/%%s/7/%%s'
     }

-    cat_ids = [
-        ([207], ['720p', '1080p']),
-        ([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
-        ([201, 207], ['brrip']),
-        ([202], ['dvdr'])
-    ]
-
     cat_backup_id = 200
     disable_provider = False
     http_time_between_calls = 0
@@ -41,15 +41,18 @@ class ThePirateBay(TorrentMagnetProvider):
         'https://kuiken.co',
     ]

-    def _searchOnTitle(self, title, movie, quality, results):
+    def _search(self, media, quality, results):

         page = 0
         total_pages = 1
         cats = self.getCatId(quality['identifier'])

+        search_url = self.urls['search'] % self.getDomain()
+
         while page < total_pages:

-            search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s" %s' % (title, movie['library']['year'])), page, ','.join(str(x) for x in cats))
+            search_url = search_url % self.buildUrl(media, page, cats)

             page += 1

             data = self.getHTMLData(search_url)
@@ -103,7 +106,7 @@ class ThePirateBay(TorrentMagnetProvider):
             log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

     def isEnabled(self):
-        return super(ThePirateBay, self).isEnabled() and self.getDomain()
+        return super(Base, self).isEnabled() and self.getDomain()

     def correctProxy(self, data):
         return 'title="Pirate Search"' in data
@@ -116,3 +119,47 @@ class ThePirateBay(TorrentMagnetProvider):

         item['description'] = description
         return item

+class Movie(MovieProvider, Base):
+
+    cat_ids = [
+        ([207], ['720p', '1080p']),
+        ([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
+        ([201, 207], ['brrip']),
+        ([202], ['dvdr'])
+    ]
+
+    def buildUrl(self, media, page, cats):
+        return (
+            tryUrlencode('"%s"' % fireEvent('library.query', media['library'], single = True)),
+            page,
+            ','.join(str(x) for x in cats)
+        )
+
+class Season(SeasonProvider, Base):
+
+    cat_ids = [
+        ([208], ['hdtv_720p', 'webdl_720p', 'webdl_1080p']),
+        ([205], ['hdtv_sd'])
+    ]
+
+    def buildUrl(self, media, page, cats):
+        return (
+            tryUrlencode('"%s"' % fireEvent('library.query', media['library'], single = True)),
+            page,
+            ','.join(str(x) for x in cats)
+        )
+
+class Episode(EpisodeProvider, Base):
+
+    cat_ids = [
+        ([208], ['hdtv_720p', 'webdl_720p', 'webdl_1080p']),
+        ([205], ['hdtv_sd'])
+    ]
+
+    def buildUrl(self, media, page, cats):
+        return (
+            tryUrlencode('"%s"' % fireEvent('library.query', media['library'], single = True)),
+            page,
+            ','.join(str(x) for x in cats)
+        )
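This file uses the same two-stage templating trick as IPTorrents: getDomain is substituted first, and the escaped %%s/%%d placeholders survive for buildUrl's tuple. A worked sketch, using a domain from the proxy list above and an invented title:

search = '%s/search/%%s/%%s/7/%%s'

# Stage 1 (_search): pick the working domain once.
search_url = search % 'https://kuiken.co'

# Stage 2 (per page): the tuple Movie.buildUrl returns.
page, cats = 0, [201, 207]
print search_url % ('%22The+Big+Lebowski%22', page, ','.join(str(x) for x in cats))
# -> https://kuiken.co/search/%22The+Big+Lebowski%22/0/7/201,207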
@@ -1,11 +1,19 @@
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.torrent.base import TorrentProvider

 log = CPLog(__name__)


-class TorrentDay(TorrentProvider):
+class TorrentDay(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+class Base(TorrentProvider):

     urls = {
         'test': 'http://www.td.af/',
@@ -16,25 +24,18 @@ class TorrentDay(TorrentProvider):
         'download': 'http://www.td.af/download.php/%s/%s',
     }

-    cat_ids = [
-        ([11], ['720p', '1080p']),
-        ([1, 21, 25], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
-        ([3], ['dvdr']),
-        ([5], ['bd50']),
-    ]
-
     http_time_between_calls = 1 #seconds

-    def _searchOnTitle(self, title, movie, quality, results):
+    def _search(self, media, quality, results):

-        q = '"%s %s"' % (title, movie['library']['year'])
+        query = self.buildUrl(media)

         data = {
             '/browse.php?': None,
             'cata': 'yes',
             'jxt': 8,
             'jxw': 'b',
-            'search': q,
+            'search': query,
         }

         data = self.getJsonData(self.urls['search'], data = data)
@@ -66,3 +67,31 @@ class TorrentDay(TorrentProvider):

     def loginCheckSuccess(self, output):
         return 'logout.php' in output.lower()

+class Movie(MovieProvider, Base):
+
+    cat_ids = [
+        ([11], ['720p', '1080p']),
+        ([1, 21, 25], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
+        ([3], ['dvdr']),
+        ([5], ['bd50']),
+    ]
+
+    def buildUrl(self, media):
+        return fireEvent('library.query', media['library'], single = True)
+
+class Season(SeasonProvider, Base):
+
+    cat_ids = [
+        ([14], ['hdtv_sd', 'hdtv_720p', 'webdl_720p', 'webdl_1080p']),
+    ]
+
+    def buildUrl(self, media):
+        return fireEvent('library.query', media['library'], single = True)
+
+class Episode(EpisodeProvider, Base):
+
+    cat_ids = [
+        ([7], ['hdtv_720p', 'webdl_720p', 'webdl_1080p']),
+        ([2, 24, 26], ['hdtv_sd'])
+    ]
+
+    def buildUrl(self, media):
+        return fireEvent('library.query', media['library'], single = True)
@@ -2,14 +2,20 @@ from bs4 import BeautifulSoup
 from couchpotato.core.helpers.encoding import tryUrlencode
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.torrent.base import TorrentProvider
 import traceback


 log = CPLog(__name__)

-class TorrentLeech(TorrentProvider):
+class TorrentLeech(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+class Base(TorrentProvider):

     urls = {
         'test' : 'http://www.torrentleech.org/',
@@ -20,22 +26,13 @@ class TorrentLeech(TorrentProvider):
         'download' : 'http://www.torrentleech.org%s',
     }

-    cat_ids = [
-        ([13], ['720p', '1080p']),
-        ([8], ['cam']),
-        ([9], ['ts', 'tc']),
-        ([10], ['r5', 'scr']),
-        ([11], ['dvdrip']),
-        ([14], ['brrip']),
-        ([12], ['dvdr']),
-    ]
-
     http_time_between_calls = 1 #seconds
     cat_backup_id = None

-    def _searchOnTitle(self, title, movie, quality, results):
+    def _search(self, media, quality, results):

-        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
+        url = self.urls['search'] % self.buildUrl(media, quality)

         data = self.getHTMLData(url)

         if data:
@@ -68,14 +65,57 @@ class TorrentLeech(TorrentProvider):
             log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

     def getLoginParams(self):
-        return {
+        return tryUrlencode({
             'username': self.conf('username'),
             'password': self.conf('password'),
             'remember_me': 'on',
             'login': 'submit',
-        }
+        })

     def loginSuccess(self, output):
         return '/user/account/logout' in output.lower() or 'welcome back' in output.lower()

     loginCheckSuccess = loginSuccess

+class Movie(MovieProvider, Base):
+
+    cat_ids = [
+        ([13], ['720p', '1080p']),
+        ([8], ['cam']),
+        ([9], ['ts', 'tc']),
+        ([10], ['r5', 'scr']),
+        ([11], ['dvdrip']),
+        ([14], ['brrip']),
+        ([12], ['dvdr']),
+    ]
+
+    def buildUrl(self, media, quality):
+        return (
+            tryUrlencode(fireEvent('library.query', media['library'], single = True)),
+            self.getCatId(quality['identifier'])[0]
+        )
+
+class Season(SeasonProvider, Base):
+
+    cat_ids = [
+        ([27], ['hdtv_sd', 'hdtv_720p', 'webdl_720p', 'webdl_1080p']),
+    ]
+
+    def buildUrl(self, media, quality):
+        return (
+            tryUrlencode(fireEvent('library.query', media['library'], single = True)),
+            self.getCatId(quality['identifier'])[0]
+        )
+
+class Episode(EpisodeProvider, Base):
+
+    cat_ids = [
+        ([32], ['hdtv_720p', 'webdl_720p', 'webdl_1080p']),
+        ([26], ['hdtv_sd'])
+    ]
+
+    def buildUrl(self, media, quality):
+        return (
+            tryUrlencode(fireEvent('library.query', media['library'], single = True)),
+            self.getCatId(quality['identifier'])[0]
+        )
@@ -1,6 +1,9 @@
 from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
 from couchpotato.core.helpers.variable import splitString, tryInt, tryFloat
 from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.providers.base import MultiProvider
+from couchpotato.core.providers.info.base import MovieProvider, SeasonProvider, EpisodeProvider
 from couchpotato.core.providers.base import ResultList
 from couchpotato.core.providers.torrent.base import TorrentProvider
 from urlparse import urlparse
@@ -9,37 +12,34 @@ import traceback

 log = CPLog(__name__)

-class TorrentPotato(TorrentProvider):
+class TorrentPotato(MultiProvider):
+
+    def getTypes(self):
+        return [Movie, Season, Episode]
+
+class Base(TorrentProvider):

     urls = {}
     limits_reached = {}

     http_time_between_calls = 1 # Seconds

-    def search(self, movie, quality):
+    def search(self, media, quality):
         hosts = self.getHosts()

-        results = ResultList(self, movie, quality, imdb_results = True)
+        results = ResultList(self, media, quality, imdb_results = True)

         for host in hosts:
             if self.isDisabled(host):
                 continue

-            self._searchOnHost(host, movie, quality, results)
+            self._searchOnHost(host, media, quality, results)

         return results

-    def _searchOnHost(self, host, movie, quality, results):
+    def _searchOnHost(self, host, media, quality, results):

-        arguments = tryUrlencode({
-            'user': host['name'],
-            'passkey': host['pass_key'],
-            'imdbid': movie['library']['identifier']
-        })
-        url = '%s?%s' % (host['host'], arguments)
-
-        torrents = self.getJsonData(url, cache_timeout = 1800)
+        torrents = self.getJsonData(self.buildUrl(media, host), cache_timeout = 1800)

         if torrents:
             try:
@@ -110,7 +110,7 @@ class TorrentPotato(TorrentProvider):
         hosts = self.getHosts()

         for host in hosts:
-            result = super(TorrentPotato, self).belongsTo(url, host = host['host'], provider = provider)
+            result = super(Base, self).belongsTo(url, host = host['host'], provider = provider)
             if result:
                 return result

@@ -127,3 +127,33 @@ class TorrentPotato(TorrentProvider):
             return False

         return TorrentProvider.isEnabled(self) and host['host'] and host['pass_key'] and int(host['use'])

+class Movie(MovieProvider, Base):
+
+    def buildUrl(self, media, host):
+        arguments = tryUrlencode({
+            'user': host['name'],
+            'passkey': host['pass_key'],
+            'imdbid': media['library']['identifier']
+        })
+        return '%s?%s' % (host['host'], arguments)
+
+class Season(SeasonProvider, Base):
+
+    def buildUrl(self, media, host):
+        arguments = tryUrlencode({
+            'user': host['name'],
+            'passkey': host['pass_key'],
+            'search': fireEvent('library.query', media['library'], single = True)
+        })
+        return '%s?%s' % (host['host'], arguments)
+
+class Episode(EpisodeProvider, Base):
+
+    def buildUrl(self, media, host):
+        arguments = tryUrlencode({
+            'user': host['name'],
+            'passkey': host['pass_key'],
+            'search': fireEvent('library.query', media['library'], single = True)
+        })
+        return '%s?%s' % (host['host'], arguments)
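The buildUrl variants above differ only in which key carries the lookup: movies pass an IMDB id, seasons and episodes pass a search string. A sketch of the resulting request URLs, with an invented host, name, and passkey, and urllib.urlencode standing in for tryUrlencode:

import urllib

host = {'host': 'https://tracker.example/torrentpotato', 'name': 'couch', 'pass_key': 'abc123'}

movie_url = '%s?%s' % (host['host'], urllib.urlencode({
    'user': host['name'], 'passkey': host['pass_key'], 'imdbid': 'tt0113243'}))
episode_url = '%s?%s' % (host['host'], urllib.urlencode({
    'user': host['name'], 'passkey': host['pass_key'], 'search': 'American Dad S01E02'}))

print movie_url    # e.g. ...?user=couch&passkey=abc123&imdbid=tt0113243 (parameter order may vary)
print episode_url  # e.g. ...?user=couch&passkey=abc123&search=American+Dad+S01E02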
@@ -1,39 +1,37 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import MultiProvider
from couchpotato.core.providers.info.base import EpisodeProvider, SeasonProvider, MovieProvider
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback

log = CPLog(__name__)


class TorrentShack(TorrentProvider):
class TorrentShack(MultiProvider):

    def getTypes(self):
        return [Movie, Season, Episode]

class Base(TorrentProvider):

    urls = {
        'test' : 'https://torrentshack.net/',
        'login' : 'https://torrentshack.net/login.php',
        'login_check': 'https://torrentshack.net/inbox.php',
        'detail' : 'https://torrentshack.net/torrent/%s',
        'search' : 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
        'search' : 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&filter_cat[%d]=1&scene=%s',
        'download' : 'https://torrentshack.net/%s',
    }

    cat_ids = [
        ([970], ['bd50']),
        ([300], ['720p', '1080p']),
        ([350], ['dvdr']),
        ([400], ['brrip', 'dvdrip']),
    ]

    http_time_between_calls = 1 #seconds
    cat_backup_id = 400

    def _searchOnTitle(self, title, movie, quality, results):
    def _search(self, media, quality, results):

        scene_only = '1' if self.conf('scene_only') else ''

        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), scene_only, self.getCatId(quality['identifier'])[0])
        url = self.urls['search'] % self.buildUrl(media, quality)
        data = self.getHTMLData(url)

        if data:
@@ -65,14 +63,76 @@ class TorrentShack(TorrentProvider):
            log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        return {
        return tryUrlencode({
            'username': self.conf('username'),
            'password': self.conf('password'),
            'keeplogged': '1',
            'login': 'Login',
        }
        })

    def loginSuccess(self, output):
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess

    def getSceneOnly(self):
        return '1' if self.conf('scene_only') else ''

class Movie(MovieProvider, Base):
    # TorrentShack movie search categories
    # Movies/x264 - 300
    # Movies/DVD-R - 350
    # Movies/XviD - 400
    # Full Blu-ray - 970
    #
    # REMUX - 320 (not included)
    # Movies-HD Pack - 982 (not included)
    # Movies-SD Pack - 983 (not included)
    cat_ids = [
        ([970], ['bd50']),
        ([300], ['720p', '1080p']),
        ([350], ['dvdr']),
        ([400], ['brrip', 'dvdrip']),
    ]
    cat_backup_id = 400

    def buildUrl(self, media, quality):
        query = (tryUrlencode(fireEvent('library.query', media['library'], single = True)),
                 self.getCatId(quality['identifier'])[0],
                 self.getSceneOnly())
        return query

class Season(SeasonProvider, Base):
    # TorrentShack tv season search categories
    # TV-SD Pack - 980
    # TV-HD Pack - 981
    # Full Blu-ray - 970
    cat_ids = [
        ([980], ['hdtv_sd']),
        ([981], ['hdtv_720p', 'webdl_720p', 'webdl_1080p', 'bdrip_1080p', 'bdrip_720p', 'brrip_1080p', 'brrip_720p']),
        ([970], ['bluray_1080p', 'bluray_720p']),
    ]
    cat_backup_id = 980

    def buildUrl(self, media, quality):
        query = (tryUrlencode(fireEvent('library.query', media['library'], single = True)),
                 self.getCatId(quality['identifier'])[0],
                 self.getSceneOnly())
        return query

class Episode(EpisodeProvider, Base):
    # TorrentShack tv episode search categories
    # TV/x264-HD - 600
    # TV/x264-SD - 620
    # TV/DVDrip - 700
    cat_ids = [
        ([600], ['hdtv_720p', 'webdl_720p', 'webdl_1080p', 'bdrip_1080p', 'bdrip_720p', 'brrip_1080p', 'brrip_720p']),
        ([620], ['hdtv_sd'])
    ]
    cat_backup_id = 620

    def buildUrl(self, media, quality):
        query = (tryUrlencode(fireEvent('library.query', media['library'], single = True)),
                 self.getCatId(quality['identifier'])[0],
                 self.getSceneOnly())
        return query

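The refactor above replaces the old single-class provider with a MultiProvider facade: getTypes() lists one Base subclass per media type, and each subclass only supplies its own cat_ids plus a buildUrl() tuple that fills the three placeholders in urls['search']. A minimal, self-contained sketch of that URL assembly (the query string, category id and scene flag below are invented stand-ins, not values from the commit):

# Sketch: how a buildUrl() tuple feeds the %-format 'search' template above.
SEARCH_TEMPLATE = 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&filter_cat[%d]=1&scene=%s'

def build_url(query, cat_id, scene_only):
    # Mirrors Movie/Season/Episode.buildUrl: a 3-tuple for the template.
    return (query, cat_id, scene_only)

print(SEARCH_TEMPLATE % build_url('Show+Name+S01', 600, '1'))
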
@@ -18,12 +18,6 @@ config = [{
            'type': 'enabler',
            'default': 0
        },
        {
            'name': 'domain',
            'advanced': True,
            'label': 'Proxy server',
            'description': 'Domain for requests, keep empty to let CouchPotato pick.',
        },
        {
            'name': 'seed_ratio',
            'label': 'Seed ratio',

@@ -1,40 +1,37 @@
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
from couchpotato.core.providers.base import MultiProvider
from couchpotato.core.providers.info.base import MovieProvider
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback

log = CPLog(__name__)

class Yify(MultiProvider):

class Yify(TorrentMagnetProvider):
    def getTypes(self):
        return [Movie]

class Base(TorrentProvider):

    urls = {
        'test' : '%s/api',
        'search' : '%s/api/list.json?keywords=%s&quality=%s',
        'detail': '%s/api/movie.json?id=%s'
        'test' : 'https://yify-torrents.com/api',
        'search' : 'https://yify-torrents.com/api/list.json?keywords=%s&quality=%s',
        'detail': 'https://yify-torrents.com/api/movie.json?id=%s'
    }

    http_time_between_calls = 1 #seconds

    proxy_list = [
        'https://yify-torrents.im',
        'http://yify.unlocktorrent.com',
        'http://yify.ftwnet.co.uk',
        'http://yify-torrents.com.come.in',
    ]

    def search(self, movie, quality):

        if not quality.get('hd', False):
            return []

        return super(Yify, self).search(movie, quality)
        return super(Base, self).search(movie, quality)

    def _search(self, movie, quality, results):

        search_url = self.urls['search'] % (self.getDomain(), movie['library']['identifier'], quality['identifier'])

        data = self.getJsonData(search_url)
        data = self.getJsonData(self.urls['search'] % (movie['library']['identifier'], quality['identifier']))

        if data and data.get('MovieList'):
            try:
@@ -50,8 +47,8 @@ class Yify(TorrentMagnetProvider):
                results.append({
                    'id': result['MovieID'],
                    'name': title,
                    'url': result['TorrentMagnetUrl'],
                    'detail_url': self.urls['detail'] % (self.getDomain(),result['MovieID']),
                    'url': result['TorrentUrl'],
                    'detail_url': self.urls['detail'] % result['MovieID'],
                    'size': self.parseSize(result['Size']),
                    'seeders': tryInt(result['TorrentSeeds']),
                    'leechers': tryInt(result['TorrentPeers'])
@@ -60,5 +57,5 @@ class Yify(TorrentMagnetProvider):
            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def correctProxy(self, data):
        return 'title="YIFY-Torrents RSS feed"' in data
class Movie(MovieProvider, Base):
    pass

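Note how this rewrite drops the proxy_list/getDomain() indirection in favour of fixed yify-torrents.com URLs: the '%s'-prefixed templates in the old code were completed with a domain chosen from proxy_list. A rough sketch of that older mechanism (get_domain below is a stand-in for the provider base-class helper; reachability checks such as correctProxy() are elided):

# Stand-in for the removed domain/proxy indirection (illustrative only).
proxy_list = [
    'https://yify-torrents.im',
    'http://yify.unlocktorrent.com',
]

def get_domain():
    # Assumed behaviour: return the first usable domain from the list.
    return proxy_list[0]

search_template = '%s/api/list.json?keywords=%s&quality=%s'
print(search_template % (get_domain(), 'tt0133093', '720p'))
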
@@ -1,3 +1,5 @@
from UserDict import DictMixin
from collections import OrderedDict
from couchpotato.core.helpers.encoding import toUnicode
from elixir.entity import Entity
from elixir.fields import Field
@@ -72,13 +74,13 @@ class MutableDict(Mutable, dict):
MutableDict.associate_with(JsonType)


class Movie(Entity):
    """Movie Resource a movie could have multiple releases
    The files belonging to the movie object are global for the whole movie
class Media(Entity):
    """Media Resource could have multiple releases
    The files belonging to the media object are global for the whole media
    such as trailers, nfo, thumbnails"""

    type = Field(String(10), default = "movie", index = True)
    last_edit = Field(Integer, default = lambda: int(time.time()), index = True)
    type = 'movie' # Compat tv branch

    library = ManyToOne('Library', cascade = 'delete, delete-orphan', single_parent = True)
    status = ManyToOne('Status')
@@ -87,23 +89,206 @@ class Movie(Entity):
    releases = OneToMany('Release', cascade = 'all, delete-orphan')
    files = ManyToMany('File', cascade = 'all, delete-orphan', single_parent = True)

Media = Movie # Compat tv branch


class Library(Entity):
    """"""
    using_options(inheritance = 'multi')

    # For Movies, CPS uses three: omdbapi (no prio !?), tmdb (prio 2) and couchpotatoapi (prio 1)
    type = Field(String(10), default = "movie", index = True)
    primary_provider = Field(String(10), default = "imdb", index = True)
    year = Field(Integer)
    identifier = Field(String(20), index = True)
    identifier = Field(String(40), index = True)

    plot = Field(UnicodeText)
    tagline = Field(UnicodeText(255))
    info = Field(JsonType)

    status = ManyToOne('Status')
    movies = OneToMany('Movie', cascade = 'all, delete-orphan')
    media = OneToMany('Media', cascade = 'all, delete-orphan')
    titles = OneToMany('LibraryTitle', cascade = 'all, delete-orphan')
    files = ManyToMany('File', cascade = 'all, delete-orphan', single_parent = True)

    parent = ManyToOne('Library')
    children = OneToMany('Library')

    def getRelated(self, include_parents = True, include_self = True, include_children = True, merge=False):
        libraries = []

        if include_parents and self.parent is not None:
            libraries += self.parent.getRelated(include_children = False)

        if include_self:
            libraries += [(self.type, self)]

        if include_children:
            for child in self.children:
                libraries += child.getRelated(include_parents = False)

        # Return plain results if we aren't merging the results
        if not merge:
            return libraries

        # Merge the results into a dict ({type: [<library>,...]})
        root_key = None
        results = {}

        for key, library in libraries:
            if root_key is None:
                root_key = key

            if key not in results:
                results[key] = []

            results[key].append(library)

        return root_key, results

    def to_dict(self, deep = None, exclude = None):
        if not exclude: exclude = []
        if not deep: deep = {}

        include_related = False
        include_root = False

        if any(x in deep for x in ['related_libraries', 'root_library']):
            deep = deep.copy()

            include_related = deep.pop('related_libraries', None) is not None
            include_root = deep.pop('root_library', None) is not None

        orig_dict = super(Library, self).to_dict(deep = deep, exclude = exclude)

        # Include related libraries (parents and children)
        if include_related:
            # Fetch child and parent libraries and determine root type
            root_key, related_libraries = self.getRelated(include_self = False, merge=True)

            # Serialize libraries
            related_libraries = dict([
                (key, [library.to_dict(deep, exclude) for library in libraries])
                for (key, libraries) in related_libraries.items()
            ])

            # Add a reference to the current library dict into related_libraries
            if orig_dict['type'] not in related_libraries:
                related_libraries[orig_dict['type']] = []

            related_libraries[orig_dict['type']].append(orig_dict)

            # Update the dict for this library
            orig_dict['related_libraries'] = related_libraries

            if include_root:
                root_library = related_libraries.get(root_key)
                orig_dict['root_library'] = root_library[0] if root_library else None

            # Add references to children
            for key, libraries in related_libraries.items():
                for library in libraries:
                    # Add related_libraries
                    library['related_libraries'] = orig_dict['related_libraries']

                    # Add root_library
                    library['root_library'] = orig_dict['root_library']

        return orig_dict


class ShowLibrary(Library, DictMixin):
    using_options(inheritance = 'multi')

    last_updated = Field(Integer, index = True)
    show_status = Field(String(10), index = True)

    # XXX: Maybe we should convert this to seconds?
    # airs_time u'21:00'
    airs_time = Field(Unicode, index = True)

    # airs_dayofweek = Field(Integer, index = True)
    # u'Monday': 1,
    # u'Tuesday': 2,
    # u'Wednesday': 4,
    # u'Thursday': 8,
    # u'Friday': 16,
    # u'Saturday': 32,
    # u'Sunday': 64,
    # u'Daily': 127,
    airs_dayofweek = Field(Integer, index = True)

    def getSeasons(self):
        data = OrderedDict()
        for c in self.children:
            data[c.season_number] = c
        return data

    def getEpisodes(self, season_number):
        data = OrderedDict()
        for c in self.children[season_number].children:
            data[c.episode_number] = c
        return data

    # Read access to season by number: library[1] for season 1
    data = {}
    def __getitem__(self, key):
        if not self.data:
            self.setData()
        if key in self.data:
            return self.data[key]
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def get(self, key, failobj = None):
        if key not in self:
            return failobj
        return self[key]
    def keys(self): return self.data.keys()
    def setData(self):
        for c in self.children:
            self.data[c.season_number] = c


class SeasonLibrary(Library, DictMixin):
    using_options(inheritance = 'multi')

    season_number = Field(Integer, index = True)
    last_updated = Field(Integer, index = True)

    def getEpisodes(self):
        data = OrderedDict()
        for c in self.children:
            data[c.episode_number] = c
        return data

    # Read access episode by number: library[1][4] for season 1, episode 4
    data = {}
    def __getitem__(self, key):
        if not self.data:
            self.setData()
        if key in self.data:
            return self.data[key]
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def get(self, key, failobj = None):
        if key not in self:
            return failobj
        return self[key]
    def keys(self): return self.data.keys()
    def setData(self):
        for c in self.children:
            self.data[c.episode_number] = c


class EpisodeLibrary(Library):
    using_options(inheritance = 'multi')

    last_updated = Field(Integer, index = True)
    season_number = Field(Integer, index = True)
    episode_number = Field(Integer, index = True)
    absolute_number = Field(Integer, index = True)


class LibraryTitle(Entity):
    """"""
@@ -133,7 +318,7 @@ class Release(Entity):
    last_edit = Field(Integer, default = lambda: int(time.time()), index = True)
    identifier = Field(String(100), index = True)

    movie = ManyToOne('Movie')
    media = ManyToOne('Media')
    status = ManyToOne('Status')
    quality = ManyToOne('Quality')
    files = ManyToMany('File')
@@ -175,7 +360,6 @@ class Status(Entity):
    label = Field(Unicode(20))

    releases = OneToMany('Release')
    movies = OneToMany('Movie')


class Quality(Entity):
@@ -202,7 +386,7 @@ class Profile(Entity):
    core = Field(Boolean, default = False)
    hide = Field(Boolean, default = False)

    movie = OneToMany('Movie')
    media = OneToMany('Media')
    types = OneToMany('ProfileType', cascade = 'all, delete-orphan')

    def to_dict(self, deep = None, exclude = None):
@@ -226,7 +410,8 @@ class Category(Entity):
    ignored = Field(Unicode(255))
    destination = Field(Unicode(255))

    movie = OneToMany('Movie')
    media = OneToMany('Media')
    destination = Field(Unicode(255))


class ProfileType(Entity):
@@ -252,7 +437,7 @@ class File(Entity):
    properties = OneToMany('FileProperty')

    history = OneToMany('RenameHistory')
    movie = ManyToMany('Movie')
    media = ManyToMany('Media')
    release = ManyToMany('Release')
    library = ManyToMany('Library')

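Library.getRelated(merge=True) above reduces (type, library) pairs to a {type: [library, ...]} dict while remembering the first type seen as the root. A standalone illustration of just that merge step, using placeholder strings instead of Library entities:

# The merge step from Library.getRelated, isolated (placeholder values).
libraries = [('show', 'lib_show'), ('season', 'lib_s1'), ('episode', 'lib_s1e4')]

root_key = None
results = {}
for key, library in libraries:
    if root_key is None:
        root_key = key
    results.setdefault(key, []).append(library)

print(root_key)  # 'show'
print(results)   # {'show': ['lib_show'], 'season': ['lib_s1'], 'episode': ['lib_s1e4']}
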
@@ -28,6 +28,8 @@ def getOptions(base_path, args):
        dest = 'config_file', help = 'Absolute or ~/ path of the settings file (default DATA_DIR/settings.conf)')
    parser.add_argument('--debug', action = 'store_true',
        dest = 'debug', help = 'Debug mode')
    parser.add_argument('--noreloader', action = 'store_false',
        dest = 'noreloader', help = 'Reloader mode')
    parser.add_argument('--console_log', action = 'store_true',
        dest = 'console_log', help = "Log to console")
    parser.add_argument('--quiet', action = 'store_true',
@@ -80,7 +82,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
    Env.set('encoding', encoding)

    # Do db stuff
    db_path = toUnicode(os.path.join(data_dir, 'couchpotato.db'))
    db_path = toUnicode(os.path.join(data_dir, 'couchpotato_v2.db'))

    # Backup before start and cleanup old databases
    new_backup = toUnicode(os.path.join(data_dir, 'db_backup', str(int(time.time()))))
@@ -152,7 +154,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
    logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Use reloader
    reloader = debug is True and development and not Env.get('desktop') and not options.daemon
    reloader = debug is True and development and not Env.get('desktop') and not options.daemon and options.noreloader is True

    # Logger
    logger = logging.getLogger()

@@ -593,4 +593,4 @@ var createSpinner = function(target, options){
    }, options);

    return new Spinner(opts).spin(target);
};
};

195
libs/caper/__init__.py
Normal file
@@ -0,0 +1,195 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from logr import Logr
from caper.matcher import FragmentMatcher
from caper.objects import CaperFragment, CaperClosure
from caper.parsers.anime import AnimeParser
from caper.parsers.scene import SceneParser
from caper.parsers.usenet import UsenetParser


__version_info__ = ('0', '3', '1')
__version_branch__ = 'master'

__version__ = "%s%s" % (
    '.'.join(__version_info__),
    '-' + __version_branch__ if __version_branch__ else ''
)


CL_START_CHARS = ['(', '[', '<', '>']
CL_END_CHARS = [')', ']', '<', '>']
CL_END_STRINGS = [' - ']

STRIP_START_CHARS = ''.join(CL_START_CHARS)
STRIP_END_CHARS = ''.join(CL_END_CHARS)
STRIP_CHARS = ''.join(['_', ' ', '.'])

FRAGMENT_SEPARATORS = ['.', '-', '_', ' ']


CL_START = 0
CL_END = 1


class Caper(object):
    def __init__(self, debug=False):
        self.debug = debug

        self.parsers = {
            'anime': AnimeParser,
            'scene': SceneParser,
            'usenet': UsenetParser
        }

    def _closure_split(self, name):
        """
        :type name: str

        :rtype: list of CaperClosure
        """

        closures = []

        def end_closure(closures, buf):
            buf = buf.strip(STRIP_CHARS)
            if len(buf) < 2:
                return

            cur = CaperClosure(len(closures), buf)
            cur.left = closures[len(closures) - 1] if len(closures) > 0 else None

            if cur.left:
                cur.left.right = cur

            closures.append(cur)

        state = CL_START
        buf = ""
        for x, ch in enumerate(name):
            # Check for start characters
            if state == CL_START and ch in CL_START_CHARS:
                end_closure(closures, buf)

                state = CL_END
                buf = ""

            buf += ch

            if state == CL_END and ch in CL_END_CHARS:
                # End character found, create the closure
                end_closure(closures, buf)

                state = CL_START
                buf = ""
            elif state == CL_START and buf[-3:] in CL_END_STRINGS:
                # End string found, create the closure
                end_closure(closures, buf[:-3])

                state = CL_START
                buf = ""

        end_closure(closures, buf)

        return closures

    def _clean_closure(self, closure):
        """
        :type closure: str

        :rtype: str
        """

        return closure.lstrip(STRIP_START_CHARS).rstrip(STRIP_END_CHARS)

    def _fragment_split(self, closures):
        """
        :type closures: list of CaperClosure

        :rtype: list of CaperClosure
        """

        cur_position = 0
        cur = None

        def end_fragment(fragments, cur, cur_position):
            cur.position = cur_position

            cur.left = fragments[len(fragments) - 1] if len(fragments) > 0 else None
            if cur.left:
                cur.left_sep = cur.left.right_sep
                cur.left.right = cur

            cur.right_sep = ch

            fragments.append(cur)

        for closure in closures:
            closure.fragments = []

            separator_buffer = ""

            for x, ch in enumerate(self._clean_closure(closure.value)):
                if not cur:
                    cur = CaperFragment(closure)

                if ch in FRAGMENT_SEPARATORS:
                    if cur.value:
                        separator_buffer = ""

                    separator_buffer += ch

                    if cur.value or not closure.fragments:
                        end_fragment(closure.fragments, cur, cur_position)
                    elif len(separator_buffer) > 1:
                        cur.value = separator_buffer.strip()

                        if cur.value:
                            end_fragment(closure.fragments, cur, cur_position)

                        separator_buffer = ""

                    # Reset
                    cur = None
                    cur_position += 1
                else:
                    cur.value += ch

            # Finish parsing the last fragment
            if cur and cur.value:
                end_fragment(closure.fragments, cur, cur_position)

            # Reset
            cur_position = 0
            cur = None

        return closures

    def parse(self, name, parser='scene'):
        closures = self._closure_split(name)
        closures = self._fragment_split(closures)

        # Print closures
        for closure in closures:
            Logr.debug("closure [%s]", closure.value)

            for fragment in closure.fragments:
                Logr.debug("\tfragment [%s]", fragment.value)

        if parser not in self.parsers:
            raise ValueError("Unknown parser")

        # TODO autodetect the parser type
        return self.parsers[parser](self.debug).run(closures)

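For orientation, the module boils down to: split the release name into bracketed closures, split closures into separator-delimited fragments, then hand the chain to one of the three parsers. A likely usage, based solely on the API above (the release name is an invented example):

# Assumed usage of the Caper facade defined above.
from caper import Caper

caper = Caper()
result = caper.parse('Show.Name.S01E05.720p.HDTV.x264-GROUP')  # 'scene' parser by default
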
134
libs/caper/constraint.py
Normal file
@@ -0,0 +1,134 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class CaptureConstraint(object):
    def __init__(self, capture_group, constraint_type, comparisons=None, target=None, **kwargs):
        """Capture constraint object

        :type capture_group: CaptureGroup
        """

        self.capture_group = capture_group

        self.constraint_type = constraint_type
        self.target = target

        self.comparisons = comparisons if comparisons else []
        self.kwargs = {}

        for orig_key, value in kwargs.items():
            key = orig_key.split('__')
            if len(key) != 2:
                self.kwargs[orig_key] = value
                continue
            name, method = key

            method = 'constraint_match_' + method
            if not hasattr(self, method):
                self.kwargs[orig_key] = value
                continue

            self.comparisons.append((name, getattr(self, method), value))

    def execute(self, parent_node, node, **kwargs):
        func_name = 'constraint_%s' % self.constraint_type

        if hasattr(self, func_name):
            return getattr(self, func_name)(parent_node, node, **kwargs)

        raise ValueError('Unknown constraint type "%s"' % self.constraint_type)

    #
    # Node Matching
    #

    def constraint_match(self, parent_node, node):
        results = []
        total_weight = 0

        for name, method, argument in self.comparisons:
            weight, success = method(node, name, argument)
            total_weight += weight
            results.append(success)

        return total_weight / (float(len(results)) or 1), all(results) if len(results) > 0 else False

    def constraint_match_eq(self, node, name, expected):
        if not hasattr(node, name):
            return 1.0, False

        return 1.0, getattr(node, name) == expected

    def constraint_match_re(self, node, name, arg):
        # Node match
        if name == 'node':
            group, minimum_weight = arg if type(arg) is tuple and len(arg) > 1 else (arg, 0)

            weight, match, num_fragments = self.capture_group.parser.matcher.fragment_match(node, group)
            return weight, weight > minimum_weight

        # Regex match
        if type(arg).__name__ == 'SRE_Pattern':
            return 1.0, arg.match(getattr(node, name)) is not None

        # Value match
        if hasattr(node, name):
            match = self.capture_group.parser.matcher.value_match(getattr(node, name), arg, single=True)
            return 1.0, match is not None

        raise ValueError("Unknown constraint match type '%s'" % name)

    #
    # Result
    #

    def constraint_result(self, parent_node, fragment):
        ctag = self.kwargs.get('tag')
        if not ctag:
            return 0, False

        ckey = self.kwargs.get('key')

        for tag, result in parent_node.captured():
            if tag != ctag:
                continue

            if not ckey or ckey in result.keys():
                return 1.0, True

        return 0.0, False

    #
    # Failure
    #

    def constraint_failure(self, parent_node, fragment, match):
        if not match or not match.success:
            return 1.0, True

        return 0, False

    #
    # Success
    #

    def constraint_success(self, parent_node, fragment, match):
        if match and match.success:
            return 1.0, True

        return 0, False

    def __repr__(self):
        return "CaptureConstraint(comparisons=%s)" % repr(self.comparisons)

284
libs/caper/group.py
Normal file
@@ -0,0 +1,284 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from logr import Logr
from caper import CaperClosure, CaperFragment
from caper.helpers import clean_dict
from caper.result import CaperFragmentNode, CaperClosureNode
from caper.step import CaptureStep
from caper.constraint import CaptureConstraint


class CaptureGroup(object):
    def __init__(self, parser, result):
        """Capture group object

        :type parser: caper.parsers.base.Parser
        :type result: caper.result.CaperResult
        """

        self.parser = parser
        self.result = result

        #: @type: list of CaptureStep
        self.steps = []

        #: type: str
        self.step_source = None

        #: @type: list of CaptureConstraint
        self.pre_constraints = []

        #: :type: list of CaptureConstraint
        self.post_constraints = []

    def capture_fragment(self, tag, regex=None, func=None, single=True, **kwargs):
        Logr.debug('capture_fragment("%s", "%s", %s, %s)', tag, regex, func, single)

        if self.step_source != 'fragment':
            if self.step_source is None:
                self.step_source = 'fragment'
            else:
                raise ValueError("Unable to mix fragment and closure capturing in a group")

        self.steps.append(CaptureStep(
            self, tag,
            'fragment',
            regex=regex,
            func=func,
            single=single,
            **kwargs
        ))

        return self

    def capture_closure(self, tag, regex=None, func=None, single=True, **kwargs):
        Logr.debug('capture_closure("%s", "%s", %s, %s)', tag, regex, func, single)

        if self.step_source != 'closure':
            if self.step_source is None:
                self.step_source = 'closure'
            else:
                raise ValueError("Unable to mix fragment and closure capturing in a group")

        self.steps.append(CaptureStep(
            self, tag,
            'closure',
            regex=regex,
            func=func,
            single=single,
            **kwargs
        ))

        return self

    def until_closure(self, **kwargs):
        self.pre_constraints.append(CaptureConstraint(self, 'match', target='closure', **kwargs))

        return self

    def until_fragment(self, **kwargs):
        self.pre_constraints.append(CaptureConstraint(self, 'match', target='fragment', **kwargs))

        return self

    def until_result(self, **kwargs):
        self.pre_constraints.append(CaptureConstraint(self, 'result', **kwargs))

        return self

    def until_failure(self, **kwargs):
        self.post_constraints.append(CaptureConstraint(self, 'failure', **kwargs))

        return self

    def until_success(self, **kwargs):
        self.post_constraints.append(CaptureConstraint(self, 'success', **kwargs))

        return self

    def parse_subject(self, parent_head, subject):
        Logr.debug("parse_subject (%s) subject: %s", self.step_source, repr(subject))

        if type(subject) is CaperClosure:
            return self.parse_closure(parent_head, subject)

        if type(subject) is CaperFragment:
            return self.parse_fragment(parent_head, subject)

        raise ValueError('Unknown subject (%s)', subject)

    def parse_fragment(self, parent_head, subject):
        parent_node = parent_head[0] if type(parent_head) is list else parent_head

        nodes, match = self.match(parent_head, parent_node, subject)

        # Capturing broke on constraint, return now
        if not match:
            return nodes

        Logr.debug('created fragment node with subject.value: "%s"' % subject.value)

        result = [CaperFragmentNode(
            parent_node.closure,
            subject.take_right(match.num_fragments),
            parent_head,
            match
        )]

        # Branch if the match was indefinite (weight below 1.0)
        if match.result and match.weight < 1.0:
            if match.num_fragments == 1:
                result.append(CaperFragmentNode(parent_node.closure, [subject], parent_head))
            else:
                nodes.append(CaperFragmentNode(parent_node.closure, [subject], parent_head))

        nodes.append(result[0] if len(result) == 1 else result)

        return nodes

    def parse_closure(self, parent_head, subject):
        parent_node = parent_head[0] if type(parent_head) is list else parent_head

        nodes, match = self.match(parent_head, parent_node, subject)

        # Capturing broke on constraint, return now
        if not match:
            return nodes

        Logr.debug('created closure node with subject.value: "%s"' % subject.value)

        result = [CaperClosureNode(
            subject,
            parent_head,
            match
        )]

        # Branch if the match was indefinite (weight below 1.0)
        if match.result and match.weight < 1.0:
            if match.num_fragments == 1:
                result.append(CaperClosureNode(subject, parent_head))
            else:
                nodes.append(CaperClosureNode(subject, parent_head))

        nodes.append(result[0] if len(result) == 1 else result)

        return nodes

    def match(self, parent_head, parent_node, subject):
        nodes = []

        # Check pre constraints
        broke, definite = self.check_constraints(self.pre_constraints, parent_head, subject)

        if broke:
            nodes.append(parent_head)

            if definite:
                return nodes, None

        # Try match subject against the steps available
        match = None

        for step in self.steps:
            if step.source == 'closure' and type(subject) is not CaperClosure:
                pass
            elif step.source == 'fragment' and type(subject) is CaperClosure:
                Logr.debug('Closure encountered on fragment step, jumping into fragments')
                return [CaperClosureNode(subject, parent_head, None)], None

            match = step.execute(subject)

            if match.success:
                if type(match.result) is dict:
                    match.result = clean_dict(match.result)

                Logr.debug('Found match with weight %s, match: %s, num_fragments: %s' % (
                    match.weight, match.result, match.num_fragments
                ))

                step.matched = True

                break

        if all([step.single and step.matched for step in self.steps]):
            Logr.debug('All steps completed, group finished')
            parent_node.finished_groups.append(self)
            return nodes, match

        # Check post constraints
        broke, definite = self.check_constraints(self.post_constraints, parent_head, subject, match=match)
        if broke:
            return nodes, None

        return nodes, match

    def check_constraints(self, constraints, parent_head, subject, **kwargs):
        parent_node = parent_head[0] if type(parent_head) is list else parent_head

        # Check constraints
        for constraint in [c for c in constraints if c.target == subject.__key__ or not c.target]:
            Logr.debug("Testing constraint %s against subject %s", repr(constraint), repr(subject))

            weight, success = constraint.execute(parent_node, subject, **kwargs)

            if success:
                Logr.debug('capturing broke on "%s" at %s', subject.value, constraint)
                parent_node.finished_groups.append(self)

                return True, weight == 1.0

        return False, None

    def execute(self):
        heads_finished = None

        while heads_finished is None or not (len(heads_finished) == len(self.result.heads) and all(heads_finished)):
            heads_finished = []

            heads = self.result.heads
            self.result.heads = []

            for head in heads:
                node = head[0] if type(head) is list else head

                if self in node.finished_groups:
                    Logr.debug("head finished for group")
                    self.result.heads.append(head)
                    heads_finished.append(True)
                    continue

                Logr.debug('')

                Logr.debug(node)

                next_subject = node.next()

                Logr.debug('----------[%s] (%s)----------' % (next_subject, repr(next_subject.value) if next_subject else None))

                if next_subject:
                    for node_result in self.parse_subject(head, next_subject):
                        self.result.heads.append(node_result)

                    Logr.debug('Heads: %s', self.result.heads)

                heads_finished.append(self in node.finished_groups or next_subject is None)

            if len(self.result.heads) == 0:
                self.result.heads = heads

            Logr.debug("heads_finished: %s, self.result.heads: %s", heads_finished, self.result.heads)

        Logr.debug("group finished")

80
libs/caper/helpers.py
Normal file
@@ -0,0 +1,80 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys


PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3


def is_list_type(obj, element_type):
    if not type(obj) is list:
        return False

    if len(obj) < 1:
        raise ValueError("Unable to determine list element type from empty list")

    return type(obj[0]) is element_type


def clean_dict(target, remove=None):
    """Recursively remove items matching a value 'remove' from the dictionary

    :type target: dict
    """
    if type(target) is not dict:
        raise ValueError("Target is required to be a dict")

    remove_keys = []
    for key in target.keys():
        if type(target[key]) is not dict:
            if target[key] == remove:
                remove_keys.append(key)
        else:
            clean_dict(target[key], remove)

    for key in remove_keys:
        target.pop(key)

    return target


def update_dict(a, b):
    for key, value in b.items():
        if key not in a:
            a[key] = value
        elif isinstance(a[key], dict) and isinstance(value, dict):
            update_dict(a[key], value)
        elif isinstance(a[key], list):
            a[key].append(value)
        else:
            a[key] = [a[key], value]


def xrange_six(start, stop=None, step=None):
    if stop is not None and step is not None:
        if PY3:
            return range(start, stop, step)
        else:
            return xrange(start, stop, step)
    else:
        if PY3:
            return range(start)
        else:
            return xrange(start)


def delta_seconds(td):
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6

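clean_dict and update_dict above have slightly unusual semantics worth pinning down: clean_dict strips keys equal to the remove value recursively, while update_dict collects clashing scalar values into a list rather than overwriting. A quick illustration (assuming the module is importable as caper.helpers):

from caper.helpers import clean_dict, update_dict

d = {'a': 1, 'b': None, 'c': {'d': None, 'e': 2}}
print(clean_dict(d))   # {'a': 1, 'c': {'e': 2}} - None entries removed recursively

a = {'x': 1}
update_dict(a, {'x': 2, 'y': 3})
print(a)               # {'x': [1, 2], 'y': 3} - clash collected into a list
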
144
libs/caper/matcher.py
Normal file
@@ -0,0 +1,144 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from caper.helpers import is_list_type, update_dict, delta_seconds
from datetime import datetime
from logr import Logr
import re


class FragmentMatcher(object):
    def __init__(self, pattern_groups):
        self.regex = {}

        self.construct_patterns(pattern_groups)

    def construct_patterns(self, pattern_groups):
        compile_start = datetime.now()
        compile_count = 0

        for group_name, patterns in pattern_groups:
            if group_name not in self.regex:
                self.regex[group_name] = []

            # Transform into weight groups
            if type(patterns[0]) is str or type(patterns[0][0]) not in [int, float]:
                patterns = [(1.0, patterns)]

            for weight, patterns in patterns:
                weight_patterns = []

                for pattern in patterns:
                    # Transform into multi-fragment patterns
                    if type(pattern) is str:
                        pattern = (pattern,)

                    if type(pattern) is tuple and len(pattern) == 2:
                        if type(pattern[0]) is str and is_list_type(pattern[1], str):
                            pattern = (pattern,)

                    result = []
                    for value in pattern:
                        if type(value) is tuple:
                            if len(value) == 2:
                                # Construct OR-list pattern
                                value = value[0] % '|'.join(value[1])
                            elif len(value) == 1:
                                value = value[0]

                        result.append(re.compile(value, re.IGNORECASE))
                        compile_count += 1

                    weight_patterns.append(tuple(result))

                self.regex[group_name].append((weight, weight_patterns))

        Logr.info("Compiled %s patterns in %ss", compile_count, delta_seconds(datetime.now() - compile_start))

    def find_group(self, name):
        for group_name, weight_groups in self.regex.items():
            if group_name and group_name == name:
                return group_name, weight_groups

        return None, None

    def value_match(self, value, group_name=None, single=True):
        result = None

        for group, weight_groups in self.regex.items():
            if group_name and group != group_name:
                continue

            # TODO handle multiple weights
            weight, patterns = weight_groups[0]

            for pattern in patterns:
                match = pattern[0].match(value)
                if not match:
                    continue

                if result is None:
                    result = {}
                if group not in result:
                    result[group] = {}

                result[group].update(match.groupdict())

                if single:
                    return result

        return result

    def fragment_match(self, fragment, group_name=None):
        """Follow a fragment chain to try find a match

        :type fragment: caper.objects.CaperFragment
        :type group_name: str or None

        :return: The weight of the match found between 0.0 and 1.0,
                 where 1.0 means perfect match and 0.0 means no match
        :rtype: (float, dict, int)
        """

        group_name, weight_groups = self.find_group(group_name)

        for weight, patterns in weight_groups:
            for pattern in patterns:
                cur_fragment = fragment
                success = True
                result = {}

                # Ignore empty patterns
                if len(pattern) < 1:
                    break

                for fragment_pattern in pattern:
                    if not cur_fragment:
                        success = False
                        break

                    match = fragment_pattern.match(cur_fragment.value)
                    if match:
                        update_dict(result, match.groupdict())
                    else:
                        success = False
                        break

                    cur_fragment = cur_fragment.right if cur_fragment else None

                if success:
                    Logr.debug("Found match with weight %s" % weight)
                    return float(weight), result, len(pattern)

        return 0.0, None, 1

124
libs/caper/objects.py
Normal file
@@ -0,0 +1,124 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from caper.helpers import xrange_six


class CaperClosure(object):
    __key__ = 'closure'

    def __init__(self, index, value):
        #: :type: int
        self.index = index

        #: :type: str
        self.value = value

        #: :type: CaperClosure
        self.left = None
        #: :type: CaperClosure
        self.right = None

        #: :type: list of CaperFragment
        self.fragments = []

    def __str__(self):
        return "<CaperClosure value: %s" % repr(self.value)

    def __repr__(self):
        return self.__str__()


class CaperFragment(object):
    __key__ = 'fragment'

    def __init__(self, closure=None):
        #: :type: CaperClosure
        self.closure = closure

        #: :type: str
        self.value = ""

        #: :type: CaperFragment
        self.left = None
        #: :type: str
        self.left_sep = None

        #: :type: CaperFragment
        self.right = None
        #: :type: str
        self.right_sep = None

        #: :type: int
        self.position = None

    def take(self, direction, count, include_self=True):
        if direction not in ['left', 'right']:
            raise ValueError('Unexpected value for "direction", expected "left" or "right".')

        result = []

        if include_self:
            result.append(self)
            count -= 1

        cur = self
        for x in xrange_six(count):
            if cur and getattr(cur, direction):
                cur = getattr(cur, direction)
                result.append(cur)
            else:
                result.append(None)
                cur = None

        return result

    def take_left(self, count, include_self=True):
        return self.take('left', count, include_self)

    def take_right(self, count, include_self=True):
        return self.take('right', count, include_self)

    def __str__(self):
        return "<CaperFragment value: %s" % repr(self.value)

    def __repr__(self):
        return self.__str__()


class CaptureMatch(object):
    def __init__(self, tag, step, success=False, weight=None, result=None, num_fragments=1):
        #: :type: bool
        self.success = success

        #: :type: float
        self.weight = weight

        #: :type: dict or str
        self.result = result

        #: :type: int
        self.num_fragments = num_fragments

        #: :type: str
        self.tag = tag

        #: :type: CaptureStep
        self.step = step

    def __str__(self):
        return "<CaperMatch result: %s>" % repr(self.result)

    def __repr__(self):
        return self.__str__()

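The fragment chain is a doubly linked list, and take_right() pads with None when it runs off the end, which is what lets the matcher's multi-fragment patterns fail cleanly. A small demonstration wired up by hand:

from caper.objects import CaperFragment

a, b, c = CaperFragment(), CaperFragment(), CaperFragment()
a.value, b.value, c.value = 'Show', 'Name', 'S01E05'
a.right, b.left = b, a
b.right, c.left = c, b

print([f.value if f else None for f in a.take_right(2)])  # ['Show', 'Name']
print([f.value if f else None for f in c.take_right(2)])  # ['S01E05', None]
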
0
libs/caper/parsers/__init__.py
Normal file
88
libs/caper/parsers/anime.py
Normal file
@@ -0,0 +1,88 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from caper.parsers.base import Parser


REGEX_GROUP = re.compile(r'(\(|\[)(?P<group>.*?)(\)|\])', re.IGNORECASE)


PATTERN_GROUPS = [
    ('identifier', [
        r'S(?P<season>\d+)E(?P<episode>\d+)',
        r'(S(?P<season>\d+))|(E(?P<episode>\d+))',

        r'Ep(?P<episode>\d+)',
        r'$(?P<absolute>\d+)^',

        (r'Episode', r'(?P<episode>\d+)'),
    ]),
    ('video', [
        (r'(?P<h264_profile>%s)', [
            'Hi10P'
        ]),
        (r'.(?P<resolution>%s)', [
            '720p',
            '1080p',

            '960x720',
            '1920x1080'
        ]),
        (r'(?P<source>%s)', [
            'BD'
        ]),
    ]),
    ('audio', [
        (r'(?P<codec>%s)', [
            'FLAC'
        ]),
    ])
]


class AnimeParser(Parser):
    def __init__(self, debug=False):
        super(AnimeParser, self).__init__(PATTERN_GROUPS, debug)

    def capture_group(self, fragment):
        match = REGEX_GROUP.match(fragment.value)

        if not match:
            return None

        return match.group('group')

    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """

        self.setup(closures)

        self.capture_closure('group', func=self.capture_group)\
            .execute(once=True)

        self.capture_fragment('show_name', single=False)\
            .until_fragment(value__re='identifier')\
            .until_fragment(value__re='video')\
            .execute()

        self.capture_fragment('identifier', regex='identifier') \
            .capture_fragment('video', regex='video', single=False) \
            .capture_fragment('audio', regex='audio', single=False) \
            .execute()

        self.result.build()
        return self.result

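REGEX_GROUP above grabs the leading '[SubGroup]'-style closure; a two-line check of what it captures:

import re

REGEX_GROUP = re.compile(r'(\(|\[)(?P<group>.*?)(\)|\])', re.IGNORECASE)
print(REGEX_GROUP.match('[HorribleSubs]').group('group'))  # HorribleSubs
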
84
libs/caper/parsers/base.py
Normal file
@@ -0,0 +1,84 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from caper import FragmentMatcher
from caper.group import CaptureGroup
from caper.result import CaperResult, CaperClosureNode, CaperRootNode
from logr import Logr


class Parser(object):
    def __init__(self, matcher, debug=False):
        self.debug = debug

        self.matcher = matcher

        self.closures = None
        #: :type: caper.result.CaperResult
        self.result = None

        self._match_cache = None
        self._fragment_pos = None
        self._closure_pos = None
        self._history = None

        self.reset()

    def reset(self):
        self.closures = None
        self.result = CaperResult()

        self._match_cache = {}
        self._fragment_pos = -1
        self._closure_pos = -1
        self._history = []

    def setup(self, closures):
        """
        :type closures: list of CaperClosure
        """

        self.reset()
        self.closures = closures

        self.result.heads = [CaperRootNode(closures[0])]

    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """

        raise NotImplementedError()

    #
    # Capture Methods
    #

    def capture_fragment(self, tag, regex=None, func=None, single=True, **kwargs):
        return CaptureGroup(self, self.result).capture_fragment(
            tag,
            regex=regex,
            func=func,
            single=single,
            **kwargs
        )

    def capture_closure(self, tag, regex=None, func=None, single=True, **kwargs):
        return CaptureGroup(self, self.result).capture_closure(
            tag,
            regex=regex,
            func=func,
            single=single,
            **kwargs
        )

230
libs/caper/parsers/scene.py
Normal file
@@ -0,0 +1,230 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from logr import Logr
from caper import FragmentMatcher
from caper.parsers.base import Parser
from caper.result import CaperFragmentNode


PATTERN_GROUPS = [
    ('identifier', [
        (1.0, [
            # S01E01-E02
            ('^S(?P<season>\d+)E(?P<episode_from>\d+)$', '^E(?P<episode_to>\d+)$'),
            # 'S03 E01 to E08' or 'S03 E01 - E09'
            ('^S(?P<season>\d+)$', '^E(?P<episode_from>\d+)$', '^(to|-)$', '^E(?P<episode_to>\d+)$'),
            # 'E01 to E08' or 'E01 - E09'
            ('^E(?P<episode_from>\d+)$', '^(to|-)$', '^E(?P<episode_to>\d+)$'),

            # S01-S03
            ('^S(?P<season_from>\d+)$', '^S(?P<season_to>\d+)$'),

            # S02E13
            r'^S(?P<season>\d+)E(?P<episode>\d+)$',
            # S01 E13
            (r'^(S(?P<season>\d+))$', r'^(E(?P<episode>\d+))$'),
            # S02
            # E13
            r'^((S(?P<season>\d+))|(E(?P<episode>\d+)))$',
            # 3x19
            r'^(?P<season>\d+)x(?P<episode>\d+)$',

            # 2013.09.15
            (r'^(?P<year>\d{4})$', r'^(?P<month>\d{2})$', r'^(?P<day>\d{2})$'),
            # 09.15.2013
            (r'^(?P<month>\d{2})$', r'^(?P<day>\d{2})$', r'^(?P<year>\d{4})$'),
            # TODO - US/UK Date Format Conflict? will only support US format for now..
            # 15.09.2013
            #(r'^(?P<day>\d{2})$', r'^(?P<month>\d{2})$', r'^(?P<year>\d{4})$'),
            # 130915
            r'^(?P<year_short>\d{2})(?P<month>\d{2})(?P<day>\d{2})$',

            # Season 3 Episode 14
            (r'^Se(ason)?$', r'^(?P<season>\d+)$', r'^Ep(isode)?$', r'^(?P<episode>\d+)$'),
            # Season 3
            (r'^Se(ason)?$', r'^(?P<season>\d+)$'),
            # Episode 14
            (r'^Ep(isode)?$', r'^(?P<episode>\d+)$'),

            # Part.3
            # Part.1.and.Part.3
            ('^Part$', '(?P<part>\d+)'),

            r'(?P<extra>Special)',
            r'(?P<country>NZ|AU|US|UK)'
        ]),
        (0.8, [
            # 100 - 1899, 2100 - 9999 (skips 1900 to 2099 - so we don't get years by mistake)
            # TODO - Update this pattern on 31 Dec 2099
            r'^(?P<season>([1-9])|(1[0-8])|(2[1-9])|([3-9][0-9]))(?P<episode>\d{2})$'
        ]),
        (0.5, [
            # 100 - 9999
            r'^(?P<season>([1-9])|([1-9][0-9]))(?P<episode>\d{2})$'
        ])
    ]),

    ('video', [
        r'(?P<aspect>FS|WS)',

        (r'(?P<resolution>%s)', [
            '480p',
            '720p',
            '1080p'
        ]),

        #
        # Source
        #

        (r'(?P<source>%s)', [
            'DVDRiP',
            # HDTV
            'HDTV',
            'PDTV',
            'DSR',
            # WEB
            'WEBRip',
            'WEBDL',
            # BluRay
            'BluRay',
            'B(D|R)Rip',
            # DVD
            'DVDR',
            'DVD9',
            'DVD5'
        ]),

        # For multi-fragment 'WEB-DL', 'WEB-Rip', etc... matches
        ('(?P<source>WEB)', '(?P<source>DL|Rip)'),

        #
        # Codec
        #

        (r'(?P<codec>%s)', [
            'x264',
            'XViD',
            'H264',
            'AVC'
        ]),

        # For multi-fragment 'H 264' tags
        ('(?P<codec>H)', '(?P<codec>264)'),
    ]),

    ('dvd', [
        r'D(ISC)?(?P<disc>\d+)',

        r'R(?P<region>[0-8])',

        (r'(?P<encoding>%s)', [
            'PAL',
            'NTSC'
        ]),
    ]),

    ('audio', [
        (r'(?P<codec>%s)', [
            'AC3',
            'TrueHD'
        ]),

        (r'(?P<language>%s)', [
            'GERMAN',
            'DUTCH',
            'FRENCH',
            'SWEDiSH',
            'DANiSH',
            'iTALiAN'
        ]),
    ]),

    ('scene', [
        r'(?P<proper>PROPER|REAL)',
    ])
]


class SceneParser(Parser):
    matcher = None

    def __init__(self, debug=False):
        if not SceneParser.matcher:
            SceneParser.matcher = FragmentMatcher(PATTERN_GROUPS)
            Logr.info("Fragment matcher for %s created", self.__class__.__name__)

        super(SceneParser, self).__init__(SceneParser.matcher, debug)

    def capture_group(self, fragment):
        if fragment.closure.index + 1 != len(self.closures):
            return None

        if fragment.left_sep != '-' or fragment.right:
            return None

        return fragment.value

    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """

        self.setup(closures)

        self.capture_fragment('show_name', single=False)\
            .until_fragment(node__re='identifier')\
            .until_fragment(node__re='video')\
            .until_fragment(node__re='dvd')\
            .until_fragment(node__re='audio')\
            .until_fragment(node__re='scene')\
            .execute()

        self.capture_fragment('identifier', regex='identifier', single=False)\
            .capture_fragment('video', regex='video', single=False)\
            .capture_fragment('dvd', regex='dvd', single=False)\
            .capture_fragment('audio', regex='audio', single=False)\
            .capture_fragment('scene', regex='scene', single=False)\
            .until_fragment(left_sep__eq='-', right__eq=None)\
            .execute()

        self.capture_fragment('group', func=self.capture_group)\
            .execute()

        self.print_tree(self.result.heads)

        self.result.build()
        return self.result

    def print_tree(self, heads):
        if not self.debug:
            return

        for head in heads:
            head = head if type(head) is list else [head]

            if type(head[0]) is CaperFragmentNode:
                for fragment in head[0].fragments:
                    Logr.debug(fragment.value)
            else:
                Logr.debug(head[0].closure.value)

            for node in head:
                Logr.debug('\t' + str(node).ljust(55) + '\t' + (
                    str(node.match.weight) + '\t' + str(node.match.result)
                ) if node.match else '')

            if len(head) > 0 and head[0].parent:
                self.print_tree([head[0].parent])

115 libs/caper/parsers/usenet.py Normal file
@@ -0,0 +1,115 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from logr import Logr
from caper import FragmentMatcher
from caper.parsers.base import Parser


PATTERN_GROUPS = [
    ('usenet', [
        r'\[(?P<group>#[\w\.@]+)\]',
        r'^\[(?P<code>\w+)\]$',
        r'\[(?P<full>FULL)\]',
        r'\[\s?(?P<group>TOWN)\s?\]',
        r'(.*?\s)?[_\W]*(?P<site>www\..*?\.[a-z0-9]+)[_\W]*(.*?\s)?',
        r'(.*?\s)?[_\W]*(?P<site>(www\.)?[-\w]+\.(com|org|info))[_\W]*(.*?\s)?'
    ]),

    ('part', [
        r'.?(?P<current>\d+)/(?P<total>\d+).?'
    ]),

    ('detail', [
        r'[\s-]*\w*?[\s-]*\"(?P<file_name>.*?)\"[\s-]*\w*?[\s-]*(?P<size>[\d,\.]*\s?MB)?[\s-]*(?P<extra>yEnc)?',
        r'(?P<size>[\d,\.]*\s?MB)[\s-]*(?P<extra>yEnc)',
        r'(?P<size>[\d,\.]*\s?MB)|(?P<extra>yEnc)'
    ])
]


class UsenetParser(Parser):
    matcher = None

    def __init__(self, debug=False):
        if not UsenetParser.matcher:
            UsenetParser.matcher = FragmentMatcher(PATTERN_GROUPS)
            Logr.info("Fragment matcher for %s created", self.__class__.__name__)

        super(UsenetParser, self).__init__(UsenetParser.matcher, debug)

    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """

        self.setup(closures)

        # Capture usenet or part info until we get a part or matching fails
        self.capture_closure('usenet', regex='usenet', single=False)\
            .capture_closure('part', regex='part', single=True) \
            .until_result(tag='part') \
            .until_failure()\
            .execute()

        is_town_release, has_part = self.get_state()

        if not is_town_release:
            self.capture_release_name()

        # If we already have the part (TOWN releases), ignore matching part again
        if not is_town_release and not has_part:
            self.capture_fragment('part', regex='part', single=True)\
                .until_closure(node__re='usenet')\
                .until_success()\
                .execute()

        # Capture any leftover details
        self.capture_closure('usenet', regex='usenet', single=False)\
            .capture_closure('detail', regex='detail', single=False)\
            .execute()

        self.result.build()
        return self.result

    def capture_release_name(self):
        self.capture_closure('detail', regex='detail', single=False)\
            .until_failure()\
            .execute()

        self.capture_fragment('release_name', single=False, include_separators=True) \
            .until_closure(node__re='usenet') \
            .until_closure(node__re='detail') \
            .until_closure(node__re='part') \
            .until_fragment(value__eq='-')\
            .execute()

        # Capture any detail after the release name
        self.capture_closure('detail', regex='detail', single=False)\
            .until_failure()\
            .execute()

    def get_state(self):
        # TODO multiple-chains?
        is_town_release = False
        has_part = False

        for tag, result in self.result.heads[0].captured():
            if tag == 'usenet' and result.get('group') == 'TOWN':
                is_town_release = True

            if tag == 'part':
                has_part = True

        return is_town_release, has_part
213 libs/caper/result.py Normal file
@@ -0,0 +1,213 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
from logr import Logr


GROUP_MATCHES = ['identifier']


class CaperNode(object):
    def __init__(self, closure, parent=None, match=None):
        """
        :type parent: CaperNode
        :type match: CaptureMatch
        """

        #: :type: caper.objects.CaperClosure
        self.closure = closure

        #: :type: CaperNode
        self.parent = parent

        #: :type: CaptureMatch
        self.match = match

        #: :type: list of CaptureGroup
        self.finished_groups = []

    def next(self):
        raise NotImplementedError()

    def captured(self):
        cur = self

        if cur.match:
            yield cur.match.tag, cur.match.result

        while cur.parent:
            cur = cur.parent

            if cur.match:
                yield cur.match.tag, cur.match.result


class CaperRootNode(CaperNode):
    def __init__(self, closure):
        """
        :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
        """
        super(CaperRootNode, self).__init__(closure)

    def next(self):
        return self.closure


class CaperClosureNode(CaperNode):
    def __init__(self, closure, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
        """
        super(CaperClosureNode, self).__init__(closure, parent, match)

    def next(self):
        if not self.closure:
            return None

        if self.match:
            # Jump to next closure if we have a match
            return self.closure.right
        elif len(self.closure.fragments) > 0:
            # Otherwise parse the fragments
            return self.closure.fragments[0]

        return None

    def __str__(self):
        return "<CaperClosureNode match: %s>" % repr(self.match)

    def __repr__(self):
        return self.__str__()


class CaperFragmentNode(CaperNode):
    def __init__(self, closure, fragments, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure
        :type fragments: list of caper.objects.CaperFragment
        """
        super(CaperFragmentNode, self).__init__(closure, parent, match)

        #: :type: caper.objects.CaperFragment or list of caper.objects.CaperFragment
        self.fragments = fragments

    def next(self):
        if len(self.fragments) > 0 and self.fragments[-1] and self.fragments[-1].right:
            return self.fragments[-1].right

        if self.closure.right:
            return self.closure.right

        return None

    def __str__(self):
        return "<CaperFragmentNode match: %s>" % repr(self.match)

    def __repr__(self):
        return self.__str__()


class CaperResult(object):
    def __init__(self):
        #: :type: list of CaperNode
        self.heads = []

        self.chains = []

    def build(self):
        max_matched = 0

        for head in self.heads:
            for chain in self.combine_chain(head):
                if chain.num_matched > max_matched:
                    max_matched = chain.num_matched

                self.chains.append(chain)

        for chain in self.chains:
            chain.weights.append(chain.num_matched / float(max_matched or chain.num_matched or 1))
            chain.finish()

        self.chains.sort(key=lambda chain: chain.weight, reverse=True)

        for chain in self.chains:
            Logr.debug("chain weight: %.02f", chain.weight)
            Logr.debug("\tInfo: %s", chain.info)

            Logr.debug("\tWeights: %s", chain.weights)
            Logr.debug("\tNumber of Fragments Matched: %s", chain.num_matched)

    def combine_chain(self, subject, chain=None):
        nodes = subject if type(subject) is list else [subject]

        if chain is None:
            chain = CaperResultChain()

        result = []

        for x, node in enumerate(nodes):
            node_chain = chain if x == len(nodes) - 1 else chain.copy()

            if not node.parent:
                result.append(node_chain)
                continue

            node_chain.update(node)
            result.extend(self.combine_chain(node.parent, node_chain))

        return result


class CaperResultChain(object):
    def __init__(self):
        #: :type: float
        self.weight = None
        self.info = {}
        self.num_matched = 0

        self.weights = []

    def update(self, subject):
        """
        :type subject: CaperFragmentNode
        """
        if not subject.match or not subject.match.success:
            return

        # TODO this should support closure nodes
        if type(subject) is CaperFragmentNode:
            self.num_matched += len(subject.fragments) if subject.fragments is not None else 0

        self.weights.append(subject.match.weight)

        if subject.match:
            if subject.match.tag not in self.info:
                self.info[subject.match.tag] = []

            self.info[subject.match.tag].insert(0, subject.match.result)

    def finish(self):
        self.weight = sum(self.weights) / len(self.weights)

    def copy(self):
        chain = CaperResultChain()

        chain.weight = self.weight
        chain.info = copy.deepcopy(self.info)

        chain.num_matched = self.num_matched
        chain.weights = copy.copy(self.weights)

        return chain
96 libs/caper/step.py Normal file
@@ -0,0 +1,96 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from caper.objects import CaptureMatch
from logr import Logr


class CaptureStep(object):
    REPR_KEYS = ['regex', 'func', 'single']

    def __init__(self, capture_group, tag, source, regex=None, func=None, single=None, **kwargs):
        #: @type: CaptureGroup
        self.capture_group = capture_group

        #: @type: str
        self.tag = tag
        #: @type: str
        self.source = source
        #: @type: str
        self.regex = regex
        #: @type: function
        self.func = func
        #: @type: bool
        self.single = single

        self.kwargs = kwargs

        self.matched = False

    def execute(self, fragment):
        """Execute step on fragment

        :type fragment: CaperFragment
        :rtype : CaptureMatch
        """

        match = CaptureMatch(self.tag, self)

        if self.regex:
            weight, result, num_fragments = self.capture_group.parser.matcher.fragment_match(fragment, self.regex)
            Logr.debug('(execute) [regex] tag: "%s"', self.tag)

            if not result:
                return match

            # Populate CaptureMatch
            match.success = True
            match.weight = weight
            match.result = result
            match.num_fragments = num_fragments
        elif self.func:
            result = self.func(fragment)
            Logr.debug('(execute) [func] %s += "%s"', self.tag, match)

            if not result:
                return match

            # Populate CaptureMatch
            match.success = True
            match.weight = 1.0
            match.result = result
        else:
            Logr.debug('(execute) [raw] %s += "%s"', self.tag, fragment.value)

            include_separators = self.kwargs.get('include_separators', False)

            # Populate CaptureMatch
            match.success = True
            match.weight = 1.0

            if include_separators:
                match.result = (fragment.left_sep, fragment.value, fragment.right_sep)
            else:
                match.result = fragment.value

        return match

    def __repr__(self):
        attribute_values = [key + '=' + repr(getattr(self, key))
                            for key in self.REPR_KEYS
                            if hasattr(self, key) and getattr(self, key)]

        attribute_string = ', ' + ', '.join(attribute_values) if len(attribute_values) > 0 else ''

        return "CaptureStep('%s'%s)" % (self.tag, attribute_string)
225 libs/logr/__init__.py Normal file
@@ -0,0 +1,225 @@
# logr - Simple python logging wrapper
# Packed by Dean Gardiner <gardiner91@gmail.com>
#
# File part of:
# rdio-sock - Rdio WebSocket Library
# Copyright (C) 2013 fzza- <fzzzzzzzza@gmail.com>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


import inspect
import logging
import os
import sys

IGNORE = ()
PY3 = sys.version_info[0] == 3


class Logr(object):
    loggers = {}
    handler = None

    trace_origin = False
    name = "Logr"

    @staticmethod
    def configure(level=logging.WARNING, handler=None, formatter=None, trace_origin=False, name="Logr"):
        """Configure Logr

        @param handler: Logger message handler
        @type handler: logging.Handler or None

        @param formatter: Logger message Formatter
        @type formatter: logging.Formatter or None
        """
        if formatter is None:
            formatter = LogrFormatter()

        if handler is None:
            handler = logging.StreamHandler()

        handler.setFormatter(formatter)
        handler.setLevel(level)
        Logr.handler = handler

        Logr.trace_origin = trace_origin
        Logr.name = name

    @staticmethod
    def configure_check():
        if Logr.handler is None:
            Logr.configure()

    @staticmethod
    def _get_name_from_path(filename):
        try:
            return os.path.splitext(os.path.basename(filename))[0]
        except TypeError:
            return "<unknown>"

    @staticmethod
    def get_frame_class(frame):
        if len(frame.f_code.co_varnames) <= 0:
            return None

        farg = frame.f_code.co_varnames[0]

        if farg not in frame.f_locals:
            return None

        if farg == 'self':
            return frame.f_locals[farg].__class__

        if farg == 'cls':
            return frame.f_locals[farg]

        return None

    @staticmethod
    def get_logger_name():
        if not Logr.trace_origin:
            return Logr.name

        stack = inspect.stack()

        for x in xrange_six(len(stack)):
            frame = stack[x][0]
            name = None

            # Try find name of function defined inside a class
            frame_class = Logr.get_frame_class(frame)

            if frame_class:
                class_name = frame_class.__name__
                module_name = frame_class.__module__

                if module_name != '__main__':
                    name = module_name + '.' + class_name
                else:
                    name = class_name

            # Try find name of function defined outside of a class
            if name is None:
                if frame.f_code.co_name in frame.f_globals:
                    name = frame.f_globals.get('__name__')
                    if name == '__main__':
                        name = Logr._get_name_from_path(frame.f_globals.get('__file__'))
                elif frame.f_code.co_name == '<module>':
                    name = Logr._get_name_from_path(frame.f_globals.get('__file__'))

            if name is not None and name not in IGNORE:
                return name

        return ""

    @staticmethod
    def get_logger():
        """Get or create logger (if it does not exist)

        @rtype: RootLogger
        """
        name = Logr.get_logger_name()
        if name not in Logr.loggers:
            Logr.configure_check()
            Logr.loggers[name] = logging.Logger(name)
            Logr.loggers[name].addHandler(Logr.handler)
        return Logr.loggers[name]

    @staticmethod
    def debug(msg, *args, **kwargs):
        Logr.get_logger().debug(msg, *args, **kwargs)

    @staticmethod
    def info(msg, *args, **kwargs):
        Logr.get_logger().info(msg, *args, **kwargs)

    @staticmethod
    def warning(msg, *args, **kwargs):
        Logr.get_logger().warning(msg, *args, **kwargs)

    warn = warning

    @staticmethod
    def error(msg, *args, **kwargs):
        Logr.get_logger().error(msg, *args, **kwargs)

    @staticmethod
    def exception(msg, *args, **kwargs):
        Logr.get_logger().exception(msg, *args, **kwargs)

    @staticmethod
    def critical(msg, *args, **kwargs):
        Logr.get_logger().critical(msg, *args, **kwargs)

    fatal = critical

    @staticmethod
    def log(level, msg, *args, **kwargs):
        Logr.get_logger().log(level, msg, *args, **kwargs)


class LogrFormatter(logging.Formatter):
    LENGTH_NAME = 32
    LENGTH_LEVEL_NAME = 5

    def __init__(self, fmt=None, datefmt=None):
        if sys.version_info[:2] > (2, 6):
            super(LogrFormatter, self).__init__(fmt, datefmt)
        else:
            logging.Formatter.__init__(self, fmt, datefmt)

    def usesTime(self):
        return True

    def format(self, record):
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)

        s = "%(asctime)s %(name)s %(levelname)s %(message)s" % {
            'asctime': record.asctime,
            'name': record.name[-self.LENGTH_NAME:].rjust(self.LENGTH_NAME, ' '),
            'levelname': record.levelname[:self.LENGTH_LEVEL_NAME].ljust(self.LENGTH_LEVEL_NAME, ' '),
            'message': record.message
        }

        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s += "\n"
            try:
                s += record.exc_text
            except UnicodeError:
                s = s + record.exc_text.decode(sys.getfilesystemencoding(),
                                               'replace')
        return s


def xrange_six(start, stop=None, step=None):
    if stop is not None and step is not None:
        if PY3:
            return range(start, stop, step)
        else:
            return xrange(start, stop, step)
    else:
        if PY3:
            return range(start)
        else:
            return xrange(start)
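`Logr` is configured once per process; a minimal usage sketch, using only names defined in the file above:

    import logging

    from logr import Logr

    # Show DEBUG output and name each logger after its calling module/class
    Logr.configure(level=logging.DEBUG, trace_origin=True)
    Logr.info("processing %s", "example.mkv")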
42 libs/qcond/__init__.py Normal file
@@ -0,0 +1,42 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from qcond.transformers.merge import MergeTransformer
from qcond.transformers.slice import SliceTransformer
from qcond.transformers.strip_common import StripCommonTransformer


__version_info__ = ('0', '1', '0')
__version_branch__ = 'master'

__version__ = "%s%s" % (
    '.'.join(__version_info__),
    '-' + __version_branch__ if __version_branch__ else ''
)


class QueryCondenser(object):
    def __init__(self):
        self.transformers = [
            MergeTransformer(),
            SliceTransformer(),
            StripCommonTransformer()
        ]

    def distinct(self, titles):
        for transformer in self.transformers:
            titles = transformer.run(titles)

        return titles
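`QueryCondenser.distinct` simply pipes a list of titles through the three transformers in order. A hypothetical usage sketch (the titles are made up):

    from qcond import QueryCondenser

    qc = QueryCondenser()
    # Condense near-duplicate titles down to a ranked list of distinct queries
    print qc.distinct(['the office', 'the office us', 'office'])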
23 libs/qcond/compat.py Normal file
@@ -0,0 +1,23 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import sys

PY3 = sys.version_info[0] == 3

if PY3:
    xrange = range
else:
    xrange = xrange
84 libs/qcond/helpers.py Normal file
@@ -0,0 +1,84 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from difflib import SequenceMatcher
import re
import sys
from logr import Logr
from qcond.compat import xrange


PY3 = sys.version_info[0] == 3


def simplify(s):
    s = s.lower()
    s = re.sub(r"(\w)'(\w)", r"\1\2", s)
    return s


def strip(s):
    return re.sub(r"^(\W*)(.*?)(\W*)$", r"\2", s)


def create_matcher(a, b, swap_longest = True, case_sensitive = False):
    # Ensure longest string is a
    if swap_longest and len(b) > len(a):
        a_ = a
        a = b
        b = a_

    if not case_sensitive:
        a = a.upper()
        b = b.upper()

    return SequenceMatcher(None, a, b)


def first(function_or_none, sequence):
    if PY3:
        for item in filter(function_or_none, sequence):
            return item
    else:
        result = filter(function_or_none, sequence)
        if len(result):
            return result[0]

    return None


def sorted_append(sequence, item, func):
    if not len(sequence):
        sequence.insert(0, item)
        return

    x = 0
    for x in xrange(len(sequence)):
        if func(sequence[x]):
            sequence.insert(x, item)
            return

    sequence.append(item)


def itemsMatch(L1, L2):
    return len(L1) == len(L2) and sorted(L1) == sorted(L2)


def distinct(sequence):
    result = []

    for item in sequence:
        if item not in result:
            result.append(item)

    return result
0 libs/qcond/transformers/__init__.py Normal file
21 libs/qcond/transformers/base.py Normal file
@@ -0,0 +1,21 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class Transformer(object):
    def __init__(self):
        pass

    def run(self, titles):
        raise NotImplementedError()
241 libs/qcond/transformers/merge.py Normal file
@@ -0,0 +1,241 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from operator import itemgetter
from logr import Logr
from qcond.helpers import simplify, strip, first, sorted_append, distinct
from qcond.transformers.base import Transformer
from qcond.compat import xrange


class MergeTransformer(Transformer):
    def __init__(self):
        super(MergeTransformer, self).__init__()

    def run(self, titles):
        titles = distinct([simplify(title) for title in titles])

        Logr.info(str(titles))

        Logr.debug("------------------------------------------------------------")

        root, tails = self.parse(titles)

        Logr.debug("--------------------------PARSE-----------------------------")

        for node in root:
            print_tree(node)

        Logr.debug("--------------------------MERGE-----------------------------")

        self.merge(root)

        Logr.debug("--------------------------FINAL-----------------------------")

        for node in root:
            print_tree(node)

        Logr.debug("--------------------------RESULT-----------------------------")

        scores = {}
        results = []

        for tail in tails:
            score, value, original_value = tail.full_value()

            if value in scores:
                scores[value] += score
            else:
                results.append((value, original_value))
                scores[value] = score

            Logr.debug("%s %s %s", score, value, original_value)

        sorted_results = sorted(results, key=lambda item: (scores[item[0]], item[1]), reverse = True)

        return [result[0] for result in sorted_results]

    def parse(self, titles):
        root = []
        tails = []

        for title in titles:
            Logr.debug(title)

            cur = None
            words = title.split(' ')

            for wx in xrange(len(words)):
                word = strip(words[wx])

                if cur is None:
                    cur = find_node(root, word)

                    if cur is None:
                        cur = DNode(word, None, num_children=len(words) - wx, original_value=title)
                        root.append(cur)
                else:
                    parent = cur
                    parent.weight += 1

                    cur = find_node(parent.right, word)

                    if cur is None:
                        Logr.debug("%s %d", word, len(words) - wx)
                        cur = DNode(word, parent, num_children=len(words) - wx)
                        sorted_append(parent.right, cur, lambda a: a.num_children < cur.num_children)
                    else:
                        cur.weight += 1

            tails.append(cur)

        return root, tails

    def merge(self, root):
        for x in range(len(root)):
            Logr.debug(root[x])
            root[x].right = self._merge(root[x].right)
            Logr.debug('=================================================================')

        return root

    def get_nodes_right(self, value):
        if type(value) is not list:
            value = [value]

        nodes = []

        for node in value:
            nodes.append(node)

            for child in self.get_nodes_right(node.right):
                nodes.append(child)

        return nodes

    def destroy_nodes_right(self, value):
        nodes = self.get_nodes_right(value)

        for node in nodes:
            node.value = None
            node.dead = True

    def _merge(self, nodes, depth = 0):
        Logr.debug(str('\t' * depth) + str(nodes))

        if not len(nodes):
            return []

        top = nodes[0]

        # Merge into top
        for x in range(len(nodes)):
            # Merge extra results into top
            if x > 0:
                top.value = None
                top.weight += nodes[x].weight
                self.destroy_nodes_right(top.right)

                if len(nodes[x].right):
                    top.join_right(nodes[x].right)

                    Logr.debug("= %s joined %s", nodes[x], top)

                nodes[x].dead = True

        nodes = [n for n in nodes if not n.dead]

        # Traverse further
        for node in nodes:
            if len(node.right):
                node.right = self._merge(node.right, depth + 1)

        return nodes


def print_tree(node, depth = 0):
    Logr.debug(str('\t' * depth) + str(node))

    if len(node.right):
        for child in node.right:
            print_tree(child, depth + 1)
    else:
        Logr.debug(node.full_value()[1])


def find_node(node_list, value):
    # Try find adjacent node match
    for node in node_list:
        if node.value == value:
            return node

    return None


class DNode(object):
    def __init__(self, value, parent, right=None, weight=1, num_children=None, original_value=None):
        self.value = value

        self.parent = parent

        if right is None:
            right = []
        self.right = right

        self.weight = weight

        self.original_value = original_value
        self.num_children = num_children

        self.dead = False

    def join_right(self, nodes):
        for node in nodes:
            duplicate = first(lambda x: x.value == node.value, self.right)

            if duplicate:
                duplicate.weight += node.weight
                duplicate.join_right(node.right)
            else:
                node.parent = self
                self.right.append(node)

    def full_value(self):
        words = []
        total_score = 0

        cur = self
        root = None

        while cur is not None:
            if cur.value and not cur.dead:
                words.insert(0, cur.value)
                total_score += cur.weight

            if cur.parent is None:
                root = cur
            cur = cur.parent

        return float(total_score) / len(words), ' '.join(words), root.original_value if root else None

    def __repr__(self):
        return '<%s value:"%s", weight: %s, num_children: %s%s%s>' % (
            'DNode',
            self.value,
            self.weight,
            self.num_children,
            (', original_value: %s' % self.original_value) if self.original_value else '',
            ' REMOVING' if self.dead else ''
        )
280 libs/qcond/transformers/slice.py Normal file
@@ -0,0 +1,280 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from logr import Logr
from qcond.helpers import create_matcher
from qcond.transformers.base import Transformer


class SliceTransformer(Transformer):
    def __init__(self):
        super(SliceTransformer, self).__init__()

    def run(self, titles):
        nodes = []

        # Create a node for each title
        for title in titles:
            nodes.append(SimNode(title))

        # Calculate similarities between nodes
        for node in nodes:
            calculate_sim_links(node, [n for n in nodes if n != node])

        kill_nodes_above(nodes, 0.90)

        Logr.debug('---------------------------------------------------------------------')

        print_link_tree(nodes)
        Logr.debug('%s %s', len(nodes), [n.value for n in nodes])

        Logr.debug('---------------------------------------------------------------------')

        kill_trailing_nodes(nodes)

        Logr.debug('---------------------------------------------------------------------')

        # Sort remaining nodes by 'num_merges'
        nodes = sorted(nodes, key=lambda n: n.num_merges, reverse=True)

        print_link_tree(nodes)

        Logr.debug('---------------------------------------------------------------------')

        Logr.debug('%s %s', len(nodes), [n.value for n in nodes])

        return [n.value for n in nodes]


class SimLink(object):
    def __init__(self, similarity, opcodes, stats):
        self.similarity = similarity
        self.opcodes = opcodes
        self.stats = stats


class SimNode(object):
    def __init__(self, value):
        self.value = value

        self.dead = False
        self.num_merges = 0

        self.links = {}  # {<other SimNode>: <SimLink>}


def kill_nodes(nodes, killed_nodes):
    # Remove killed nodes from root list
    for node in killed_nodes:
        if node in nodes:
            nodes.remove(node)

    # Remove killed nodes from links
    for killed_node in killed_nodes:
        for node in nodes:
            if killed_node in node.links:
                node.links.pop(killed_node)


def kill_nodes_above(nodes, above_sim):
    killed_nodes = []

    for node in nodes:
        if node.dead:
            continue

        Logr.debug(node.value)

        for link_node, link in node.links.items():
            if link_node.dead:
                continue

            Logr.debug('\t%0.2f -- %s', link.similarity, link_node.value)

            if link.similarity >= above_sim:
                if len(link_node.value) > len(node.value):
                    Logr.debug('\t\tvery similar, killed this node')
                    link_node.dead = True
                    node.num_merges += 1
                    killed_nodes.append(link_node)
                else:
                    Logr.debug('\t\tvery similar, killed owner')
                    node.dead = True
                    link_node.num_merges += 1
                    killed_nodes.append(node)

    kill_nodes(nodes, killed_nodes)


def print_link_tree(nodes):
    for node in nodes:
        Logr.debug(node.value)
        Logr.debug('\tnum_merges: %s', node.num_merges)

        if len(node.links):
            Logr.debug('\t========== LINKS ==========')
            for link_node, link in node.links.items():
                Logr.debug('\t%0.2f -- %s', link.similarity, link_node.value)

            Logr.debug('\t---------------------------')


def kill_trailing_nodes(nodes):
    killed_nodes = []

    for node in nodes:
        if node.dead:
            continue

        Logr.debug(node.value)

        for link_node, link in node.links.items():
            if link_node.dead:
                continue

            is_valid = link.stats.get('valid', False)

            has_deletions = False
            has_insertions = False
            has_replacements = False

            for opcode in link.opcodes:
                if opcode[0] == 'delete':
                    has_deletions = True
                if opcode[0] == 'insert':
                    has_insertions = True
                if opcode[0] == 'replace':
                    has_replacements = True

            equal_perc = link.stats.get('equal', 0) / float(len(node.value))
            insert_perc = link.stats.get('insert', 0) / float(len(node.value))

            Logr.debug('\t({0:<24}) [{1:02d}:{2:02d} = {3:02d} {4:3.0f}% {5:3.0f}%] -- {6:<45}'.format(
                'd:%s, i:%s, r:%s' % (has_deletions, has_insertions, has_replacements),
                len(node.value), len(link_node.value), link.stats.get('equal', 0),
                equal_perc * 100, insert_perc * 100,
                '"{0}"'.format(link_node.value)
            ))

            Logr.debug('\t\t%s', link.stats)

            kill = all([
                is_valid,
                equal_perc >= 0.5,
                insert_perc < 2,
                has_insertions,
                not has_deletions,
                not has_replacements
            ])

            if kill:
                Logr.debug('\t\tkilled this node')

                link_node.dead = True
                node.num_merges += 1
                killed_nodes.append(link_node)

    kill_nodes(nodes, killed_nodes)


stats_print_format = "\t{0:<8} ({1:2d}:{2:2d}) ({3:2d}:{4:2d})"


def get_index_values(iterable, a, b):
    return (
        iterable[a] if a else None,
        iterable[b] if b else None
    )


def get_indices(iterable, a, b):
    return (
        a if 0 < a < len(iterable) else None,
        b if 0 < b < len(iterable) else None
    )


def get_opcode_stats(for_node, node, opcodes):
    stats = {}

    for tag, i1, i2, j1, j2 in opcodes:
        Logr.debug(stats_print_format.format(
            tag, i1, i2, j1, j2
        ))

        if tag in ['insert', 'delete']:
            ax = None, None
            bx = None, None

            if tag == 'insert':
                ax = get_indices(for_node.value, i1 - 1, i1)
                bx = get_indices(node.value, j1, j2 - 1)

            if tag == 'delete':
                ax = get_indices(for_node.value, j1 - 1, j1)
                bx = get_indices(node.value, i1, i2 - 1)

            av = get_index_values(for_node.value, *ax)
            bv = get_index_values(node.value, *bx)

            Logr.debug(
                '\t\t%s %s [%s><%s] <---> %s %s [%s><%s]',
                ax, av, av[0], av[1],
                bx, bv, bv[0], bv[1]
            )

            head_valid = av[0] in [None, ' '] or bv[0] in [None, ' ']
            tail_valid = av[1] in [None, ' '] or bv[1] in [None, ' ']
            valid = head_valid and tail_valid

            if 'valid' not in stats or (stats['valid'] and not valid):
                stats['valid'] = valid

            Logr.debug('\t\t' + ('VALID' if valid else 'INVALID'))

        if tag not in stats:
            stats[tag] = 0

        stats[tag] += (i2 - i1) or (j2 - j1)

    return stats


def calculate_sim_links(for_node, other_nodes):
    for node in other_nodes:
        if node in for_node.links:
            continue

        Logr.debug('calculating similarity between "%s" and "%s"', for_node.value, node.value)

        # Get similarity
        similarity_matcher = create_matcher(for_node.value, node.value)
        similarity = similarity_matcher.quick_ratio()

        # Get for_node -> node opcodes
        a_opcodes_matcher = create_matcher(for_node.value, node.value, swap_longest = False)
        a_opcodes = a_opcodes_matcher.get_opcodes()
        a_stats = get_opcode_stats(for_node, node, a_opcodes)

        Logr.debug('-' * 100)

        # Get node -> for_node opcodes
        b_opcodes_matcher = create_matcher(node.value, for_node.value, swap_longest = False)
        b_opcodes = b_opcodes_matcher.get_opcodes()
        b_stats = get_opcode_stats(for_node, node, b_opcodes)

        for_node.links[node] = SimLink(similarity, a_opcodes, a_stats)
        node.links[for_node] = SimLink(similarity, b_opcodes, b_stats)

        #raw_input('Press ENTER to continue')
26 libs/qcond/transformers/strip_common.py Normal file
@@ -0,0 +1,26 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from qcond.transformers.base import Transformer


COMMON_WORDS = [
    'the'
]


class StripCommonTransformer(Transformer):
    def run(self, titles):
        return [title for title in titles if title.lower() not in COMMON_WORDS]
4 libs/tvdb_api/.gitignore vendored Normal file
@@ -0,0 +1,4 @@
.DS_Store
*.pyc
*.egg-info/*
dist/*.tar.gz
9 libs/tvdb_api/.travis.yml Normal file
@@ -0,0 +1,9 @@
language: python
python:
  - 2.5
  - 2.6
  - 2.7

install: pip install nose

script: nosetests
4 libs/tvdb_api/MANIFEST.in Normal file
@@ -0,0 +1,4 @@
include UNLICENSE
include readme.md
include tests/*.py
include Rakefile
103 libs/tvdb_api/Rakefile Normal file
@@ -0,0 +1,103 @@
require 'fileutils'

task :default => [:clean]

task :clean do
  [".", "tests"].each do |cd|
    puts "Cleaning directory #{cd}"
    Dir.new(cd).each do |t|
      if t =~ /.*\.pyc$/
        puts "Removing #{File.join(cd, t)}"
        File.delete(File.join(cd, t))
      end
    end
  end
end

desc "Upversion files"
task :upversion do
  puts "Upversioning"

  Dir.glob("*.py").each do |filename|
    f = File.new(filename, File::RDWR)
    contents = f.read()

    contents.gsub!(/__version__ = ".+?"/){|m|
      cur_version = m.scan(/\d+\.\d+/)[0].to_f
      new_version = cur_version + 0.1

      puts "Current version: #{cur_version}"
      puts "New version: #{new_version}"

      new_line = "__version__ = \"#{new_version}\""

      puts "Old line: #{m}"
      puts "New line: #{new_line}"

      m = new_line
    }

    puts contents[0]

    f.truncate(0) # empty the existing file
    f.seek(0)
    f.write(contents.to_s) # write modified file
    f.close()
  end
end

desc "Upload current version to PyPi"
task :topypi => :test do
  cur_file = File.open("tvdb_api.py").read()
  tvdb_api_version = cur_file.scan(/__version__ = "(.*)"/)
  tvdb_api_version = tvdb_api_version[0][0].to_f

  puts "Build sdist and send tvdb_api v#{tvdb_api_version} to PyPi?"
  if $stdin.gets.chomp == "y"
    puts "Sending source-dist (sdist) to PyPi"

    if system("python setup.py sdist register upload")
      puts "tvdb_api uploaded!"
    end

  else
    puts "Cancelled"
  end
end

desc "Profile by running unittests"
task :profile do
  cd "tests"
  puts "Profiling.."
  `python -m cProfile -o prof_runtest.prof runtests.py`
  puts "Converting prof to dot"
  `python gprof2dot.py -o prof_runtest.dot -f pstats prof_runtest.prof`
  puts "Generating graph"
  `~/Applications/dev/graphviz.app/Contents/macOS/dot -Tpng -o profile.png prof_runtest.dot -Gbgcolor=black`
  puts "Cleanup"
  rm "prof_runtest.dot"
  rm "prof_runtest.prof"
end

task :test do
  puts "Nosetest'ing"
  if not system("nosetests -v --with-doctest")
    raise "Test failed!"
  end

  puts "Doctesting *.py (excluding setup.py)"
  Dir.glob("*.py").select{|e| ! e.match(/setup.py/)}.each do |filename|
    if filename =~ /^setup\.py/
      skip
    end
    puts "Doctesting #{filename}"
    if not system("python", "-m", "doctest", filename)
      raise "Failed doctest"
    end
  end

  puts "Doctesting readme.md"
  if not system("python", "-m", "doctest", "readme.md")
    raise "Doctest"
  end
end
26 libs/tvdb_api/UNLICENSE Normal file
@@ -0,0 +1,26 @@
Copyright 2011-2012 Ben Dickson (dbr)

This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
0 libs/tvdb_api/__init__.py Normal file
109 libs/tvdb_api/readme.md Normal file
@@ -0,0 +1,109 @@
# `tvdb_api`

`tvdb_api` is an easy to use interface to [thetvdb.com][tvdb]

`tvnamer` has moved to a separate repository: [github.com/dbr/tvnamer][tvnamer] - it is a utility which uses `tvdb_api` to rename files from `some.show.s01e03.blah.abc.avi` to `Some Show - [01x03] - The Episode Name.avi` (which works by getting the episode name from `tvdb_api`)

[Build Status](http://travis-ci.org/dbr/tvdb_api)

## To install

You can easily install `tvdb_api` via `easy_install`:

    easy_install tvdb_api

You may need to use sudo, depending on your setup:

    sudo easy_install tvdb_api

The [`tvnamer`][tvnamer] command-line tool can also be installed via `easy_install`; this installs `tvdb_api` as a dependency:

    easy_install tvnamer


## Basic usage

    import tvdb_api
    t = tvdb_api.Tvdb()
    episode = t['My Name Is Earl'][1][3]  # get season 1, episode 3 of show
    print episode['episodename']  # Print episode name
## Advanced usage

Most of the documentation is in docstrings. The examples are tested (using doctest), so they will always be up to date and working.

The docstring for `Tvdb.__init__` lists all initialisation arguments, including support for non-English searches, custom "Select Series" interfaces and enabling the retrieval of banners and extended actor information. You can also override the default API key using `apikey`, which is recommended if you're using `tvdb_api` in a larger script or application.
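As a quick sketch, a customised instance might look like this (`banners` and `actors` appear later in this readme; the authoritative argument list is in the `Tvdb.__init__` docstring, and the API key below is a placeholder):

    import tvdb_api
    t = tvdb_api.Tvdb(
        apikey = 'MY_API_KEY',  # placeholder - substitute your own key
        banners = True,         # also fetch banner/fanart info
        actors = True           # also fetch extended actor info
    )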
### Exceptions

There are several exceptions you may catch; these can be imported from `tvdb_api` (a short example follows the list):

- `tvdb_error` - this is raised when there is an error communicating with [thetvdb.com][tvdb] (a network error most commonly)
- `tvdb_userabort` - raised when a user aborts the Select Series dialog (by `ctrl+c`, or entering `q`)
- `tvdb_shownotfound` - raised when `t['show name']` cannot find anything
- `tvdb_seasonnotfound` - raised when the requested season (`t['show name'][99]`) does not exist
- `tvdb_episodenotfound` - raised when the requested episode (`t['show name'][1][99]`) does not exist
- `tvdb_attributenotfound` - raised when the requested attribute is not found (`t['show name']['an attribute']`, `t['show name'][1]['an attribute']`, or `t['show name'][1][1]['an attribute']`)
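A minimal sketch of catching two of these (the show name is deliberately nonsensical):

    import tvdb_api
    from tvdb_api import tvdb_error, tvdb_shownotfound

    t = tvdb_api.Tvdb()
    try:
        episode = t['A Show That Does Not Exist'][1][1]
    except tvdb_shownotfound:
        print "show could not be found"
    except tvdb_error:
        print "error communicating with thetvdb.com"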
### Series data

All data exposed by [thetvdb.com][tvdb] is accessible via the `Show` class. A Show is retrieved by doing:

    >>> import tvdb_api
    >>> t = tvdb_api.Tvdb()
    >>> show = t['scrubs']
    >>> type(show)
    <class 'tvdb_api.Show'>

For example, to find out what network Scrubs is aired on:

    >>> t['scrubs']['network']
    u'ABC'

The data is stored in an attribute named `data`, within the Show instance:

    >>> t['scrubs'].data.keys()
    ['networkid', 'rating', 'airs_dayofweek', 'contentrating', 'seriesname', 'id', 'airs_time', 'network', 'fanart', 'lastupdated', 'actors', 'ratingcount', 'status', 'added', 'poster', 'imdb_id', 'genre', 'banner', 'seriesid', 'language', 'zap2it_id', 'addedby', 'firstaired', 'runtime', 'overview']

Although each element is also accessible via `t['scrubs']` for ease-of-use:

    >>> t['scrubs']['rating']
    u'9.0'

This is the recommended way of retrieving "one-off" data (for example, if you are only interested in "seriesname"). If you wish to iterate over all data, or check if a particular show has a specific piece of data, use the `data` attribute:

    >>> 'rating' in t['scrubs'].data
    True

### Banners and actors

Since banners and actors are separate XML files, retrieving them by default is undesirable. If you wish to retrieve banners (and other fanart), use the `banners` Tvdb initialisation argument:

    >>> from tvdb_api import Tvdb
    >>> t = Tvdb(banners = True)

Then access the data using a `Show`'s `_banners` key:

    >>> t['scrubs']['_banners'].keys()
    ['fanart', 'poster', 'series', 'season']

The banner data structure will be improved in future versions.

Extended actor data is accessible similarly:

    >>> t = Tvdb(actors = True)
    >>> actors = t['scrubs']['_actors']
    >>> actors[0]
    <Actor "Zach Braff">
    >>> actors[0].keys()
    ['sortorder', 'image', 'role', 'id', 'name']
    >>> actors[0]['role']
    u'Dr. John Michael "J.D." Dorian'

Remember a simple list of actors is accessible via the default Show data:

    >>> t['scrubs']['actors']
    u'|Zach Braff|Donald Faison|Sarah Chalke|Christa Miller|Aloma Wright|Robert Maschio|Sam Lloyd|Neil Flynn|Ken Jenkins|Judy Reyes|John C. McGinley|Travis Schuldt|Johnny Kastl|Heather Graham|Michael Mosley|Kerry Bish\xe9|Dave Franco|Eliza Coupe|'

[tvdb]: http://thetvdb.com
[tvnamer]: http://github.com/dbr/tvnamer
35 libs/tvdb_api/setup.py Normal file
@@ -0,0 +1,35 @@
from setuptools import setup
setup(
    name = 'tvdb_api',
    version='1.8.2',

    author='dbr/Ben',
    description='Interface to thetvdb.com',
    url='http://github.com/dbr/tvdb_api/tree/master',
    license='unlicense',

    long_description="""\
An easy to use API interface to TheTVDB.com
Basic usage is:

>>> import tvdb_api
>>> t = tvdb_api.Tvdb()
>>> ep = t['My Name Is Earl'][1][22]
>>> ep
<Episode 01x22 - Stole a Badge>
>>> ep['episodename']
u'Stole a Badge'
""",

    py_modules = ['tvdb_api', 'tvdb_ui', 'tvdb_exceptions', 'tvdb_cache'],

    classifiers=[
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Multimedia",
        "Topic :: Utilities",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ]
)
1638 libs/tvdb_api/tests/gprof2dot.py Normal file
File diff suppressed because it is too large
28
libs/tvdb_api/tests/runtests.py
Executable file
28
libs/tvdb_api/tests/runtests.py
Executable file
@@ -0,0 +1,28 @@
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)

import sys
import unittest

import test_tvdb_api

def main():
    suite = unittest.TestSuite([
        unittest.TestLoader().loadTestsFromModule(test_tvdb_api)
    ])

    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(suite)
    if result.wasSuccessful():
        return 0
    else:
        return 1

if __name__ == '__main__':
    sys.exit(
        int(main())
    )

526  libs/tvdb_api/tests/test_tvdb_api.py  (Normal file)
@@ -0,0 +1,526 @@
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)

"""Unittests for tvdb_api
"""

import os
import sys
import datetime
import unittest

# Force parent directory onto path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import tvdb_api
import tvdb_ui
from tvdb_api import (tvdb_shownotfound, tvdb_seasonnotfound,
    tvdb_episodenotfound, tvdb_attributenotfound)


class test_tvdb_basic(unittest.TestCase):
    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)

    def test_different_case(self):
        """Checks the auto-correction of show names is working.
        It should correct the weirdly capitalised 'sCruBs' to 'Scrubs'
        """
        self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')
        self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs')

    def test_spaces(self):
        """Checks shownames with spaces
        """
        self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl')
        self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death')

    def test_numeric(self):
        """Checks numeric show names
        """
        self.assertEquals(self.t['24'][2][20]['episodename'], 'Day 2: 3:00 A.M.-4:00 A.M.')
        self.assertEquals(self.t['24']['seriesname'], '24')

    def test_show_iter(self):
        """Iterating over a show returns each season
        """
        self.assertEquals(
            len(
                [season for season in self.t['Life on Mars']]
            ),
            2
        )

    def test_season_iter(self):
        """Iterating over a season returns its episodes
        """
        self.assertEquals(
            len(
                [episode for episode in self.t['Life on Mars'][1]]
            ),
            8
        )

    def test_get_episode_overview(self):
        """Checks episode overview is retrieved correctly.
        """
        self.assertEquals(
            self.t['Battlestar Galactica (2003)'][1][6]['overview'].startswith(
                'When a new copy of Doral, a Cylon who had been previously'),
            True
        )

    def test_get_parent(self):
        """Check accessing series from episode instance
        """
        show = self.t['Battlestar Galactica (2003)']
        season = show[1]
        episode = show[1][1]

        self.assertEquals(
            season.show,
            show
        )

        self.assertEquals(
            episode.season,
            season
        )

        self.assertEquals(
            episode.season.show,
            show
        )


class test_tvdb_errors(unittest.TestCase):
    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)

    def test_seasonnotfound(self):
        """Checks exception is thrown when season doesn't exist.
        """
        self.assertRaises(tvdb_seasonnotfound, lambda:self.t['CNNNN'][10][1])

    def test_shownotfound(self):
        """Checks exception is thrown when show doesn't exist.
        """
        self.assertRaises(tvdb_shownotfound, lambda:self.t['the fake show thingy'])

    def test_episodenotfound(self):
        """Checks exception is raised for non-existent episode
        """
        self.assertRaises(tvdb_episodenotfound, lambda:self.t['Scrubs'][1][30])

    def test_attributenamenotfound(self):
        """Checks exception is thrown if an attribute isn't found.
        """
        self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN'][1][6]['afakeattributething'])
        self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN']['afakeattributething'])


class test_tvdb_search(unittest.TestCase):
    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)

    def test_search_len(self):
        """There should be only one result matching
        """
        self.assertEquals(len(self.t['My Name Is Earl'].search('Faked His Own Death')), 1)

    def test_search_checkname(self):
        """Checks you can get the episode name of a search result
        """
        self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')
        self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')

    def test_search_multiresults(self):
        """Checks search can return multiple results
        """
        self.assertEquals(len(self.t['Scrubs'].search('my first')) >= 3, True)

    def test_search_no_params_error(self):
        """Checks not supplying search info raises TypeError"""
        self.assertRaises(
            TypeError,
            lambda: self.t['Scrubs'].search()
        )

    def test_search_season(self):
        """Checks the searching of a single season"""
        self.assertEquals(
            len(self.t['Scrubs'][1].search("First")),
            3
        )

    def test_search_show(self):
        """Checks the searching of an entire show"""
        self.assertEquals(
            len(self.t['CNNNN'].search('CNNNN', key='episodename')),
            3
        )

    def test_aired_on(self):
        """Tests airedOn show method"""
        sr = self.t['Scrubs'].airedOn(datetime.date(2001, 10, 2))
        self.assertEquals(len(sr), 1)
        self.assertEquals(sr[0]['episodename'], u'My First Day')


class test_tvdb_data(unittest.TestCase):
    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)

    def test_episode_data(self):
        """Check the firstaired value is retrieved
        """
        self.assertEquals(
            self.t['lost']['firstaired'],
            '2004-09-22'
        )


class test_tvdb_misc(unittest.TestCase):
    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)

    def test_repr_show(self):
        """Check repr() of Show
        """
        self.assertEquals(
            repr(self.t['CNNNN']),
            "<Show Chaser Non-Stop News Network (CNNNN) (containing 3 seasons)>"
        )

    def test_repr_season(self):
        """Check repr() of Season
        """
        self.assertEquals(
            repr(self.t['CNNNN'][1]),
            "<Season instance (containing 9 episodes)>"
        )

    def test_repr_episode(self):
        """Check repr() of Episode
        """
        self.assertEquals(
            repr(self.t['CNNNN'][1][1]),
            "<Episode 01x01 - Terror Alert>"
        )

    def test_have_all_languages(self):
        """Check valid_languages is up-to-date (compared to languages.xml)
        """
        et = self.t._getetsrc(
            "http://thetvdb.com/api/%s/languages.xml" % (
                self.t.config['apikey']
            )
        )
        languages = [x.find("abbreviation").text for x in et.findall("Language")]

        self.assertEquals(
            sorted(languages),
            sorted(self.t.config['valid_languages'])
        )


class test_tvdb_languages(unittest.TestCase):
    def test_episode_name_french(self):
        """Check episode data is in French (language="fr")
        """
        t = tvdb_api.Tvdb(cache = True, language = "fr")
        self.assertEquals(
            t['scrubs'][1][1]['episodename'],
            "Mon premier jour"
        )
        self.assertTrue(
            t['scrubs']['overview'].startswith(
                u"J.D. est un jeune m\xe9decin qui d\xe9bute"
            )
        )

    def test_episode_name_spanish(self):
        """Check episode data is in Spanish (language="es")
        """
        t = tvdb_api.Tvdb(cache = True, language = "es")
        self.assertEquals(
            t['scrubs'][1][1]['episodename'],
            "Mi Primer Dia"
        )
        self.assertTrue(
            t['scrubs']['overview'].startswith(
                u'Scrubs es una divertida comedia'
            )
        )

    def test_multilanguage_selection(self):
        """Check selected language is used
        """
        class SelectEnglishUI(tvdb_ui.BaseUI):
            def selectSeries(self, allSeries):
                return [x for x in allSeries if x['language'] == "en"][0]

        class SelectItalianUI(tvdb_ui.BaseUI):
            def selectSeries(self, allSeries):
                return [x for x in allSeries if x['language'] == "it"][0]

        t_en = tvdb_api.Tvdb(
            cache=True,
            custom_ui = SelectEnglishUI,
            language = "en")
        t_it = tvdb_api.Tvdb(
            cache=True,
            custom_ui = SelectItalianUI,
            language = "it")

        self.assertEquals(
            t_en['dexter'][1][2]['episodename'], "Crocodile"
        )
        self.assertEquals(
            t_it['dexter'][1][2]['episodename'], "Lacrime di coccodrillo"
        )


class test_tvdb_unicode(unittest.TestCase):
    def test_search_in_chinese(self):
        """Check searching for show with language=zh returns Chinese seriesname
        """
        t = tvdb_api.Tvdb(cache = True, language = "zh")
        show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
        self.assertEquals(
            type(show),
            tvdb_api.Show
        )

        self.assertEquals(
            show['seriesname'],
            u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i'
        )

    def test_search_in_all_languages(self):
        """Check search_all_languages returns Chinese show, with language=en
        """
        t = tvdb_api.Tvdb(cache = True, search_all_languages = True, language="en")
        show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i']
        self.assertEquals(
            type(show),
            tvdb_api.Show
        )

        self.assertEquals(
            show['seriesname'],
            u'Virtues Of Harmony II'
        )


class test_tvdb_banners(unittest.TestCase):
    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = True)

    def test_have_banners(self):
        """Check at least one banner is found
        """
        self.assertEquals(
            len(self.t['scrubs']['_banners']) > 0,
            True
        )

    def test_banner_url(self):
        """Checks banner URLs start with http://
        """
        for banner_type, banner_data in self.t['scrubs']['_banners'].items():
            for res, res_data in banner_data.items():
                for bid, banner_info in res_data.items():
                    self.assertEquals(
                        banner_info['_bannerpath'].startswith("http://"),
                        True
                    )

    def test_episode_image(self):
        """Checks episode 'filename' image is fully qualified URL
        """
        self.assertEquals(
            self.t['scrubs'][1][1]['filename'].startswith("http://"),
            True
        )

    def test_show_artwork(self):
        """Checks various image URLs within season data are fully qualified
        """
        for key in ['banner', 'fanart', 'poster']:
            self.assertEquals(
                self.t['scrubs'][key].startswith("http://"),
                True
            )


class test_tvdb_actors(unittest.TestCase):
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True)

    def test_actors_is_correct_datatype(self):
        """Check show/_actors key exists and is correct type"""
        self.assertTrue(
            isinstance(
                self.t['scrubs']['_actors'],
                tvdb_api.Actors
            )
        )

    def test_actors_has_actor(self):
        """Check show has at least one Actor
        """
        self.assertTrue(
            isinstance(
                self.t['scrubs']['_actors'][0],
                tvdb_api.Actor
            )
        )

    def test_actor_has_name(self):
        """Check first actor has a name"""
        self.assertEquals(
            self.t['scrubs']['_actors'][0]['name'],
            "Zach Braff"
        )

    def test_actor_image_corrected(self):
        """Check image URL is fully qualified
        """
        for actor in self.t['scrubs']['_actors']:
            if actor['image'] is not None:
                # Actor's image can be None, it displays as the placeholder
                # image on thetvdb.com
                self.assertTrue(
                    actor['image'].startswith("http://")
                )


class test_tvdb_doctest(unittest.TestCase):
    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False)

    def test_doctest(self):
        """Check docstring examples work"""
        import doctest
        doctest.testmod(tvdb_api)


class test_tvdb_custom_caching(unittest.TestCase):
    def test_true_false_string(self):
        """Tests setting cache to True/False/string

        Basic tests, only checking for errors
        """

        tvdb_api.Tvdb(cache = True)
        tvdb_api.Tvdb(cache = False)
        tvdb_api.Tvdb(cache = "/tmp")

    def test_invalid_cache_option(self):
        """Tests setting cache to invalid value
        """

        try:
            tvdb_api.Tvdb(cache = 2.3)
        except ValueError:
            pass
        else:
            self.fail("Expected ValueError from setting cache to float")

    def test_custom_urlopener(self):
        class UsedCustomOpener(Exception):
            pass

        import urllib2
        class TestOpener(urllib2.BaseHandler):
            def default_open(self, request):
                print request.get_method()
                raise UsedCustomOpener("Something")

        custom_opener = urllib2.build_opener(TestOpener())
        t = tvdb_api.Tvdb(cache = custom_opener)
        try:
            t['scrubs']
        except UsedCustomOpener:
            pass
        else:
            self.fail("Did not use custom opener")


class test_tvdb_by_id(unittest.TestCase):
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True)

    def test_get_show_by_id(self):
        """Check looking up a show by its thetvdb.com series id"""
        self.assertEquals(
            self.t[76156]['seriesname'],
            'Scrubs'
        )


class test_tvdb_zip(unittest.TestCase):
    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, useZip = True)

    def test_get_series_from_zip(self):
        """Check basic series data is retrieved via the zip download
        """
        self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady')
        self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs')

    def test_spaces_from_zip(self):
        """Checks shownames with spaces
        """
        self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl')
        self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death')


class test_tvdb_show_search(unittest.TestCase):
    # Used to store the cached instance of Tvdb()
    t = None

    def setUp(self):
        if self.t is None:
            self.__class__.t = tvdb_api.Tvdb(cache = True, useZip = True)

    def test_search(self):
        """Test Tvdb.search method
        """
        results = self.t.search("my name is earl")
        all_ids = [x['seriesid'] for x in results]
        self.assertTrue('75397' in all_ids)


if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity = 2)
    unittest.main(testRunner = runner)
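As the test_custom_urlopener case above shows, anything built with urllib2.build_opener can be passed to Tvdb via the cache argument. A minimal sketch of wiring tvdb_cache's CacheHandler to an explicit cache directory (the directory path and one-hour max_age are illustrative, not part of the test suite):

import urllib2
import tvdb_api
from tvdb_cache import CacheHandler

# Cache responses under a custom directory, expiring entries after an hour
opener = urllib2.build_opener(CacheHandler("/tmp/my_tvdb_cache", max_age = 3600))
t = tvdb_api.Tvdb(cache = opener)
print t['scrubs']['seriesname']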

874  libs/tvdb_api/tvdb_api.py  (Normal file)
@@ -0,0 +1,874 @@
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)

"""Simple-to-use Python interface to The TVDB's API (thetvdb.com)

Example usage:

>>> from tvdb_api import Tvdb
>>> t = Tvdb()
>>> t['Lost'][4][11]['episodename']
u'Cabin Fever'
"""
__author__ = "dbr/Ben"
__version__ = "1.8.2"

import os
import time
import urllib
import urllib2
import getpass
import StringIO
import tempfile
import warnings
import logging
import datetime
import zipfile

try:
    import xml.etree.cElementTree as ElementTree
except ImportError:
    import xml.etree.ElementTree as ElementTree

try:
    import gzip
except ImportError:
    gzip = None


from tvdb_cache import CacheHandler

from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound,
    tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)

lastTimeout = None

def log():
    return logging.getLogger("tvdb_api")


class ShowContainer(dict):
    """Simple dict that holds a series of Show instances
    """

    def __init__(self):
        self._stack = []
        self._lastgc = time.time()

    def __setitem__(self, key, value):
        self._stack.append(key)

        # Keep only the 100 latest results, garbage-collecting at most
        # once every 20 seconds
        if time.time() - self._lastgc > 20:
            for old_key in self._stack[:-100]:
                del self[old_key]
            self._stack = self._stack[-100:]

            self._lastgc = time.time()

        super(ShowContainer, self).__setitem__(key, value)


class Show(dict):
    """Holds a dict of seasons, and show data.
    """
    def __init__(self):
        dict.__init__(self)
        self.data = {}

    def __repr__(self):
        return "<Show %s (containing %s seasons)>" % (
            self.data.get(u'seriesname', 'instance'),
            len(self)
        )

    def __getitem__(self, key):
        if key in self:
            # Key is an episode, return it
            return dict.__getitem__(self, key)

        if key in self.data:
            # Non-numeric request is for show-data
            return dict.__getitem__(self.data, key)

        # Data wasn't found, raise appropriate error
        if isinstance(key, int) or key.isdigit():
            # Season number x was not found
            raise tvdb_seasonnotfound("Could not find season %s" % (repr(key)))
        else:
            # If it's not numeric, it must be an attribute name, which
            # doesn't exist, so attribute error.
            raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))

    def airedOn(self, date):
        ret = self.search(str(date), 'firstaired')
        if len(ret) == 0:
            raise tvdb_episodenotfound("Could not find any episodes that aired on %s" % date)
        return ret

    def search(self, term = None, key = None):
        """
        Search all episodes in show. Can search all data, or a specific key (for
        example, episodename)

        Always returns an array (can be empty). First index contains the first
        match, and so on.

        Each array index is an Episode() instance, so doing
        search_results[0]['episodename'] will retrieve the episode name of the
        first match.

        Search terms are converted to lower case (unicode) strings.

        # Examples

        These examples assume t is an instance of Tvdb():

        >>> t = Tvdb()
        >>>

        To search for all episodes of Scrubs with a bit of data
        containing "my first day":

        >>> t['Scrubs'].search("my first day")
        [<Episode 01x01 - My First Day>]
        >>>

        Search for "My Name Is Earl" episode named "Faked His Own Death":

        >>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename')
        [<Episode 01x04 - Faked His Own Death>]
        >>>

        To search Scrubs for all episodes with "mentor" in the episode name:

        >>> t['scrubs'].search('mentor', key = 'episodename')
        [<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>]
        >>>

        # Using search results

        >>> results = t['Scrubs'].search("my first")
        >>> print results[0]['episodename']
        My First Day
        >>> for x in results: print x['episodename']
        My First Day
        My First Step
        My First Kill
        >>>
        """
        results = []
        for cur_season in self.values():
            searchresult = cur_season.search(term = term, key = key)
            if len(searchresult) != 0:
                results.extend(searchresult)

        return results


class Season(dict):
    def __init__(self, show = None):
        """The show attribute points to the parent show
        """
        self.show = show

    def __repr__(self):
        return "<Season instance (containing %s episodes)>" % (
            len(self.keys())
        )

    def __getitem__(self, episode_number):
        if episode_number not in self:
            raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number)))
        else:
            return dict.__getitem__(self, episode_number)

    def search(self, term = None, key = None):
        """Search all episodes in season, returns a list of matching Episode
        instances.

        >>> t = Tvdb()
        >>> t['scrubs'][1].search('first day')
        [<Episode 01x01 - My First Day>]
        >>>

        See Show.search documentation for further information on search
        """
        results = []
        for ep in self.values():
            searchresult = ep.search(term = term, key = key)
            if searchresult is not None:
                results.append(
                    searchresult
                )
        return results


class Episode(dict):
    def __init__(self, season = None):
        """The season attribute points to the parent season
        """
        self.season = season

    def __repr__(self):
        seasno = int(self.get(u'seasonnumber', 0))
        epno = int(self.get(u'episodenumber', 0))
        epname = self.get(u'episodename')
        if epname is not None:
            return "<Episode %02dx%02d - %s>" % (seasno, epno, epname)
        else:
            return "<Episode %02dx%02d>" % (seasno, epno)

    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))

    def search(self, term = None, key = None):
        """Search episode data for term, if it matches, return the Episode (self).
        The key parameter can be used to limit the search to a specific element,
        for example, episodename.

        This is primarily for use by Show.search and Season.search. See
        Show.search for further information on search

        Simple example:

        >>> e = Episode()
        >>> e['episodename'] = "An Example"
        >>> e.search("examp")
        <Episode 00x00 - An Example>
        >>>

        Limiting by key:

        >>> e.search("examp", key = "episodename")
        <Episode 00x00 - An Example>
        >>>
        """
        if term is None:
            raise TypeError("must supply string to search for (contents)")

        term = unicode(term).lower()
        for cur_key, cur_value in self.items():
            cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
            if key is not None and cur_key != key:
                # Do not search this key
                continue
            if cur_value.find(term) > -1:
                return self


class Actors(list):
    """Holds all Actor instances for a show
    """
    pass


class Actor(dict):
    """Represents a single actor. Should contain..

    id,
    image,
    name,
    role,
    sortorder
    """
    def __repr__(self):
        return "<Actor \"%s\">" % (self.get("name"))


class Tvdb:
    """Create easy-to-use interface to season/episode names
    >>> t = Tvdb()
    >>> t['Scrubs'][1][24]['episodename']
    u'My Last Day'
    """
    def __init__(self,
                interactive = False,
                select_first = False,
                debug = False,
                cache = True,
                banners = False,
                actors = False,
                custom_ui = None,
                language = None,
                search_all_languages = False,
                apikey = None,
                forceConnect=False,
                useZip=False):

        """interactive (True/False):
            When True, the built-in console UI is used to select the correct show.
            When False, the first search result is used.

        select_first (True/False):
            Automatically selects the first series search result (rather
            than showing the user a list of more than one series).
            Is overridden by interactive = False, or by specifying a custom_ui

        debug (True/False) DEPRECATED:
            Replaced with proper use of logging module. To show debug messages:

            >>> import logging
            >>> logging.basicConfig(level = logging.DEBUG)

        cache (True/False/str/unicode/urllib2 opener):
            Retrieved XML are persisted to disc. If True, stores in a
            tvdb_api folder under your system's TEMP_DIR; if set to a
            str/unicode instance it will use this as the cache
            location. If False, disables caching. Can also be passed
            an arbitrary Python object, which is used as a urllib2
            opener, which should be created by urllib2.build_opener

        banners (True/False):
            Retrieves the banners for a show. These are accessed
            via the _banners key of a Show(), for example:

            >>> Tvdb(banners=True)['scrubs']['_banners'].keys()
            ['fanart', 'poster', 'series', 'season']

        actors (True/False):
            Retrieves a list of the actors for a show. These are accessed
            via the _actors key of a Show(), for example:

            >>> t = Tvdb(actors=True)
            >>> t['scrubs']['_actors'][0]['name']
            u'Zach Braff'

        custom_ui (tvdb_ui.BaseUI subclass):
            A callable subclass of tvdb_ui.BaseUI (overrides interactive option)

        language (2 character language abbreviation):
            The language of the returned data. Is also the language search
            uses. Default is "en" (English). For full list, run..

            >>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
            ['da', 'fi', 'nl', ...]

        search_all_languages (True/False):
            By default, Tvdb will only search in the language specified using
            the language option. When this is True, it will search for the
            show in any language

        apikey (str/unicode):
            Override the default thetvdb.com API key. By default it will use
            tvdb_api's own key (fine for small scripts), but you can use your
            own key if desired - this is recommended if you are embedding
            tvdb_api in a larger application.
            See http://thetvdb.com/?tab=apiregister to get your own key

        forceConnect (bool):
            If True it will always try to connect to theTVDB.com, even if we
            recently timed out. By default it will wait one minute before
            trying again, and any requests within that one minute window will
            return an exception immediately.

        useZip (bool):
            Download the zip archive where possible, instead of the xml.
            This is only used when all episodes are pulled.
            Only the main language xml is used; the actor and banner xml are lost.
        """

        global lastTimeout

        # If the last timeout was less than a minute ago, give up early this time
        if not forceConnect and lastTimeout is not None and datetime.datetime.now() - lastTimeout < datetime.timedelta(minutes=1):
            raise tvdb_error("We recently timed out, so giving up early this time")

        self.shows = ShowContainer() # Holds all Show classes
        self.corrections = {} # Holds show-name to show_id mapping

        self.config = {}

        if apikey is not None:
            self.config['apikey'] = apikey
        else:
            self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key

        self.config['debug_enabled'] = debug # show debugging messages

        self.config['custom_ui'] = custom_ui

        self.config['interactive'] = interactive # prompt for correct series?

        self.config['select_first'] = select_first

        self.config['search_all_languages'] = search_all_languages

        self.config['useZip'] = useZip


        if cache is True:
            self.config['cache_enabled'] = True
            self.config['cache_location'] = self._getTempDir()
            self.urlopener = urllib2.build_opener(
                CacheHandler(self.config['cache_location'])
            )

        elif cache is False:
            self.config['cache_enabled'] = False
            self.urlopener = urllib2.build_opener() # default opener with no caching

        elif isinstance(cache, basestring):
            self.config['cache_enabled'] = True
            self.config['cache_location'] = cache
            self.urlopener = urllib2.build_opener(
                CacheHandler(self.config['cache_location'])
            )

        elif isinstance(cache, urllib2.OpenerDirector):
            # If passed something from urllib2.build_opener, use that
            log().debug("Using %r as urlopener" % cache)
            self.config['cache_enabled'] = True
            self.urlopener = cache

        else:
            raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))

        self.config['banners_enabled'] = banners
        self.config['actors_enabled'] = actors

        if self.config['debug_enabled']:
            warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
                "To enable debug messages, use the following code before importing: "
                "import logging; logging.basicConfig(level=logging.DEBUG)")
            logging.basicConfig(level=logging.DEBUG)


        # List of languages from http://thetvdb.com/api/0629B785CE550C8D/languages.xml
        # Hard-coded here as it is relatively static, and saves another HTTP request, as
        # recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
        self.config['valid_languages'] = [
            "da", "fi", "nl", "de", "it", "es", "fr", "pl", "hu", "el", "tr",
            "ru", "he", "ja", "pt", "zh", "cs", "sl", "hr", "ko", "en", "sv", "no"
        ]

        # thetvdb.com should be based around numeric language codes,
        # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
        # requires the language ID, thus this mapping is required (mainly
        # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
        self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
            'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
            'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
            'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}

        if language is None:
            self.config['language'] = 'en'
        else:
            if language not in self.config['valid_languages']:
                raise ValueError("Invalid language %s, options are: %s" % (
                    language, self.config['valid_languages']
                ))
            else:
                self.config['language'] = language

        # The following url_ configs are based on the
        # http://thetvdb.com/wiki/index.php/Programmers_API
        self.config['base_url'] = "http://thetvdb.com"

        if self.config['search_all_languages']:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=all" % self.config
        else:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=%(language)s" % self.config

        self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
        self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config

        self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
        self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config

        self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
        self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config

    def _getTempDir(self):
        """Returns the [system temp dir]/tvdb_api-u501 (or
        tvdb_api-myuser)
        """
        if hasattr(os, 'getuid'):
            uid = "u%d" % (os.getuid())
        else:
            # For Windows
            try:
                uid = getpass.getuser()
            except ImportError:
                return os.path.join(tempfile.gettempdir(), "tvdb_api")

        return os.path.join(tempfile.gettempdir(), "tvdb_api-%s" % (uid))

    def _loadUrl(self, url, recache = False, language=None):
        global lastTimeout
        try:
            log().debug("Retrieving URL %s" % url)
            resp = self.urlopener.open(url)
            if 'x-local-cache' in resp.headers:
                log().debug("URL %s was cached in %s" % (
                    url,
                    resp.headers['x-local-cache'])
                )
                if recache:
                    log().debug("Attempting to recache %s" % url)
                    resp.recache()
        except (IOError, urllib2.URLError), errormsg:
            if not str(errormsg).startswith('HTTP Error'):
                lastTimeout = datetime.datetime.now()
            raise tvdb_error("Could not connect to server: %s" % (errormsg))


        # handle gzipped content,
        # http://dbr.lighthouseapp.com/projects/13342/tickets/72-gzipped-data-patch
        if 'gzip' in resp.headers.get("Content-Encoding", ''):
            if gzip:
                stream = StringIO.StringIO(resp.read())
                gz = gzip.GzipFile(fileobj=stream)
                return gz.read()

            raise tvdb_error("Received gzip data from thetvdb.com, but could not correctly handle it")

        if 'application/zip' in resp.headers.get("Content-Type", ''):
            try:
                # TODO: The zip contains actors.xml and banners.xml, which are currently ignored [GH-20]
                log().debug("Received a zip file, unpacking now ...")
                zipdata = StringIO.StringIO()
                zipdata.write(resp.read())
                myzipfile = zipfile.ZipFile(zipdata)
                return myzipfile.read('%s.xml' % language)
            except zipfile.BadZipfile:
                if 'x-local-cache' in resp.headers:
                    resp.delete_cache()
                raise tvdb_error("Bad zip file received from thetvdb.com, could not read it")

        return resp.read()

    def _getetsrc(self, url, language=None):
        """Loads a URL using caching, returns an ElementTree of the source
        """
        src = self._loadUrl(url, language=language)
        try:
            # TVDB doesn't sanitize \r (CR) from user input in some fields,
            # remove it to avoid errors. Change from SickBeard, from will14m
            return ElementTree.fromstring(src.rstrip("\r"))
        except SyntaxError:
            src = self._loadUrl(url, recache=True, language=language)
            try:
                return ElementTree.fromstring(src.rstrip("\r"))
            except SyntaxError, exceptionmsg:
                errormsg = "There was an error with the XML retrieved from thetvdb.com:\n%s" % (
                    exceptionmsg
                )

                if self.config['cache_enabled']:
                    errormsg += "\nFirst try emptying the cache folder at..\n%s" % (
                        self.config['cache_location']
                    )

                errormsg += "\nIf this does not resolve the issue, please try again later. If the error persists, report a bug on"
                errormsg += "\nhttp://dbr.lighthouseapp.com/projects/13342-tvdb_api/overview\n"
                raise tvdb_error(errormsg)

    def _setItem(self, sid, seas, ep, attrib, value):
        """Creates a new episode, creating Show(), Season() and
        Episode()s as required. Called by _getShowData to populate show

        Since the nice-to-use tvdb[1][24]['name'] interface
        makes it impossible to do tvdb[1][24]['name'] = "name"
        and still be capable of checking if an episode exists
        so we can raise tvdb_shownotfound, we have a slightly
        less pretty method of setting items.. but since the API
        is supposed to be read-only, this is the best way to
        do it!
        The problem is that calling tvdb[1][24]['episodename'] = "name"
        calls __getitem__ on tvdb[1], so there is no way to check if
        tvdb.__dict__ should have a key "1" before we auto-create it
        """
        if sid not in self.shows:
            self.shows[sid] = Show()
        if seas not in self.shows[sid]:
            self.shows[sid][seas] = Season(show = self.shows[sid])
        if ep not in self.shows[sid][seas]:
            self.shows[sid][seas][ep] = Episode(season = self.shows[sid][seas])
        self.shows[sid][seas][ep][attrib] = value

    def _setShowData(self, sid, key, value):
        """Sets self.shows[sid] to a new Show instance, or sets the data
        """
        if sid not in self.shows:
            self.shows[sid] = Show()
        self.shows[sid].data[key] = value

    def _cleanData(self, data):
        """Cleans up strings returned by TheTVDB.com

        Issues corrected:
        - Replaces &amp; with &
        - Trailing whitespace
        """
        data = data.replace(u"&amp;", u"&")
        data = data.strip()
        return data

    def search(self, series):
        """This searches TheTVDB.com for the series name
        and returns the result list
        """
        series = urllib.quote(series.encode("utf-8"))
        log().debug("Searching for show %s" % series)
        seriesEt = self._getetsrc(self.config['url_getSeries'] % (series))
        allSeries = []
        for series in seriesEt:
            result = dict((k.tag.lower(), k.text) for k in series.getchildren())
            result['id'] = int(result['id'])
            result['lid'] = self.config['langabbv_to_id'][result['language']]
            log().debug('Found series %(seriesname)s' % result)
            allSeries.append(result)

        return allSeries

    def _getSeries(self, series):
        """This searches TheTVDB.com for the series name.
        If a custom_ui UI is configured, it uses this to select the correct
        series. If not, and interactive == True, ConsoleUI is used; otherwise
        BaseUI is used to select the first result.
        """
        allSeries = self.search(series)

        if len(allSeries) == 0:
            log().debug('Series result returned zero')
            raise tvdb_shownotfound("Show-name search returned zero results (cannot find show on TVDB)")

        if self.config['custom_ui'] is not None:
            log().debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
            ui = self.config['custom_ui'](config = self.config)
        else:
            if not self.config['interactive']:
                log().debug('Auto-selecting first search result using BaseUI')
                ui = BaseUI(config = self.config)
            else:
                log().debug('Interactively selecting show using ConsoleUI')
                ui = ConsoleUI(config = self.config)

        return ui.selectSeries(allSeries)

    def _parseBanners(self, sid):
        """Parses banners XML, from
        http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml

        Banners are retrieved using t['show name']['_banners'], for example:

        >>> t = Tvdb(banners = True)
        >>> t['scrubs']['_banners'].keys()
        ['fanart', 'poster', 'series', 'season']
        >>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath']
        u'http://thetvdb.com/banners/posters/76156-2.jpg'
        >>>

        Any key starting with an underscore has been processed (not the raw
        data from the XML)

        This interface will be improved in future versions.
        """
        log().debug('Getting season banners for %s' % (sid))
        bannersEt = self._getetsrc( self.config['url_seriesBanner'] % (sid) )
        banners = {}
        for cur_banner in bannersEt.findall('Banner'):
            bid = cur_banner.find('id').text
            btype = cur_banner.find('BannerType')
            btype2 = cur_banner.find('BannerType2')
            if btype is None or btype2 is None:
                continue
            btype, btype2 = btype.text, btype2.text
            if btype not in banners:
                banners[btype] = {}
            if btype2 not in banners[btype]:
                banners[btype][btype2] = {}
            if bid not in banners[btype][btype2]:
                banners[btype][btype2][bid] = {}

            for cur_element in cur_banner.getchildren():
                tag = cur_element.tag.lower()
                value = cur_element.text
                if tag is None or value is None:
                    continue
                tag, value = tag.lower(), value.lower()
                banners[btype][btype2][bid][tag] = value

            for k, v in banners[btype][btype2][bid].items():
                if k.endswith("path"):
                    new_key = "_%s" % (k)
                    log().debug("Transforming %s to %s" % (k, new_key))
                    new_url = self.config['url_artworkPrefix'] % (v)
                    banners[btype][btype2][bid][new_key] = new_url

        self._setShowData(sid, "_banners", banners)

    def _parseActors(self, sid):
        """Parses actors XML, from
        http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/actors.xml

        Actors are retrieved using t['show name']['_actors'], for example:

        >>> t = Tvdb(actors = True)
        >>> actors = t['scrubs']['_actors']
        >>> type(actors)
        <class 'tvdb_api.Actors'>
        >>> type(actors[0])
        <class 'tvdb_api.Actor'>
        >>> actors[0]
        <Actor "Zach Braff">
        >>> sorted(actors[0].keys())
        ['id', 'image', 'name', 'role', 'sortorder']
        >>> actors[0]['name']
        u'Zach Braff'
        >>> actors[0]['image']
        u'http://thetvdb.com/banners/actors/43640.jpg'

        Any key starting with an underscore has been processed (not the raw
        data from the XML)
        """
        log().debug("Getting actors for %s" % (sid))
        actorsEt = self._getetsrc(self.config['url_actorsInfo'] % (sid))

        cur_actors = Actors()
        for curActorItem in actorsEt.findall("Actor"):
            curActor = Actor()
            for curInfo in curActorItem:
                tag = curInfo.tag.lower()
                value = curInfo.text
                if value is not None:
                    if tag == "image":
                        value = self.config['url_artworkPrefix'] % (value)
                    else:
                        value = self._cleanData(value)
                curActor[tag] = value
            cur_actors.append(curActor)
        self._setShowData(sid, '_actors', cur_actors)

    def _getShowData(self, sid, language):
        """Takes a series ID, gets the epInfo URL and parses the TVDB
        XML file into the shows dict in layout:
        shows[series_id][season_number][episode_number]
        """

        if self.config['language'] is None:
            log().debug('Config language is none, using show language')
            if language is None:
                raise tvdb_error("config['language'] was None, this should not happen")
            getShowInLanguage = language
        else:
            log().debug(
                'Configured language %s overrides show language of %s' % (
                    self.config['language'],
                    language
                )
            )
            getShowInLanguage = self.config['language']

        # Parse show information
        log().debug('Getting all series data for %s' % (sid))
        seriesInfoEt = self._getetsrc(
            self.config['url_seriesInfo'] % (sid, getShowInLanguage)
        )
        for curInfo in seriesInfoEt.findall("Series")[0]:
            tag = curInfo.tag.lower()
            value = curInfo.text

            if value is not None:
                if tag in ['banner', 'fanart', 'poster']:
                    value = self.config['url_artworkPrefix'] % (value)
                else:
                    value = self._cleanData(value)

            self._setShowData(sid, tag, value)

        # Parse banners
        if self.config['banners_enabled']:
            self._parseBanners(sid)

        # Parse actors
        if self.config['actors_enabled']:
            self._parseActors(sid)

        # Parse episode data
        log().debug('Getting all episodes of %s' % (sid))

        if self.config['useZip']:
            url = self.config['url_epInfo_zip'] % (sid, language)
        else:
            url = self.config['url_epInfo'] % (sid, language)

        epsEt = self._getetsrc( url, language=language)

        for cur_ep in epsEt.findall("Episode"):
            seas_no = int(cur_ep.find('SeasonNumber').text)
            ep_no = int(cur_ep.find('EpisodeNumber').text)
            for cur_item in cur_ep.getchildren():
                tag = cur_item.tag.lower()
                value = cur_item.text
                if value is not None:
                    if tag == 'filename':
                        value = self.config['url_artworkPrefix'] % (value)
                    else:
                        value = self._cleanData(value)
                self._setItem(sid, seas_no, ep_no, tag, value)

    def _nameToSid(self, name):
        """Takes show name, returns the correct series ID (if the show has
        already been grabbed), or grabs all episodes and returns
        the correct SID.
        """
        if name in self.corrections:
            log().debug('Correcting %s to %s' % (name, self.corrections[name]) )
            sid = self.corrections[name]
        else:
            log().debug('Getting show %s' % (name))
            selected_series = self._getSeries( name )
            sname, sid = selected_series['seriesname'], selected_series['id']
            log().debug('Got %(seriesname)s, id %(id)s' % selected_series)

            self.corrections[name] = sid
            self._getShowData(selected_series['id'], selected_series['language'])

        return sid

    def __getitem__(self, key):
        """Handles tvdb_instance['seriesname'] calls.
        The dict index should be the show id
        """
        if isinstance(key, (int, long)):
            # Item is integer, treat as show id
            if key not in self.shows:
                self._getShowData(key, self.config['language'])
            return self.shows[key]

        key = key.lower() # make key lower case
        sid = self._nameToSid(key)
        log().debug('Got series id %s' % (sid))
        return self.shows[sid]

    def __repr__(self):
        return str(self.shows)


def main():
    """Simple example of using tvdb_api - it just
    grabs an episode name interactively.
    """
    import logging
    logging.basicConfig(level=logging.DEBUG)

    tvdb_instance = Tvdb(interactive=True, cache=False)
    print tvdb_instance['Lost']['seriesname']
    print tvdb_instance['Lost'][1][4]['episodename']

if __name__ == '__main__':
    main()
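Since Tvdb.__getitem__ treats integer keys as series ids directly, a show can be fetched without a name search at all; a small sketch using the Scrubs id that also appears in the test suite:

import tvdb_api

t = tvdb_api.Tvdb()
# 76156 is thetvdb.com's series id for Scrubs
print t[76156]['seriesname']    # prints: Scrubs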

251  libs/tvdb_api/tvdb_cache.py  (Normal file)
@@ -0,0 +1,251 @@
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)

"""
urllib2 caching handler
Modified from http://code.activestate.com/recipes/491261/
"""
from __future__ import with_statement

__author__ = "dbr/Ben"
__version__ = "1.8.2"

import os
import time
import errno
import httplib
import urllib2
import StringIO
from hashlib import md5
from threading import RLock

cache_lock = RLock()

def locked_function(origfunc):
    """Decorator to execute function under lock"""
    def wrapped(*args, **kwargs):
        cache_lock.acquire()
        try:
            return origfunc(*args, **kwargs)
        finally:
            cache_lock.release()
    return wrapped

def calculate_cache_path(cache_location, url):
    """Builds the [cache_location]/[hash_of_url].headers and .body paths
    """
    thumb = md5(url).hexdigest()
    header = os.path.join(cache_location, thumb + ".headers")
    body = os.path.join(cache_location, thumb + ".body")
    return header, body

def check_cache_time(path, max_age):
    """Checks if a file has been created/modified in the [last max_age] seconds.
    False means the file is too old (or doesn't exist), True means it is
    up-to-date and valid"""
    if not os.path.isfile(path):
        return False
    cache_modified_time = os.stat(path).st_mtime
    time_now = time.time()
    if cache_modified_time < time_now - max_age:
        # Cache is old
        return False
    else:
        return True

@locked_function
def exists_in_cache(cache_location, url, max_age):
    """Returns if header AND body cache file exist (and are up-to-date)"""
    hpath, bpath = calculate_cache_path(cache_location, url)
    if os.path.exists(hpath) and os.path.exists(bpath):
        return(
            check_cache_time(hpath, max_age)
            and check_cache_time(bpath, max_age)
        )
    else:
        # File does not exist
        return False

@locked_function
def store_in_cache(cache_location, url, response):
    """Tries to store response in cache."""
    hpath, bpath = calculate_cache_path(cache_location, url)
    try:
        outf = open(hpath, "wb")
        headers = str(response.info())
        outf.write(headers)
        outf.close()

        outf = open(bpath, "wb")
        outf.write(response.read())
        outf.close()
    except IOError:
        return True
    else:
        return False

@locked_function
def delete_from_cache(cache_location, url):
    """Deletes a response in cache."""
    hpath, bpath = calculate_cache_path(cache_location, url)
    try:
        if os.path.exists(hpath):
            os.remove(hpath)
        if os.path.exists(bpath):
            os.remove(bpath)
    except IOError:
        return True
    else:
        return False

class CacheHandler(urllib2.BaseHandler):
    """Stores responses in a persistent on-disk cache.

    If a subsequent GET request is made for the same URL, the stored
    response is returned, saving time, resources and bandwidth
    """
    @locked_function
    def __init__(self, cache_location, max_age = 21600):
        """The location of the cache directory"""
        self.max_age = max_age
        self.cache_location = cache_location
        if not os.path.exists(self.cache_location):
            try:
                os.mkdir(self.cache_location)
            except OSError, e:
                if e.errno == errno.EEXIST and os.path.isdir(self.cache_location):
                    # File exists, and it's a directory,
                    # another process beat us to creating this dir, that's OK.
                    pass
                else:
                    # Our target dir is already a file, or different error,
                    # relay the error!
                    raise

    def default_open(self, request):
        """Handles GET requests, if the response is cached it returns it
        """
        if request.get_method() != "GET":
            return None # let the next handler try to handle the request

        if exists_in_cache(
            self.cache_location, request.get_full_url(), self.max_age
        ):
            return CachedResponse(
                self.cache_location,
                request.get_full_url(),
                set_cache_header = True
            )
        else:
            return None

    def http_response(self, request, response):
        """Gets a HTTP response, if it was a GET request and the status code
        starts with 2 (200 OK etc) it caches it and returns a CachedResponse
        """
        if (request.get_method() == "GET"
            and str(response.code).startswith("2")
        ):
            if 'x-local-cache' not in response.info():
                # Response is not cached
                set_cache_header = store_in_cache(
                    self.cache_location,
                    request.get_full_url(),
                    response
                )
            else:
                set_cache_header = True

            return CachedResponse(
                self.cache_location,
                request.get_full_url(),
                set_cache_header = set_cache_header
            )
        else:
            return response

class CachedResponse(StringIO.StringIO):
    """An urllib2.response-like object for cached responses.

    To determine if a response is cached or coming directly from
    the network, check the x-local-cache header rather than the object type.
    """

    @locked_function
    def __init__(self, cache_location, url, set_cache_header=True):
        self.cache_location = cache_location
        hpath, bpath = calculate_cache_path(cache_location, url)

        StringIO.StringIO.__init__(self, file(bpath, "rb").read())

        self.url = url
        self.code = 200
        self.msg = "OK"
        headerbuf = file(hpath, "rb").read()
        if set_cache_header:
            headerbuf += "x-local-cache: %s\r\n" % (bpath)
        self.headers = httplib.HTTPMessage(StringIO.StringIO(headerbuf))

    def info(self):
        """Returns headers
        """
        return self.headers

    def geturl(self):
        """Returns original URL
        """
        return self.url

    @locked_function
    def recache(self):
        new_request = urllib2.urlopen(self.url)
        set_cache_header = store_in_cache(
            self.cache_location,
            new_request.url,
            new_request
        )
        CachedResponse.__init__(self, self.cache_location, self.url, True)

    @locked_function
    def delete_cache(self):
        delete_from_cache(
            self.cache_location,
            self.url
        )


if __name__ == "__main__":
    def main():
        """Quick test/example of CacheHandler"""
        opener = urllib2.build_opener(CacheHandler("/tmp/"))
        response = opener.open("http://google.com")
        print response.headers
        print "Response:", response.read()

        response.recache()
        print response.headers
        print "After recache:", response.read()

        # Test usage in threads
        from threading import Thread
        class CacheThreadTest(Thread):
            lastdata = None
            def run(self):
                req = opener.open("http://google.com")
                newdata = req.read()
                if self.lastdata is None:
                    self.lastdata = newdata
                assert self.lastdata == newdata, "Data was not consistent, uhoh"
                req.recache()
        threads = [CacheThreadTest() for x in range(50)]
        print "Starting threads"
        [t.start() for t in threads]
        print "..done"
        print "Joining threads"
        [t.join() for t in threads]
        print "..done"
    main()

52  libs/tvdb_api/tvdb_exceptions.py  (Normal file)
@@ -0,0 +1,52 @@
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)

"""Custom exceptions used or raised by tvdb_api
"""

__author__ = "dbr/Ben"
__version__ = "1.8.2"

__all__ = ["tvdb_error", "tvdb_userabort", "tvdb_shownotfound",
    "tvdb_seasonnotfound", "tvdb_episodenotfound", "tvdb_attributenotfound"]

class tvdb_exception(Exception):
    """Any exception generated by tvdb_api
    """
    pass

class tvdb_error(tvdb_exception):
    """An error with thetvdb.com (Cannot connect, for example)
    """
    pass

class tvdb_userabort(tvdb_exception):
    """User aborted the interactive selection (via
    the q command, ^c etc)
    """
    pass

class tvdb_shownotfound(tvdb_exception):
    """Show cannot be found on thetvdb.com (non-existent show)
    """
    pass

class tvdb_seasonnotfound(tvdb_exception):
    """Season cannot be found on thetvdb.com
    """
    pass

class tvdb_episodenotfound(tvdb_exception):
    """Episode cannot be found on thetvdb.com
    """
    pass

class tvdb_attributenotfound(tvdb_exception):
    """Raised if an episode does not have the requested
    attribute (such as an episode name)
    """
    pass
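All of these classes derive from tvdb_exception, so callers can catch one specific failure or the whole family; a minimal sketch (the show name is the same deliberately bogus one used in the test suite):

import tvdb_api
from tvdb_exceptions import tvdb_exception, tvdb_shownotfound

t = tvdb_api.Tvdb()
try:
    t['the fake show thingy']
except tvdb_shownotfound:
    print "no such show on thetvdb.com"
except tvdb_exception, errormsg:
    print "some other tvdb_api problem: %s" % errormsg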

Some files were not shown because too many files have changed in this diff.