Merge branch 'refs/heads/develop'
@@ -34,6 +34,8 @@ class ClientScript(Plugin):
'scripts/library/question.js',
'scripts/library/scrollspy.js',
'scripts/library/spin.js',
'scripts/library/Array.stableSort.js',
'scripts/library/async.js',
'scripts/couchpotato.js',
'scripts/api.js',
'scripts/library/history.js',
@@ -31,8 +31,8 @@ class Scheduler(Plugin):
pass

def doShutdown(self):
super(Scheduler, self).doShutdown()
self.stop()
return super(Scheduler, self).doShutdown()

def stop(self):
if self.started:
@@ -24,7 +24,7 @@ var UpdaterBase = new Class({
self.doUpdate();
else {
App.unBlockPage();
App.fireEvent('message', 'No updates available');
App.on('message', 'No updates available');
}
}
})
@@ -58,14 +58,6 @@ config = [{
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'append_label',
'label': 'Append Label',
'default': False,
'advanced': True,
'type': 'bool',
'description': 'Append label to download location. Requires you to set the download location above.',
},
{
'name': 'paused',
'type': 'bool',
@@ -125,9 +125,7 @@ class rTorrent(Downloader):
if self.conf('label'):
torrent.set_custom(1, self.conf('label'))

if self.conf('directory') and self.conf('append_label'):
torrent.set_directory(os.path.join(self.conf('directory'), self.conf('label')))
elif self.conf('directory'):
if self.conf('directory'):
torrent.set_directory(self.conf('directory'))

# Set Ratio Group
@@ -77,6 +77,7 @@ class uTorrent(Downloader):
else:
info = bdecode(filedata)["info"]
torrent_hash = sha1(benc(info)).hexdigest().upper()

torrent_filename = self.createFileName(data, filedata, movie)

if data.get('seed_ratio'):
@@ -93,9 +94,9 @@ class uTorrent(Downloader):

# Send request to uTorrent
if data.get('protocol') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url'))
self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url'), directory)
else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata)
self.utorrent_api.add_torrent_file(torrent_filename, filedata, directory)

# Change settings of added torrent
self.utorrent_api.set_torrent(torrent_hash, torrent_params)
@@ -38,7 +38,7 @@ class MediaBase(Plugin):
def notifyFront():
db = get_session()
media = db.query(Media).filter_by(id = media_id).first()
fireEvent('notify.frontend', type = '%s.update.%s' % (media.type, media.id), data = media.to_dict(self.default_dict))
fireEvent('notify.frontend', type = '%s.update' % media.type, data = media.to_dict(self.default_dict))
db.expire_all()

return notifyFront
@@ -34,7 +34,7 @@ class MediaPlugin(MediaBase):
for title in media.library.titles:
if title.default: default_title = title.title

fireEvent('notify.frontend', type = '%s.busy.%s' % (media.type, x), data = True)
fireEvent('notify.frontend', type = '%s.busy' % media.type, data = {'id': x})
fireEventAsync('library.update.%s' % media.type, identifier = media.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))

db.expire_all()
@@ -52,8 +52,8 @@ var MovieList = new Class({

self.getMovies();

App.addEvent('movie.added', self.movieAdded.bind(self))
App.addEvent('movie.deleted', self.movieDeleted.bind(self))
App.on('movie.added', self.movieAdded.bind(self))
App.on('movie.deleted', self.movieDeleted.bind(self))
},

movieDeleted: function(notification){
@@ -65,6 +65,7 @@ var MovieList = new Class({
movie.destroy();
delete self.movies_added[notification.data.id];
self.setCounter(self.counter_count-1);
self.total_movies--;
}
})
}
@@ -75,6 +76,7 @@ var MovieList = new Class({
movieAdded: function(notification){
var self = this;

self.fireEvent('movieAdded', notification);
if(self.options.add_new && !self.movies_added[notification.data.id] && notification.data.status.identifier == self.options.status){
window.scroll(0,0);
self.createMovie(notification.data, 'top');
@@ -390,6 +392,7 @@ var MovieList = new Class({
self.movies.erase(movie);
movie.destroy();
self.setCounter(self.counter_count-1);
self.total_movies--;
});

self.calculateSelected();
@@ -126,7 +126,9 @@ MA.Release = new Class({
else
self.showHelper();

App.addEvent('movie.searcher.ended.'+self.movie.data.id, function(notification){
App.on('movie.searcher.ended', function(notification){
if(self.movie.data.id != notification.data.id) return;

self.releases = null;
if(self.options_container){
self.options_container.destroy();
@@ -250,12 +252,14 @@ MA.Release = new Class({
else if(!self.next_release && status.identifier == 'available'){
self.next_release = release;
}

var update_handle = function(notification) {
var q = self.movie.quality.getElement('.q_id' + release.quality_id),
if(notification.data.id != release.id) return;

var q = self.movie.quality.getElement('.q_id' + release.quality_id),
status = Status.get(release.status_id),
new_status = Status.get(notification.data);

new_status = Status.get(notification.data.status_id);

release.status_id = new_status.id
release.el.set('class', 'item ' + new_status.identifier);

@@ -272,7 +276,7 @@ MA.Release = new Class({
}
}

App.addEvent('release.update_status.' + release.id, update_handle);
App.on('release.update_status', update_handle);

});

@@ -285,7 +289,7 @@ MA.Release = new Class({
if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){

self.trynext_container = new Element('div.buttons.try_container').inject(self.release_container, 'top');

var nr = self.next_release,
lr = self.last_release;
@@ -1036,7 +1036,7 @@
text-overflow: ellipsis;
overflow: hidden;
width: 85%;
direction: rtl;
direction: ltr;
vertical-align: middle;
}
@@ -23,23 +23,49 @@ var Movie = new Class({
addEvents: function(){
var self = this;

App.addEvent('movie.update.'+self.data.id, function(notification){
self.global_events = {}

// Do refresh with new data
self.global_events['movie.update'] = function(notification){
if(self.data.id != notification.data.id) return;

self.busy(false);
self.removeView();
self.update.delay(2000, self, notification);
});
}
App.on('movie.update', self.global_events['movie.update']);

// Add spinner on load / search
['movie.busy', 'movie.searcher.started'].each(function(listener){
App.addEvent(listener+'.'+self.data.id, function(notification){
if(notification.data)
self.global_events[listener] = function(notification){
if(notification.data && self.data.id == notification.data.id)
self.busy(true)
});
}
App.on(listener, self.global_events[listener]);
})

App.addEvent('movie.searcher.ended.'+self.data.id, function(notification){
if(notification.data)
// Remove spinner
self.global_events['movie.searcher.ended'] = function(notification){
if(notification.data && self.data.id == notification.data.id)
self.busy(false)
});
}
App.on('movie.searcher.ended', self.global_events['movie.searcher.ended']);

// Reload when releases have updated
self.global_events['release.update_status'] = function(notification){
var data = notification.data
if(data && self.data.id == data.movie_id){

if(!self.data.releases)
self.data.releases = [];

self.data.releases.push({'quality_id': data.quality_id, 'status_id': data.status_id});
self.updateReleases();
}
}

App.on('release.update_status', self.global_events['release.update_status']);

},

destroy: function(){
@@ -52,9 +78,8 @@ var Movie = new Class({
self.list.checkIfEmpty();

// Remove events
App.removeEvents('movie.update.'+self.data.id);
['movie.busy', 'movie.searcher.started'].each(function(listener){
App.removeEvents(listener+'.'+self.data.id);
self.global_events.each(function(handle, listener){
App.off(listener, handle);
})
},

@@ -179,21 +204,7 @@ var Movie = new Class({
});

// Add releases
if(self.data.releases)
self.data.releases.each(function(release){

var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);

if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)

if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}

});
self.updateReleases();

Object.each(self.options.actions, function(action, key){
self.action[key.toLowerCase()] = action = new self.options.actions[key](self)
@@ -203,6 +214,26 @@ var Movie = new Class({

},

updateReleases: function(){
var self = this;
if(!self.data.releases || self.data.releases.length == 0) return;

self.data.releases.each(function(release){

var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);

if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)

if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}

});
},

addQuality: function(quality_id){
var self = this;
@@ -148,7 +148,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
fireEvent('movie.delete', movie['id'], single = True)
return

fireEvent('notify.frontend', type = 'movie.searcher.started.%s' % movie['id'], data = True, message = 'Searching for "%s"' % default_title)
fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'id': movie['id']}, message = 'Searching for "%s"' % default_title)

ret = False
@@ -202,7 +202,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
if len(too_early_to_search) > 0:
log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))

fireEvent('notify.frontend', type = 'movie.searcher.ended.%s' % movie['id'], data = True)
fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'id': movie['id']})

return ret
@@ -17,7 +17,7 @@ class Notification(Provider):
listen_to = [
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message',
'core.message.important',
]
dont_listen_to = []
@@ -21,6 +21,12 @@ class CoreNotifier(Notification):

m_lock = None

listen_to = [
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message', 'core.message.important',
]

def __init__(self):
super(CoreNotifier, self).__init__()

@@ -121,7 +127,10 @@ class CoreNotifier(Notification):

for message in messages:
if message.get('time') > last_check:
fireEvent('core.message', message = message.get('message'), data = message)
message['sticky'] = True # Always sticky core messages

message_type = 'core.message.important' if message.get('important') else 'core.message'
fireEvent(message_type, message = message.get('message'), data = message)

if last_check < message.get('time'):
last_check = message.get('time')
@@ -10,8 +10,8 @@ var NotificationBase = new Class({
// Listener
App.addEvent('unload', self.stopPoll.bind(self));
App.addEvent('reload', self.startInterval.bind(self, [true]));
App.addEvent('notification', self.notify.bind(self));
App.addEvent('message', self.showMessage.bind(self));
App.on('notification', self.notify.bind(self));
App.on('message', self.showMessage.bind(self));

// Add test buttons to settings page
App.addEvent('load', self.addTestButtons.bind(self));
@@ -50,9 +50,9 @@ var NotificationBase = new Class({
, 'top');
self.notifications.include(result);

if(result.data.important !== undefined && !result.read){
if((result.data.important !== undefined || result.data.sticky !== undefined) && !result.read){
var sticky = true
App.fireEvent('message', [result.message, sticky, result])
App.trigger('message', [result.message, sticky, result])
}
else if(!result.read){
self.setBadge(self.notifications.filter(function(n){ return !n.read}).length)
@@ -147,7 +147,7 @@ var NotificationBase = new Class({
// Process data
if(json){
Array.each(json.result, function(result){
App.fireEvent(result.type, result);
App.trigger(result.type, result);
if(result.message && result.read === undefined)
self.showMessage(result.message);
})
@@ -4,6 +4,7 @@ from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from couchpotato.environment import Env
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid
import smtplib
import traceback

@@ -30,6 +31,8 @@ class Email(Notification):
message['Subject'] = self.default_title
message['From'] = from_address
message['To'] = to_address
message['Date'] = formatdate(localtime = 1)
message['Message-ID'] = make_msgid()

try:
# Open the SMTP connection, via SSL if requested
couchpotato/core/notifications/pushbullet/__init__.py (new file, 39 lines)
@@ -0,0 +1,39 @@
from .main import Pushbullet

def start():
return Pushbullet()

config = [{
'name': 'pushbullet',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'pushbullet',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'api_key',
'label': 'User API Key'
},
{
'name': 'devices',
'default': '',
'advanced': True,
'description': 'IDs of devices to send notifications to, empty = all devices'
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
couchpotato/core/notifications/pushbullet/main.py (new file, 86 lines)
@@ -0,0 +1,86 @@
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import base64
import json

log = CPLog(__name__)


class Pushbullet(Notification):

url = 'https://api.pushbullet.com/api/%s'

def notify(self, message = '', data = None, listener = None):
if not data: data = {}

devices = self.getDevices()
if devices is None:
return False

# Get all the device IDs linked to this user
if not len(devices):
response = self.request('devices')
if not response:
return False

devices += [device.get('id') for device in response['devices']]

successful = 0
for device in devices:
response = self.request(
'pushes',
cache = False,
device_id = device,
type = 'note',
title = self.default_title,
body = toUnicode(message)
)

if response:
successful += 1
else:
log.error('Unable to push notification to Pushbullet device with ID %s' % device)

return successful == len(devices)

def getDevices(self):
devices = [d.strip() for d in self.conf('devices').split(',')]

# Remove empty items
devices = [d for d in devices if len(d)]

# Break on any ids that aren't integers
valid_devices = []

for device_id in devices:
d = tryInt(device_id, None)

if not d:
log.error('Device ID "%s" is not valid', device_id)
return None

valid_devices.append(d)

return valid_devices

def request(self, method, cache = True, **kwargs):
try:
base64string = base64.encodestring('%s:' % self.conf('api_key'))[:-1]

headers = {
"Authorization": "Basic %s" % base64string
}

if cache:
return self.getJsonData(self.url % method, headers = headers, params = kwargs)
else:
data = self.urlopen(self.url % method, headers = headers, params = kwargs)
return json.loads(data)

except Exception, ex:
log.error('Pushbullet request failed')
log.debug(ex)

return None
@@ -79,6 +79,7 @@ class Manage(Plugin):
try:

directories = self.directories()
directories.sort()
added_identifiers = []

# Add some progress
@@ -2,7 +2,7 @@ from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode, ss
from couchpotato.core.helpers.variable import mergeDicts, md5, getExt
from couchpotato.core.helpers.variable import mergeDicts, getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Quality, Profile, ProfileType
@@ -19,14 +19,14 @@ class QualityPlugin(Plugin):
{'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]},
{'identifier': '1080p', 'hd': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts', 'x264', 'h264']},
{'identifier': '720p', 'hd': True, 'size': (3000, 10000), 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':['avi'], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':[], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': ['br2dvd'], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r')]},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':['avi', 'mpg', 'mpeg'], 'tags': ['webrip', ('web', 'rip')]},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']}
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [], 'allow': [], 'ext':[], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': ['webrip', ('web', 'rip')]},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':[]},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':[]},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':[]},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':[]}
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']

@@ -50,6 +50,8 @@ class QualityPlugin(Plugin):

addEvent('app.initialize', self.fill, priority = 10)

addEvent('app.test', self.doTest)

def preReleases(self):
return self.pre_releases
@@ -165,9 +167,10 @@ class QualityPlugin(Plugin):
if not extra: extra = {}

# Create hash for cache
cache_key = md5(str([f.replace('.' + getExt(f), '') for f in files]))
cache_key = str([f.replace('.' + getExt(f), '') if len(getExt(f)) < 4 else f for f in files])
cached = self.getCache(cache_key)
if cached and len(extra) == 0: return cached
if cached and len(extra) == 0:
return cached

qualities = self.all()

@@ -228,11 +231,6 @@ class QualityPlugin(Plugin):
if len(set(words) & set(alt)) == len(alt):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type)
elif len(set(words) & set(alt)) > 0:
partial = list(set(words) & set(alt))[0]
if len(partial) > 2:
log.debug('Found %s via partial %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type) / 3

if (isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower()):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
@@ -285,3 +283,33 @@ class QualityPlugin(Plugin):
if add_score != 0:
for allow in quality.get('allow', []):
score[allow] -= 40 if self.cached_order[allow] < self.cached_order[quality['identifier']] else 5

def doTest(self):

tests = {
'Movie Name (1999)-DVD-Rip.avi': 'dvdrip',
'Movie Name 1999 720p Bluray.mkv': '720p',
'Movie Name 1999 BR-Rip 720p.avi': 'brrip',
'Movie Name 1999 720p Web Rip.avi': 'scr',
'Movie Name 1999 Web DL.avi': 'brrip',
'Movie.Name.1999.1080p.WEBRip.H264-Group': 'scr',
'Movie.Name.1999.DVDRip-Group': 'dvdrip',
'Movie.Name.1999.DVD-Rip-Group': 'dvdrip',
'Movie.Name.1999.DVD-R-Group': 'dvdr',
}

correct = 0
for name in tests:
success = self.guess([name]).get('identifier') == tests[name]
if not success:
log.error('%s failed check, thinks it\'s %s', (name, self.guess([name]).get('identifier')))

correct += success

if correct == len(tests):
log.info('Quality test successful')
return True
else:
log.error('Quality test failed: %s out of %s succeeded', (correct, len(tests)))
@@ -446,6 +446,6 @@ class Release(Plugin):
db.commit()

#Update all movie info as there is no release update function
fireEvent('notify.frontend', type = 'release.update_status.%s' % rel.id, data = status.get('id'))
fireEvent('notify.frontend', type = 'release.update_status', data = rel.to_dict())

return True
couchpotato/core/providers/torrent/torrentpotato/__init__.py (new file, 66 lines)
@@ -0,0 +1,66 @@
from .main import TorrentPotato

def start():
return TorrentPotato()

config = [{
'name': 'torrentpotato',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentPotato',
'order': 10,
'description': 'CouchPotato torrent provider. Checkout <a href="https://github.com/RuudBurger/CouchPotatoServer/wiki/CouchPotato-Torrent-Provider">the wiki page about this provider</a> for more info.',
'wizard': True,
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'use',
'default': ''
},
{
'name': 'host',
'default': '',
'description': 'The url path of your TorrentPotato provider.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'default': '0',
'description': 'Starting score for each release found via this provider.',
},
{
'name': 'name',
'label': 'Username',
'default': '',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'default': '1',
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'default': '40',
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'pass_key',
'default': ',',
'label': 'Pass Key',
'description': 'Can be found on your profile page',
'type': 'combined',
'combine': ['use', 'host', 'pass_key', 'name', 'seed_ratio', 'seed_time', 'extra_score'],
},
],
},
],
}]
couchpotato/core/providers/torrent/torrentpotato/main.py (new file, 129 lines)
@@ -0,0 +1,129 @@
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import splitString, tryInt, tryFloat
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import ResultList
from couchpotato.core.providers.torrent.base import TorrentProvider
from urlparse import urlparse
import re
import traceback

log = CPLog(__name__)


class TorrentPotato(TorrentProvider):

urls = {}
limits_reached = {}

http_time_between_calls = 1 # Seconds

def search(self, movie, quality):
hosts = self.getHosts()

results = ResultList(self, movie, quality, imdb_results = True)

for host in hosts:
if self.isDisabled(host):
continue

self._searchOnHost(host, movie, quality, results)

return results

def _searchOnHost(self, host, movie, quality, results):

arguments = tryUrlencode({
'user': host['name'],
'passkey': host['pass_key'],
'imdbid': movie['library']['identifier']
})
url = '%s?%s' % (host['host'], arguments)

torrents = self.getJsonData(url, cache_timeout = 1800)

if torrents:
try:
if torrents.get('error'):
log.error('%s: %s', (torrents.get('error'), host['host']))
elif torrents.get('results'):
for torrent in torrents.get('results', []):
results.append({
'id': torrent.get('torrent_id'),
'protocol': 'torrent' if re.match('^(http|https|ftp)://.*$', torrent.get('download_url')) else 'torrent_magnet',
'provider_extra': urlparse(host['host']).hostname or host['host'],
'name': toUnicode(torrent.get('release_name')),
'url': torrent.get('download_url'),
'detail_url': torrent.get('details_url'),
'size': torrent.get('size'),
'score': host['extra_score'],
'seeders': torrent.get('seeders'),
'leechers': torrent.get('leechers'),
'seed_ratio': host['seed_ratio'],
'seed_time': host['seed_time'],
})

except:
log.error('Failed getting results from %s: %s', (host['host'], traceback.format_exc()))

def getHosts(self):

uses = splitString(str(self.conf('use')), clean = False)
hosts = splitString(self.conf('host'), clean = False)
names = splitString(self.conf('name'), clean = False)
seed_times = splitString(self.conf('seed_time'), clean = False)
seed_ratios = splitString(self.conf('seed_ratio'), clean = False)
pass_keys = splitString(self.conf('pass_key'), clean = False)
extra_score = splitString(self.conf('extra_score'), clean = False)

list = []
for nr in range(len(hosts)):

try: key = pass_keys[nr]
except: key = ''

try: host = hosts[nr]
except: host = ''

try: name = names[nr]
except: name = ''

try: ratio = seed_ratios[nr]
except: ratio = ''

try: seed_time = seed_times[nr]
except: seed_time = ''

list.append({
'use': uses[nr],
'host': host,
'name': name,
'seed_ratio': tryFloat(ratio),
'seed_time': tryInt(seed_time),
'pass_key': key,
'extra_score': tryInt(extra_score[nr]) if len(extra_score) > nr else 0
})

return list

def belongsTo(self, url, provider = None, host = None):

hosts = self.getHosts()

for host in hosts:
result = super(TorrentPotato, self).belongsTo(url, host = host['host'], provider = provider)
if result:
return result

def isDisabled(self, host = None):
return not self.isEnabled(host)

def isEnabled(self, host = None):

# Return true if at least one is enabled and no host is given
if host is None:
for host in self.getHosts():
if self.isEnabled(host):
return True
return False

return TorrentProvider.isEnabled(self) and host['host'] and host['pass_key'] and int(host['use'])
@@ -11,6 +11,12 @@
pages: [],
block: [],

initialize: function(){
var self = this;

self.global_events = {};
},

setup: function(options) {
var self = this;
self.setOptions(options);
@@ -30,7 +36,7 @@
History.addEvent('change', self.openPage.bind(self));
self.c.addEvent('click:relay(a[href^=/]:not([target]))', self.pushState.bind(self));
self.c.addEvent('click:relay(a[href^=http])', self.openDerefered.bind(self));

// Check if device is touchenabled
self.touch_device = 'ontouchstart' in window || navigator.msMaxTouchPoints;
if(self.touch_device)
@@ -55,7 +61,7 @@
History.push(url);
}
},

isMac: function(){
return Browser.Platform.mac
},
@@ -111,7 +117,7 @@
}
})
];

setting_links.each(function(a){
self.block.more.addLink(a)
});
@@ -336,6 +342,66 @@
})
)
);
},

/*
* Global events
*/
on: function(name, handle){
var self = this;

if(!self.global_events[name])
self.global_events[name] = [];

self.global_events[name].push(handle);

},

trigger: function(name, args, on_complete){
var self = this;

if(!self.global_events[name]){ return; }

if(!on_complete && typeOf(args) == 'function'){
on_complete = args;
args = {};
}

// Create parallel callback
var callbacks = [];
self.global_events[name].each(function(handle, nr){

callbacks.push(function(callback){
var results = handle(args || {});
callback(null, results || null);
});

});

// Fire events
async.parallel(callbacks, function(err, results){
if(err) p(err);

if(on_complete)
on_complete(results);
});

},

off: function(name, handle){
var self = this;

if(!self.global_events[name]) return;

// Remove single
if(handle){
self.global_events[name] = self.global_events[name].erase(handle);
}
// Reset full event
else {
self.global_events[name] = [];
}

}

});
@@ -503,7 +569,7 @@ function randomString(length, extra) {
case "string": saveKeyPath(argument.match(/[+-]|[^.]+/g)); break;
}
});
return this.sort(comparer);
return this.stableSort(comparer);
}
});
couchpotato/static/scripts/library/Array.stableSort.js (new file, 56 lines)
@@ -0,0 +1,56 @@
/*
---

script: Array.stableSort.js

description: Add a stable sort algorithm for all browsers

license: MIT-style license.

authors:
- Yorick Sijsling

requires:
core/1.3: '*'

provides:
- [Array.stableSort, Array.mergeSort]

...
*/

(function() {

var defaultSortFunction = function(a, b) {
return a > b ? 1 : (a < b ? -1 : 0);
}

Array.implement({

stableSort: function(compare) {
// I would love some real feature recognition. Problem is that an unstable algorithm sometimes/often gives the same result as an unstable algorithm.
return (Browser.chrome || Browser.firefox2 || Browser.opera9) ? this.mergeSort(compare) : this.sort(compare);
},

mergeSort: function(compare, token) {
compare = compare || defaultSortFunction;
if (this.length > 1) {
// Split and sort both parts
var right = this.splice(Math.floor(this.length / 2)).mergeSort(compare);
var left = this.splice(0).mergeSort(compare); // 'this' is now empty.

// Merge parts together
while (left.length > 0 || right.length > 0) {
this.push(
right.length === 0 ? left.shift()
: left.length === 0 ? right.shift()
: compare(left[0], right[0]) > 0 ? right.shift()
: left.shift());
}
}
return this;
}

});
})();
couchpotato/static/scripts/library/async.js (new file, 955 lines)
@@ -0,0 +1,955 @@
/*global setImmediate: false, setTimeout: false, console: false */
|
||||
(function () {
|
||||
|
||||
var async = {};
|
||||
|
||||
// global on the server, window in the browser
|
||||
var root, previous_async;
|
||||
|
||||
root = this;
|
||||
if (root != null) {
|
||||
previous_async = root.async;
|
||||
}
|
||||
|
||||
async.noConflict = function () {
|
||||
root.async = previous_async;
|
||||
return async;
|
||||
};
|
||||
|
||||
function only_once(fn) {
|
||||
var called = false;
|
||||
return function() {
|
||||
if (called) throw new Error("Callback was already called.");
|
||||
called = true;
|
||||
fn.apply(root, arguments);
|
||||
}
|
||||
}
|
||||
|
||||
//// cross-browser compatiblity functions ////
|
||||
|
||||
var _each = function (arr, iterator) {
|
||||
if (arr.forEach) {
|
||||
return arr.forEach(iterator);
|
||||
}
|
||||
for (var i = 0; i < arr.length; i += 1) {
|
||||
iterator(arr[i], i, arr);
|
||||
}
|
||||
};
|
||||
|
||||
var _map = function (arr, iterator) {
|
||||
if (arr.map) {
|
||||
return arr.map(iterator);
|
||||
}
|
||||
var results = [];
|
||||
_each(arr, function (x, i, a) {
|
||||
results.push(iterator(x, i, a));
|
||||
});
|
||||
return results;
|
||||
};
|
||||
|
||||
var _reduce = function (arr, iterator, memo) {
|
||||
if (arr.reduce) {
|
||||
return arr.reduce(iterator, memo);
|
||||
}
|
||||
_each(arr, function (x, i, a) {
|
||||
memo = iterator(memo, x, i, a);
|
||||
});
|
||||
return memo;
|
||||
};
|
||||
|
||||
var _keys = function (obj) {
|
||||
if (Object.keys) {
|
||||
return Object.keys(obj);
|
||||
}
|
||||
var keys = [];
|
||||
for (var k in obj) {
|
||||
if (obj.hasOwnProperty(k)) {
|
||||
keys.push(k);
|
||||
}
|
||||
}
|
||||
return keys;
|
||||
};
|
||||
|
||||
//// exported async module functions ////
|
||||
|
||||
//// nextTick implementation with browser-compatible fallback ////
|
||||
if (typeof process === 'undefined' || !(process.nextTick)) {
|
||||
if (typeof setImmediate === 'function') {
|
||||
async.nextTick = function (fn) {
|
||||
// not a direct alias for IE10 compatibility
|
||||
setImmediate(fn);
|
||||
};
|
||||
async.setImmediate = async.nextTick;
|
||||
}
|
||||
else {
|
||||
async.nextTick = function (fn) {
|
||||
setTimeout(fn, 0);
|
||||
};
|
||||
async.setImmediate = async.nextTick;
|
||||
}
|
||||
}
|
||||
else {
|
||||
async.nextTick = process.nextTick;
|
||||
if (typeof setImmediate !== 'undefined') {
|
||||
async.setImmediate = setImmediate;
|
||||
}
|
||||
else {
|
||||
async.setImmediate = async.nextTick;
|
||||
}
|
||||
}
|
||||
|
||||
async.each = function (arr, iterator, callback) {
|
||||
callback = callback || function () {};
|
||||
if (!arr.length) {
|
||||
return callback();
|
||||
}
|
||||
var completed = 0;
|
||||
_each(arr, function (x) {
|
||||
iterator(x, only_once(function (err) {
|
||||
if (err) {
|
||||
callback(err);
|
||||
callback = function () {};
|
||||
}
|
||||
else {
|
||||
completed += 1;
|
||||
if (completed >= arr.length) {
|
||||
callback(null);
|
||||
}
|
||||
}
|
||||
}));
|
||||
});
|
||||
};
|
||||
async.forEach = async.each;
|
||||
|
||||
async.eachSeries = function (arr, iterator, callback) {
|
||||
callback = callback || function () {};
|
||||
if (!arr.length) {
|
||||
return callback();
|
||||
}
|
||||
var completed = 0;
|
||||
var iterate = function () {
|
||||
iterator(arr[completed], function (err) {
|
||||
if (err) {
|
||||
callback(err);
|
||||
callback = function () {};
|
||||
}
|
||||
else {
|
||||
completed += 1;
|
||||
if (completed >= arr.length) {
|
||||
callback(null);
|
||||
}
|
||||
else {
|
||||
iterate();
|
||||
}
|
||||
}
|
||||
});
|
||||
};
|
||||
iterate();
|
||||
};
|
||||
async.forEachSeries = async.eachSeries;
|
||||
|
||||
async.eachLimit = function (arr, limit, iterator, callback) {
|
||||
var fn = _eachLimit(limit);
|
||||
fn.apply(null, [arr, iterator, callback]);
|
||||
};
|
||||
async.forEachLimit = async.eachLimit;
|
||||
|
||||
var _eachLimit = function (limit) {
|
||||
|
||||
return function (arr, iterator, callback) {
|
||||
callback = callback || function () {};
|
||||
if (!arr.length || limit <= 0) {
|
||||
return callback();
|
||||
}
|
||||
var completed = 0;
|
||||
var started = 0;
|
||||
var running = 0;
|
||||
|
||||
(function replenish () {
|
||||
if (completed >= arr.length) {
|
||||
return callback();
|
||||
}
|
||||
|
||||
while (running < limit && started < arr.length) {
|
||||
started += 1;
|
||||
running += 1;
|
||||
iterator(arr[started - 1], function (err) {
|
||||
if (err) {
|
||||
callback(err);
|
||||
callback = function () {};
|
||||
}
|
||||
else {
|
||||
completed += 1;
|
||||
running -= 1;
|
||||
if (completed >= arr.length) {
|
||||
callback();
|
||||
}
|
||||
else {
|
||||
replenish();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
})();
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
var doParallel = function (fn) {
|
||||
return function () {
|
||||
var args = Array.prototype.slice.call(arguments);
|
||||
return fn.apply(null, [async.each].concat(args));
|
||||
};
|
||||
};
|
||||
var doParallelLimit = function(limit, fn) {
|
||||
return function () {
|
||||
var args = Array.prototype.slice.call(arguments);
|
||||
return fn.apply(null, [_eachLimit(limit)].concat(args));
|
||||
};
|
||||
};
|
||||
var doSeries = function (fn) {
|
||||
return function () {
|
||||
var args = Array.prototype.slice.call(arguments);
|
||||
return fn.apply(null, [async.eachSeries].concat(args));
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
var _asyncMap = function (eachfn, arr, iterator, callback) {
|
||||
var results = [];
|
||||
arr = _map(arr, function (x, i) {
|
||||
return {index: i, value: x};
|
||||
});
|
||||
eachfn(arr, function (x, callback) {
|
||||
iterator(x.value, function (err, v) {
|
||||
results[x.index] = v;
|
||||
callback(err);
|
||||
});
|
||||
}, function (err) {
|
||||
callback(err, results);
|
||||
});
|
||||
};
|
||||
async.map = doParallel(_asyncMap);
|
||||
async.mapSeries = doSeries(_asyncMap);
|
||||
async.mapLimit = function (arr, limit, iterator, callback) {
|
||||
return _mapLimit(limit)(arr, iterator, callback);
|
||||
};
|
||||
|
||||
var _mapLimit = function(limit) {
|
||||
return doParallelLimit(limit, _asyncMap);
|
||||
};
|
||||
|
||||
// reduce only has a series version, as doing reduce in parallel won't
|
||||
// work in many situations.
|
||||
async.reduce = function (arr, memo, iterator, callback) {
|
||||
async.eachSeries(arr, function (x, callback) {
|
||||
iterator(memo, x, function (err, v) {
|
||||
memo = v;
|
||||
callback(err);
|
||||
});
|
||||
}, function (err) {
|
||||
callback(err, memo);
|
||||
});
|
||||
};
|
||||
// inject alias
|
||||
async.inject = async.reduce;
|
||||
// foldl alias
|
||||
async.foldl = async.reduce;
|
||||
|
||||
async.reduceRight = function (arr, memo, iterator, callback) {
|
||||
var reversed = _map(arr, function (x) {
|
||||
return x;
|
||||
}).reverse();
|
||||
async.reduce(reversed, memo, iterator, callback);
|
||||
};
|
||||
// foldr alias
|
||||
async.foldr = async.reduceRight;
|
||||
|
||||
var _filter = function (eachfn, arr, iterator, callback) {
|
||||
var results = [];
|
||||
arr = _map(arr, function (x, i) {
|
||||
return {index: i, value: x};
|
||||
});
|
||||
eachfn(arr, function (x, callback) {
|
||||
iterator(x.value, function (v) {
|
||||
if (v) {
|
||||
results.push(x);
|
||||
}
|
||||
callback();
|
||||
});
|
||||
}, function (err) {
|
||||
callback(_map(results.sort(function (a, b) {
|
||||
return a.index - b.index;
|
||||
}), function (x) {
|
||||
return x.value;
|
||||
}));
|
||||
});
|
||||
};
|
||||
async.filter = doParallel(_filter);
|
||||
async.filterSeries = doSeries(_filter);
|
||||
// select alias
|
||||
async.select = async.filter;
|
||||
async.selectSeries = async.filterSeries;
|
||||
|
||||
var _reject = function (eachfn, arr, iterator, callback) {
|
||||
var results = [];
|
||||
arr = _map(arr, function (x, i) {
|
||||
return {index: i, value: x};
|
||||
});
|
||||
eachfn(arr, function (x, callback) {
|
||||
iterator(x.value, function (v) {
|
||||
if (!v) {
|
||||
results.push(x);
|
||||
}
|
||||
callback();
|
||||
});
|
||||
}, function (err) {
|
||||
callback(_map(results.sort(function (a, b) {
|
||||
return a.index - b.index;
|
||||
}), function (x) {
|
||||
return x.value;
|
||||
}));
|
||||
});
|
||||
};
|
||||
async.reject = doParallel(_reject);
|
||||
async.rejectSeries = doSeries(_reject);
|
||||
|
||||
var _detect = function (eachfn, arr, iterator, main_callback) {
|
||||
eachfn(arr, function (x, callback) {
|
||||
iterator(x, function (result) {
|
||||
if (result) {
|
||||
main_callback(x);
|
||||
main_callback = function () {};
|
||||
}
|
||||
else {
|
||||
callback();
|
||||
}
|
||||
});
|
||||
}, function (err) {
|
||||
main_callback();
|
||||
});
|
||||
};
|
||||
async.detect = doParallel(_detect);
|
||||
async.detectSeries = doSeries(_detect);
|
||||
|
||||
async.some = function (arr, iterator, main_callback) {
|
||||
async.each(arr, function (x, callback) {
|
||||
iterator(x, function (v) {
|
||||
if (v) {
|
||||
main_callback(true);
|
||||
main_callback = function () {};
|
||||
}
|
||||
callback();
|
||||
});
|
||||
}, function (err) {
|
||||
main_callback(false);
|
||||
});
|
||||
};
|
||||
// any alias
|
||||
async.any = async.some;
|
||||
|
||||
async.every = function (arr, iterator, main_callback) {
|
||||
async.each(arr, function (x, callback) {
|
||||
iterator(x, function (v) {
|
||||
if (!v) {
|
||||
main_callback(false);
|
||||
main_callback = function () {};
|
||||
}
|
||||
callback();
|
||||
});
|
||||
}, function (err) {
|
||||
main_callback(true);
|
||||
});
|
||||
};
|
||||
// all alias
|
||||
async.all = async.every;
|
||||
|
||||
async.sortBy = function (arr, iterator, callback) {
|
||||
async.map(arr, function (x, callback) {
|
||||
iterator(x, function (err, criteria) {
|
||||
if (err) {
|
||||
callback(err);
|
||||
}
|
||||
else {
|
||||
callback(null, {value: x, criteria: criteria});
|
||||
}
|
||||
});
|
||||
}, function (err, results) {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
else {
|
||||
var fn = function (left, right) {
|
||||
var a = left.criteria, b = right.criteria;
|
||||
return a < b ? -1 : a > b ? 1 : 0;
|
||||
};
|
||||
callback(null, _map(results.sort(fn), function (x) {
|
||||
return x.value;
|
||||
}));
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
async.auto = function (tasks, callback) {
|
||||
callback = callback || function () {};
|
||||
var keys = _keys(tasks);
|
||||
if (!keys.length) {
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
var results = {};
|
||||
|
||||
var listeners = [];
|
||||
var addListener = function (fn) {
|
||||
listeners.unshift(fn);
|
||||
};
|
||||
var removeListener = function (fn) {
|
||||
for (var i = 0; i < listeners.length; i += 1) {
|
||||
if (listeners[i] === fn) {
|
||||
listeners.splice(i, 1);
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
var taskComplete = function () {
|
||||
_each(listeners.slice(0), function (fn) {
|
||||
fn();
|
||||
});
|
||||
};
|
||||
|
||||
addListener(function () {
|
||||
if (_keys(results).length === keys.length) {
|
||||
callback(null, results);
|
||||
callback = function () {};
|
||||
}
|
||||
});
|
||||
|
||||
_each(keys, function (k) {
|
||||
var task = (tasks[k] instanceof Function) ? [tasks[k]]: tasks[k];
|
||||
var taskCallback = function (err) {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
if (args.length <= 1) {
|
||||
args = args[0];
|
||||
}
|
||||
if (err) {
|
||||
var safeResults = {};
|
||||
_each(_keys(results), function(rkey) {
|
||||
safeResults[rkey] = results[rkey];
|
||||
});
|
||||
safeResults[k] = args;
|
||||
callback(err, safeResults);
|
||||
// stop subsequent errors hitting callback multiple times
|
||||
callback = function () {};
|
||||
}
|
||||
else {
|
||||
results[k] = args;
|
||||
async.setImmediate(taskComplete);
|
||||
}
|
||||
};
|
||||
var requires = task.slice(0, Math.abs(task.length - 1)) || [];
|
||||
var ready = function () {
|
||||
return _reduce(requires, function (a, x) {
|
||||
return (a && results.hasOwnProperty(x));
|
||||
}, true) && !results.hasOwnProperty(k);
|
||||
};
|
||||
if (ready()) {
|
||||
task[task.length - 1](taskCallback, results);
|
||||
}
|
||||
else {
|
||||
var listener = function () {
|
||||
if (ready()) {
|
||||
removeListener(listener);
|
||||
task[task.length - 1](taskCallback, results);
|
||||
}
|
||||
};
|
||||
addListener(listener);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
async.waterfall = function (tasks, callback) {
|
||||
callback = callback || function () {};
|
||||
if (tasks.constructor !== Array) {
|
||||
var err = new Error('First argument to waterfall must be an array of functions');
|
||||
return callback(err);
|
||||
}
|
||||
if (!tasks.length) {
|
||||
return callback();
|
||||
}
|
||||
var wrapIterator = function (iterator) {
|
||||
return function (err) {
|
||||
if (err) {
|
||||
callback.apply(null, arguments);
|
||||
callback = function () {};
|
||||
}
|
||||
else {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
var next = iterator.next();
|
||||
if (next) {
|
||||
args.push(wrapIterator(next));
|
||||
}
|
||||
else {
|
||||
args.push(callback);
|
||||
}
|
||||
async.setImmediate(function () {
|
||||
iterator.apply(null, args);
|
||||
});
|
||||
}
|
||||
};
|
||||
};
|
||||
wrapIterator(async.iterator(tasks))();
|
||||
};
|
||||
|
||||
var _parallel = function(eachfn, tasks, callback) {
|
||||
callback = callback || function () {};
|
||||
if (tasks.constructor === Array) {
|
||||
eachfn.map(tasks, function (fn, callback) {
|
||||
if (fn) {
|
||||
fn(function (err) {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
if (args.length <= 1) {
|
||||
args = args[0];
|
||||
}
|
||||
callback.call(null, err, args);
|
||||
});
|
||||
}
|
||||
}, callback);
|
||||
}
|
||||
else {
|
||||
var results = {};
|
||||
eachfn.each(_keys(tasks), function (k, callback) {
|
||||
tasks[k](function (err) {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
if (args.length <= 1) {
|
||||
args = args[0];
|
||||
}
|
||||
results[k] = args;
|
||||
callback(err);
|
||||
});
|
||||
}, function (err) {
|
||||
callback(err, results);
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
async.parallel = function (tasks, callback) {
|
||||
_parallel({ map: async.map, each: async.each }, tasks, callback);
|
||||
};
|
||||
|
||||
async.parallelLimit = function(tasks, limit, callback) {
|
||||
_parallel({ map: _mapLimit(limit), each: _eachLimit(limit) }, tasks, callback);
|
||||
};
|
||||
|
||||
async.series = function (tasks, callback) {
|
||||
callback = callback || function () {};
|
||||
if (tasks.constructor === Array) {
|
||||
async.mapSeries(tasks, function (fn, callback) {
|
||||
if (fn) {
|
||||
fn(function (err) {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
if (args.length <= 1) {
|
||||
args = args[0];
|
||||
}
|
||||
callback.call(null, err, args);
|
||||
});
|
||||
}
|
||||
}, callback);
|
||||
}
|
||||
else {
|
||||
var results = {};
|
||||
async.eachSeries(_keys(tasks), function (k, callback) {
|
||||
tasks[k](function (err) {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
if (args.length <= 1) {
|
||||
args = args[0];
|
||||
}
|
||||
results[k] = args;
|
||||
callback(err);
|
||||
});
|
||||
}, function (err) {
|
||||
callback(err, results);
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
async.iterator = function (tasks) {
|
||||
var makeCallback = function (index) {
|
||||
var fn = function () {
|
||||
if (tasks.length) {
|
||||
tasks[index].apply(null, arguments);
|
||||
}
|
||||
return fn.next();
|
||||
};
|
||||
fn.next = function () {
|
||||
return (index < tasks.length - 1) ? makeCallback(index + 1): null;
|
||||
};
|
||||
return fn;
|
||||
};
|
||||
return makeCallback(0);
|
||||
};
|
||||
|
||||
async.apply = function (fn) {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
return function () {
|
||||
return fn.apply(
|
||||
null, args.concat(Array.prototype.slice.call(arguments))
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
var _concat = function (eachfn, arr, fn, callback) {
|
||||
var r = [];
|
||||
eachfn(arr, function (x, cb) {
|
||||
fn(x, function (err, y) {
|
||||
r = r.concat(y || []);
|
||||
cb(err);
|
||||
});
|
||||
}, function (err) {
|
||||
callback(err, r);
|
||||
});
|
||||
};
|
||||
async.concat = doParallel(_concat);
|
||||
async.concatSeries = doSeries(_concat);
|
||||
|
||||
async.whilst = function (test, iterator, callback) {
|
||||
if (test()) {
|
||||
iterator(function (err) {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
async.whilst(test, iterator, callback);
|
||||
});
|
||||
}
|
||||
else {
|
||||
callback();
|
||||
}
|
||||
};
|
||||
|
||||
async.doWhilst = function (iterator, test, callback) {
|
||||
iterator(function (err) {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
if (test()) {
|
||||
async.doWhilst(iterator, test, callback);
|
||||
}
|
||||
else {
|
||||
callback();
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
async.until = function (test, iterator, callback) {
|
||||
if (!test()) {
|
||||
iterator(function (err) {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
async.until(test, iterator, callback);
|
||||
});
|
||||
}
|
||||
else {
|
||||
callback();
|
||||
}
|
||||
};
|
||||
|
||||
async.doUntil = function (iterator, test, callback) {
|
||||
iterator(function (err) {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
if (!test()) {
|
||||
async.doUntil(iterator, test, callback);
|
||||
}
|
||||
else {
|
||||
callback();
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
async.queue = function (worker, concurrency) {
|
||||
if (concurrency === undefined) {
|
||||
concurrency = 1;
|
||||
}
|
||||
function _insert(q, data, pos, callback) {
|
||||
if(data.constructor !== Array) {
|
||||
data = [data];
|
||||
}
|
||||
_each(data, function(task) {
|
||||
var item = {
|
||||
data: task,
|
||||
callback: typeof callback === 'function' ? callback : null
|
||||
};
|
||||
|
||||
if (pos) {
|
||||
q.tasks.unshift(item);
|
||||
} else {
|
||||
q.tasks.push(item);
|
||||
}
|
||||
|
||||
if (q.saturated && q.tasks.length === concurrency) {
|
||||
q.saturated();
|
||||
}
|
||||
async.setImmediate(q.process);
|
||||
});
|
||||
}
|
||||
|
||||
var workers = 0;
|
||||
var q = {
|
||||
tasks: [],
|
||||
concurrency: concurrency,
|
||||
saturated: null,
|
||||
empty: null,
|
||||
drain: null,
|
||||
push: function (data, callback) {
|
||||
_insert(q, data, false, callback);
|
||||
},
|
||||
unshift: function (data, callback) {
|
||||
_insert(q, data, true, callback);
|
||||
},
|
||||
process: function () {
|
||||
if (workers < q.concurrency && q.tasks.length) {
|
||||
var task = q.tasks.shift();
|
||||
if (q.empty && q.tasks.length === 0) {
|
||||
q.empty();
|
||||
}
|
||||
workers += 1;
|
||||
var next = function () {
|
||||
workers -= 1;
|
||||
if (task.callback) {
|
||||
task.callback.apply(task, arguments);
|
||||
}
|
||||
if (q.drain && q.tasks.length + workers === 0) {
|
||||
q.drain();
|
||||
}
|
||||
q.process();
|
||||
};
|
||||
var cb = only_once(next);
|
||||
worker(task.data, cb);
|
||||
}
|
||||
},
|
||||
length: function () {
|
||||
return q.tasks.length;
|
||||
},
|
||||
running: function () {
|
||||
return workers;
|
||||
}
|
||||
};
|
||||
return q;
|
||||
};
|
||||
|
||||
async.cargo = function (worker, payload) {
|
||||
var working = false,
|
||||
tasks = [];
|
||||
|
||||
var cargo = {
|
||||
tasks: tasks,
|
||||
payload: payload,
|
||||
saturated: null,
|
||||
empty: null,
|
||||
drain: null,
|
||||
push: function (data, callback) {
|
||||
if(data.constructor !== Array) {
|
||||
data = [data];
|
||||
}
|
||||
_each(data, function(task) {
|
||||
tasks.push({
|
||||
data: task,
|
||||
callback: typeof callback === 'function' ? callback : null
|
||||
});
|
||||
if (cargo.saturated && tasks.length === payload) {
|
||||
cargo.saturated();
|
||||
}
|
||||
});
|
||||
async.setImmediate(cargo.process);
|
||||
},
|
||||
process: function process() {
|
||||
if (working) return;
|
||||
if (tasks.length === 0) {
|
||||
if(cargo.drain) cargo.drain();
|
||||
return;
|
||||
}
|
||||
|
||||
var ts = typeof payload === 'number'
|
||||
? tasks.splice(0, payload)
|
||||
: tasks.splice(0);
|
||||
|
||||
var ds = _map(ts, function (task) {
|
||||
return task.data;
|
||||
});
|
||||
|
||||
if(cargo.empty) cargo.empty();
|
||||
working = true;
|
||||
worker(ds, function () {
|
||||
working = false;
|
||||
|
||||
var args = arguments;
|
||||
_each(ts, function (data) {
|
||||
if (data.callback) {
|
||||
data.callback.apply(null, args);
|
||||
}
|
||||
});
|
||||
|
||||
process();
|
||||
});
|
||||
},
|
||||
length: function () {
|
||||
return tasks.length;
|
||||
},
|
||||
running: function () {
|
||||
return working;
|
||||
}
|
||||
};
|
||||
return cargo;
|
||||
};
|
||||
|
||||
var _console_fn = function (name) {
|
||||
return function (fn) {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
fn.apply(null, args.concat([function (err) {
|
||||
var args = Array.prototype.slice.call(arguments, 1);
|
||||
if (typeof console !== 'undefined') {
|
||||
if (err) {
|
||||
if (console.error) {
|
||||
console.error(err);
|
||||
}
|
||||
}
|
||||
else if (console[name]) {
|
||||
_each(args, function (x) {
|
||||
console[name](x);
|
||||
});
|
||||
}
|
||||
}
|
||||
}]));
|
||||
};
|
||||
};
|
||||
async.log = _console_fn('log');
|
||||
async.dir = _console_fn('dir');
|
||||
/*async.info = _console_fn('info');
|
||||
async.warn = _console_fn('warn');
|
||||
async.error = _console_fn('error');*/
|
||||
|
||||
async.memoize = function (fn, hasher) {
|
||||
var memo = {};
|
||||
var queues = {};
|
||||
hasher = hasher || function (x) {
|
||||
return x;
|
||||
};
|
||||
var memoized = function () {
|
||||
var args = Array.prototype.slice.call(arguments);
|
||||
var callback = args.pop();
|
||||
var key = hasher.apply(null, args);
|
||||
if (key in memo) {
|
||||
callback.apply(null, memo[key]);
|
||||
}
|
||||
else if (key in queues) {
|
||||
queues[key].push(callback);
|
||||
}
|
||||
else {
|
||||
queues[key] = [callback];
|
||||
fn.apply(null, args.concat([function () {
|
||||
memo[key] = arguments;
|
||||
var q = queues[key];
|
||||
delete queues[key];
|
||||
for (var i = 0, l = q.length; i < l; i++) {
|
||||
q[i].apply(null, arguments);
|
||||
}
|
||||
}]));
|
||||
}
|
||||
};
|
||||
memoized.memo = memo;
|
||||
memoized.unmemoized = fn;
|
||||
return memoized;
|
||||
};
|
||||
|
||||
async.unmemoize = function (fn) {
|
||||
return function () {
|
||||
return (fn.unmemoized || fn).apply(null, arguments);
|
||||
};
|
||||
};
|
||||
|
||||
async.times = function (count, iterator, callback) {
|
||||
var counter = [];
|
||||
for (var i = 0; i < count; i++) {
|
||||
counter.push(i);
|
||||
}
|
||||
return async.map(counter, iterator, callback);
|
||||
};
|
||||
|
||||
async.timesSeries = function (count, iterator, callback) {
|
||||
var counter = [];
|
||||
for (var i = 0; i < count; i++) {
|
||||
counter.push(i);
|
||||
}
|
||||
return async.mapSeries(counter, iterator, callback);
|
||||
};
|
||||
|
||||
async.compose = function (/* functions... */) {
|
||||
var fns = Array.prototype.reverse.call(arguments);
|
||||
return function () {
|
||||
var that = this;
|
||||
var args = Array.prototype.slice.call(arguments);
|
||||
var callback = args.pop();
|
||||
async.reduce(fns, args, function (newargs, fn, cb) {
|
||||
fn.apply(that, newargs.concat([function () {
|
||||
var err = arguments[0];
|
||||
var nextargs = Array.prototype.slice.call(arguments, 1);
|
||||
cb(err, nextargs);
|
||||
}]))
|
||||
},
|
||||
function (err, results) {
|
||||
callback.apply(that, [err].concat(results));
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
var _applyEach = function (eachfn, fns /*args...*/) {
|
||||
var go = function () {
|
||||
var that = this;
|
||||
var args = Array.prototype.slice.call(arguments);
|
||||
var callback = args.pop();
|
||||
return eachfn(fns, function (fn, cb) {
|
||||
fn.apply(that, args.concat([cb]));
|
||||
},
|
||||
callback);
|
||||
};
|
||||
if (arguments.length > 2) {
|
||||
var args = Array.prototype.slice.call(arguments, 2);
|
||||
return go.apply(this, args);
|
||||
}
|
||||
else {
|
||||
return go;
|
||||
}
|
||||
};
|
||||
async.applyEach = doParallel(_applyEach);
|
||||
async.applyEachSeries = doSeries(_applyEach);
|
||||
|
||||
async.forever = function (fn, callback) {
|
||||
function next(err) {
|
||||
if (err) {
|
||||
if (callback) {
|
||||
return callback(err);
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
fn(next);
|
||||
}
|
||||
next();
|
||||
};
|
||||
|
||||
// AMD / RequireJS
|
||||
if (typeof define !== 'undefined' && define.amd) {
|
||||
define([], function () {
|
||||
return async;
|
||||
});
|
||||
}
|
||||
// Node.js
|
||||
else if (typeof module !== 'undefined' && module.exports) {
|
||||
module.exports = async;
|
||||
}
|
||||
// included directly via <script> tag
|
||||
else {
|
||||
root.async = async;
|
||||
}
|
||||
|
||||
}());
|
||||
@@ -1,6 +1,6 @@
|
||||
// MooTools: the javascript framework.
|
||||
// Load this file's selection again by visiting: http://mootools.net/more/43db227db7a621ebb062ee621432ae3d
|
||||
// Or build this file again with packager using: packager build More/Events.Pseudos More/Date More/Date.Extras More/Element.Forms More/Element.Position More/Element.Shortcuts More/Fx.Scroll More/Fx.Slide More/Sortables More/Request.JSONP More/Request.Periodical
|
||||
// Load this file's selection again by visiting: http://mootools.net/more/7a819726f7f5e85fc48bef295ff78dbe
|
||||
// Or build this file again with packager using: packager build More/Events.Pseudos More/Date More/Date.Extras More/Element.Forms More/Element.Position More/Element.Shortcuts More/Fx.Scroll More/Fx.Slide More/Sortables More/Request.JSONP More/Request.Periodical More/Tips
|
||||
/*
|
||||
---
|
||||
|
||||
@@ -3161,3 +3161,264 @@ Request.implement({
|
||||
|
||||
});
|
||||
|
||||
|
||||
/*
|
||||
---
|
||||
|
||||
script: Tips.js
|
||||
|
||||
name: Tips
|
||||
|
||||
description: Class for creating nice tips that follow the mouse cursor when hovering an element.
|
||||
|
||||
license: MIT-style license
|
||||
|
||||
authors:
|
||||
- Valerio Proietti
|
||||
- Christoph Pojer
|
||||
- Luis Merino
|
||||
|
||||
requires:
|
||||
- Core/Options
|
||||
- Core/Events
|
||||
- Core/Element.Event
|
||||
- Core/Element.Style
|
||||
- Core/Element.Dimensions
|
||||
- /MooTools.More
|
||||
|
||||
provides: [Tips]
|
||||
|
||||
...
|
||||
*/
|
||||
|
||||
(function(){
|
||||
|
||||
var read = function(option, element){
|
||||
return (option) ? (typeOf(option) == 'function' ? option(element) : element.get(option)) : '';
|
||||
};
|
||||
|
||||
this.Tips = new Class({
|
||||
|
||||
Implements: [Events, Options],
|
||||
|
||||
options: {/*
|
||||
id: null,
|
||||
onAttach: function(element){},
|
||||
onDetach: function(element){},
|
||||
onBound: function(coords){},*/
|
||||
onShow: function(){
|
||||
this.tip.setStyle('display', 'block');
|
||||
},
|
||||
onHide: function(){
|
||||
this.tip.setStyle('display', 'none');
|
||||
},
|
||||
title: 'title',
|
||||
text: function(element){
|
||||
return element.get('rel') || element.get('href');
|
||||
},
|
||||
showDelay: 100,
|
||||
hideDelay: 100,
|
||||
className: 'tip-wrap',
|
||||
offset: {x: 16, y: 16},
|
||||
windowPadding: {x:0, y:0},
|
||||
fixed: false,
|
||||
waiAria: true
|
||||
},
|
||||
|
||||
initialize: function(){
|
||||
var params = Array.link(arguments, {
|
||||
options: Type.isObject,
|
||||
elements: function(obj){
|
||||
return obj != null;
|
||||
}
|
||||
});
|
||||
this.setOptions(params.options);
|
||||
if (params.elements) this.attach(params.elements);
|
||||
this.container = new Element('div', {'class': 'tip'});
|
||||
|
||||
if (this.options.id){
|
||||
this.container.set('id', this.options.id);
|
||||
if (this.options.waiAria) this.attachWaiAria();
|
||||
}
|
||||
},
|
||||
|
||||
toElement: function(){
|
||||
if (this.tip) return this.tip;
|
||||
|
||||
this.tip = new Element('div', {
|
||||
'class': this.options.className,
|
||||
styles: {
|
||||
position: 'absolute',
|
||||
top: 0,
|
||||
left: 0
|
||||
}
|
||||
}).adopt(
|
||||
new Element('div', {'class': 'tip-top'}),
|
||||
this.container,
|
||||
new Element('div', {'class': 'tip-bottom'})
|
||||
);
|
||||
|
||||
return this.tip;
|
||||
},
|
||||
|
||||
attachWaiAria: function(){
|
||||
var id = this.options.id;
|
||||
this.container.set('role', 'tooltip');
|
||||
|
||||
if (!this.waiAria){
|
||||
this.waiAria = {
|
||||
show: function(element){
|
||||
if (id) element.set('aria-describedby', id);
|
||||
this.container.set('aria-hidden', 'false');
|
||||
},
|
||||
hide: function(element){
|
||||
if (id) element.erase('aria-describedby');
|
||||
this.container.set('aria-hidden', 'true');
|
||||
}
|
||||
};
|
||||
}
|
||||
this.addEvents(this.waiAria);
|
||||
},
|
||||
|
||||
detachWaiAria: function(){
|
||||
if (this.waiAria){
|
||||
this.container.erase('role');
|
||||
this.container.erase('aria-hidden');
|
||||
this.removeEvents(this.waiAria);
|
||||
}
|
||||
},
|
||||
|
||||
attach: function(elements){
|
||||
$$(elements).each(function(element){
|
||||
var title = read(this.options.title, element),
|
||||
text = read(this.options.text, element);
|
||||
|
||||
element.set('title', '').store('tip:native', title).retrieve('tip:title', title);
|
||||
element.retrieve('tip:text', text);
|
||||
this.fireEvent('attach', [element]);
|
||||
|
||||
var events = ['enter', 'leave'];
|
||||
if (!this.options.fixed) events.push('move');
|
||||
|
||||
events.each(function(value){
|
||||
var event = element.retrieve('tip:' + value);
|
||||
if (!event) event = function(event){
|
||||
this['element' + value.capitalize()].apply(this, [event, element]);
|
||||
}.bind(this);
|
||||
|
||||
element.store('tip:' + value, event).addEvent('mouse' + value, event);
|
||||
}, this);
|
||||
}, this);
|
||||
|
||||
return this;
|
||||
},
|
||||
|
||||
detach: function(elements){
|
||||
$$(elements).each(function(element){
|
||||
['enter', 'leave', 'move'].each(function(value){
|
||||
element.removeEvent('mouse' + value, element.retrieve('tip:' + value)).eliminate('tip:' + value);
|
||||
});
|
||||
|
||||
this.fireEvent('detach', [element]);
|
||||
|
||||
if (this.options.title == 'title'){ // This is necessary to check if we can revert the title
|
||||
var original = element.retrieve('tip:native');
|
||||
if (original) element.set('title', original);
|
||||
}
|
||||
}, this);
|
||||
|
||||
return this;
|
||||
},
|
||||
|
||||
elementEnter: function(event, element){
|
||||
clearTimeout(this.timer);
|
||||
this.timer = (function(){
|
||||
this.container.empty();
|
||||
|
||||
['title', 'text'].each(function(value){
|
||||
var content = element.retrieve('tip:' + value);
|
||||
var div = this['_' + value + 'Element'] = new Element('div', {
|
||||
'class': 'tip-' + value
|
||||
}).inject(this.container);
|
||||
if (content) this.fill(div, content);
|
||||
}, this);
|
||||
this.show(element);
|
||||
this.position((this.options.fixed) ? {page: element.getPosition()} : event);
|
||||
}).delay(this.options.showDelay, this);
|
||||
},
|
||||
|
||||
elementLeave: function(event, element){
|
||||
clearTimeout(this.timer);
|
||||
this.timer = this.hide.delay(this.options.hideDelay, this, element);
|
||||
this.fireForParent(event, element);
|
||||
},
|
||||
|
||||
setTitle: function(title){
|
||||
if (this._titleElement){
|
||||
this._titleElement.empty();
|
||||
this.fill(this._titleElement, title);
|
||||
}
|
||||
return this;
|
||||
},
|
||||
|
||||
setText: function(text){
|
||||
if (this._textElement){
|
||||
this._textElement.empty();
|
||||
this.fill(this._textElement, text);
|
||||
}
|
||||
return this;
|
||||
},
|
||||
|
||||
fireForParent: function(event, element){
|
||||
element = element.getParent();
|
||||
if (!element || element == document.body) return;
|
||||
if (element.retrieve('tip:enter')) element.fireEvent('mouseenter', event);
|
||||
else this.fireForParent(event, element);
|
||||
},
|
||||
|
||||
elementMove: function(event, element){
|
||||
this.position(event);
|
||||
},
|
||||
|
||||
position: function(event){
|
||||
if (!this.tip) document.id(this);
|
||||
|
||||
var size = window.getSize(), scroll = window.getScroll(),
|
||||
tip = {x: this.tip.offsetWidth, y: this.tip.offsetHeight},
|
||||
props = {x: 'left', y: 'top'},
|
||||
bounds = {y: false, x2: false, y2: false, x: false},
|
||||
obj = {};
|
||||
|
||||
for (var z in props){
|
||||
obj[props[z]] = event.page[z] + this.options.offset[z];
|
||||
if (obj[props[z]] < 0) bounds[z] = true;
|
||||
if ((obj[props[z]] + tip[z] - scroll[z]) > size[z] - this.options.windowPadding[z]){
|
||||
obj[props[z]] = event.page[z] - this.options.offset[z] - tip[z];
|
||||
bounds[z+'2'] = true;
|
||||
}
|
||||
}
|
||||
|
||||
this.fireEvent('bound', bounds);
|
||||
this.tip.setStyles(obj);
|
||||
},
|
||||
|
||||
fill: function(element, contents){
|
||||
if (typeof contents == 'string') element.set('html', contents);
|
||||
else element.adopt(contents);
|
||||
},
|
||||
|
||||
show: function(element){
|
||||
if (!this.tip) document.id(this);
|
||||
if (!this.tip.getParent()) this.tip.inject(document.body);
|
||||
this.fireEvent('show', [this.tip, element]);
|
||||
},
|
||||
|
||||
hide: function(element){
|
||||
if (!this.tip) document.id(this);
|
||||
this.fireEvent('hide', [this.tip, element]);
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
})();
|
||||
|
||||
|
||||
@@ -57,6 +57,19 @@ Page.Home = new Class({
			'limit': null,
			'onLoaded': function(){
				self.chain.callChain();
			},
			'onMovieAdded': function(notification){

				// Track movie added
				var after_search = function(data){
					if(notification.data.id != data.data.id) return;

					// Force update after search
					self.available_list.update();
					App.off('movie.searcher.ended', after_search);
				}
				App.on('movie.searcher.ended', after_search);

			}
		});

@@ -102,6 +102,8 @@ Page.Manage = new Class({
					}
				}
				else {
					// Capture progress so we can use it in our *each* closure
					var progress = json.progress

					// Don't add loader when page is loading still
					if(!self.list.navigation)
@@ -112,10 +114,13 @@ Page.Manage = new Class({

					self.progress_container.empty();

					Object.each(json.progress, function(progress, folder){
					var sorted_table = self.parseProgress(json.progress)

					sorted_table.each(function(folder){
						var folder_progress = progress[folder]
						new Element('div').adopt(
							new Element('span.folder', {'text': folder}),
							new Element('span.percentage', {'text': progress.total ? (((progress.total-progress.to_go)/progress.total)*100).round() + '%' : '0%'})
							new Element('span.percentage', {'text': folder_progress.total ? (((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100).round() + '%' : '0%'})
						).inject(self.progress_container)
					});

@@ -124,7 +129,17 @@ Page.Manage = new Class({
				})

			}, 1000);
		},

		parseProgress: function (progress_object) {
			var folder, temp_array = [];

			for (folder in progress_object) {
				if (progress_object.hasOwnProperty(folder)) {
					temp_array.push(folder)
				}
			}
			return temp_array.stableSort()
		}

	});

@@ -111,6 +111,10 @@ Page.Settings = new Class({
		Cookie.write('advanced_toggle_checked', +self.advanced_toggle.checked, {'duration': 365});
	},

	sortByOrder: function(a, b){
		return (a.order || 100) - (b.order || 100)
	},

	create: function(json){
		var self = this;

@@ -141,13 +145,11 @@ Page.Settings = new Class({
			options.include(section);
		});

		options.sort(function(a, b){
			return (a.order || 100) - (b.order || 100)
		}).each(function(section){
		options.stableSort(self.sortByOrder).each(function(section){
			var section_name = section.section_name;

			// Add groups to content
			section.groups.sortBy('order').each(function(group){
			section.groups.stableSort(self.sortByOrder).each(function(group){
				if(group.hidden) return;

				if(self.wizard_only && !group.wizard)
@@ -184,9 +186,7 @@ Page.Settings = new Class({
				}

				// Add options to group
				group.options.sort(function(a, b){
					return (a.order || 100) - (b.order || 100)
				}).each(function(option){
				group.options.stableSort(self.sortByOrder).each(function(option){
					if(option.hidden) return;
					var class_name = (option.type || 'string').capitalize();
					var input = new Option[class_name](section_name, option.name, self.getValue(section_name, option.name), option);
@@ -265,16 +265,37 @@ Page.Settings = new Class({
|
||||
},
|
||||
|
||||
createGroup: function(group){
|
||||
|
||||
if((typeOf(group.description) == 'array')){
|
||||
var hint = new Element('span.hint.more_hint', {
|
||||
'html': group.description[0],
|
||||
'title': group.description[1]
|
||||
});
|
||||
var tip = new Tips(hint, {
|
||||
'fixed': true,
|
||||
'offset': {'x': 0, 'y': 0},
|
||||
'onShow': function(tip, hint){
|
||||
tip.setStyles({
|
||||
'margin-top': hint.getSize().y,
|
||||
'visibility': 'hidden',
|
||||
'display': 'block'
|
||||
}).fade('in');
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
var hint = new Element('span.hint', {
|
||||
'html': group.description || ''
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
return new Element('fieldset', {
|
||||
'class': (group.advanced ? 'inlineLabels advanced' : 'inlineLabels') + ' group_' + (group.name || '') + ' subtab_' + (group.subtab || '')
|
||||
}).adopt(
|
||||
}).grab(
|
||||
new Element('h2', {
|
||||
'text': group.label || (group.name).capitalize()
|
||||
}).adopt(
|
||||
new Element('span.hint', {
|
||||
'html': group.description || ''
|
||||
})
|
||||
)
|
||||
}).grab(hint)
|
||||
);
|
||||
},
|
||||
|
||||
@@ -343,10 +364,33 @@ var OptionBase = new Class({
|
||||
|
||||
createHint: function(){
|
||||
var self = this;
|
||||
if(self.options.description)
|
||||
new Element('p.formHint', {
|
||||
'html': self.options.description
|
||||
}).inject(self.el);
|
||||
if(self.options.description){
|
||||
|
||||
|
||||
if((typeOf(self.options.description) == 'array')){
|
||||
var hint = new Element('p.formHint.more_hint', {
|
||||
'html': self.options.description[0],
|
||||
'title': self.options.description[1]
|
||||
}).inject(self.el);
|
||||
var tip = new Tips(hint, {
|
||||
'fixed': true,
|
||||
'offset': {'x': 0, 'y': 0},
|
||||
'onShow': function(tip, hint){
|
||||
tip.setStyles({
|
||||
'margin-left': 13,
|
||||
'margin-top': hint.getSize().y+3,
|
||||
'visibility': 'hidden',
|
||||
'display': 'block'
|
||||
}).fade('in');
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
var hint = new Element('p.formHint', {
|
||||
'html': self.options.description || ''
|
||||
}).inject(self.el)
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
afterInject: function(){
|
||||
|
||||
@@ -545,12 +545,31 @@
|
||||
.page .combined_table .head abbr:first-child {
|
||||
display: none;
|
||||
}
|
||||
.page .combined_table .head abbr.host {
|
||||
margin-right: 190px;
|
||||
.page .combined_table .head abbr.host { margin-right: 120px; }
|
||||
.page .combined_table input.host { width: 140px; }
|
||||
.page .section_newznab .combined_table .head abbr.host { margin-right: 200px; }
|
||||
.page .section_newznab .combined_table input.host { width: 220px; }
|
||||
|
||||
.page .combined_table .head abbr.name { margin-right: 57px; }
|
||||
.page .combined_table input.name { width: 120px; }
|
||||
.page .combined_table .head abbr.api_key { margin-right: 75px; }
|
||||
|
||||
.page .combined_table .head abbr.pass_key { margin-right: 71px; }
|
||||
.page .combined_table input.pass_key { width: 113px; }
|
||||
|
||||
.page .section_newznab .combined_table .head abbr.api_key { margin-right: 185px; }
|
||||
.page .section_newznab .combined_table input.api_key { width: 223px; }
|
||||
|
||||
.page .combined_table .seed_ratio,
|
||||
.page .combined_table .seed_time {
|
||||
width: 70px;
|
||||
text-align: center;
|
||||
margin-left: 10px;
|
||||
}
|
||||
.page .combined_table .head abbr.api_key {
|
||||
margin-right: 171px;
|
||||
.page .combined_table .seed_time {
|
||||
margin-right: 10px;
|
||||
}
|
||||
|
||||
.page .combined_table .head .extra_score,
|
||||
.page .combined_table .extra_score {
|
||||
width: 70px;
|
||||
@@ -699,4 +718,20 @@
|
||||
.active .group_imdb_automation:not(.disabled) {
|
||||
background: url('../images/imdb_watchlist.png') no-repeat right 50px;
|
||||
min-height: 210px;
|
||||
}
|
||||
}
|
||||
|
||||
.tip-wrap {
|
||||
background: #FFF;
|
||||
color: #000;
|
||||
padding: 10px;
|
||||
width: 300px;
|
||||
z-index: 200;
|
||||
}
|
||||
.more_hint:after {
|
||||
position: relative;
|
||||
font-family: 'Elusive-Icons';
|
||||
content: "\e089";
|
||||
display: inline-block;
|
||||
top: 1px;
|
||||
left: 6px;
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
#!/bin/sh
#
### BEGIN INIT INFO
# Provides: CouchPotato application instance
# Required-Start: $all

@@ -1,3 +1,3 @@
version_info = (2, 0, 2)
version_info = (2, 1, 1)
version = '.'.join(str(n) for n in version_info[:3])
release = version + ''.join(str(n) for n in version_info[3:])
release = '.'.join(str(n) for n in version_info)

@@ -16,22 +16,25 @@ class MaxInstancesReachedError(Exception):
class Job(object):
    """
    Encapsulates the actual Job along with its metadata. Job instances
    are created by the scheduler when adding jobs, and it should not be
    directly instantiated.
    are created by the scheduler when adding jobs, and should not be
    directly instantiated. These options can be set when adding jobs
    to the scheduler (see :ref:`job_options`).

    :param trigger: trigger that determines the execution times
    :param func: callable to call when the trigger is triggered
    :param args: list of positional arguments to call func with
    :param kwargs: dict of keyword arguments to call func with
    :param name: name of the job (optional)
    :param misfire_grace_time: seconds after the designated run time that
    :var trigger: trigger that determines the execution times
    :var func: callable to call when the trigger is triggered
    :var args: list of positional arguments to call func with
    :var kwargs: dict of keyword arguments to call func with
    :var name: name of the job
    :var misfire_grace_time: seconds after the designated run time that
        the job is still allowed to be run
    :param coalesce: run once instead of many times if the scheduler determines
    :var coalesce: run once instead of many times if the scheduler determines
        that the job should be run more than once in succession
    :param max_runs: maximum number of times this job is allowed to be
    :var max_runs: maximum number of times this job is allowed to be
        triggered
    :param max_instances: maximum number of concurrently running
    :var max_instances: maximum number of concurrently running
        instances allowed for this job
    :var runs: number of times this job has been triggered
    :var instances: number of concurrently running instances of this job
    """
    id = None
    next_run_time = None
@@ -130,5 +133,5 @@ class Job(object):
        return '<Job (name=%s, trigger=%s)>' % (self.name, repr(self.trigger))

    def __str__(self):
        return '%s (trigger: %s, next run at: %s)' % (self.name,
            str(self.trigger), str(self.next_run_time))
        return '%s (trigger: %s, next run at: %s)' % (
            self.name, str(self.trigger), str(self.next_run_time))

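A minimal usage sketch for the options documented above (illustrative only, assuming the apscheduler 2.x API; the callable and the interval are made up):

    from apscheduler.scheduler import Scheduler

    def sync_library():
        print('syncing...')

    sched = Scheduler()
    sched.start()
    # Job options are passed to the add_* methods, not to Job() directly
    sched.add_interval_job(sync_library, minutes=30,
                           misfire_grace_time=120,  # may still run up to 2 minutes late
                           coalesce=True,           # collapse a backlog of missed runs into one
                           max_instances=1)         # never overlap with a still-running instance
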
@@ -8,7 +8,7 @@ from apscheduler.jobstores.base import JobStore
class RAMJobStore(JobStore):
    def __init__(self):
        self.jobs = []


    def add_job(self, job):
        self.jobs.append(job)


libs/apscheduler/jobstores/redis_store.py (new file, 91 lines)
@@ -0,0 +1,91 @@
"""
Stores jobs in a Redis database.
"""
from uuid import uuid4
from datetime import datetime
import logging

from apscheduler.jobstores.base import JobStore
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from redis import StrictRedis
except ImportError:  # pragma: nocover
    raise ImportError('RedisJobStore requires redis installed')

try:
    long = long
except NameError:
    long = int

logger = logging.getLogger(__name__)


class RedisJobStore(JobStore):
    def __init__(self, db=0, key_prefix='jobs.',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        self.jobs = []
        self.pickle_protocol = pickle_protocol
        self.key_prefix = key_prefix

        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not key_prefix:
            raise ValueError('The "key_prefix" parameter must not be empty')

        self.redis = StrictRedis(db=db, **connect_args)

    def add_job(self, job):
        job.id = str(uuid4())
        job_state = job.__getstate__()
        job_dict = {
            'job_state': pickle.dumps(job_state, self.pickle_protocol),
            'runs': '0',
            'next_run_time': job_state.pop('next_run_time').isoformat()}
        self.redis.hmset(self.key_prefix + job.id, job_dict)
        self.jobs.append(job)

    def remove_job(self, job):
        self.redis.delete(self.key_prefix + job.id)
        self.jobs.remove(job)

    def load_jobs(self):
        jobs = []
        keys = self.redis.keys(self.key_prefix + '*')
        pipeline = self.redis.pipeline()
        for key in keys:
            pipeline.hgetall(key)
        results = pipeline.execute()

        for job_dict in results:
            job_state = {}
            try:
                job = Job.__new__(Job)
                job_state = pickle.loads(job_dict['job_state'.encode()])
                job_state['runs'] = long(job_dict['runs'.encode()])
                dateval = job_dict['next_run_time'.encode()].decode()
                job_state['next_run_time'] = datetime.strptime(
                    dateval, '%Y-%m-%dT%H:%M:%S')
                job.__setstate__(job_state)
                jobs.append(job)
            except Exception:
                job_name = job_state.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = jobs

    def update_job(self, job):
        attrs = {
            'next_run_time': job.next_run_time.isoformat(),
            'runs': job.runs}
        self.redis.hmset(self.key_prefix + job.id, attrs)

    def close(self):
        self.redis.connection_pool.disconnect()

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

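An illustrative way to wire the new store into a scheduler. The alias, database number, and key prefix are assumptions for the sketch, not taken from this diff; connection keyword arguments are forwarded to StrictRedis():

    from apscheduler.scheduler import Scheduler
    from apscheduler.jobstores.redis_store import RedisJobStore

    def hourly_task():
        print('running')

    sched = Scheduler()
    sched.add_jobstore(RedisJobStore(db=1, key_prefix='example.jobs.'), 'redis')
    # The callable must be importable so its reference can be stored with the job
    sched.add_interval_job(hourly_task, hours=1, jobstore='redis')
    sched.start()
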
@@ -32,17 +32,20 @@ class ShelveJobStore(JobStore):

    def add_job(self, job):
        job.id = self._generate_id()
        self.jobs.append(job)
        self.store[job.id] = job.__getstate__()
        self.store.sync()
        self.jobs.append(job)

    def update_job(self, job):
        job_dict = self.store[job.id]
        job_dict['next_run_time'] = job.next_run_time
        job_dict['runs'] = job.runs
        self.store[job.id] = job_dict
        self.store.sync()

    def remove_job(self, job):
        del self.store[job.id]
        self.store.sync()
        self.jobs.remove(job)

    def load_jobs(self):

@@ -4,6 +4,8 @@ Stores jobs in a database table using SQLAlchemy.
|
||||
import pickle
|
||||
import logging
|
||||
|
||||
import sqlalchemy
|
||||
|
||||
from apscheduler.jobstores.base import JobStore
|
||||
from apscheduler.job import Job
|
||||
|
||||
@@ -28,17 +30,19 @@ class SQLAlchemyJobStore(JobStore):
|
||||
else:
|
||||
raise ValueError('Need either "engine" or "url" defined')
|
||||
|
||||
self.jobs_t = Table(tablename, metadata or MetaData(),
|
||||
if sqlalchemy.__version__ < '0.7':
|
||||
pickle_coltype = PickleType(pickle_protocol, mutable=False)
|
||||
else:
|
||||
pickle_coltype = PickleType(pickle_protocol)
|
||||
self.jobs_t = Table(
|
||||
tablename, metadata or MetaData(),
|
||||
Column('id', Integer,
|
||||
Sequence(tablename + '_id_seq', optional=True),
|
||||
primary_key=True),
|
||||
Column('trigger', PickleType(pickle_protocol, mutable=False),
|
||||
nullable=False),
|
||||
Column('trigger', pickle_coltype, nullable=False),
|
||||
Column('func_ref', String(1024), nullable=False),
|
||||
Column('args', PickleType(pickle_protocol, mutable=False),
|
||||
nullable=False),
|
||||
Column('kwargs', PickleType(pickle_protocol, mutable=False),
|
||||
nullable=False),
|
||||
Column('args', pickle_coltype, nullable=False),
|
||||
Column('kwargs', pickle_coltype, nullable=False),
|
||||
Column('name', Unicode(1024)),
|
||||
Column('misfire_grace_time', Integer, nullable=False),
|
||||
Column('coalesce', Boolean, nullable=False),
|
||||
|
||||
@@ -35,7 +35,7 @@ class Scheduler(object):
|
||||
their execution.
|
||||
"""
|
||||
|
||||
_stopped = False
|
||||
_stopped = True
|
||||
_thread = None
|
||||
|
||||
def __init__(self, gconfig={}, **options):
|
||||
@@ -60,6 +60,7 @@ class Scheduler(object):
|
||||
self.misfire_grace_time = int(config.pop('misfire_grace_time', 1))
|
||||
self.coalesce = asbool(config.pop('coalesce', True))
|
||||
self.daemonic = asbool(config.pop('daemonic', True))
|
||||
self.standalone = asbool(config.pop('standalone', False))
|
||||
|
||||
# Configure the thread pool
|
||||
if 'threadpool' in config:
|
||||
@@ -85,6 +86,12 @@ class Scheduler(object):
    def start(self):
        """
        Starts the scheduler in a new thread.

        In threaded mode (the default), this method will return immediately
        after starting the scheduler thread.

        In standalone mode, this method will block until there are no more
        scheduled jobs.
        """
        if self.running:
            raise SchedulerAlreadyRunningError
@@ -99,11 +106,15 @@ class Scheduler(object):
        del self._pending_jobs[:]

        self._stopped = False
        self._thread = Thread(target=self._main_loop, name='APScheduler')
        self._thread.setDaemon(self.daemonic)
        self._thread.start()
        if self.standalone:
            self._main_loop()
        else:
            self._thread = Thread(target=self._main_loop, name='APScheduler')
            self._thread.setDaemon(self.daemonic)
            self._thread.start()

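A small sketch of the standalone mode described above (assumed apscheduler 2.1 behaviour; the job and delay are made up): with standalone=True, start() runs the main loop in the calling thread and only returns once no scheduled jobs remain.

    from datetime import datetime, timedelta
    from apscheduler.scheduler import Scheduler

    def once():
        print('done')

    sched = Scheduler(standalone=True)
    sched.add_date_job(once, datetime.now() + timedelta(seconds=5))
    sched.start()  # blocks here until the job has run, then shuts down
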
    def shutdown(self, wait=True, shutdown_threadpool=True):
    def shutdown(self, wait=True, shutdown_threadpool=True,
                 close_jobstores=True):
        """
        Shuts down the scheduler and terminates the thread.
        Does not interrupt any currently running jobs.
@@ -111,6 +122,7 @@ class Scheduler(object):
        :param wait: ``True`` to wait until all currently executing jobs have
            finished (if ``shutdown_threadpool`` is also ``True``)
        :param shutdown_threadpool: ``True`` to shut down the thread pool
        :param close_jobstores: ``True`` to close all job stores after shutdown
        """
        if not self.running:
            return
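For example, continuing the sketches above (illustrative only), a clean stop that also releases job store connections:

    sched.shutdown(wait=True, close_jobstores=True)  # wait for running jobs, then close stores
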
@@ -123,11 +135,19 @@ class Scheduler(object):
|
||||
self._threadpool.shutdown(wait)
|
||||
|
||||
# Wait until the scheduler thread terminates
|
||||
self._thread.join()
|
||||
if self._thread:
|
||||
self._thread.join()
|
||||
|
||||
# Close all job stores
|
||||
if close_jobstores:
|
||||
for jobstore in itervalues(self._jobstores):
|
||||
jobstore.close()
|
||||
|
||||
@property
|
||||
def running(self):
|
||||
return not self._stopped and self._thread and self._thread.isAlive()
|
||||
thread_alive = self._thread and self._thread.isAlive()
|
||||
standalone = getattr(self, 'standalone', False)
|
||||
return not self._stopped and (standalone or thread_alive)
|
||||
|
||||
def add_jobstore(self, jobstore, alias, quiet=False):
|
||||
"""
|
||||
@@ -156,21 +176,25 @@ class Scheduler(object):
        if not quiet:
            self._wakeup.set()

    def remove_jobstore(self, alias):
    def remove_jobstore(self, alias, close=True):
        """
        Removes the job store by the given alias from this scheduler.

        :param close: ``True`` to close the job store after removing it
        :type alias: str
        """
        self._jobstores_lock.acquire()
        try:
            try:
                del self._jobstores[alias]
            except KeyError:
            jobstore = self._jobstores.pop(alias)
            if not jobstore:
                raise KeyError('No such job store: %s' % alias)
        finally:
            self._jobstores_lock.release()

        # Close the job store if requested
        if close:
            jobstore.close()

        # Notify listeners that a job store has been removed
        self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_REMOVED, alias))

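Illustrative call, reusing the hypothetical 'redis' alias from the earlier sketch: removing the store and closing its connection pool in one step.

    sched.remove_jobstore('redis', close=True)
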
@@ -245,8 +269,10 @@ class Scheduler(object):
|
||||
**options):
|
||||
"""
|
||||
Adds the given job to the job list and notifies the scheduler thread.
|
||||
Any extra keyword arguments are passed along to the constructor of the
|
||||
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
|
||||
|
||||
:param trigger: alias of the job store to store the job in
|
||||
:param trigger: trigger that determines when ``func`` is called
|
||||
:param func: callable to run at the given time
|
||||
:param args: list of positional arguments to call func with
|
||||
:param kwargs: dict of keyword arguments to call func with
|
||||
@@ -276,6 +302,8 @@ class Scheduler(object):
|
||||
def add_date_job(self, func, date, args=None, kwargs=None, **options):
|
||||
"""
|
||||
Schedules a job to be completed on a specific date and time.
|
||||
Any extra keyword arguments are passed along to the constructor of the
|
||||
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
|
||||
|
||||
:param func: callable to run at the given time
|
||||
:param date: the date/time to run the job at
|
||||
@@ -294,6 +322,8 @@ class Scheduler(object):
|
||||
**options):
|
||||
"""
|
||||
Schedules a job to be completed on specified intervals.
|
||||
Any extra keyword arguments are passed along to the constructor of the
|
||||
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
|
||||
|
||||
:param func: callable to run
|
||||
:param weeks: number of weeks to wait
|
||||
@@ -322,6 +352,8 @@ class Scheduler(object):
|
||||
"""
|
||||
Schedules a job to be completed on times that match the given
|
||||
expressions.
|
||||
Any extra keyword arguments are passed along to the constructor of the
|
||||
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
|
||||
|
||||
:param func: callable to run
|
||||
:param year: year to run on
|
||||
@@ -352,6 +384,8 @@ class Scheduler(object):
|
||||
This decorator does not wrap its host function.
|
||||
Unscheduling decorated functions is possible by passing the ``job``
|
||||
attribute of the scheduled function to :meth:`unschedule_job`.
|
||||
Any extra keyword arguments are passed along to the constructor of the
|
||||
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
|
||||
"""
|
||||
def inner(func):
|
||||
func.job = self.add_cron_job(func, **options)
|
||||
@@ -364,6 +398,8 @@ class Scheduler(object):
|
||||
This decorator does not wrap its host function.
|
||||
Unscheduling decorated functions is possible by passing the ``job``
|
||||
attribute of the scheduled function to :meth:`unschedule_job`.
|
||||
Any extra keyword arguments are passed along to the constructor of the
|
||||
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
|
||||
"""
|
||||
def inner(func):
|
||||
func.job = self.add_interval_job(func, **options)
|
||||
@@ -517,7 +553,8 @@ class Scheduler(object):
|
||||
job.runs += len(run_times)
|
||||
|
||||
# Update the job, but don't keep finished jobs around
|
||||
if job.compute_next_run_time(now + timedelta(microseconds=1)):
|
||||
if job.compute_next_run_time(
|
||||
now + timedelta(microseconds=1)):
|
||||
jobstore.update_job(job)
|
||||
else:
|
||||
self._remove_job(job, alias, jobstore)
|
||||
@@ -550,10 +587,15 @@ class Scheduler(object):
|
||||
logger.debug('Next wakeup is due at %s (in %f seconds)',
|
||||
next_wakeup_time, wait_seconds)
|
||||
self._wakeup.wait(wait_seconds)
|
||||
self._wakeup.clear()
|
||||
elif self.standalone:
|
||||
logger.debug('No jobs left; shutting down scheduler')
|
||||
self.shutdown()
|
||||
break
|
||||
else:
|
||||
logger.debug('No jobs; waiting until a job is added')
|
||||
self._wakeup.wait()
|
||||
self._wakeup.clear()
|
||||
self._wakeup.clear()
|
||||
|
||||
logger.info('Scheduler has been shut down')
|
||||
self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))
|
||||
|
||||
@@ -21,8 +21,10 @@ class CronTrigger(object):
|
||||
if self.start_date:
|
||||
self.start_date = convert_to_datetime(self.start_date)
|
||||
|
||||
# Yank out all None valued fields
|
||||
# Check field names and yank out all None valued fields
|
||||
for key, value in list(iteritems(values)):
|
||||
if key not in self.FIELD_NAMES:
|
||||
raise TypeError('Invalid field name: %s' % key)
|
||||
if value is None:
|
||||
del values[key]
|
||||
|
||||
@@ -111,17 +113,17 @@ class CronTrigger(object):
|
||||
|
||||
if next_value is None:
|
||||
# No valid value was found
|
||||
next_date, fieldnum = self._increment_field_value(next_date,
|
||||
fieldnum - 1)
|
||||
next_date, fieldnum = self._increment_field_value(
|
||||
next_date, fieldnum - 1)
|
||||
elif next_value > curr_value:
|
||||
# A valid, but higher than the starting value, was found
|
||||
if field.REAL:
|
||||
next_date = self._set_field_value(next_date, fieldnum,
|
||||
next_value)
|
||||
next_date = self._set_field_value(
|
||||
next_date, fieldnum, next_value)
|
||||
fieldnum += 1
|
||||
else:
|
||||
next_date, fieldnum = self._increment_field_value(next_date,
|
||||
fieldnum)
|
||||
next_date, fieldnum = self._increment_field_value(
|
||||
next_date, fieldnum)
|
||||
else:
|
||||
# A valid value was found, no changes necessary
|
||||
fieldnum += 1
|
||||
|
||||
@@ -8,7 +8,7 @@ import re
|
||||
from apscheduler.util import asint
|
||||
|
||||
__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression',
|
||||
'WeekdayPositionExpression')
|
||||
'WeekdayPositionExpression', 'LastDayOfMonthExpression')
|
||||
|
||||
|
||||
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
|
||||
@@ -176,3 +176,19 @@ class WeekdayPositionExpression(AllExpression):
|
||||
return "%s('%s', '%s')" % (self.__class__.__name__,
|
||||
self.options[self.option_num],
|
||||
WEEKDAYS[self.weekday])
|
||||
|
||||
|
||||
class LastDayOfMonthExpression(AllExpression):
|
||||
value_re = re.compile(r'last', re.IGNORECASE)
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def get_next_value(self, date, field):
|
||||
return monthrange(date.year, date.month)[1]
|
||||
|
||||
def __str__(self):
|
||||
return 'last'
|
||||
|
||||
def __repr__(self):
|
||||
return "%s()" % self.__class__.__name__
|
||||
|
||||
@@ -85,7 +85,8 @@ class WeekField(BaseField):
|
||||
|
||||
|
||||
class DayOfMonthField(BaseField):
|
||||
COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression]
|
||||
COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression,
|
||||
LastDayOfMonthExpression]
|
||||
|
||||
def get_max(self, dateval):
|
||||
return monthrange(dateval.year, dateval.month)[1]
|
||||
|
||||
@@ -6,7 +6,6 @@ from datetime import date, datetime, timedelta
|
||||
from time import mktime
|
||||
import re
|
||||
import sys
|
||||
from types import MethodType
|
||||
|
||||
__all__ = ('asint', 'asbool', 'convert_to_datetime', 'timedelta_seconds',
|
||||
'time_difference', 'datetime_ceil', 'combine_opts',
|
||||
@@ -64,7 +63,7 @@ def convert_to_datetime(input):
|
||||
return input
|
||||
elif isinstance(input, date):
|
||||
return datetime.fromordinal(input.toordinal())
|
||||
elif isinstance(input, str):
|
||||
elif isinstance(input, basestring):
|
||||
m = _DATE_REGEX.match(input)
|
||||
if not m:
|
||||
raise ValueError('Invalid date string')
|
||||
@@ -109,7 +108,7 @@ def datetime_ceil(dateval):
|
||||
"""
|
||||
if dateval.microsecond > 0:
|
||||
return dateval + timedelta(seconds=1,
|
||||
microseconds= -dateval.microsecond)
|
||||
microseconds=-dateval.microsecond)
|
||||
return dateval
|
||||
|
||||
|
||||
@@ -143,7 +142,8 @@ def get_callable_name(func):
|
||||
if f_self and hasattr(func, '__name__'):
|
||||
if isinstance(f_self, type):
|
||||
# class method
|
||||
return '%s.%s' % (f_self.__name__, func.__name__)
|
||||
clsname = getattr(f_self, '__qualname__', None) or f_self.__name__
|
||||
return '%s.%s' % (clsname, func.__name__)
|
||||
# bound method
|
||||
return '%s.%s' % (f_self.__class__.__name__, func.__name__)
|
||||
|
||||
@@ -169,7 +169,7 @@ def obj_to_ref(obj):
|
||||
raise ValueError
|
||||
except Exception:
|
||||
raise ValueError('Cannot determine the reference to %s' % repr(obj))
|
||||
|
||||
|
||||
return ref
|
||||
|
||||
|
||||
|
||||
libs/backports/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
# This is a Python "namespace package" http://www.python.org/dev/peps/pep-0382/
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
libs/backports/ssl_match_hostname/README.txt (new file, 42 lines)
@@ -0,0 +1,42 @@

The ssl.match_hostname() function from Python 3.2
=================================================

The Secure Sockets Layer is only actually *secure*
if you check the hostname in the certificate returned
by the server to which you are connecting,
and verify that it matches the hostname
that you are trying to reach.

But the matching logic, defined in `RFC2818`_,
can be a bit tricky to implement on your own.
So the ``ssl`` package in the Standard Library of Python 3.2
now includes a ``match_hostname()`` function
for performing this check instead of requiring every application
to implement the check separately.

This backport brings ``match_hostname()`` to users
of earlier versions of Python.
Simply make this distribution a dependency of your package,
and then use it like this::

    from backports.ssl_match_hostname import match_hostname, CertificateError
    ...
    sslsock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_SSLv3,
                              cert_reqs=ssl.CERT_REQUIRED, ca_certs=...)
    try:
        match_hostname(sslsock.getpeercert(), hostname)
    except CertificateError, ce:
        ...

Note that the ``ssl`` module is only included in the Standard Library
for Python 2.6 and later;
users of Python 2.5 or earlier versions
will also need to install the ``ssl`` distribution
from the Python Package Index to use code like that shown above.

Brandon Craig Rhodes is merely the packager of this distribution;
the actual code inside comes verbatim from Python 3.2.

.. _RFC2818: http://tools.ietf.org/html/rfc2818.html

libs/backports/ssl_match_hostname/__init__.py (new file, 60 lines)
@@ -0,0 +1,60 @@
"""The match_hostname() function from Python 3.2, essential when using SSL."""

import re

__version__ = '3.2a3'

class CertificateError(ValueError):
    pass

def _dnsname_to_pat(dn):
    pats = []
    for frag in dn.split(r'.'):
        if frag == '*':
            # When '*' is a fragment by itself, it matches a non-empty dotless
            # fragment.
            pats.append('[^.]+')
        else:
            # Otherwise, '*' matches any dotless fragment.
            frag = re.escape(frag)
            pats.append(frag.replace(r'\*', '[^.]*'))
    return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)

def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 rules
    are mostly followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_to_pat(value).match(hostname):
                return
            dnsnames.append(value)
    if not san:
        # The subject is only checked when subjectAltName is empty
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_to_pat(value).match(hostname):
                        return
                    dnsnames.append(value)
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")

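A small illustration of the matching rules above, using a hand-built certificate dict rather than a live socket (the hostnames are made up):

    from backports.ssl_match_hostname import match_hostname, CertificateError

    cert = {'subjectAltName': (('DNS', '*.example.org'),)}
    match_hostname(cert, 'api.example.org')        # passes: '*' matches a single label
    try:
        match_hostname(cert, 'a.b.example.org')    # raises: '*' never spans a dot
    except CertificateError:
        pass
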
@@ -1,143 +0,0 @@
|
||||
BitTorrent Open Source License
|
||||
|
||||
Version 1.1
|
||||
|
||||
This BitTorrent Open Source License (the "License") applies to the BitTorrent client and related software products as well as any updates or maintenance releases of that software ("BitTorrent Products") that are distributed by BitTorrent, Inc. ("Licensor"). Any BitTorrent Product licensed pursuant to this License is a Licensed Product. Licensed Product, in its entirety, is protected by U.S. copyright law. This License identifies the terms under which you may use, copy, distribute or modify Licensed Product.
|
||||
|
||||
Preamble
|
||||
|
||||
This Preamble is intended to describe, in plain English, the nature and scope of this License. However, this Preamble is not a part of this license. The legal effect of this License is dependent only upon the terms of the License and not this Preamble.
|
||||
|
||||
This License complies with the Open Source Definition and is derived from the Jabber Open Source License 1.0 (the "JOSL"), which has been approved by Open Source Initiative. Sections 4(c) and 4(f)(iii) from the JOSL have been deleted.
|
||||
|
||||
This License provides that:
|
||||
|
||||
1. You may use or give away the Licensed Product, alone or as a component of an aggregate software distribution containing programs from several different sources. No royalty or other fee is required.
|
||||
|
||||
2. Both Source Code and executable versions of the Licensed Product, including Modifications made by previous Contributors, are available for your use. (The terms "Licensed Product," "Modifications," "Contributors" and "Source Code" are defined in the License.)
|
||||
|
||||
3. You are allowed to make Modifications to the Licensed Product, and you can create Derivative Works from it. (The term "Derivative Works" is defined in the License.)
|
||||
|
||||
4. By accepting the Licensed Product under the provisions of this License, you agree that any Modifications you make to the Licensed Product and then distribute are governed by the provisions of this License. In particular, you must make the Source Code of your Modifications available to others free of charge and without a royalty.
|
||||
|
||||
5. You may sell, accept donations or otherwise receive compensation for executable versions of a Licensed Product, without paying a royalty or other fee to the Licensor or any Contributor, provided that such executable versions contain your or another Contributor?s material Modifications. For the avoidance of doubt, to the extent your executable version of a Licensed Product does not contain your or another Contributor?s material Modifications, you may not sell, accept donations or otherwise receive compensation for such executable.
|
||||
|
||||
You may use the Licensed Product for any purpose, but the Licensor is not providing you any warranty whatsoever, nor is the Licensor accepting any liability in the event that the Licensed Product doesn't work properly or causes you any injury or damages.
|
||||
|
||||
6. If you sublicense the Licensed Product or Derivative Works, you may charge fees for warranty or support, or for accepting indemnity or liability obligations to your customers. You cannot charge for, sell, accept donations or otherwise receive compensation for the Source Code.
|
||||
|
||||
7. If you assert any patent claims against the Licensor relating to the Licensed Product, or if you breach any terms of the License, your rights to the Licensed Product under this License automatically terminate.
|
||||
|
||||
You may use this License to distribute your own Derivative Works, in which case the provisions of this License will apply to your Derivative Works just as they do to the original Licensed Product.
|
||||
|
||||
Alternatively, you may distribute your Derivative Works under any other OSI-approved Open Source license, or under a proprietary license of your choice. If you use any license other than this License, however, you must continue to fulfill the requirements of this License (including the provisions relating to publishing the Source Code) for those portions of your Derivative Works that consist of the Licensed Product, including the files containing Modifications.
|
||||
|
||||
New versions of this License may be published from time to time in connection with new versions of a Licensed Product or otherwise. You may choose to continue to use the license terms in this version of the License for the Licensed Product that was originally licensed hereunder, however, the new versions of this License will at all times apply to new versions of the Licensed Product released by Licensor after the release of the new version of this License. Only the Licensor has the right to change the License terms as they apply to the Licensed Product.
|
||||
|
||||
This License relies on precise definitions for certain terms. Those terms are defined when they are first used, and the definitions are repeated for your convenience in a Glossary at the end of the License.
|
||||
|
||||
License Terms
|
||||
|
||||
1. Grant of License From Licensor. Subject to the terms and conditions of this License, Licensor hereby grants you a world-wide, royalty-free, non-exclusive license, subject to third party intellectual property claims, to do the following:
|
||||
|
||||
a. Use, reproduce, modify, display, perform, sublicense and distribute any Modifications created by a Contributor or portions thereof, in both Source Code or as an executable program, either on an unmodified basis or as part of Derivative Works.
|
||||
|
||||
b. Under claims of patents now or hereafter owned or controlled by Contributor, to make, use, sell, offer for sale, have made, and/or otherwise dispose of Modifications or portions thereof, but solely to the extent that any such claim is necessary to enable you to make, use, sell, offer for sale, have made, and/or otherwise dispose of Modifications or portions thereof or Derivative Works thereof.
|
||||
|
||||
2. Grant of License to Modifications From Contributor. "Modifications" means any additions to or deletions from the substance or structure of (i) a file containing a Licensed Product, or (ii) any new file that contains any part of a Licensed Product. Hereinafter in this License, the term "Licensed Product" shall include all previous Modifications that you receive from any Contributor. Subject to the terms and conditions of this License, By application of the provisions in Section 4(a) below, each person or entity who created or contributed to the creation of, and distributed, a Modification (a "Contributor") hereby grants you a world-wide, royalty-free, non-exclusive license, subject to third party intellectual property claims, to do the following:
|
||||
|
||||
a. Use, reproduce, modify, display, perform, sublicense and distribute any Modifications created by such Contributor or portions thereof, in both Source Code or as an executable program, either on an unmodified basis or as part of Derivative Works.
|
||||
|
||||
b. Under claims of patents now or hereafter owned or controlled by Contributor, to make, use, sell, offer for sale, have made, and/or otherwise dispose of Modifications or portions thereof, but solely to the extent that any such claim is necessary to enable you to make, use, sell, offer for sale, have made, and/or otherwise dispose of Modifications or portions thereof or Derivative Works thereof.
|
||||
|
||||
3. Exclusions From License Grant. Nothing in this License shall be deemed to grant any rights to trademarks, copyrights, patents, trade secrets or any other intellectual property of Licensor or any Contributor except as expressly stated herein. No patent license is granted separate from the Licensed Product, for code that you delete from the Licensed Product, or for combinations of the Licensed Product with other software or hardware. No right is granted to the trademarks of Licensor or any Contributor even if such marks are included in the Licensed Product. Nothing in this License shall be interpreted to prohibit Licensor from licensing under different terms from this License any code that Licensor otherwise would have a right to license. As an express condition for your use of the Licensed Product, you hereby agree that you will not, without the prior written consent of Licensor, use any trademarks, copyrights, patents, trade secrets or any other intellectual property of Licensor or any Contributor except as expressly stated herein. For the avoidance of doubt and without limiting the foregoing, you hereby agree that you will not use or display any trademark of Licensor or any Contributor in any domain name, directory filepath, advertisement, link or other reference to you in any manner or in any media.
|
||||
|
||||
4. Your Obligations Regarding Distribution.
|
||||
|
||||
a. Application of This License to Your Modifications. As an express condition for your use of the Licensed Product, you hereby agree that any Modifications that you create or to which you contribute, and which you distribute, are governed by the terms of this License including, without limitation, Section 2. Any Modifications that you create or to which you contribute may be distributed only under the terms of this License or a future version of this License released under Section 7. You must include a copy of this License with every copy of the Modifications you distribute. You agree not to offer or impose any terms on any Source Code or executable version of the Licensed Product or Modifications that alter or restrict the applicable version of this License or the recipients' rights hereunder. However, you may include an additional document offering the additional rights described in Section 4(d).
|
||||
|
||||
b. Availability of Source Code. You must make available, without charge, under the terms of this License, the Source Code of the Licensed Product and any Modifications that you distribute, either on the same media as you distribute any executable or other form of the Licensed Product, or via a mechanism generally accepted in the software development community for the electronic transfer of data (an "Electronic Distribution Mechanism"). The Source Code for any version of Licensed Product or Modifications that you distribute must remain available for as long as any executable or other form of the Licensed Product is distributed by you. You are responsible for ensuring that the Source Code version remains available even if the Electronic Distribution Mechanism is maintained by a third party.
c. Intellectual Property Matters.
i. Third Party Claims. If you have knowledge that a license to a third party's intellectual property right is required to exercise the rights granted by this License, you must include a text file with the Source Code distribution titled "LEGAL" that describes the claim and the party making the claim in sufficient detail that a recipient will know whom to contact. If you obtain such knowledge after you make any Modifications available as described in Section 4(b), you shall promptly modify the LEGAL file in all copies you make available thereafter and shall take other steps (such as notifying appropriate mailing lists or newsgroups) reasonably calculated to inform those who received the Licensed Product from you that new knowledge has been obtained.
ii. Contributor APIs. If your Modifications include an application programming interface ("API") and you have knowledge of patent licenses that are reasonably necessary to implement that API, you must also include this information in the LEGAL file.
iii. Representations. You represent that, except as disclosed pursuant to 4(c)(i) above, you believe that any Modifications you distribute are your original creations and that you have sufficient rights to grant the rights conveyed by this License.
d. Required Notices. You must duplicate this License in any documentation you provide along with the Source Code of any Modifications you create or to which you contribute, and which you distribute, wherever you describe recipients' rights relating to Licensed Product. You must duplicate the notice contained in Exhibit A (the "Notice") in each file of the Source Code of any copy you distribute of the Licensed Product. If you created a Modification, you may add your name as a Contributor to the Notice. If it is not possible to put the Notice in a particular Source Code file due to its structure, then you must include such Notice in a location (such as a relevant directory file) where a user would be likely to look for such a notice. You may choose to offer, and charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Licensed Product. However, you may do so only on your own behalf, and not on behalf of the Licensor or any Contributor. You must make it clear that any such warranty, support, indemnity or liability obligation is offered by you alone, and you hereby agree to indemnify the Licensor and every Contributor for any liability incurred by the Licensor or such Contributor as a result of warranty, support, indemnity or liability terms you offer.
e. Distribution of Executable Versions. You may distribute Licensed Product as an executable program under a license of your choice that may contain terms different from this License provided (i) you have satisfied the requirements of Sections 4(a) through 4(d) for that distribution, (ii) you include a conspicuous notice in the executable version, related documentation and collateral materials stating that the Source Code version of the Licensed Product is available under the terms of this License, including a description of how and where you have fulfilled the obligations of Section 4(b), and (iii) you make it clear that any terms that differ from this License are offered by you alone, not by Licensor or any Contributor. You hereby agree to indemnify the Licensor and every Contributor for any liability incurred by Licensor or such Contributor as a result of any terms you offer.
f. Distribution of Derivative Works. You may create Derivative Works (e.g., combinations of some or all of the Licensed Product with other code) and distribute the Derivative Works as products under any other license you select, with the proviso that the requirements of this License are fulfilled for those portions of the Derivative Works that consist of the Licensed Product or any Modifications thereto.
g. Compensation for Distribution of Executable Versions of Licensed Products, Modifications or Derivative Works. Notwithstanding any provision of this License to the contrary, by distributing, selling, licensing, sublicensing or otherwise making available any Licensed Product, or Modification or Derivative Work thereof, you and Licensor hereby acknowledge and agree that you may sell, license or sublicense for a fee, accept donations or otherwise receive compensation for executable versions of a Licensed Product, without paying a royalty or other fee to the Licensor or any other Contributor, provided that such executable versions (i) contain your or another Contributor's material Modifications, or (ii) are otherwise material Derivative Works. For purposes of this License, an executable version of the Licensed Product will be deemed to contain a material Modification, or will otherwise be deemed a material Derivative Work, if (a) the Licensed Product is modified with your own or a third party's software programs or other code, and/or the Licensed Product is combined with a number of your own or a third party's software programs or code, respectively, and (b) such software programs or code add or contribute material value, functionality or features to the Licensed Product. For the avoidance of doubt, to the extent your executable version of a Licensed Product does not contain your or another Contributor's material Modifications or is otherwise not a material Derivative Work, in each case as contemplated herein, you may not sell, license or sublicense for a fee, accept donations or otherwise receive compensation for such executable. Additionally, without limitation of the foregoing and notwithstanding any provision of this License to the contrary, you cannot charge for, sell, license or sublicense for a fee, accept donations or otherwise receive compensation for the Source Code.
5. Inability to Comply Due to Statute or Regulation. If it is impossible for you to comply with any of the terms of this License with respect to some or all of the Licensed Product due to statute, judicial order, or regulation, then you must (i) comply with the terms of this License to the maximum extent possible, (ii) cite the statute or regulation that prohibits you from adhering to the License, and (iii) describe the limitations and the code they affect. Such description must be included in the LEGAL file described in Section 4(d), and must be included with all distributions of the Source Code. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill at computer programming to be able to understand it.
6. Application of This License. This License applies to code to which Licensor or Contributor has attached the Notice in Exhibit A, which is incorporated herein by this reference.
7. Versions of This License.
a. New Versions. Licensor may publish from time to time revised and/or new versions of the License.
b. Effect of New Versions. Once Licensed Product has been published under a particular version of the License, you may always continue to use it under the terms of that version, provided that any such license be in full force and effect at the time, and has not been revoked or otherwise terminated. You may also choose to use such Licensed Product under the terms of any subsequent version (but not any prior version) of the License published by Licensor. No one other than Licensor has the right to modify the terms applicable to Licensed Product created under this License.
c. Derivative Works of this License. If you create or use a modified version of this License, which you may do only in order to apply it to software that is not already a Licensed Product under this License, you must rename your license so that it is not confusingly similar to this License, and must make it clear that your license contains terms that differ from this License. In so naming your license, you may not use any trademark of Licensor or any Contributor.
8. Disclaimer of Warranty. LICENSED PRODUCT IS PROVIDED UNDER THIS LICENSE ON AN AS IS BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE LICENSED PRODUCT IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LICENSED PRODUCT IS WITH YOU. SHOULD LICENSED PRODUCT PROVE DEFECTIVE IN ANY RESPECT, YOU (AND NOT THE LICENSOR OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF LICENSED PRODUCT IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
9. Termination.
a. Automatic Termination Upon Breach. This license and the rights granted hereunder will terminate automatically if you fail to comply with the terms herein and fail to cure such breach within ten (10) days of being notified of the breach by the Licensor. For purposes of this provision, proof of delivery via email to the address listed in the "WHOIS" database of the registrar for any website through which you distribute or market any Licensed Product, or to any alternate email address which you designate in writing to the Licensor, shall constitute sufficient notification. All sublicenses to the Licensed Product that are properly granted shall survive any termination of this license so long as they continue to comply with the terms of this License. Provisions that, by their nature, must remain in effect beyond the termination of this License, shall survive.
b. Termination Upon Assertion of Patent Infringement. If you initiate litigation by asserting a patent infringement claim (excluding declaratory judgment actions) against Licensor or a Contributor (Licensor or Contributor against whom you file such an action is referred to herein as Respondent) alleging that Licensed Product directly or indirectly infringes any patent, then any and all rights granted by such Respondent to you under Sections 1 or 2 of this License shall terminate prospectively upon sixty (60) days notice from Respondent (the "Notice Period") unless within that Notice Period you either agree in writing (i) to pay Respondent a mutually agreeable reasonable royalty for your past or future use of Licensed Product made by such Respondent, or (ii) withdraw your litigation claim with respect to Licensed Product against such Respondent. If within said Notice Period a reasonable royalty and payment arrangement are not mutually agreed upon in writing by the parties or the litigation claim is not withdrawn, the rights granted by Licensor to you under Sections 1 and 2 automatically terminate at the expiration of said Notice Period.
c. Reasonable Value of This License. If you assert a patent infringement claim against Respondent alleging that Licensed Product directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by said Respondent under Sections 1 and 2 shall be taken into account in determining the amount or value of any payment or license.
d. No Retroactive Effect of Termination. In the event of termination under Sections 9(a) or 9(b) above, all end user license agreements (excluding licenses to distributors and resellers) that have been validly granted by you or any distributor hereunder prior to termination shall survive termination.
10. Limitation of Liability. UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL THE LICENSOR, ANY CONTRIBUTOR, OR ANY DISTRIBUTOR OF LICENSED PRODUCT, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
11. Responsibility for Claims. As between Licensor and Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License. You agree to work with Licensor and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
12. U.S. Government End Users. The Licensed Product is a commercial item, as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of commercial computer software and commercial computer software documentation, as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Licensed Product with only those rights set forth herein.
13. Miscellaneous. This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by California law provisions (except to the extent applicable law, if any, provides otherwise), excluding its conflict-of-law provisions. You expressly agree that in any litigation relating to this license the losing party shall be responsible for costs including, without limitation, court costs and reasonable attorneys fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation that provides that the language of a contract shall be construed against the drafter shall not apply to this License.
14. Definition of You in This License. You throughout this License, whether in upper or lower case, means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License or a future version of this License issued under Section 7. For legal entities, you includes any entity that controls, is controlled by, is under common control with, or affiliated with, you. For purposes of this definition, control means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. You are responsible for advising any affiliated entity of the terms of this License, and that any rights or privileges derived from or obtained by way of this License are subject to the restrictions outlined herein.
15. Glossary. All defined terms in this License that are used in more than one Section of this License are repeated here, in alphabetical order, for the convenience of the reader. The Section of this License in which each defined term is first used is shown in parentheses.
Contributor: Each person or entity who created or contributed to the creation of, and distributed, a Modification. (See Section 2)
Derivative Works: That term as used in this License is defined under U.S. copyright law. (See Section 1(b))
License: This BitTorrent Open Source License. (See first paragraph of License)
Licensed Product: Any BitTorrent Product licensed pursuant to this License. The term "Licensed Product" includes all previous Modifications from any Contributor that you receive. (See first paragraph of License and Section 2)
Licensor: BitTorrent, Inc. (See first paragraph of License)
Modifications: Any additions to or deletions from the substance or structure of (i) a file containing Licensed Product, or (ii) any new file that contains any part of Licensed Product. (See Section 2)
Notice: The notice contained in Exhibit A. (See Section 4(e))
Source Code: The preferred form for making modifications to the Licensed Product, including all modules contained therein, plus any associated interface definition files, scripts used to control compilation and installation of an executable program, or a list of differential comparisons against the Source Code of the Licensed Product. (See Section 1(a))
You: This term is defined in Section 14 of this License.
EXHIBIT A
The Notice below must appear in each file of the Source Code of any copy you distribute of the Licensed Product or any hereto. Contributors to any Modifications may add their own copyright notices to identify their own contributions.
License:
The contents of this file are subject to the BitTorrent Open Source License Version 1.0 (the License). You may not copy or use this file, in either source code or executable form, except in compliance with the License. You may obtain a copy of the License at http://www.bittorrent.com/license/.
Software distributed under the License is distributed on an AS IS basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License.
@@ -1 +1,131 @@
|
||||
from bencode import *
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Petru Paler
|
||||
|
||||
from BTL import BTFailure
|
||||
|
||||
|
||||
def decode_int(x, f):
|
||||
f += 1
|
||||
newf = x.index('e', f)
|
||||
n = int(x[f:newf])
|
||||
if x[f] == '-':
|
||||
if x[f + 1] == '0':
|
||||
raise ValueError
|
||||
elif x[f] == '0' and newf != f+1:
|
||||
raise ValueError
|
||||
return (n, newf+1)
|
||||
|
||||
def decode_string(x, f):
|
||||
colon = x.index(':', f)
|
||||
n = int(x[f:colon])
|
||||
if x[f] == '0' and colon != f+1:
|
||||
raise ValueError
|
||||
colon += 1
|
||||
return (x[colon:colon+n], colon+n)
|
||||
|
||||
def decode_list(x, f):
|
||||
r, f = [], f+1
|
||||
while x[f] != 'e':
|
||||
v, f = decode_func[x[f]](x, f)
|
||||
r.append(v)
|
||||
return (r, f + 1)
|
||||
|
||||
def decode_dict(x, f):
|
||||
r, f = {}, f+1
|
||||
while x[f] != 'e':
|
||||
k, f = decode_string(x, f)
|
||||
r[k], f = decode_func[x[f]](x, f)
|
||||
return (r, f + 1)
|
||||
|
||||
decode_func = {}
|
||||
decode_func['l'] = decode_list
|
||||
decode_func['d'] = decode_dict
|
||||
decode_func['i'] = decode_int
|
||||
decode_func['0'] = decode_string
|
||||
decode_func['1'] = decode_string
|
||||
decode_func['2'] = decode_string
|
||||
decode_func['3'] = decode_string
|
||||
decode_func['4'] = decode_string
|
||||
decode_func['5'] = decode_string
|
||||
decode_func['6'] = decode_string
|
||||
decode_func['7'] = decode_string
|
||||
decode_func['8'] = decode_string
|
||||
decode_func['9'] = decode_string
|
||||
|
||||
def bdecode(x):
|
||||
try:
|
||||
r, l = decode_func[x[0]](x, 0)
|
||||
except (IndexError, KeyError, ValueError):
|
||||
raise BTFailure("not a valid bencoded string")
|
||||
if l != len(x):
|
||||
raise BTFailure("invalid bencoded value (data after valid prefix)")
|
||||
return r
|
||||
|
||||
from types import StringType, IntType, LongType, DictType, ListType, TupleType
|
||||
|
||||
|
||||
class Bencached(object):
|
||||
|
||||
__slots__ = ['bencoded']
|
||||
|
||||
def __init__(self, s):
|
||||
self.bencoded = s
|
||||
|
||||
def encode_bencached(x,r):
|
||||
r.append(x.bencoded)
|
||||
|
||||
def encode_int(x, r):
|
||||
r.extend(('i', str(x), 'e'))
|
||||
|
||||
def encode_bool(x, r):
|
||||
if x:
|
||||
encode_int(1, r)
|
||||
else:
|
||||
encode_int(0, r)
|
||||
|
||||
def encode_string(x, r):
|
||||
r.extend((str(len(x)), ':', x))
|
||||
|
||||
def encode_list(x, r):
|
||||
r.append('l')
|
||||
for i in x:
|
||||
encode_func[type(i)](i, r)
|
||||
r.append('e')
|
||||
|
||||
def encode_dict(x,r):
|
||||
r.append('d')
|
||||
ilist = x.items()
|
||||
ilist.sort()
|
||||
for k, v in ilist:
|
||||
r.extend((str(len(k)), ':', k))
|
||||
encode_func[type(v)](v, r)
|
||||
r.append('e')
|
||||
|
||||
encode_func = {}
|
||||
encode_func[Bencached] = encode_bencached
|
||||
encode_func[IntType] = encode_int
|
||||
encode_func[LongType] = encode_int
|
||||
encode_func[StringType] = encode_string
|
||||
encode_func[ListType] = encode_list
|
||||
encode_func[TupleType] = encode_list
|
||||
encode_func[DictType] = encode_dict
|
||||
|
||||
try:
|
||||
from types import BooleanType
|
||||
encode_func[BooleanType] = encode_bool
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
def bencode(x):
|
||||
r = []
|
||||
encode_func[type(x)](x, r)
|
||||
return ''.join(r)
|
||||
|
||||
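For orientation, a minimal round trip with the bencode module added above looks like the sketch below. It is illustrative only and not part of this commit; the bare `bencode` import name is an assumption taken from the `from bencode import *` line in the diff, and the tracker URL is hypothetical.

# Hedged usage sketch for the bencode module above (adjust the import path
# to wherever the file actually lives in the tree).
from bencode import bencode, bdecode

meta = {
    'announce': 'http://tracker.example/announce',      # hypothetical tracker URL
    'info': {'name': 'example', 'piece length': 262144},
}

encoded = bencode(meta)     # dict keys are emitted in sorted order
decoded = bdecode(encoded)  # raises BTFailure on malformed input
assert decoded == meta
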
@@ -1,131 +0,0 @@
|
||||
# The contents of this file are subject to the BitTorrent Open Source License
|
||||
# Version 1.1 (the License). You may not copy or use this file, in either
|
||||
# source code or executable form, except in compliance with the License. You
|
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/.
|
||||
#
|
||||
# Software distributed under the License is distributed on an AS IS basis,
|
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
# for the specific language governing rights and limitations under the
|
||||
# License.
|
||||
|
||||
# Written by Petru Paler
|
||||
|
||||
from BTL import BTFailure
|
||||
|
||||
|
||||
def decode_int(x, f):
|
||||
f += 1
|
||||
newf = x.index('e', f)
|
||||
n = int(x[f:newf])
|
||||
if x[f] == '-':
|
||||
if x[f + 1] == '0':
|
||||
raise ValueError
|
||||
elif x[f] == '0' and newf != f+1:
|
||||
raise ValueError
|
||||
return (n, newf+1)
|
||||
|
||||
def decode_string(x, f):
|
||||
colon = x.index(':', f)
|
||||
n = int(x[f:colon])
|
||||
if x[f] == '0' and colon != f+1:
|
||||
raise ValueError
|
||||
colon += 1
|
||||
return (x[colon:colon+n], colon+n)
|
||||
|
||||
def decode_list(x, f):
|
||||
r, f = [], f+1
|
||||
while x[f] != 'e':
|
||||
v, f = decode_func[x[f]](x, f)
|
||||
r.append(v)
|
||||
return (r, f + 1)
|
||||
|
||||
def decode_dict(x, f):
|
||||
r, f = {}, f+1
|
||||
while x[f] != 'e':
|
||||
k, f = decode_string(x, f)
|
||||
r[k], f = decode_func[x[f]](x, f)
|
||||
return (r, f + 1)
|
||||
|
||||
decode_func = {}
|
||||
decode_func['l'] = decode_list
|
||||
decode_func['d'] = decode_dict
|
||||
decode_func['i'] = decode_int
|
||||
decode_func['0'] = decode_string
|
||||
decode_func['1'] = decode_string
|
||||
decode_func['2'] = decode_string
|
||||
decode_func['3'] = decode_string
|
||||
decode_func['4'] = decode_string
|
||||
decode_func['5'] = decode_string
|
||||
decode_func['6'] = decode_string
|
||||
decode_func['7'] = decode_string
|
||||
decode_func['8'] = decode_string
|
||||
decode_func['9'] = decode_string
|
||||
|
||||
def bdecode(x):
|
||||
try:
|
||||
r, l = decode_func[x[0]](x, 0)
|
||||
except (IndexError, KeyError, ValueError):
|
||||
raise BTFailure("not a valid bencoded string")
|
||||
if l != len(x):
|
||||
raise BTFailure("invalid bencoded value (data after valid prefix)")
|
||||
return r
|
||||
|
||||
from types import StringType, IntType, LongType, DictType, ListType, TupleType
|
||||
|
||||
|
||||
class Bencached(object):
|
||||
|
||||
__slots__ = ['bencoded']
|
||||
|
||||
def __init__(self, s):
|
||||
self.bencoded = s
|
||||
|
||||
def encode_bencached(x,r):
|
||||
r.append(x.bencoded)
|
||||
|
||||
def encode_int(x, r):
|
||||
r.extend(('i', str(x), 'e'))
|
||||
|
||||
def encode_bool(x, r):
|
||||
if x:
|
||||
encode_int(1, r)
|
||||
else:
|
||||
encode_int(0, r)
|
||||
|
||||
def encode_string(x, r):
|
||||
r.extend((str(len(x)), ':', x))
|
||||
|
||||
def encode_list(x, r):
|
||||
r.append('l')
|
||||
for i in x:
|
||||
encode_func[type(i)](i, r)
|
||||
r.append('e')
|
||||
|
||||
def encode_dict(x,r):
|
||||
r.append('d')
|
||||
ilist = x.items()
|
||||
ilist.sort()
|
||||
for k, v in ilist:
|
||||
r.extend((str(len(k)), ':', k))
|
||||
encode_func[type(v)](v, r)
|
||||
r.append('e')
|
||||
|
||||
encode_func = {}
|
||||
encode_func[Bencached] = encode_bencached
|
||||
encode_func[IntType] = encode_int
|
||||
encode_func[LongType] = encode_int
|
||||
encode_func[StringType] = encode_string
|
||||
encode_func[ListType] = encode_list
|
||||
encode_func[TupleType] = encode_list
|
||||
encode_func[DictType] = encode_dict
|
||||
|
||||
try:
|
||||
from types import BooleanType
|
||||
encode_func[BooleanType] = encode_bool
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
def bencode(x):
|
||||
r = []
|
||||
encode_func[type(x)](x, r)
|
||||
return ''.join(r)
|
||||
@@ -17,16 +17,17 @@ http://www.crummy.com/software/BeautifulSoup/bs4/doc/
|
||||
"""
|
||||
|
||||
__author__ = "Leonard Richardson (leonardr@segfault.org)"
|
||||
__version__ = "4.1.0"
|
||||
__copyright__ = "Copyright (c) 2004-2012 Leonard Richardson"
|
||||
__version__ = "4.3.2"
|
||||
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
|
||||
__license__ = "MIT"
|
||||
|
||||
__all__ = ['BeautifulSoup']
|
||||
|
||||
import os
|
||||
import re
|
||||
import warnings
|
||||
|
||||
from .builder import builder_registry
|
||||
from .builder import builder_registry, ParserRejectedMarkup
|
||||
from .dammit import UnicodeDammit
|
||||
from .element import (
|
||||
CData,
|
||||
@@ -74,11 +75,7 @@ class BeautifulSoup(Tag):
|
||||
# want, look for one with these features.
|
||||
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
|
||||
|
||||
# Used when determining whether a text node is all whitespace and
|
||||
# can be replaced with a single space. A text node that contains
|
||||
# fancy Unicode spaces (usually non-breaking) should be left
|
||||
# alone.
|
||||
STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None, }
|
||||
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
|
||||
|
||||
def __init__(self, markup="", features=None, builder=None,
|
||||
parse_only=None, from_encoding=None, **kwargs):
|
||||
@@ -149,7 +146,7 @@ class BeautifulSoup(Tag):
|
||||
features = self.DEFAULT_BUILDER_FEATURES
|
||||
builder_class = builder_registry.lookup(*features)
|
||||
if builder_class is None:
|
||||
raise ValueError(
|
||||
raise FeatureNotFound(
|
||||
"Couldn't find a tree builder with the features you "
|
||||
"requested: %s. Do you need to install a parser library?"
|
||||
% ",".join(features))
|
||||
@@ -160,18 +157,46 @@ class BeautifulSoup(Tag):
|
||||
|
||||
self.parse_only = parse_only
|
||||
|
||||
self.reset()
|
||||
|
||||
if hasattr(markup, 'read'): # It's a file-type object.
|
||||
markup = markup.read()
|
||||
(self.markup, self.original_encoding, self.declared_html_encoding,
|
||||
self.contains_replacement_characters) = (
|
||||
self.builder.prepare_markup(markup, from_encoding))
|
||||
elif len(markup) <= 256:
|
||||
# Print out warnings for a couple beginner problems
|
||||
# involving passing non-markup to Beautiful Soup.
|
||||
# Beautiful Soup will still parse the input as markup,
|
||||
# just in case that's what the user really wants.
|
||||
if (isinstance(markup, unicode)
|
||||
and not os.path.supports_unicode_filenames):
|
||||
possible_filename = markup.encode("utf8")
|
||||
else:
|
||||
possible_filename = markup
|
||||
is_file = False
|
||||
try:
|
||||
is_file = os.path.exists(possible_filename)
|
||||
except Exception, e:
|
||||
# This is almost certainly a problem involving
|
||||
# characters not valid in filenames on this
|
||||
# system. Just let it go.
|
||||
pass
|
||||
if is_file:
|
||||
warnings.warn(
|
||||
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
|
||||
if markup[:5] == "http:" or markup[:6] == "https:":
|
||||
# TODO: This is ugly but I couldn't get it to work in
|
||||
# Python 3 otherwise.
|
||||
if ((isinstance(markup, bytes) and not b' ' in markup)
|
||||
or (isinstance(markup, unicode) and not u' ' in markup)):
|
||||
warnings.warn(
|
||||
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
|
||||
|
||||
try:
|
||||
self._feed()
|
||||
except StopParsing:
|
||||
pass
|
||||
for (self.markup, self.original_encoding, self.declared_html_encoding,
|
||||
self.contains_replacement_characters) in (
|
||||
self.builder.prepare_markup(markup, from_encoding)):
|
||||
self.reset()
|
||||
try:
|
||||
self._feed()
|
||||
break
|
||||
except ParserRejectedMarkup:
|
||||
pass
|
||||
|
||||
# Clear out the markup and remove the builder's circular
|
||||
# reference to this object.
|
||||
@@ -192,29 +217,32 @@ class BeautifulSoup(Tag):
|
||||
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
|
||||
self.hidden = 1
|
||||
self.builder.reset()
|
||||
self.currentData = []
|
||||
self.current_data = []
|
||||
self.currentTag = None
|
||||
self.tagStack = []
|
||||
self.preserve_whitespace_tag_stack = []
|
||||
self.pushTag(self)
|
||||
|
||||
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
|
||||
"""Create a new tag associated with this soup."""
|
||||
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
|
||||
|
||||
def new_string(self, s):
|
||||
def new_string(self, s, subclass=NavigableString):
|
||||
"""Create a new NavigableString associated with this soup."""
|
||||
navigable = NavigableString(s)
|
||||
navigable = subclass(s)
|
||||
navigable.setup()
|
||||
return navigable
|
||||
|
||||
def insert_before(self, successor):
|
||||
raise ValueError("BeautifulSoup objects don't support insert_before().")
|
||||
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
|
||||
|
||||
def insert_after(self, successor):
|
||||
raise ValueError("BeautifulSoup objects don't support insert_after().")
|
||||
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
|
||||
|
||||
def popTag(self):
|
||||
tag = self.tagStack.pop()
|
||||
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
|
||||
self.preserve_whitespace_tag_stack.pop()
|
||||
#print "Pop", tag.name
|
||||
if self.tagStack:
|
||||
self.currentTag = self.tagStack[-1]
|
||||
@@ -226,32 +254,49 @@ class BeautifulSoup(Tag):
|
||||
self.currentTag.contents.append(tag)
|
||||
self.tagStack.append(tag)
|
||||
self.currentTag = self.tagStack[-1]
|
||||
if tag.name in self.builder.preserve_whitespace_tags:
|
||||
self.preserve_whitespace_tag_stack.append(tag)
|
||||
|
||||
def endData(self, containerClass=NavigableString):
|
||||
if self.currentData:
|
||||
currentData = u''.join(self.currentData)
|
||||
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
|
||||
not set([tag.name for tag in self.tagStack]).intersection(
|
||||
self.builder.preserve_whitespace_tags)):
|
||||
if '\n' in currentData:
|
||||
currentData = '\n'
|
||||
else:
|
||||
currentData = ' '
|
||||
self.currentData = []
|
||||
if self.current_data:
|
||||
current_data = u''.join(self.current_data)
|
||||
# If whitespace is not preserved, and this string contains
|
||||
# nothing but ASCII spaces, replace it with a single space
|
||||
# or newline.
|
||||
if not self.preserve_whitespace_tag_stack:
|
||||
strippable = True
|
||||
for i in current_data:
|
||||
if i not in self.ASCII_SPACES:
|
||||
strippable = False
|
||||
break
|
||||
if strippable:
|
||||
if '\n' in current_data:
|
||||
current_data = '\n'
|
||||
else:
|
||||
current_data = ' '
|
||||
|
||||
# Reset the data collector.
|
||||
self.current_data = []
|
||||
|
||||
# Should we add this string to the tree at all?
|
||||
if self.parse_only and len(self.tagStack) <= 1 and \
|
||||
(not self.parse_only.text or \
|
||||
not self.parse_only.search(currentData)):
|
||||
not self.parse_only.search(current_data)):
|
||||
return
|
||||
o = containerClass(currentData)
|
||||
|
||||
o = containerClass(current_data)
|
||||
self.object_was_parsed(o)
|
||||
|
||||
def object_was_parsed(self, o):
|
||||
def object_was_parsed(self, o, parent=None, most_recent_element=None):
|
||||
"""Add an object to the parse tree."""
|
||||
o.setup(self.currentTag, self.previous_element)
|
||||
if self.previous_element:
|
||||
self.previous_element.next_element = o
|
||||
self.previous_element = o
|
||||
self.currentTag.contents.append(o)
|
||||
parent = parent or self.currentTag
|
||||
most_recent_element = most_recent_element or self._most_recent_element
|
||||
o.setup(parent, most_recent_element)
|
||||
|
||||
if most_recent_element is not None:
|
||||
most_recent_element.next_element = o
|
||||
self._most_recent_element = o
|
||||
parent.contents.append(o)
|
||||
|
||||
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
|
||||
"""Pops the tag stack up to and including the most recent
|
||||
@@ -260,22 +305,21 @@ class BeautifulSoup(Tag):
|
||||
the given tag."""
|
||||
#print "Popping to %s" % name
|
||||
if name == self.ROOT_TAG_NAME:
|
||||
# The BeautifulSoup object itself can never be popped.
|
||||
return
|
||||
|
||||
numPops = 0
|
||||
mostRecentTag = None
|
||||
most_recently_popped = None
|
||||
|
||||
for i in range(len(self.tagStack) - 1, 0, -1):
|
||||
if (name == self.tagStack[i].name
|
||||
and nsprefix == self.tagStack[i].nsprefix == nsprefix):
|
||||
numPops = len(self.tagStack) - i
|
||||
stack_size = len(self.tagStack)
|
||||
for i in range(stack_size - 1, 0, -1):
|
||||
t = self.tagStack[i]
|
||||
if (name == t.name and nsprefix == t.prefix):
|
||||
if inclusivePop:
|
||||
most_recently_popped = self.popTag()
|
||||
break
|
||||
if not inclusivePop:
|
||||
numPops = numPops - 1
|
||||
most_recently_popped = self.popTag()
|
||||
|
||||
for i in range(0, numPops):
|
||||
mostRecentTag = self.popTag()
|
||||
return mostRecentTag
|
||||
return most_recently_popped
|
||||
|
||||
def handle_starttag(self, name, namespace, nsprefix, attrs):
|
||||
"""Push a start tag on to the stack.
|
||||
@@ -295,12 +339,12 @@ class BeautifulSoup(Tag):
|
||||
return None
|
||||
|
||||
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
|
||||
self.currentTag, self.previous_element)
|
||||
self.currentTag, self._most_recent_element)
|
||||
if tag is None:
|
||||
return tag
|
||||
if self.previous_element:
|
||||
self.previous_element.next_element = tag
|
||||
self.previous_element = tag
|
||||
if self._most_recent_element:
|
||||
self._most_recent_element.next_element = tag
|
||||
self._most_recent_element = tag
|
||||
self.pushTag(tag)
|
||||
return tag
|
||||
|
||||
@@ -310,7 +354,7 @@ class BeautifulSoup(Tag):
|
||||
self._popToTag(name, nsprefix)
|
||||
|
||||
def handle_data(self, data):
|
||||
self.currentData.append(data)
|
||||
self.current_data.append(data)
|
||||
|
||||
def decode(self, pretty_print=False,
|
||||
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
|
||||
@@ -333,6 +377,10 @@ class BeautifulSoup(Tag):
|
||||
return prefix + super(BeautifulSoup, self).decode(
|
||||
indent_level, eventual_encoding, formatter)
|
||||
|
||||
# Alias to make it easier to type import: 'from bs4 import _soup'
|
||||
_s = BeautifulSoup
|
||||
_soup = BeautifulSoup
|
||||
|
||||
class BeautifulStoneSoup(BeautifulSoup):
|
||||
"""Deprecated interface to an XML parser."""
|
||||
|
||||
@@ -347,6 +395,9 @@ class BeautifulStoneSoup(BeautifulSoup):
|
||||
class StopParsing(Exception):
|
||||
pass
|
||||
|
||||
class FeatureNotFound(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
#By default, act as an HTML pretty-printer.
|
||||
if __name__ == '__main__':
|
||||
|
||||
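As a quick illustration of the FeatureNotFound change above (a sketch, not taken from this commit): asking for a parser that is not installed now raises the new ValueError subclass, so existing handlers that catch ValueError keep working.

# Illustrative only. FeatureNotFound subclasses ValueError, so older code
# that caught ValueError when a tree builder was missing still works.
from bs4 import BeautifulSoup, FeatureNotFound

try:
    BeautifulSoup('<p>hi</p>', 'no-such-parser')
except FeatureNotFound as err:
    print(err)  # "Couldn't find a tree builder with the features you requested: ..."
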
@@ -147,18 +147,29 @@ class TreeBuilder(object):
|
||||
|
||||
Modifies its input in place.
|
||||
"""
|
||||
if not attrs:
|
||||
return attrs
|
||||
if self.cdata_list_attributes:
|
||||
universal = self.cdata_list_attributes.get('*', [])
|
||||
tag_specific = self.cdata_list_attributes.get(
|
||||
tag_name.lower(), [])
|
||||
for cdata_list_attr in itertools.chain(universal, tag_specific):
|
||||
if cdata_list_attr in dict(attrs):
|
||||
# Basically, we have a "class" attribute whose
|
||||
# value is a whitespace-separated list of CSS
|
||||
# classes. Split it into a list.
|
||||
value = attrs[cdata_list_attr]
|
||||
values = whitespace_re.split(value)
|
||||
attrs[cdata_list_attr] = values
|
||||
tag_name.lower(), None)
|
||||
for attr in attrs.keys():
|
||||
if attr in universal or (tag_specific and attr in tag_specific):
|
||||
# We have a "class"-type attribute whose string
|
||||
# value is a whitespace-separated list of
|
||||
# values. Split it into a list.
|
||||
value = attrs[attr]
|
||||
if isinstance(value, basestring):
|
||||
values = whitespace_re.split(value)
|
||||
else:
|
||||
# html5lib sometimes calls setAttributes twice
|
||||
# for the same tag when rearranging the parse
|
||||
# tree. On the second call the attribute value
|
||||
# here is already a list. If this happens,
|
||||
# leave the value alone rather than trying to
|
||||
# split it again.
|
||||
values = value
|
||||
attrs[attr] = values
|
||||
return attrs
|
||||
|
||||
class SAXTreeBuilder(TreeBuilder):
|
||||
@@ -287,6 +298,9 @@ def register_treebuilders_from(module):
|
||||
# Register the builder while we're at it.
|
||||
this_module.builder_registry.register(obj)
|
||||
|
||||
class ParserRejectedMarkup(Exception):
|
||||
pass
|
||||
|
||||
# Builders are registered in reverse order of priority, so that custom
|
||||
# builder registrations will take precedence. In general, we want lxml
|
||||
# to take precedence over html5lib, because it's faster. And we only
|
||||
|
||||
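The _replace_cdata_list_attribute_values rewrite above keeps the long-standing multi-valued attribute behaviour while tolerating html5lib calling setAttributes twice. A small sketch of the user-visible effect (illustrative, not part of the diff; class names are made up):

# Illustrative sketch of multi-valued ("cdata list") attribute handling.
from bs4 import BeautifulSoup

soup = BeautifulSoup('<p class="lead intro" id="a">hi</p>', 'html.parser')
print(soup.p['class'])  # whitespace-separated value is split into a list, e.g. ['lead', 'intro']
print(soup.p['id'])     # single-valued attributes stay plain strings
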
@@ -27,7 +27,7 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
|
||||
def prepare_markup(self, markup, user_specified_encoding):
|
||||
# Store the user-specified encoding for use later on.
|
||||
self.user_specified_encoding = user_specified_encoding
|
||||
return markup, None, None, False
|
||||
yield (markup, None, None, False)
|
||||
|
||||
# These methods are defined by Beautiful Soup.
|
||||
def feed(self, markup):
|
||||
@@ -123,17 +123,50 @@ class Element(html5lib.treebuilders._base.Node):
|
||||
self.namespace = namespace
|
||||
|
||||
def appendChild(self, node):
|
||||
if (node.element.__class__ == NavigableString and self.element.contents
|
||||
string_child = child = None
|
||||
if isinstance(node, basestring):
|
||||
# Some other piece of code decided to pass in a string
|
||||
# instead of creating a TextElement object to contain the
|
||||
# string.
|
||||
string_child = child = node
|
||||
elif isinstance(node, Tag):
|
||||
# Some other piece of code decided to pass in a Tag
|
||||
# instead of creating an Element object to contain the
|
||||
# Tag.
|
||||
child = node
|
||||
elif node.element.__class__ == NavigableString:
|
||||
string_child = child = node.element
|
||||
else:
|
||||
child = node.element
|
||||
|
||||
if not isinstance(child, basestring) and child.parent is not None:
|
||||
node.element.extract()
|
||||
|
||||
if (string_child and self.element.contents
|
||||
and self.element.contents[-1].__class__ == NavigableString):
|
||||
# Concatenate new text onto old text node
|
||||
# XXX This has O(n^2) performance, for input like
|
||||
# We are appending a string onto another string.
|
||||
# TODO This has O(n^2) performance, for input like
|
||||
# "a</a>a</a>a</a>..."
|
||||
old_element = self.element.contents[-1]
|
||||
new_element = self.soup.new_string(old_element + node.element)
|
||||
new_element = self.soup.new_string(old_element + string_child)
|
||||
old_element.replace_with(new_element)
|
||||
self.soup._most_recent_element = new_element
|
||||
else:
|
||||
self.element.append(node.element)
|
||||
node.parent = self
|
||||
if isinstance(node, basestring):
|
||||
# Create a brand new NavigableString from this string.
|
||||
child = self.soup.new_string(node)
|
||||
|
||||
# Tell Beautiful Soup to act as if it parsed this element
|
||||
# immediately after the parent's last descendant. (Or
|
||||
# immediately after the parent, if it has no children.)
|
||||
if self.element.contents:
|
||||
most_recent_element = self.element._last_descendant(False)
|
||||
else:
|
||||
most_recent_element = self.element
|
||||
|
||||
self.soup.object_was_parsed(
|
||||
child, parent=self.element,
|
||||
most_recent_element=most_recent_element)
|
||||
|
||||
def getAttributes(self):
|
||||
return AttrList(self.element)
|
||||
@@ -162,11 +195,11 @@ class Element(html5lib.treebuilders._base.Node):
|
||||
attributes = property(getAttributes, setAttributes)
|
||||
|
||||
def insertText(self, data, insertBefore=None):
|
||||
text = TextNode(self.soup.new_string(data), self.soup)
|
||||
if insertBefore:
|
||||
self.insertBefore(text, insertBefore)
|
||||
text = TextNode(self.soup.new_string(data), self.soup)
|
||||
self.insertBefore(data, insertBefore)
|
||||
else:
|
||||
self.appendChild(text)
|
||||
self.appendChild(data)
|
||||
|
||||
def insertBefore(self, node, refNode):
|
||||
index = self.element.index(refNode.element)
|
||||
@@ -183,16 +216,46 @@ class Element(html5lib.treebuilders._base.Node):
|
||||
def removeChild(self, node):
|
||||
node.element.extract()
|
||||
|
||||
def reparentChildren(self, newParent):
|
||||
while self.element.contents:
|
||||
child = self.element.contents[0]
|
||||
child.extract()
|
||||
if isinstance(child, Tag):
|
||||
newParent.appendChild(
|
||||
Element(child, self.soup, namespaces["html"]))
|
||||
else:
|
||||
newParent.appendChild(
|
||||
TextNode(child, self.soup))
|
||||
def reparentChildren(self, new_parent):
|
||||
"""Move all of this tag's children into another tag."""
|
||||
element = self.element
|
||||
new_parent_element = new_parent.element
|
||||
# Determine what this tag's next_element will be once all the children
|
||||
# are removed.
|
||||
final_next_element = element.next_sibling
|
||||
|
||||
new_parents_last_descendant = new_parent_element._last_descendant(False, False)
|
||||
if len(new_parent_element.contents) > 0:
|
||||
# The new parent already contains children. We will be
|
||||
# appending this tag's children to the end.
|
||||
new_parents_last_child = new_parent_element.contents[-1]
|
||||
new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
|
||||
else:
|
||||
# The new parent contains no children.
|
||||
new_parents_last_child = None
|
||||
new_parents_last_descendant_next_element = new_parent_element.next_element
|
||||
|
||||
to_append = element.contents
|
||||
append_after = new_parent.element.contents
|
||||
if len(to_append) > 0:
|
||||
# Set the first child's previous_element and previous_sibling
|
||||
# to elements within the new parent
|
||||
first_child = to_append[0]
|
||||
first_child.previous_element = new_parents_last_descendant
|
||||
first_child.previous_sibling = new_parents_last_child
|
||||
|
||||
# Fix the last child's next_element and next_sibling
|
||||
last_child = to_append[-1]
|
||||
last_child.next_element = new_parents_last_descendant_next_element
|
||||
last_child.next_sibling = None
|
||||
|
||||
for child in to_append:
|
||||
child.parent = new_parent_element
|
||||
new_parent_element.contents.append(child)
|
||||
|
||||
# Now that this element has no children, change its .next_element.
|
||||
element.contents = []
|
||||
element.next_element = final_next_element
|
||||
|
||||
def cloneNode(self):
|
||||
tag = self.soup.new_tag(self.element.name, self.namespace)
|
||||
|
||||
@@ -45,7 +45,15 @@ HTMLPARSER = 'html.parser'
|
||||
class BeautifulSoupHTMLParser(HTMLParser):
|
||||
def handle_starttag(self, name, attrs):
|
||||
# XXX namespace
|
||||
self.soup.handle_starttag(name, None, None, dict(attrs))
|
||||
attr_dict = {}
|
||||
for key, value in attrs:
|
||||
# Change None attribute values to the empty string
|
||||
# for consistency with the other tree builders.
|
||||
if value is None:
|
||||
value = ''
|
||||
attr_dict[key] = value
|
||||
attrvalue = '""'
|
||||
self.soup.handle_starttag(name, None, None, attr_dict)
|
||||
|
||||
def handle_endtag(self, name):
|
||||
self.soup.handle_endtag(name)
|
||||
@@ -58,6 +66,8 @@ class BeautifulSoupHTMLParser(HTMLParser):
|
||||
# it's fixed.
|
||||
if name.startswith('x'):
|
||||
real_name = int(name.lstrip('x'), 16)
|
||||
elif name.startswith('X'):
|
||||
real_name = int(name.lstrip('X'), 16)
|
||||
else:
|
||||
real_name = int(name)
|
||||
|
||||
@@ -85,6 +95,9 @@ class BeautifulSoupHTMLParser(HTMLParser):
|
||||
self.soup.endData()
|
||||
if data.startswith("DOCTYPE "):
|
||||
data = data[len("DOCTYPE "):]
|
||||
elif data == 'DOCTYPE':
|
||||
# i.e. "<!DOCTYPE>"
|
||||
data = ''
|
||||
self.soup.handle_data(data)
|
||||
self.soup.endData(Doctype)
|
||||
|
||||
@@ -130,13 +143,14 @@ class HTMLParserTreeBuilder(HTMLTreeBuilder):
|
||||
replaced with REPLACEMENT CHARACTER).
|
||||
"""
|
||||
if isinstance(markup, unicode):
|
||||
return markup, None, None, False
|
||||
yield (markup, None, None, False)
|
||||
return
|
||||
|
||||
try_encodings = [user_specified_encoding, document_declared_encoding]
|
||||
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
|
||||
return (dammit.markup, dammit.original_encoding,
|
||||
dammit.declared_html_encoding,
|
||||
dammit.contains_replacement_characters)
|
||||
yield (dammit.markup, dammit.original_encoding,
|
||||
dammit.declared_html_encoding,
|
||||
dammit.contains_replacement_characters)
|
||||
|
||||
def feed(self, markup):
|
||||
args, kwargs = self.parser_args
|
||||
|
||||
@@ -3,6 +3,7 @@ __all__ = [
|
||||
'LXMLTreeBuilder',
|
||||
]
|
||||
|
||||
from io import BytesIO
|
||||
from StringIO import StringIO
|
||||
import collections
|
||||
from lxml import etree
|
||||
@@ -12,9 +13,10 @@ from bs4.builder import (
|
||||
HTML,
|
||||
HTMLTreeBuilder,
|
||||
PERMISSIVE,
|
||||
ParserRejectedMarkup,
|
||||
TreeBuilder,
|
||||
XML)
|
||||
from bs4.dammit import UnicodeDammit
|
||||
from bs4.dammit import EncodingDetector
|
||||
|
||||
LXML = 'lxml'
|
||||
|
||||
@@ -28,24 +30,36 @@ class LXMLTreeBuilderForXML(TreeBuilder):
|
||||
|
||||
CHUNK_SIZE = 512
|
||||
|
||||
@property
|
||||
def default_parser(self):
|
||||
# This namespace mapping is specified in the XML Namespace
|
||||
# standard.
|
||||
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
|
||||
|
||||
def default_parser(self, encoding):
|
||||
# This can either return a parser object or a class, which
|
||||
# will be instantiated with default arguments.
|
||||
return etree.XMLParser(target=self, strip_cdata=False, recover=True)
|
||||
if self._default_parser is not None:
|
||||
return self._default_parser
|
||||
return etree.XMLParser(
|
||||
target=self, strip_cdata=False, recover=True, encoding=encoding)
|
||||
|
||||
def parser_for(self, encoding):
|
||||
# Use the default parser.
|
||||
parser = self.default_parser(encoding)
|
||||
|
||||
def __init__(self, parser=None, empty_element_tags=None):
|
||||
if empty_element_tags is not None:
|
||||
self.empty_element_tags = set(empty_element_tags)
|
||||
if parser is None:
|
||||
# Use the default parser.
|
||||
parser = self.default_parser
|
||||
if isinstance(parser, collections.Callable):
|
||||
# Instantiate the parser with default arguments
|
||||
parser = parser(target=self, strip_cdata=False)
|
||||
self.parser = parser
|
||||
parser = parser(target=self, strip_cdata=False, encoding=encoding)
|
||||
return parser
|
||||
|
||||
def __init__(self, parser=None, empty_element_tags=None):
|
||||
# TODO: Issue a warning if parser is present but not a
|
||||
# callable, since that means there's no way to create new
|
||||
# parsers for different encodings.
|
||||
self._default_parser = parser
|
||||
if empty_element_tags is not None:
|
||||
self.empty_element_tags = set(empty_element_tags)
|
||||
self.soup = None
|
||||
self.nsmaps = None
|
||||
self.nsmaps = [self.DEFAULT_NSMAPS]
|
||||
|
||||
def _getNsTag(self, tag):
|
||||
# Split the namespace URL out of a fully-qualified lxml tag
|
||||
@@ -58,50 +72,69 @@ class LXMLTreeBuilderForXML(TreeBuilder):
|
||||
def prepare_markup(self, markup, user_specified_encoding=None,
|
||||
document_declared_encoding=None):
|
||||
"""
|
||||
:return: A 3-tuple (markup, original encoding, encoding
|
||||
declared within markup).
|
||||
:yield: A series of 4-tuples.
|
||||
(markup, encoding, declared encoding,
|
||||
has undergone character replacement)
|
||||
|
||||
Each 4-tuple represents a strategy for parsing the document.
|
||||
"""
|
||||
if isinstance(markup, unicode):
|
||||
return markup, None, None, False
|
||||
# We were given Unicode. Maybe lxml can parse Unicode on
|
||||
# this system?
|
||||
yield markup, None, document_declared_encoding, False
|
||||
|
||||
if isinstance(markup, unicode):
|
||||
# No, apparently not. Convert the Unicode to UTF-8 and
|
||||
# tell lxml to parse it as UTF-8.
|
||||
yield (markup.encode("utf8"), "utf8",
|
||||
document_declared_encoding, False)
|
||||
|
||||
# Instead of using UnicodeDammit to convert the bytestring to
|
||||
# Unicode using different encodings, use EncodingDetector to
|
||||
# iterate over the encodings, and tell lxml to try to parse
|
||||
# the document as each one in turn.
|
||||
is_html = not self.is_xml
|
||||
try_encodings = [user_specified_encoding, document_declared_encoding]
|
||||
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
|
||||
return (dammit.markup, dammit.original_encoding,
|
||||
dammit.declared_html_encoding,
|
||||
dammit.contains_replacement_characters)
|
||||
detector = EncodingDetector(markup, try_encodings, is_html)
|
||||
for encoding in detector.encodings:
|
||||
yield (detector.markup, encoding, document_declared_encoding, False)
|
||||
|
||||
def feed(self, markup):
|
||||
if isinstance(markup, basestring):
|
||||
if isinstance(markup, bytes):
|
||||
markup = BytesIO(markup)
|
||||
elif isinstance(markup, unicode):
|
||||
markup = StringIO(markup)
|
||||
|
||||
# Call feed() at least once, even if the markup is empty,
|
||||
# or the parser won't be initialized.
|
||||
data = markup.read(self.CHUNK_SIZE)
|
||||
self.parser.feed(data)
|
||||
while data != '':
|
||||
# Now call feed() on the rest of the data, chunk by chunk.
|
||||
data = markup.read(self.CHUNK_SIZE)
|
||||
if data != '':
|
||||
self.parser.feed(data)
|
||||
self.parser.close()
|
||||
try:
|
||||
self.parser = self.parser_for(self.soup.original_encoding)
|
||||
self.parser.feed(data)
|
||||
while len(data) != 0:
|
||||
# Now call feed() on the rest of the data, chunk by chunk.
|
||||
data = markup.read(self.CHUNK_SIZE)
|
||||
if len(data) != 0:
|
||||
self.parser.feed(data)
|
||||
self.parser.close()
|
||||
except (UnicodeDecodeError, LookupError, etree.ParserError), e:
|
||||
raise ParserRejectedMarkup(str(e))
|
||||
|
||||
def close(self):
|
||||
self.nsmaps = None
|
||||
self.nsmaps = [self.DEFAULT_NSMAPS]
|
||||
|
||||
def start(self, name, attrs, nsmap={}):
|
||||
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
|
||||
attrs = dict(attrs)
|
||||
|
||||
nsprefix = None
|
||||
# Invert each namespace map as it comes in.
|
||||
if len(nsmap) == 0 and self.nsmaps != None:
|
||||
# There are no new namespaces for this tag, but namespaces
|
||||
# are in play, so we need a separate tag stack to know
|
||||
# when they end.
|
||||
if len(self.nsmaps) > 1:
|
||||
# There are no new namespaces for this tag, but
|
||||
# non-default namespaces are in play, so we need a
|
||||
# separate tag stack to know when they end.
|
||||
self.nsmaps.append(None)
|
||||
elif len(nsmap) > 0:
|
||||
# A new namespace mapping has come into play.
|
||||
if self.nsmaps is None:
|
||||
self.nsmaps = []
|
||||
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
|
||||
self.nsmaps.append(inverted_nsmap)
|
||||
# Also treat the namespace mapping as a set of attributes on the
|
||||
@@ -111,14 +144,34 @@ class LXMLTreeBuilderForXML(TreeBuilder):
|
||||
attribute = NamespacedAttribute(
|
||||
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
|
||||
attrs[attribute] = namespace
|
||||
|
||||
# Namespaces are in play. Find any attributes that came in
|
||||
# from lxml with namespaces attached to their names, and
|
||||
# turn then into NamespacedAttribute objects.
|
||||
new_attrs = {}
|
||||
for attr, value in attrs.items():
|
||||
namespace, attr = self._getNsTag(attr)
|
||||
if namespace is None:
|
||||
new_attrs[attr] = value
|
||||
else:
|
||||
nsprefix = self._prefix_for_namespace(namespace)
|
||||
attr = NamespacedAttribute(nsprefix, attr, namespace)
|
||||
new_attrs[attr] = value
|
||||
attrs = new_attrs
|
||||
|
||||
namespace, name = self._getNsTag(name)
|
||||
if namespace is not None:
|
||||
for inverted_nsmap in reversed(self.nsmaps):
|
||||
if inverted_nsmap is not None and namespace in inverted_nsmap:
|
||||
nsprefix = inverted_nsmap[namespace]
|
||||
break
|
||||
nsprefix = self._prefix_for_namespace(namespace)
|
||||
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
|
||||
|
||||
def _prefix_for_namespace(self, namespace):
|
||||
"""Find the currently active prefix for the given namespace."""
|
||||
if namespace is None:
|
||||
return None
|
||||
for inverted_nsmap in reversed(self.nsmaps):
|
||||
if inverted_nsmap is not None and namespace in inverted_nsmap:
|
||||
return inverted_nsmap[namespace]
|
||||
return None
|
||||
|
||||
def end(self, name):
|
||||
self.soup.endData()
|
||||
completed_tag = self.soup.tagStack[-1]
|
||||
@@ -130,14 +183,10 @@ class LXMLTreeBuilderForXML(TreeBuilder):
|
||||
nsprefix = inverted_nsmap[namespace]
|
||||
break
|
||||
self.soup.handle_endtag(name, nsprefix)
|
||||
if self.nsmaps != None:
|
||||
if len(self.nsmaps) > 1:
|
||||
# This tag, or one of its parents, introduced a namespace
|
||||
# mapping, so pop it off the stack.
|
||||
self.nsmaps.pop()
|
||||
if len(self.nsmaps) == 0:
|
||||
# Namespaces are no longer in play, so don't bother keeping
|
||||
# track of the namespace stack.
|
||||
self.nsmaps = None
|
||||
|
||||
def pi(self, target, data):
|
||||
pass
|
||||
@@ -166,13 +215,18 @@ class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
|
||||
features = [LXML, HTML, FAST, PERMISSIVE]
|
||||
is_xml = False
|
||||
|
||||
@property
|
||||
def default_parser(self):
|
||||
def default_parser(self, encoding):
|
||||
return etree.HTMLParser
|
||||
|
||||
def feed(self, markup):
|
||||
self.parser.feed(markup)
|
||||
self.parser.close()
|
||||
encoding = self.soup.original_encoding
|
||||
try:
|
||||
self.parser = self.parser_for(encoding)
|
||||
self.parser.feed(markup)
|
||||
self.parser.close()
|
||||
except (UnicodeDecodeError, LookupError, etree.ParserError), e:
|
||||
raise ParserRejectedMarkup(str(e))
|
||||
|
||||
|
||||
def test_fragment_to_document(self, fragment):
|
||||
"""See `TreeBuilder`."""
|
||||
|
||||
@@ -1,27 +1,40 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Beautiful Soup bonus library: Unicode, Dammit
|
||||
|
||||
This class forces XML data into a standard format (usually to UTF-8 or
|
||||
Unicode). It is heavily based on code from Mark Pilgrim's Universal
|
||||
Feed Parser. It does not rewrite the XML or HTML to reflect a new
|
||||
encoding; that's the tree builder's job.
|
||||
This library converts a bytestream to Unicode through any means
|
||||
necessary. It is heavily based on code from Mark Pilgrim's Universal
|
||||
Feed Parser. It works best on XML and HTML, but it does not rewrite the
|
||||
XML or HTML to reflect a new encoding; that's the tree builder's job.
|
||||
"""
|
||||
|
||||
import codecs
|
||||
from htmlentitydefs import codepoint2name
|
||||
import re
|
||||
import warnings
|
||||
import logging
|
||||
import string
|
||||
|
||||
# Autodetects character encodings. Very useful.
|
||||
# Download from http://chardet.feedparser.org/
|
||||
# or 'apt-get install python-chardet'
|
||||
# or 'easy_install chardet'
|
||||
# Import a library to autodetect character encodings.
|
||||
chardet_type = None
|
||||
try:
|
||||
import chardet
|
||||
#import chardet.constants
|
||||
#chardet.constants._debug = 1
|
||||
# First try the fast C implementation.
|
||||
# PyPI package: cchardet
|
||||
import cchardet
|
||||
def chardet_dammit(s):
|
||||
return cchardet.detect(s)['encoding']
|
||||
except ImportError:
|
||||
chardet = None
|
||||
try:
|
||||
# Fall back to the pure Python implementation
|
||||
# Debian package: python-chardet
|
||||
# PyPI package: chardet
|
||||
import chardet
|
||||
def chardet_dammit(s):
|
||||
return chardet.detect(s)['encoding']
|
||||
#import chardet.constants
|
||||
#chardet.constants._debug = 1
|
||||
except ImportError:
|
||||
# No chardet available.
|
||||
def chardet_dammit(s):
|
||||
return None
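
The import chain above degrades gracefully: cchardet if it is installed, then pure-Python chardet, then a stub that returns None. A minimal sketch of what callers see (the byte string is invented; which branch runs depends on what is installed locally):

from bs4.dammit import chardet_dammit

guess = chardet_dammit(b'\xc3\xa9l\xc3\xa8ve')  # UTF-8 bytes for "eleve" with accents
print(guess)  # e.g. 'utf-8' with (c)chardet installed, None otherwise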
|
||||
|
||||
# Available from http://cjkpython.i18n.org/.
|
||||
try:
|
||||
@@ -69,6 +82,8 @@ class EntitySubstitution(object):
|
||||
"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
|
||||
")")
|
||||
|
||||
AMPERSAND_OR_BRACKET = re.compile("([<>&])")
|
||||
|
||||
@classmethod
|
||||
def _substitute_html_entity(cls, matchobj):
|
||||
entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
|
||||
@@ -122,6 +137,28 @@ class EntitySubstitution(object):
|
||||
def substitute_xml(cls, value, make_quoted_attribute=False):
|
||||
"""Substitute XML entities for special XML characters.
|
||||
|
||||
:param value: A string to be substituted. The less-than sign
|
||||
will become <, the greater-than sign will become >,
|
||||
and any ampersands will become &. If you want ampersands
|
||||
that appear to be part of an entity definition to be left
|
||||
alone, use substitute_xml_containing_entities() instead.
|
||||
|
||||
:param make_quoted_attribute: If True, then the string will be
|
||||
quoted, as befits an attribute value.
|
||||
"""
|
||||
# Escape angle brackets and ampersands.
|
||||
value = cls.AMPERSAND_OR_BRACKET.sub(
|
||||
cls._substitute_xml_entity, value)
|
||||
|
||||
if make_quoted_attribute:
|
||||
value = cls.quoted_attribute_value(value)
|
||||
return value
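
A small usage sketch contrasting this method with substitute_xml_containing_entities() below (the sample string is invented):

from bs4.dammit import EntitySubstitution

raw = 'AT&T <b>already &amp; escaped</b>'
# Every ampersand is escaped, including the one inside "&amp;".
print(EntitySubstitution.substitute_xml(raw, make_quoted_attribute=True))
# Ampersands that already look like entity definitions are left alone.
print(EntitySubstitution.substitute_xml_containing_entities(raw))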
|
||||
|
||||
@classmethod
|
||||
def substitute_xml_containing_entities(
|
||||
cls, value, make_quoted_attribute=False):
|
||||
"""Substitute XML entities for special XML characters.
|
||||
|
||||
:param value: A string to be substituted. The less-than sign will
|
||||
become <, the greater-than sign will become >, and any
|
||||
ampersands that are not part of an entity definition will
|
||||
@@ -155,6 +192,125 @@ class EntitySubstitution(object):
|
||||
cls._substitute_html_entity, s)
|
||||
|
||||
|
||||
class EncodingDetector:
|
||||
"""Suggests a number of possible encodings for a bytestring.
|
||||
|
||||
Order of precedence:
|
||||
|
||||
1. Encodings you specifically tell EncodingDetector to try first
|
||||
(the override_encodings argument to the constructor).
|
||||
|
||||
2. An encoding declared within the bytestring itself, either in an
|
||||
XML declaration (if the bytestring is to be interpreted as an XML
|
||||
document), or in a <meta> tag (if the bytestring is to be
|
||||
interpreted as an HTML document.)
|
||||
|
||||
3. An encoding detected through textual analysis by chardet,
|
||||
cchardet, or a similar external library.
|
||||
|
||||
4. UTF-8.
|
||||
|
||||
5. Windows-1252.
|
||||
"""
|
||||
def __init__(self, markup, override_encodings=None, is_html=False):
|
||||
self.override_encodings = override_encodings or []
|
||||
self.chardet_encoding = None
|
||||
self.is_html = is_html
|
||||
self.declared_encoding = None
|
||||
|
||||
# First order of business: strip a byte-order mark.
|
||||
self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)
|
||||
|
||||
def _usable(self, encoding, tried):
|
||||
if encoding is not None:
|
||||
encoding = encoding.lower()
|
||||
if encoding not in tried:
|
||||
tried.add(encoding)
|
||||
return True
|
||||
return False
|
||||
|
||||
@property
|
||||
def encodings(self):
|
||||
"""Yield a number of encodings that might work for this markup."""
|
||||
tried = set()
|
||||
for e in self.override_encodings:
|
||||
if self._usable(e, tried):
|
||||
yield e
|
||||
|
||||
# Did the document originally start with a byte-order mark
|
||||
# that indicated its encoding?
|
||||
if self._usable(self.sniffed_encoding, tried):
|
||||
yield self.sniffed_encoding
|
||||
|
||||
# Look within the document for an XML or HTML encoding
|
||||
# declaration.
|
||||
if self.declared_encoding is None:
|
||||
self.declared_encoding = self.find_declared_encoding(
|
||||
self.markup, self.is_html)
|
||||
if self._usable(self.declared_encoding, tried):
|
||||
yield self.declared_encoding
|
||||
|
||||
# Use third-party character set detection to guess at the
|
||||
# encoding.
|
||||
if self.chardet_encoding is None:
|
||||
self.chardet_encoding = chardet_dammit(self.markup)
|
||||
if self._usable(self.chardet_encoding, tried):
|
||||
yield self.chardet_encoding
|
||||
|
||||
# As a last-ditch effort, try utf-8 and windows-1252.
|
||||
for e in ('utf-8', 'windows-1252'):
|
||||
if self._usable(e, tried):
|
||||
yield e
|
||||
|
||||
@classmethod
|
||||
def strip_byte_order_mark(cls, data):
|
||||
"""If a byte-order mark is present, strip it and return the encoding it implies."""
|
||||
encoding = None
|
||||
if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \
|
||||
and (data[2:4] != '\x00\x00'):
|
||||
encoding = 'utf-16be'
|
||||
data = data[2:]
|
||||
elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \
|
||||
and (data[2:4] != '\x00\x00'):
|
||||
encoding = 'utf-16le'
|
||||
data = data[2:]
|
||||
elif data[:3] == b'\xef\xbb\xbf':
|
||||
encoding = 'utf-8'
|
||||
data = data[3:]
|
||||
elif data[:4] == b'\x00\x00\xfe\xff':
|
||||
encoding = 'utf-32be'
|
||||
data = data[4:]
|
||||
elif data[:4] == b'\xff\xfe\x00\x00':
|
||||
encoding = 'utf-32le'
|
||||
data = data[4:]
|
||||
return data, encoding
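
A quick sketch of the classmethod above (the byte string is invented):

from bs4.dammit import EncodingDetector

data, encoding = EncodingDetector.strip_byte_order_mark(b'\xef\xbb\xbfhello')
print(encoding)  # 'utf-8', implied by the UTF-8 byte-order mark
print(data)      # 'hello' -- the BOM itself has been stripped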
|
||||
|
||||
@classmethod
|
||||
def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False):
|
||||
"""Given a document, tries to find its declared encoding.
|
||||
|
||||
An XML encoding is declared at the beginning of the document.
|
||||
|
||||
An HTML encoding is declared in a <meta> tag, hopefully near the
|
||||
beginning of the document.
|
||||
"""
|
||||
if search_entire_document:
|
||||
xml_endpos = html_endpos = len(markup)
|
||||
else:
|
||||
xml_endpos = 1024
|
||||
html_endpos = max(2048, int(len(markup) * 0.05))
|
||||
|
||||
declared_encoding = None
|
||||
declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
|
||||
if not declared_encoding_match and is_html:
|
||||
declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
|
||||
if declared_encoding_match is not None:
|
||||
declared_encoding = declared_encoding_match.groups()[0].decode(
|
||||
'ascii')
|
||||
if declared_encoding:
|
||||
return declared_encoding.lower()
|
||||
return None
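
Putting the pieces of EncodingDetector together, a hedged sketch of the precedence order from the class docstring (the markup and override list are invented):

from bs4.dammit import EncodingDetector

markup = b'<?xml version="1.0" encoding="iso-8859-1"?><root>caf\xe9</root>'
detector = EncodingDetector(markup, override_encodings=['utf-8'])
for encoding in detector.encodings:
    print(encoding)
# Expected order: 'utf-8' (override), 'iso-8859-1' (declared in the XML
# declaration), a chardet/cchardet guess if one of those libraries is
# installed, then the utf-8/windows-1252 fallbacks, with duplicates skipped.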
|
||||
|
||||
class UnicodeDammit:
|
||||
"""A class for detecting the encoding of a *ML document and
|
||||
converting it to a Unicode string. If the source encoding is
|
||||
@@ -176,65 +332,48 @@ class UnicodeDammit:
|
||||
|
||||
def __init__(self, markup, override_encodings=[],
|
||||
smart_quotes_to=None, is_html=False):
|
||||
self.declared_html_encoding = None
|
||||
self.smart_quotes_to = smart_quotes_to
|
||||
self.tried_encodings = []
|
||||
self.contains_replacement_characters = False
|
||||
self.is_html = is_html
|
||||
|
||||
if markup == '' or isinstance(markup, unicode):
|
||||
self.detector = EncodingDetector(markup, override_encodings, is_html)
|
||||
|
||||
# Short-circuit if the data is in Unicode to begin with.
|
||||
if isinstance(markup, unicode) or markup == '':
|
||||
self.markup = markup
|
||||
self.unicode_markup = unicode(markup)
|
||||
self.original_encoding = None
|
||||
return
|
||||
|
||||
new_markup, document_encoding, sniffed_encoding = \
|
||||
self._detectEncoding(markup, is_html)
|
||||
self.markup = new_markup
|
||||
# The encoding detector may have stripped a byte-order mark.
|
||||
# Use the stripped markup from this point on.
|
||||
self.markup = self.detector.markup
|
||||
|
||||
u = None
|
||||
if new_markup != markup:
|
||||
# _detectEncoding modified the markup, then converted it to
|
||||
# Unicode and then to UTF-8. So convert it from UTF-8.
|
||||
u = self._convert_from("utf8")
|
||||
self.original_encoding = sniffed_encoding
|
||||
for encoding in self.detector.encodings:
|
||||
markup = self.detector.markup
|
||||
u = self._convert_from(encoding)
|
||||
if u is not None:
|
||||
break
|
||||
|
||||
if not u:
|
||||
for proposed_encoding in (
|
||||
override_encodings + [document_encoding, sniffed_encoding]):
|
||||
if proposed_encoding is not None:
|
||||
u = self._convert_from(proposed_encoding)
|
||||
if u:
|
||||
break
|
||||
# None of the encodings worked. As an absolute last resort,
|
||||
# try them again with character replacement.
|
||||
|
||||
# If no luck and we have auto-detection library, try that:
|
||||
if not u and chardet and not isinstance(self.markup, unicode):
|
||||
u = self._convert_from(chardet.detect(self.markup)['encoding'])
|
||||
|
||||
# As a last resort, try utf-8 and windows-1252:
|
||||
if not u:
|
||||
for proposed_encoding in ("utf-8", "windows-1252"):
|
||||
u = self._convert_from(proposed_encoding)
|
||||
if u:
|
||||
break
|
||||
|
||||
# As an absolute last resort, try the encodings again with
|
||||
# character replacement.
|
||||
if not u:
|
||||
for proposed_encoding in (
|
||||
override_encodings + [
|
||||
document_encoding, sniffed_encoding, "utf-8", "windows-1252"]):
|
||||
if proposed_encoding != "ascii":
|
||||
u = self._convert_from(proposed_encoding, "replace")
|
||||
for encoding in self.detector.encodings:
|
||||
if encoding != "ascii":
|
||||
u = self._convert_from(encoding, "replace")
|
||||
if u is not None:
|
||||
warnings.warn(
|
||||
UnicodeWarning(
|
||||
logging.warning(
|
||||
"Some characters could not be decoded, and were "
|
||||
"replaced with REPLACEMENT CHARACTER."))
|
||||
"replaced with REPLACEMENT CHARACTER.")
|
||||
self.contains_replacement_characters = True
|
||||
break
|
||||
|
||||
# We could at this point force it to ASCII, but that would
|
||||
# destroy so much data that I think giving up is better
|
||||
# If none of that worked, we could at this point force it to
|
||||
# ASCII, but that would destroy so much data that I think
|
||||
# giving up is better.
|
||||
self.unicode_markup = u
|
||||
if not u:
|
||||
self.original_encoding = None
|
||||
@@ -262,11 +401,10 @@ class UnicodeDammit:
|
||||
return None
|
||||
self.tried_encodings.append((proposed, errors))
|
||||
markup = self.markup
|
||||
|
||||
# Convert smart quotes to HTML if coming from an encoding
|
||||
# that might have them.
|
||||
if (self.smart_quotes_to is not None
|
||||
and proposed.lower() in self.ENCODINGS_WITH_SMART_QUOTES):
|
||||
and proposed in self.ENCODINGS_WITH_SMART_QUOTES):
|
||||
smart_quotes_re = b"([\x80-\x9f])"
|
||||
smart_quotes_compiled = re.compile(smart_quotes_re)
|
||||
markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)
|
||||
@@ -287,99 +425,24 @@ class UnicodeDammit:
|
||||
def _to_unicode(self, data, encoding, errors="strict"):
|
||||
'''Given a string and its encoding, decodes the string into Unicode.
|
||||
%encoding is a string recognized by encodings.aliases'''
|
||||
return unicode(data, encoding, errors)
|
||||
|
||||
# strip Byte Order Mark (if present)
|
||||
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
|
||||
and (data[2:4] != '\x00\x00'):
|
||||
encoding = 'utf-16be'
|
||||
data = data[2:]
|
||||
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
|
||||
and (data[2:4] != '\x00\x00'):
|
||||
encoding = 'utf-16le'
|
||||
data = data[2:]
|
||||
elif data[:3] == '\xef\xbb\xbf':
|
||||
encoding = 'utf-8'
|
||||
data = data[3:]
|
||||
elif data[:4] == '\x00\x00\xfe\xff':
|
||||
encoding = 'utf-32be'
|
||||
data = data[4:]
|
||||
elif data[:4] == '\xff\xfe\x00\x00':
|
||||
encoding = 'utf-32le'
|
||||
data = data[4:]
|
||||
newdata = unicode(data, encoding, errors)
|
||||
return newdata
|
||||
|
||||
def _detectEncoding(self, xml_data, is_html=False):
|
||||
"""Given a document, tries to detect its XML encoding."""
|
||||
xml_encoding = sniffed_xml_encoding = None
|
||||
try:
|
||||
if xml_data[:4] == b'\x4c\x6f\xa7\x94':
|
||||
# EBCDIC
|
||||
xml_data = self._ebcdic_to_ascii(xml_data)
|
||||
elif xml_data[:4] == b'\x00\x3c\x00\x3f':
|
||||
# UTF-16BE
|
||||
sniffed_xml_encoding = 'utf-16be'
|
||||
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
|
||||
elif (len(xml_data) >= 4) and (xml_data[:2] == b'\xfe\xff') \
|
||||
and (xml_data[2:4] != b'\x00\x00'):
|
||||
# UTF-16BE with BOM
|
||||
sniffed_xml_encoding = 'utf-16be'
|
||||
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
|
||||
elif xml_data[:4] == b'\x3c\x00\x3f\x00':
|
||||
# UTF-16LE
|
||||
sniffed_xml_encoding = 'utf-16le'
|
||||
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
|
||||
elif (len(xml_data) >= 4) and (xml_data[:2] == b'\xff\xfe') and \
|
||||
(xml_data[2:4] != b'\x00\x00'):
|
||||
# UTF-16LE with BOM
|
||||
sniffed_xml_encoding = 'utf-16le'
|
||||
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
|
||||
elif xml_data[:4] == b'\x00\x00\x00\x3c':
|
||||
# UTF-32BE
|
||||
sniffed_xml_encoding = 'utf-32be'
|
||||
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
|
||||
elif xml_data[:4] == b'\x3c\x00\x00\x00':
|
||||
# UTF-32LE
|
||||
sniffed_xml_encoding = 'utf-32le'
|
||||
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
|
||||
elif xml_data[:4] == b'\x00\x00\xfe\xff':
|
||||
# UTF-32BE with BOM
|
||||
sniffed_xml_encoding = 'utf-32be'
|
||||
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
|
||||
elif xml_data[:4] == b'\xff\xfe\x00\x00':
|
||||
# UTF-32LE with BOM
|
||||
sniffed_xml_encoding = 'utf-32le'
|
||||
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
|
||||
elif xml_data[:3] == b'\xef\xbb\xbf':
|
||||
# UTF-8 with BOM
|
||||
sniffed_xml_encoding = 'utf-8'
|
||||
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
|
||||
else:
|
||||
sniffed_xml_encoding = 'ascii'
|
||||
pass
|
||||
except:
|
||||
xml_encoding_match = None
|
||||
xml_encoding_match = xml_encoding_re.match(xml_data)
|
||||
if not xml_encoding_match and is_html:
|
||||
xml_encoding_match = html_meta_re.search(xml_data)
|
||||
if xml_encoding_match is not None:
|
||||
xml_encoding = xml_encoding_match.groups()[0].decode(
|
||||
'ascii').lower()
|
||||
if is_html:
|
||||
self.declared_html_encoding = xml_encoding
|
||||
if sniffed_xml_encoding and \
|
||||
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
|
||||
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
|
||||
'utf-16', 'utf-32', 'utf_16', 'utf_32',
|
||||
'utf16', 'u16')):
|
||||
xml_encoding = sniffed_xml_encoding
|
||||
return xml_data, xml_encoding, sniffed_xml_encoding
|
||||
@property
|
||||
def declared_html_encoding(self):
|
||||
if not self.is_html:
|
||||
return None
|
||||
return self.detector.declared_encoding
|
||||
|
||||
def find_codec(self, charset):
|
||||
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
|
||||
or (charset and self._codec(charset.replace("-", ""))) \
|
||||
or (charset and self._codec(charset.replace("-", "_"))) \
|
||||
value = (self._codec(self.CHARSET_ALIASES.get(charset, charset))
|
||||
or (charset and self._codec(charset.replace("-", "")))
|
||||
or (charset and self._codec(charset.replace("-", "_")))
|
||||
or (charset and charset.lower())
|
||||
or charset
|
||||
)
|
||||
if value:
|
||||
return value.lower()
|
||||
return None
|
||||
|
||||
def _codec(self, charset):
|
||||
if not charset:
|
||||
@@ -392,32 +455,6 @@ class UnicodeDammit:
|
||||
pass
|
||||
return codec
|
||||
|
||||
EBCDIC_TO_ASCII_MAP = None
|
||||
|
||||
def _ebcdic_to_ascii(self, s):
|
||||
c = self.__class__
|
||||
if not c.EBCDIC_TO_ASCII_MAP:
|
||||
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
|
||||
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
|
||||
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
|
||||
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
|
||||
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
|
||||
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
|
||||
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
|
||||
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
|
||||
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
|
||||
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
|
||||
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
|
||||
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
|
||||
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
|
||||
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
|
||||
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
|
||||
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
|
||||
250,251,252,253,254,255)
|
||||
import string
|
||||
c.EBCDIC_TO_ASCII_MAP = string.maketrans(
|
||||
''.join(map(chr, list(range(256)))), ''.join(map(chr, emap)))
|
||||
return s.translate(c.EBCDIC_TO_ASCII_MAP)
|
||||
|
||||
# A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities.
|
||||
MS_CHARS = {b'\x80': ('euro', '20AC'),
|
||||
|
||||
204
libs/bs4/diagnose.py
Normal file
@@ -0,0 +1,204 @@
|
||||
"""Diagnostic functions, mainly for use when doing tech support."""
|
||||
import cProfile
|
||||
from StringIO import StringIO
|
||||
from HTMLParser import HTMLParser
|
||||
import bs4
|
||||
from bs4 import BeautifulSoup, __version__
|
||||
from bs4.builder import builder_registry
|
||||
|
||||
import os
|
||||
import pstats
|
||||
import random
|
||||
import tempfile
|
||||
import time
|
||||
import traceback
|
||||
import sys
|
||||
import cProfile
|
||||
|
||||
def diagnose(data):
|
||||
"""Diagnostic suite for isolating common problems."""
|
||||
print "Diagnostic running on Beautiful Soup %s" % __version__
|
||||
print "Python version %s" % sys.version
|
||||
|
||||
basic_parsers = ["html.parser", "html5lib", "lxml"]
|
||||
for name in basic_parsers:
|
||||
for builder in builder_registry.builders:
|
||||
if name in builder.features:
|
||||
break
|
||||
else:
|
||||
basic_parsers.remove(name)
|
||||
print (
|
||||
"I noticed that %s is not installed. Installing it may help." %
|
||||
name)
|
||||
|
||||
if 'lxml' in basic_parsers:
|
||||
basic_parsers.append(["lxml", "xml"])
|
||||
from lxml import etree
|
||||
print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))
|
||||
|
||||
if 'html5lib' in basic_parsers:
|
||||
import html5lib
|
||||
print "Found html5lib version %s" % html5lib.__version__
|
||||
|
||||
if hasattr(data, 'read'):
|
||||
data = data.read()
|
||||
elif os.path.exists(data):
|
||||
print '"%s" looks like a filename. Reading data from the file.' % data
|
||||
data = open(data).read()
|
||||
elif data.startswith("http:") or data.startswith("https:"):
|
||||
print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data
|
||||
print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup."
|
||||
return
|
||||
print
|
||||
|
||||
for parser in basic_parsers:
|
||||
print "Trying to parse your markup with %s" % parser
|
||||
success = False
|
||||
try:
|
||||
soup = BeautifulSoup(data, parser)
|
||||
success = True
|
||||
except Exception, e:
|
||||
print "%s could not parse the markup." % parser
|
||||
traceback.print_exc()
|
||||
if success:
|
||||
print "Here's what %s did with the markup:" % parser
|
||||
print soup.prettify()
|
||||
|
||||
print "-" * 80
|
||||
|
||||
def lxml_trace(data, html=True, **kwargs):
|
||||
"""Print out the lxml events that occur during parsing.
|
||||
|
||||
This lets you see how lxml parses a document when no Beautiful
|
||||
Soup code is running.
|
||||
"""
|
||||
from lxml import etree
|
||||
for event, element in etree.iterparse(StringIO(data), html=html, **kwargs):
|
||||
print("%s, %4s, %s" % (event, element.tag, element.text))
|
||||
|
||||
class AnnouncingParser(HTMLParser):
|
||||
"""Announces HTMLParser parse events, without doing anything else."""
|
||||
|
||||
def _p(self, s):
|
||||
print(s)
|
||||
|
||||
def handle_starttag(self, name, attrs):
|
||||
self._p("%s START" % name)
|
||||
|
||||
def handle_endtag(self, name):
|
||||
self._p("%s END" % name)
|
||||
|
||||
def handle_data(self, data):
|
||||
self._p("%s DATA" % data)
|
||||
|
||||
def handle_charref(self, name):
|
||||
self._p("%s CHARREF" % name)
|
||||
|
||||
def handle_entityref(self, name):
|
||||
self._p("%s ENTITYREF" % name)
|
||||
|
||||
def handle_comment(self, data):
|
||||
self._p("%s COMMENT" % data)
|
||||
|
||||
def handle_decl(self, data):
|
||||
self._p("%s DECL" % data)
|
||||
|
||||
def unknown_decl(self, data):
|
||||
self._p("%s UNKNOWN-DECL" % data)
|
||||
|
||||
def handle_pi(self, data):
|
||||
self._p("%s PI" % data)
|
||||
|
||||
def htmlparser_trace(data):
|
||||
"""Print out the HTMLParser events that occur during parsing.
|
||||
|
||||
This lets you see how HTMLParser parses a document when no
|
||||
Beautiful Soup code is running.
|
||||
"""
|
||||
parser = AnnouncingParser()
|
||||
parser.feed(data)
|
||||
|
||||
_vowels = "aeiou"
|
||||
_consonants = "bcdfghjklmnpqrstvwxyz"
|
||||
|
||||
def rword(length=5):
|
||||
"Generate a random word-like string."
|
||||
s = ''
|
||||
for i in range(length):
|
||||
if i % 2 == 0:
|
||||
t = _consonants
|
||||
else:
|
||||
t = _vowels
|
||||
s += random.choice(t)
|
||||
return s
|
||||
|
||||
def rsentence(length=4):
|
||||
"Generate a random sentence-like string."
|
||||
return " ".join(rword(random.randint(4,9)) for i in range(length))
|
||||
|
||||
def rdoc(num_elements=1000):
|
||||
"""Randomly generate an invalid HTML document."""
|
||||
tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
|
||||
elements = []
|
||||
for i in range(num_elements):
|
||||
choice = random.randint(0,3)
|
||||
if choice == 0:
|
||||
# New tag.
|
||||
tag_name = random.choice(tag_names)
|
||||
elements.append("<%s>" % tag_name)
|
||||
elif choice == 1:
|
||||
elements.append(rsentence(random.randint(1,4)))
|
||||
elif choice == 2:
|
||||
# Close a tag.
|
||||
tag_name = random.choice(tag_names)
|
||||
elements.append("</%s>" % tag_name)
|
||||
return "<html>" + "\n".join(elements) + "</html>"
|
||||
|
||||
def benchmark_parsers(num_elements=100000):
|
||||
"""Very basic head-to-head performance benchmark."""
|
||||
print "Comparative parser benchmark on Beautiful Soup %s" % __version__
|
||||
data = rdoc(num_elements)
|
||||
print "Generated a large invalid HTML document (%d bytes)." % len(data)
|
||||
|
||||
for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
|
||||
success = False
|
||||
try:
|
||||
a = time.time()
|
||||
soup = BeautifulSoup(data, parser)
|
||||
b = time.time()
|
||||
success = True
|
||||
except Exception, e:
|
||||
print "%s could not parse the markup." % parser
|
||||
traceback.print_exc()
|
||||
if success:
|
||||
print "BS4+%s parsed the markup in %.2fs." % (parser, b-a)
|
||||
|
||||
from lxml import etree
|
||||
a = time.time()
|
||||
etree.HTML(data)
|
||||
b = time.time()
|
||||
print "Raw lxml parsed the markup in %.2fs." % (b-a)
|
||||
|
||||
import html5lib
|
||||
parser = html5lib.HTMLParser()
|
||||
a = time.time()
|
||||
parser.parse(data)
|
||||
b = time.time()
|
||||
print "Raw html5lib parsed the markup in %.2fs." % (b-a)
|
||||
|
||||
def profile(num_elements=100000, parser="lxml"):
|
||||
|
||||
filehandle = tempfile.NamedTemporaryFile()
|
||||
filename = filehandle.name
|
||||
|
||||
data = rdoc(num_elements)
|
||||
vars = dict(bs4=bs4, data=data, parser=parser)
|
||||
cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename)
|
||||
|
||||
stats = pstats.Stats(filename)
|
||||
# stats.strip_dirs()
|
||||
stats.sort_stats("cumulative")
|
||||
stats.print_stats('_html5lib|bs4', 50)
|
||||
|
||||
if __name__ == '__main__':
|
||||
diagnose(sys.stdin.read())
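
A short sketch of how this new module is meant to be driven from Python rather than stdin (the markup is invented):

from bs4.diagnose import diagnose, benchmark_parsers

diagnose("<html><p>unclosed <b>tag</html>")  # prints what each installed parser makes of it
# benchmark_parsers(10000)                   # optional head-to-head timing run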
|
||||
@@ -26,6 +26,9 @@ class NamespacedAttribute(unicode):
|
||||
def __new__(cls, prefix, name, namespace=None):
|
||||
if name is None:
|
||||
obj = unicode.__new__(cls, prefix)
|
||||
elif prefix is None:
|
||||
# Not really namespaced.
|
||||
obj = unicode.__new__(cls, name)
|
||||
else:
|
||||
obj = unicode.__new__(cls, prefix + ":" + name)
|
||||
obj.prefix = prefix
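
A sketch of the two new single-argument forms handled above (the values are invented):

from bs4.element import NamespacedAttribute

print(NamespacedAttribute('xsi', 'schemaLocation', 'http://www.w3.org/2001/XMLSchema-instance'))
# -> 'xsi:schemaLocation'
print(NamespacedAttribute(None, 'lang'))   # -> 'lang', not really namespaced
print(NamespacedAttribute('xmlns', None))  # -> 'xmlns', prefix only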
|
||||
@@ -78,6 +81,40 @@ class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
|
||||
return match.group(1) + encoding
|
||||
return self.CHARSET_RE.sub(rewrite, self.original_value)
|
||||
|
||||
class HTMLAwareEntitySubstitution(EntitySubstitution):
|
||||
|
||||
"""Entity substitution rules that are aware of some HTML quirks.
|
||||
|
||||
Specifically, the contents of <script> and <style> tags should not
|
||||
undergo entity substitution.
|
||||
|
||||
Incoming NavigableString objects are checked to see if they're the
|
||||
direct children of a <script> or <style> tag.
|
||||
"""
|
||||
|
||||
cdata_containing_tags = set(["script", "style"])
|
||||
|
||||
preformatted_tags = set(["pre"])
|
||||
|
||||
@classmethod
|
||||
def _substitute_if_appropriate(cls, ns, f):
|
||||
if (isinstance(ns, NavigableString)
|
||||
and ns.parent is not None
|
||||
and ns.parent.name in cls.cdata_containing_tags):
|
||||
# Do nothing.
|
||||
return ns
|
||||
# Substitute.
|
||||
return f(ns)
|
||||
|
||||
@classmethod
|
||||
def substitute_html(cls, ns):
|
||||
return cls._substitute_if_appropriate(
|
||||
ns, EntitySubstitution.substitute_html)
|
||||
|
||||
@classmethod
|
||||
def substitute_xml(cls, ns):
|
||||
return cls._substitute_if_appropriate(
|
||||
ns, EntitySubstitution.substitute_xml)
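
An illustration of the quirk this class encodes (the markup is invented): in an HTML tree, text inside <script> or <style> is emitted verbatim while ordinary text is still entity-escaped.

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div><script></script><p></p></div>', 'html.parser')
soup.script.string = 'if (a && b) { x = "<y>"; }'
soup.p.string = 'a && b < c'
print(soup.encode())
# The <p> text comes out as 'a &amp;&amp; b &lt; c'; the <script> text is untouched.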
|
||||
|
||||
class PageElement(object):
|
||||
"""Contains the navigational information for some part of the page
|
||||
@@ -94,25 +131,60 @@ class PageElement(object):
|
||||
# converted to entities. This is not recommended, but it's
|
||||
# faster than "minimal".
|
||||
# A function - This function will be called on every string that
|
||||
# needs to undergo entity substition
|
||||
FORMATTERS = {
|
||||
# needs to undergo entity substitution.
|
||||
#
|
||||
|
||||
# In an HTML document, the default "html" and "minimal" functions
|
||||
# will leave the contents of <script> and <style> tags alone. For
|
||||
# an XML document, all tags will be given the same treatment.
|
||||
|
||||
HTML_FORMATTERS = {
|
||||
"html" : HTMLAwareEntitySubstitution.substitute_html,
|
||||
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
|
||||
None : None
|
||||
}
|
||||
|
||||
XML_FORMATTERS = {
|
||||
"html" : EntitySubstitution.substitute_html,
|
||||
"minimal" : EntitySubstitution.substitute_xml,
|
||||
None : None
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def format_string(self, s, formatter='minimal'):
|
||||
"""Format the given string using the given formatter."""
|
||||
if not callable(formatter):
|
||||
formatter = self.FORMATTERS.get(
|
||||
formatter, EntitySubstitution.substitute_xml)
|
||||
formatter = self._formatter_for_name(formatter)
|
||||
if formatter is None:
|
||||
output = s
|
||||
else:
|
||||
output = formatter(s)
|
||||
return output
|
||||
|
||||
@property
|
||||
def _is_xml(self):
|
||||
"""Is this element part of an XML tree or an HTML tree?
|
||||
|
||||
This is used when mapping a formatter name ("minimal") to an
|
||||
appropriate function (one that performs entity-substitution on
|
||||
the contents of <script> and <style> tags, or not). It's
|
||||
inefficient, but it should be called very rarely.
|
||||
"""
|
||||
if self.parent is None:
|
||||
# This is the top-level object. It should have .is_xml set
|
||||
# from tree creation. If not, take a guess--BS is usually
|
||||
# used on HTML markup.
|
||||
return getattr(self, 'is_xml', False)
|
||||
return self.parent._is_xml
|
||||
|
||||
def _formatter_for_name(self, name):
|
||||
"Look up a formatter function based on its name and the tree."
|
||||
if self._is_xml:
|
||||
return self.XML_FORMATTERS.get(
|
||||
name, EntitySubstitution.substitute_xml)
|
||||
else:
|
||||
return self.HTML_FORMATTERS.get(
|
||||
name, HTMLAwareEntitySubstitution.substitute_xml)
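
What the formatter lookup means for callers, as a small sketch (the markup is invented):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<p>bread &amp; butter</p>', 'html.parser')
print(soup.p.decode(formatter='minimal'))            # <p>bread &amp; butter</p>
print(soup.p.decode(formatter=None))                 # <p>bread & butter</p>, no substitution
print(soup.p.decode(formatter=lambda s: s.upper()))  # any callable is used as-is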
|
||||
|
||||
def setup(self, parent=None, previous_element=None):
|
||||
"""Sets up the initial relations between this element and
|
||||
other elements."""
|
||||
@@ -183,11 +255,16 @@ class PageElement(object):
|
||||
self.previous_sibling = self.next_sibling = None
|
||||
return self
|
||||
|
||||
def _last_descendant(self):
|
||||
def _last_descendant(self, is_initialized=True, accept_self=True):
|
||||
"Finds the last element beneath this object to be parsed."
|
||||
last_child = self
|
||||
while hasattr(last_child, 'contents') and last_child.contents:
|
||||
last_child = last_child.contents[-1]
|
||||
if is_initialized and self.next_sibling:
|
||||
last_child = self.next_sibling.previous_element
|
||||
else:
|
||||
last_child = self
|
||||
while isinstance(last_child, Tag) and last_child.contents:
|
||||
last_child = last_child.contents[-1]
|
||||
if not accept_self and last_child == self:
|
||||
last_child = None
|
||||
return last_child
|
||||
# BS3: Not part of the API!
|
||||
_lastRecursiveChild = _last_descendant
|
||||
@@ -222,11 +299,11 @@ class PageElement(object):
|
||||
previous_child = self.contents[position - 1]
|
||||
new_child.previous_sibling = previous_child
|
||||
new_child.previous_sibling.next_sibling = new_child
|
||||
new_child.previous_element = previous_child._last_descendant()
|
||||
new_child.previous_element = previous_child._last_descendant(False)
|
||||
if new_child.previous_element is not None:
|
||||
new_child.previous_element.next_element = new_child
|
||||
|
||||
new_childs_last_element = new_child._last_descendant()
|
||||
new_childs_last_element = new_child._last_descendant(False)
|
||||
|
||||
if position >= len(self.contents):
|
||||
new_child.next_sibling = None
|
||||
@@ -366,7 +443,7 @@ class PageElement(object):
|
||||
# NOTE: We can't use _find_one because findParents takes a different
|
||||
# set of arguments.
|
||||
r = None
|
||||
l = self.find_parents(name, attrs, 1)
|
||||
l = self.find_parents(name, attrs, 1, **kwargs)
|
||||
if l:
|
||||
r = l[0]
|
||||
return r
|
||||
@@ -403,20 +480,21 @@ class PageElement(object):
|
||||
|
||||
if isinstance(name, SoupStrainer):
|
||||
strainer = name
|
||||
elif text is None and not limit and not attrs and not kwargs:
|
||||
# Optimization to find all tags.
|
||||
if name is True or name is None:
|
||||
return [element for element in generator
|
||||
if isinstance(element, Tag)]
|
||||
# Optimization to find all tags with a given name.
|
||||
elif isinstance(name, basestring):
|
||||
return [element for element in generator
|
||||
if isinstance(element, Tag) and element.name == name]
|
||||
else:
|
||||
strainer = SoupStrainer(name, attrs, text, **kwargs)
|
||||
else:
|
||||
# Build a SoupStrainer
|
||||
strainer = SoupStrainer(name, attrs, text, **kwargs)
|
||||
|
||||
if text is None and not limit and not attrs and not kwargs:
|
||||
if name is True or name is None:
|
||||
# Optimization to find all tags.
|
||||
result = (element for element in generator
|
||||
if isinstance(element, Tag))
|
||||
return ResultSet(strainer, result)
|
||||
elif isinstance(name, basestring):
|
||||
# Optimization to find all tags with a given name.
|
||||
result = (element for element in generator
|
||||
if isinstance(element, Tag)
|
||||
and element.name == name)
|
||||
return ResultSet(strainer, result)
|
||||
results = ResultSet(strainer)
|
||||
while True:
|
||||
try:
|
||||
@@ -495,6 +573,14 @@ class PageElement(object):
|
||||
value =" ".join(value)
|
||||
return value
|
||||
|
||||
def _tag_name_matches_and(self, function, tag_name):
|
||||
if not tag_name:
|
||||
return function
|
||||
else:
|
||||
def _match(tag):
|
||||
return tag.name == tag_name and function(tag)
|
||||
return _match
|
||||
|
||||
def _attribute_checker(self, operator, attribute, value=''):
|
||||
"""Create a function that performs a CSS selector operation.
|
||||
|
||||
@@ -536,87 +622,6 @@ class PageElement(object):
|
||||
else:
|
||||
return lambda el: el.has_attr(attribute)
|
||||
|
||||
def select(self, selector):
|
||||
"""Perform a CSS selection operation on the current element."""
|
||||
tokens = selector.split()
|
||||
current_context = [self]
|
||||
for index, token in enumerate(tokens):
|
||||
if tokens[index - 1] == '>':
|
||||
# already found direct descendants in last step. skip this
|
||||
# step.
|
||||
continue
|
||||
m = self.attribselect_re.match(token)
|
||||
if m is not None:
|
||||
# Attribute selector
|
||||
tag, attribute, operator, value = m.groups()
|
||||
if not tag:
|
||||
tag = True
|
||||
checker = self._attribute_checker(operator, attribute, value)
|
||||
found = []
|
||||
for context in current_context:
|
||||
found.extend(
|
||||
[el for el in context.find_all(tag) if checker(el)])
|
||||
current_context = found
|
||||
continue
|
||||
|
||||
if '#' in token:
|
||||
# ID selector
|
||||
tag, id = token.split('#', 1)
|
||||
if tag == "":
|
||||
tag = True
|
||||
el = current_context[0].find(tag, {'id': id})
|
||||
if el is None:
|
||||
return [] # No match
|
||||
current_context = [el]
|
||||
continue
|
||||
|
||||
if '.' in token:
|
||||
# Class selector
|
||||
tag_name, klass = token.split('.', 1)
|
||||
if not tag_name:
|
||||
tag_name = True
|
||||
classes = set(klass.split('.'))
|
||||
found = []
|
||||
def classes_match(tag):
|
||||
if tag_name is not True and tag.name != tag_name:
|
||||
return False
|
||||
if not tag.has_attr('class'):
|
||||
return False
|
||||
return classes.issubset(tag['class'])
|
||||
for context in current_context:
|
||||
found.extend(context.find_all(classes_match))
|
||||
current_context = found
|
||||
continue
|
||||
|
||||
if token == '*':
|
||||
# Star selector
|
||||
found = []
|
||||
for context in current_context:
|
||||
found.extend(context.findAll(True))
|
||||
current_context = found
|
||||
continue
|
||||
|
||||
if token == '>':
|
||||
# Child selector
|
||||
tag = tokens[index + 1]
|
||||
if not tag:
|
||||
tag = True
|
||||
|
||||
found = []
|
||||
for context in current_context:
|
||||
found.extend(context.find_all(tag, recursive=False))
|
||||
current_context = found
|
||||
continue
|
||||
|
||||
# Here we should just have a regular tag
|
||||
if not self.tag_name_re.match(token):
|
||||
return []
|
||||
found = []
|
||||
for context in current_context:
|
||||
found.extend(context.findAll(token))
|
||||
current_context = found
|
||||
return current_context
|
||||
|
||||
# Old non-property versions of the generators, for backwards
|
||||
# compatibility with BS3.
|
||||
def nextGenerator(self):
|
||||
@@ -652,6 +657,9 @@ class NavigableString(unicode, PageElement):
|
||||
return unicode.__new__(cls, value)
|
||||
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
|
||||
|
||||
def __copy__(self):
|
||||
return self
|
||||
|
||||
def __getnewargs__(self):
|
||||
return (unicode(self),)
|
||||
|
||||
@@ -670,6 +678,13 @@ class NavigableString(unicode, PageElement):
|
||||
output = self.format_string(self, formatter)
|
||||
return self.PREFIX + output + self.SUFFIX
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return None
|
||||
|
||||
@name.setter
|
||||
def name(self, name):
|
||||
raise AttributeError("A NavigableString cannot be given a name.")
|
||||
|
||||
class PreformattedString(NavigableString):
|
||||
"""A NavigableString not subject to the normal formatting rules.
|
||||
@@ -709,7 +724,7 @@ class Doctype(PreformattedString):
|
||||
|
||||
@classmethod
|
||||
def for_name_and_ids(cls, name, pub_id, system_id):
|
||||
value = name
|
||||
value = name or ''
|
||||
if pub_id is not None:
|
||||
value += ' PUBLIC "%s"' % pub_id
|
||||
if system_id is not None:
|
||||
@@ -744,7 +759,7 @@ class Tag(PageElement):
|
||||
self.prefix = prefix
|
||||
if attrs is None:
|
||||
attrs = {}
|
||||
elif builder.cdata_list_attributes:
|
||||
elif attrs and builder.cdata_list_attributes:
|
||||
attrs = builder._replace_cdata_list_attribute_values(
|
||||
self.name, attrs)
|
||||
else:
|
||||
@@ -803,16 +818,24 @@ class Tag(PageElement):
|
||||
self.clear()
|
||||
self.append(string.__class__(string))
|
||||
|
||||
def _all_strings(self, strip=False):
|
||||
"""Yield all child strings, possibly stripping them."""
|
||||
def _all_strings(self, strip=False, types=(NavigableString, CData)):
|
||||
"""Yield all strings of certain classes, possibly stripping them.
|
||||
|
||||
By default, yields only NavigableString and CData objects. So
|
||||
no comments, processing instructions, etc.
|
||||
"""
|
||||
for descendant in self.descendants:
|
||||
if not isinstance(descendant, NavigableString):
|
||||
if (
|
||||
(types is None and not isinstance(descendant, NavigableString))
|
||||
or
|
||||
(types is not None and type(descendant) not in types)):
|
||||
continue
|
||||
if strip:
|
||||
descendant = descendant.strip()
|
||||
if len(descendant) == 0:
|
||||
continue
|
||||
yield descendant
|
||||
|
||||
strings = property(_all_strings)
|
||||
|
||||
@property
|
||||
@@ -820,11 +843,13 @@ class Tag(PageElement):
|
||||
for string in self._all_strings(True):
|
||||
yield string
|
||||
|
||||
def get_text(self, separator="", strip=False):
|
||||
def get_text(self, separator=u"", strip=False,
|
||||
types=(NavigableString, CData)):
|
||||
"""
|
||||
Get all child strings, concatenated using the given separator.
|
||||
"""
|
||||
return separator.join([s for s in self._all_strings(strip)])
|
||||
return separator.join([s for s in self._all_strings(
|
||||
strip, types=types)])
|
||||
getText = get_text
|
||||
text = property(get_text)
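
A sketch of what the new types hook buys (the markup is invented): by default only NavigableString and CData text is joined, so comments disappear; passing a wider tuple pulls them back in.

from bs4 import BeautifulSoup, Comment, NavigableString

soup = BeautifulSoup('<p>one<!--hidden-->two</p>', 'html.parser')
print(soup.get_text())                                  # 'onetwo'
print(soup.get_text(types=(NavigableString, Comment)))  # 'onehiddentwo'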
|
||||
|
||||
@@ -835,6 +860,7 @@ class Tag(PageElement):
|
||||
while i is not None:
|
||||
next = i.next_element
|
||||
i.__dict__.clear()
|
||||
i.contents = []
|
||||
i = next
|
||||
|
||||
def clear(self, decompose=False):
|
||||
@@ -966,6 +992,13 @@ class Tag(PageElement):
|
||||
u = self.decode(indent_level, encoding, formatter)
|
||||
return u.encode(encoding, errors)
|
||||
|
||||
def _should_pretty_print(self, indent_level):
|
||||
"""Should this tag be pretty-printed?"""
|
||||
return (
|
||||
indent_level is not None and
|
||||
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
|
||||
or self._is_xml))
|
||||
|
||||
def decode(self, indent_level=None,
|
||||
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
|
||||
formatter="minimal"):
|
||||
@@ -978,6 +1011,12 @@ class Tag(PageElement):
|
||||
document contains a <META> tag that mentions the document's
|
||||
encoding.
|
||||
"""
|
||||
|
||||
# First off, turn a string formatter into a function. This
|
||||
# will stop the lookup from happening over and over again.
|
||||
if not callable(formatter):
|
||||
formatter = self._formatter_for_name(formatter)
|
||||
|
||||
attrs = []
|
||||
if self.attrs:
|
||||
for key, val in sorted(self.attrs.items()):
|
||||
@@ -987,7 +1026,7 @@ class Tag(PageElement):
|
||||
if isinstance(val, list) or isinstance(val, tuple):
|
||||
val = ' '.join(val)
|
||||
elif not isinstance(val, basestring):
|
||||
val = str(val)
|
||||
val = unicode(val)
|
||||
elif (
|
||||
isinstance(val, AttributeValueWithCharsetSubstitution)
|
||||
and eventual_encoding is not None):
|
||||
@@ -995,26 +1034,30 @@ class Tag(PageElement):
|
||||
|
||||
text = self.format_string(val, formatter)
|
||||
decoded = (
|
||||
str(key) + '='
|
||||
unicode(key) + '='
|
||||
+ EntitySubstitution.quoted_attribute_value(text))
|
||||
attrs.append(decoded)
|
||||
close = ''
|
||||
closeTag = ''
|
||||
if self.is_empty_element:
|
||||
close = '/'
|
||||
else:
|
||||
closeTag = '</%s>' % self.name
|
||||
|
||||
prefix = ''
|
||||
if self.prefix:
|
||||
prefix = self.prefix + ":"
|
||||
|
||||
pretty_print = (indent_level is not None)
|
||||
if self.is_empty_element:
|
||||
close = '/'
|
||||
else:
|
||||
closeTag = '</%s%s>' % (prefix, self.name)
|
||||
|
||||
pretty_print = self._should_pretty_print(indent_level)
|
||||
space = ''
|
||||
indent_space = ''
|
||||
if indent_level is not None:
|
||||
indent_space = (' ' * (indent_level - 1))
|
||||
if pretty_print:
|
||||
space = (' ' * (indent_level - 1))
|
||||
space = indent_space
|
||||
indent_contents = indent_level + 1
|
||||
else:
|
||||
space = ''
|
||||
indent_contents = None
|
||||
contents = self.decode_contents(
|
||||
indent_contents, eventual_encoding, formatter)
|
||||
@@ -1027,8 +1070,10 @@ class Tag(PageElement):
|
||||
attribute_string = ''
|
||||
if attrs:
|
||||
attribute_string = ' ' + ' '.join(attrs)
|
||||
if pretty_print:
|
||||
s.append(space)
|
||||
if indent_level is not None:
|
||||
# Even if this particular tag is not pretty-printed,
|
||||
# we should indent up to the start of the tag.
|
||||
s.append(indent_space)
|
||||
s.append('<%s%s%s%s>' % (
|
||||
prefix, self.name, attribute_string, close))
|
||||
if pretty_print:
|
||||
@@ -1039,7 +1084,10 @@ class Tag(PageElement):
|
||||
if pretty_print and closeTag:
|
||||
s.append(space)
|
||||
s.append(closeTag)
|
||||
if pretty_print and closeTag and self.next_sibling:
|
||||
if indent_level is not None and closeTag and self.next_sibling:
|
||||
# Even if this particular tag is not pretty-printed,
|
||||
# we're now done with the tag, and we should add a
|
||||
# newline if appropriate.
|
||||
s.append("\n")
|
||||
s = ''.join(s)
|
||||
return s
|
||||
@@ -1062,6 +1110,11 @@ class Tag(PageElement):
|
||||
document contains a <META> tag that mentions the document's
|
||||
encoding.
|
||||
"""
|
||||
# First off, turn a string formatter into a function. This
|
||||
# will stop the lookup from happening over and over again.
|
||||
if not callable(formatter):
|
||||
formatter = self._formatter_for_name(formatter)
|
||||
|
||||
pretty_print = (indent_level is not None)
|
||||
s = []
|
||||
for c in self:
|
||||
@@ -1071,13 +1124,13 @@ class Tag(PageElement):
|
||||
elif isinstance(c, Tag):
|
||||
s.append(c.decode(indent_level, eventual_encoding,
|
||||
formatter))
|
||||
if text and indent_level:
|
||||
if text and indent_level and not self.name == 'pre':
|
||||
text = text.strip()
|
||||
if text:
|
||||
if pretty_print:
|
||||
if pretty_print and not self.name == 'pre':
|
||||
s.append(" " * (indent_level - 1))
|
||||
s.append(text)
|
||||
if pretty_print:
|
||||
if pretty_print and not self.name == 'pre':
|
||||
s.append("\n")
|
||||
return ''.join(s)
|
||||
|
||||
@@ -1120,6 +1173,7 @@ class Tag(PageElement):
|
||||
callable that takes a string and returns whether or not the
|
||||
string matches for some custom definition of 'matches'. The
|
||||
same is true of the tag name."""
|
||||
|
||||
generator = self.descendants
|
||||
if not recursive:
|
||||
generator = self.children
|
||||
@@ -1143,6 +1197,207 @@ class Tag(PageElement):
|
||||
yield current
|
||||
current = current.next_element
|
||||
|
||||
# CSS selector code
|
||||
|
||||
_selector_combinators = ['>', '+', '~']
|
||||
_select_debug = False
|
||||
def select(self, selector, _candidate_generator=None):
|
||||
"""Perform a CSS selection operation on the current element."""
|
||||
tokens = selector.split()
|
||||
current_context = [self]
|
||||
|
||||
if tokens[-1] in self._selector_combinators:
|
||||
raise ValueError(
|
||||
'Final combinator "%s" is missing an argument.' % tokens[-1])
|
||||
if self._select_debug:
|
||||
print 'Running CSS selector "%s"' % selector
|
||||
for index, token in enumerate(tokens):
|
||||
if self._select_debug:
|
||||
print ' Considering token "%s"' % token
|
||||
recursive_candidate_generator = None
|
||||
tag_name = None
|
||||
if tokens[index-1] in self._selector_combinators:
|
||||
# This token was consumed by the previous combinator. Skip it.
|
||||
if self._select_debug:
|
||||
print ' Token was consumed by the previous combinator.'
|
||||
continue
|
||||
# Each operation corresponds to a checker function, a rule
|
||||
# for determining whether a candidate matches the
|
||||
# selector. Candidates are generated by the active
|
||||
# iterator.
|
||||
checker = None
|
||||
|
||||
m = self.attribselect_re.match(token)
|
||||
if m is not None:
|
||||
# Attribute selector
|
||||
tag_name, attribute, operator, value = m.groups()
|
||||
checker = self._attribute_checker(operator, attribute, value)
|
||||
|
||||
elif '#' in token:
|
||||
# ID selector
|
||||
tag_name, tag_id = token.split('#', 1)
|
||||
def id_matches(tag):
|
||||
return tag.get('id', None) == tag_id
|
||||
checker = id_matches
|
||||
|
||||
elif '.' in token:
|
||||
# Class selector
|
||||
tag_name, klass = token.split('.', 1)
|
||||
classes = set(klass.split('.'))
|
||||
def classes_match(candidate):
|
||||
return classes.issubset(candidate.get('class', []))
|
||||
checker = classes_match
|
||||
|
||||
elif ':' in token:
|
||||
# Pseudo-class
|
||||
tag_name, pseudo = token.split(':', 1)
|
||||
if tag_name == '':
|
||||
raise ValueError(
|
||||
"A pseudo-class must be prefixed with a tag name.")
|
||||
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
|
||||
found = []
|
||||
if pseudo_attributes is not None:
|
||||
pseudo_type, pseudo_value = pseudo_attributes.groups()
|
||||
if pseudo_type == 'nth-of-type':
|
||||
try:
|
||||
pseudo_value = int(pseudo_value)
|
||||
except:
|
||||
raise NotImplementedError(
|
||||
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
|
||||
if pseudo_value < 1:
|
||||
raise ValueError(
|
||||
'nth-of-type pseudo-class value must be at least 1.')
|
||||
class Counter(object):
|
||||
def __init__(self, destination):
|
||||
self.count = 0
|
||||
self.destination = destination
|
||||
|
||||
def nth_child_of_type(self, tag):
|
||||
self.count += 1
|
||||
if self.count == self.destination:
|
||||
return True
|
||||
if self.count > self.destination:
|
||||
# Stop the generator that's sending us
|
||||
# these things.
|
||||
raise StopIteration()
|
||||
return False
|
||||
checker = Counter(pseudo_value).nth_child_of_type
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
'Only the following pseudo-classes are implemented: nth-of-type.')
|
||||
|
||||
elif token == '*':
|
||||
# Star selector -- matches everything
|
||||
pass
|
||||
elif token == '>':
|
||||
# Run the next token as a CSS selector against the
|
||||
# direct children of each tag in the current context.
|
||||
recursive_candidate_generator = lambda tag: tag.children
|
||||
elif token == '~':
|
||||
# Run the next token as a CSS selector against the
|
||||
# siblings of each tag in the current context.
|
||||
recursive_candidate_generator = lambda tag: tag.next_siblings
|
||||
elif token == '+':
|
||||
# For each tag in the current context, run the next
|
||||
# token as a CSS selector against the tag's next
|
||||
# sibling that's a tag.
|
||||
def next_tag_sibling(tag):
|
||||
yield tag.find_next_sibling(True)
|
||||
recursive_candidate_generator = next_tag_sibling
|
||||
|
||||
elif self.tag_name_re.match(token):
|
||||
# Just a tag name.
|
||||
tag_name = token
|
||||
else:
|
||||
raise ValueError(
|
||||
'Unsupported or invalid CSS selector: "%s"' % token)
|
||||
|
||||
if recursive_candidate_generator:
|
||||
# This happens when the selector looks like "> foo".
|
||||
#
|
||||
# The generator calls select() recursively on every
|
||||
# member of the current context, passing in a different
|
||||
# candidate generator and a different selector.
|
||||
#
|
||||
# In the case of "> foo", the candidate generator is
|
||||
# one that yields a tag's direct children (">"), and
|
||||
# the selector is "foo".
|
||||
next_token = tokens[index+1]
|
||||
def recursive_select(tag):
|
||||
if self._select_debug:
|
||||
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
|
||||
print '-' * 40
|
||||
for i in tag.select(next_token, recursive_candidate_generator):
|
||||
if self._select_debug:
|
||||
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
|
||||
yield i
|
||||
if self._select_debug:
|
||||
print '-' * 40
|
||||
_use_candidate_generator = recursive_select
|
||||
elif _candidate_generator is None:
|
||||
# By default, a tag's candidates are all of its
|
||||
# children. If tag_name is defined, only yield tags
|
||||
# with that name.
|
||||
if self._select_debug:
|
||||
if tag_name:
|
||||
check = "[any]"
|
||||
else:
|
||||
check = tag_name
|
||||
print ' Default candidate generator, tag name="%s"' % check
|
||||
if self._select_debug:
|
||||
# This is redundant with later code, but it stops
|
||||
# a bunch of bogus tags from cluttering up the
|
||||
# debug log.
|
||||
def default_candidate_generator(tag):
|
||||
for child in tag.descendants:
|
||||
if not isinstance(child, Tag):
|
||||
continue
|
||||
if tag_name and not child.name == tag_name:
|
||||
continue
|
||||
yield child
|
||||
_use_candidate_generator = default_candidate_generator
|
||||
else:
|
||||
_use_candidate_generator = lambda tag: tag.descendants
|
||||
else:
|
||||
_use_candidate_generator = _candidate_generator
|
||||
|
||||
new_context = []
|
||||
new_context_ids = set([])
|
||||
for tag in current_context:
|
||||
if self._select_debug:
|
||||
print " Running candidate generator on %s %s" % (
|
||||
tag.name, repr(tag.attrs))
|
||||
for candidate in _use_candidate_generator(tag):
|
||||
if not isinstance(candidate, Tag):
|
||||
continue
|
||||
if tag_name and candidate.name != tag_name:
|
||||
continue
|
||||
if checker is not None:
|
||||
try:
|
||||
result = checker(candidate)
|
||||
except StopIteration:
|
||||
# The checker has decided we should no longer
|
||||
# run the generator.
|
||||
break
|
||||
if checker is None or result:
|
||||
if self._select_debug:
|
||||
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
|
||||
if id(candidate) not in new_context_ids:
|
||||
# If a tag matches a selector more than once,
|
||||
# don't include it in the context more than once.
|
||||
new_context.append(candidate)
|
||||
new_context_ids.add(id(candidate))
|
||||
elif self._select_debug:
|
||||
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
|
||||
|
||||
current_context = new_context
|
||||
|
||||
if self._select_debug:
|
||||
print "Final verdict:"
|
||||
for i in current_context:
|
||||
print " %s %s" % (i.name, i.attrs)
|
||||
return current_context
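
A usage sketch of the expanded selector support added above (the document is invented):

from bs4 import BeautifulSoup

html = '<div id="main"><p class="a b">x</p><p>y</p><p>z</p></div>'
soup = BeautifulSoup(html, 'html.parser')
print(soup.select('div#main > p.a.b'))   # child combinator plus multiple classes
print(soup.select('p:nth-of-type(2)'))   # the new nth-of-type pseudo-class
print(soup.select('p.a ~ p'))            # general sibling combinator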
|
||||
|
||||
# Old names for backwards compatibility
|
||||
def childGenerator(self):
|
||||
return self.children
|
||||
@@ -1150,10 +1405,13 @@ class Tag(PageElement):
|
||||
def recursiveChildGenerator(self):
|
||||
return self.descendants
|
||||
|
||||
# This was kind of misleading because has_key() (attributes) was
|
||||
# different from __in__ (contents). has_key() is gone in Python 3,
|
||||
# anyway.
|
||||
has_key = has_attr
|
||||
def has_key(self, key):
|
||||
"""This was kind of misleading because has_key() (attributes)
|
||||
was different from __in__ (contents). has_key() is gone in
|
||||
Python 3, anyway."""
|
||||
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
|
||||
key))
|
||||
return self.has_attr(key)
|
||||
|
||||
# Next, a couple classes to represent queries and their results.
|
||||
class SoupStrainer(object):
|
||||
@@ -1168,6 +1426,12 @@ class SoupStrainer(object):
|
||||
kwargs['class'] = attrs
|
||||
attrs = None
|
||||
|
||||
if 'class_' in kwargs:
|
||||
# Treat class_="foo" as a search for the 'class'
|
||||
# attribute, overriding any non-dict value for attrs.
|
||||
kwargs['class'] = kwargs['class_']
|
||||
del kwargs['class_']
|
||||
|
||||
if kwargs:
|
||||
if attrs:
|
||||
attrs = attrs.copy()
|
||||
@@ -1342,6 +1606,6 @@ class SoupStrainer(object):
|
||||
class ResultSet(list):
|
||||
"""A ResultSet is just a list that keeps track of the SoupStrainer
|
||||
that created it."""
|
||||
def __init__(self, source):
|
||||
list.__init__([])
|
||||
def __init__(self, source, result=()):
|
||||
super(ResultSet, self).__init__(result)
|
||||
self.source = source
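
With the extra result argument a ResultSet can now be built directly from an iterable while still remembering the SoupStrainer that produced it (the names are invented):

from bs4.element import ResultSet, SoupStrainer

strainer = SoupStrainer('a')
rs = ResultSet(strainer, result=[])   # any iterable of elements works here
print(list(rs))               # []
print(rs.source is strainer)  # True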
|
||||
|
||||
@@ -81,6 +81,11 @@ class HTMLTreeBuilderSmokeTest(object):
|
||||
self.assertDoctypeHandled(
|
||||
'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
|
||||
|
||||
def test_empty_doctype(self):
|
||||
soup = self.soup("<!DOCTYPE>")
|
||||
doctype = soup.contents[0]
|
||||
self.assertEqual("", doctype.strip())
|
||||
|
||||
def test_public_doctype_with_url(self):
|
||||
doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
|
||||
self.assertDoctypeHandled(doctype)
|
||||
@@ -159,6 +164,12 @@ class HTMLTreeBuilderSmokeTest(object):
|
||||
comment = soup.find(text="foobar")
|
||||
self.assertEqual(comment.__class__, Comment)
|
||||
|
||||
# The comment is properly integrated into the tree.
|
||||
foo = soup.find(text="foo")
|
||||
self.assertEqual(comment, foo.next_element)
|
||||
baz = soup.find(text="baz")
|
||||
self.assertEqual(comment, baz.previous_element)
|
||||
|
||||
def test_preserved_whitespace_in_pre_and_textarea(self):
|
||||
"""Whitespace must be preserved in <pre> and <textarea> tags."""
|
||||
self.assertSoupEquals("<pre> </pre>")
|
||||
@@ -202,6 +213,14 @@ class HTMLTreeBuilderSmokeTest(object):
|
||||
"<tbody><tr><td>Bar</td></tr></tbody>"
|
||||
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
|
||||
|
||||
def test_deeply_nested_multivalued_attribute(self):
|
||||
# html5lib can set the attributes of the same tag many times
|
||||
# as it rearranges the tree. This has caused problems with
|
||||
# multivalued attributes.
|
||||
markup = '<table><div><div class="css"></div></div></table>'
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(["css"], soup.div.div['class'])
|
||||
|
||||
def test_angle_brackets_in_attribute_values_are_escaped(self):
|
||||
self.assertSoupEquals('<a b="<a>"></a>', '<a b="<a>"></a>')
|
||||
|
||||
@@ -209,12 +228,14 @@ class HTMLTreeBuilderSmokeTest(object):
|
||||
expect = u'<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
|
||||
self.assertSoupEquals('<p id="piñata"></p>', expect)
|
||||
self.assertSoupEquals('<p id="piñata"></p>', expect)
|
||||
self.assertSoupEquals('<p id="piñata"></p>', expect)
|
||||
self.assertSoupEquals('<p id="piñata"></p>', expect)
|
||||
|
||||
def test_entities_in_text_converted_to_unicode(self):
|
||||
expect = u'<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
|
||||
self.assertSoupEquals("<p>piñata</p>", expect)
|
||||
self.assertSoupEquals("<p>piñata</p>", expect)
|
||||
self.assertSoupEquals("<p>piñata</p>", expect)
|
||||
self.assertSoupEquals("<p>piñata</p>", expect)
|
||||
|
||||
def test_quot_entity_converted_to_quotation_mark(self):
|
||||
@@ -227,6 +248,12 @@ class HTMLTreeBuilderSmokeTest(object):
|
||||
self.assertSoupEquals("�", expect)
|
||||
self.assertSoupEquals("�", expect)
|
||||
|
||||
def test_multipart_strings(self):
|
||||
"Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
|
||||
soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
|
||||
self.assertEqual("p", soup.h2.string.next_element.name)
|
||||
self.assertEqual("p", soup.p.name)
|
||||
|
||||
def test_basic_namespaces(self):
|
||||
"""Parsers don't need to *understand* namespaces, but at the
|
||||
very least they should not choke on namespaces or lose
|
||||
@@ -254,6 +281,14 @@ class HTMLTreeBuilderSmokeTest(object):
|
||||
# to detect any differences between them.
|
||||
#
|
||||
|
||||
def test_can_parse_unicode_document(self):
|
||||
# A seemingly innocuous document... but it's in Unicode! And
|
||||
# it contains characters that can't be represented in the
|
||||
# encoding found in the declaration! The horror!
|
||||
markup = u'<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(u'Sacr\xe9 bleu!', soup.body.string)
|
||||
|
||||
def test_soupstrainer(self):
|
||||
"""Parsers should be able to work with SoupStrainers."""
|
||||
strainer = SoupStrainer("b")
|
||||
@@ -445,6 +480,28 @@ class XMLTreeBuilderSmokeTest(object):
|
||||
self.assertEqual(
|
||||
soup.encode("utf-8"), markup)
|
||||
|
||||
def test_formatter_processes_script_tag_for_xml_documents(self):
|
||||
doc = """
|
||||
<script type="text/javascript">
|
||||
</script>
|
||||
"""
|
||||
soup = BeautifulSoup(doc, "xml")
|
||||
# lxml would have stripped this while parsing, but we can add
|
||||
# it later.
|
||||
soup.script.string = 'console.log("< < hey > > ");'
|
||||
encoded = soup.encode()
|
||||
self.assertTrue(b"< < hey > >" in encoded)
|
||||
|
||||
def test_can_parse_unicode_document(self):
|
||||
markup = u'<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string)
|
||||
|
||||
def test_popping_namespaced_tag(self):
|
||||
markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(
|
||||
unicode(soup.rss), markup)
|
||||
|
||||
def test_docstring_includes_correct_encoding(self):
|
||||
soup = self.soup("<root/>")
|
||||
@@ -472,6 +529,20 @@ class XMLTreeBuilderSmokeTest(object):
|
||||
self.assertEqual("http://example.com/", root['xmlns:a'])
|
||||
self.assertEqual("http://example.net/", root['xmlns:b'])
|
||||
|
||||
def test_closing_namespaced_tag(self):
|
||||
markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(unicode(soup.p), markup)
|
||||
|
||||
def test_namespaced_attributes(self):
|
||||
markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(unicode(soup.foo), markup)
|
||||
|
||||
def test_namespaced_attributes_xml_namespace(self):
|
||||
markup = '<foo xml:lang="fr">bar</foo>'
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(unicode(soup.foo), markup)
|
||||
|
||||
class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
|
||||
"""Smoke test for a tree builder that supports HTML5."""
|
||||
@@ -501,6 +572,12 @@ class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
|
||||
self.assertEqual(namespace, soup.math.namespace)
|
||||
self.assertEqual(namespace, soup.msqrt.namespace)
|
||||
|
||||
def test_xml_declaration_becomes_comment(self):
|
||||
markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
|
||||
soup = self.soup(markup)
|
||||
self.assertTrue(isinstance(soup.contents[0], Comment))
|
||||
self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?')
|
||||
self.assertEqual("html", soup.contents[0].next_element.name)
|
||||
|
||||
def skipIf(condition, reason):
|
||||
def nothing(test, *args, **kwargs):
|
||||
|
||||
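The tree-builder smoke tests above assert, among other things, that multi-valued attributes survive html5lib's tree rearranging and that the XML builder round-trips namespaced markup. A minimal sketch of the behaviour being tested, assuming BeautifulSoup 4 with the html5lib and lxml builders available:

from bs4 import BeautifulSoup

# Multi-valued attributes such as class come back as lists, even when
# html5lib sets the attributes of the same tag several times while parsing.
soup = BeautifulSoup('<table><div><div class="css"></div></div></table>', 'html5lib')
print(soup.div.div['class'])  # ['css']

# The XML builder should round-trip namespaced tags and attributes unchanged.
xml_soup = BeautifulSoup('<foo xml:lang="fr">bar</foo>', 'xml')
print(str(xml_soup.foo))  # <foo xml:lang="fr">bar</foo>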
@@ -1,509 +0,0 @@
|
||||
import re
|
||||
import hashlib
|
||||
import time
|
||||
import StringIO
|
||||
|
||||
__version__ = '0.8'
|
||||
|
||||
#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
|
||||
GNTP_INFO_LINE = re.compile(
|
||||
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)' +
|
||||
' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?' +
|
||||
'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n',
|
||||
re.IGNORECASE
|
||||
)
|
||||
|
||||
GNTP_INFO_LINE_SHORT = re.compile(
|
||||
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',
|
||||
re.IGNORECASE
|
||||
)
|
||||
|
||||
GNTP_HEADER = re.compile('([\w-]+):(.+)')
|
||||
|
||||
GNTP_EOL = '\r\n'
|
||||
|
||||
|
||||
class BaseError(Exception):
|
||||
def gntp_error(self):
|
||||
error = GNTPError(self.errorcode, self.errordesc)
|
||||
return error.encode()
|
||||
|
||||
|
||||
class ParseError(BaseError):
|
||||
errorcode = 500
|
||||
errordesc = 'Error parsing the message'
|
||||
|
||||
|
||||
class AuthError(BaseError):
|
||||
errorcode = 400
|
||||
errordesc = 'Error with authorization'
|
||||
|
||||
|
||||
class UnsupportedError(BaseError):
|
||||
errorcode = 500
|
||||
errordesc = 'Currently unsupported by gntp.py'
|
||||
|
||||
|
||||
class _GNTPBuffer(StringIO.StringIO):
|
||||
"""GNTP Buffer class"""
|
||||
def writefmt(self, message = "", *args):
|
||||
"""Shortcut function for writing GNTP Headers"""
|
||||
self.write((message % args).encode('utf8', 'replace'))
|
||||
self.write(GNTP_EOL)
|
||||
|
||||
|
||||
class _GNTPBase(object):
|
||||
"""Base initilization
|
||||
|
||||
:param string messagetype: GNTP Message type
|
||||
:param string version: GNTP Protocol version
|
||||
:param string encription: Encryption protocol
|
||||
"""
|
||||
def __init__(self, messagetype = None, version = '1.0', encryption = None):
|
||||
self.info = {
|
||||
'version': version,
|
||||
'messagetype': messagetype,
|
||||
'encryptionAlgorithmID': encryption
|
||||
}
|
||||
self.headers = {}
|
||||
self.resources = {}
|
||||
|
||||
def __str__(self):
|
||||
return self.encode()
|
||||
|
||||
def _parse_info(self, data):
|
||||
"""Parse the first line of a GNTP message to get security and other info values
|
||||
|
||||
:param string data: GNTP Message
|
||||
:return dict: Parsed GNTP Info line
|
||||
"""
|
||||
|
||||
match = GNTP_INFO_LINE.match(data)
|
||||
|
||||
if not match:
|
||||
raise ParseError('ERROR_PARSING_INFO_LINE')
|
||||
|
||||
info = match.groupdict()
|
||||
if info['encryptionAlgorithmID'] == 'NONE':
|
||||
info['encryptionAlgorithmID'] = None
|
||||
|
||||
return info
|
||||
|
||||
def set_password(self, password, encryptAlgo = 'MD5'):
|
||||
"""Set a password for a GNTP Message
|
||||
|
||||
:param string password: Null to clear password
|
||||
:param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
|
||||
"""
|
||||
hash = {
|
||||
'MD5': hashlib.md5,
|
||||
'SHA1': hashlib.sha1,
|
||||
'SHA256': hashlib.sha256,
|
||||
'SHA512': hashlib.sha512,
|
||||
}
|
||||
|
||||
self.password = password
|
||||
self.encryptAlgo = encryptAlgo.upper()
|
||||
if not password:
|
||||
self.info['encryptionAlgorithmID'] = None
|
||||
self.info['keyHashAlgorithm'] = None
|
||||
return
|
||||
if not self.encryptAlgo in hash.keys():
|
||||
raise UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
|
||||
|
||||
hashfunction = hash.get(self.encryptAlgo)
|
||||
|
||||
password = password.encode('utf8')
|
||||
seed = time.ctime()
|
||||
salt = hashfunction(seed).hexdigest()
|
||||
saltHash = hashfunction(seed).digest()
|
||||
keyBasis = password + saltHash
|
||||
key = hashfunction(keyBasis).digest()
|
||||
keyHash = hashfunction(key).hexdigest()
|
||||
|
||||
self.info['keyHashAlgorithmID'] = self.encryptAlgo
|
||||
self.info['keyHash'] = keyHash.upper()
|
||||
self.info['salt'] = salt.upper()
|
||||
|
||||
def _decode_hex(self, value):
|
||||
"""Helper function to decode hex string to `proper` hex string
|
||||
|
||||
:param string value: Human readable hex string
|
||||
:return string: Hex string
|
||||
"""
|
||||
result = ''
|
||||
for i in range(0, len(value), 2):
|
||||
tmp = int(value[i:i + 2], 16)
|
||||
result += chr(tmp)
|
||||
return result
|
||||
|
||||
def _decode_binary(self, rawIdentifier, identifier):
|
||||
rawIdentifier += '\r\n\r\n'
|
||||
dataLength = int(identifier['Length'])
|
||||
pointerStart = self.raw.find(rawIdentifier) + len(rawIdentifier)
|
||||
pointerEnd = pointerStart + dataLength
|
||||
data = self.raw[pointerStart:pointerEnd]
|
||||
if not len(data) == dataLength:
|
||||
raise ParseError('INVALID_DATA_LENGTH Expected: %s Recieved %s' % (dataLength, len(data)))
|
||||
return data
|
||||
|
||||
def _validate_password(self, password):
|
||||
"""Validate GNTP Message against stored password"""
|
||||
self.password = password
|
||||
if password == None:
|
||||
raise AuthError('Missing password')
|
||||
keyHash = self.info.get('keyHash', None)
|
||||
if keyHash is None and self.password is None:
|
||||
return True
|
||||
if keyHash is None:
|
||||
raise AuthError('Invalid keyHash')
|
||||
if self.password is None:
|
||||
raise AuthError('Missing password')
|
||||
|
||||
password = self.password.encode('utf8')
|
||||
saltHash = self._decode_hex(self.info['salt'])
|
||||
|
||||
keyBasis = password + saltHash
|
||||
key = hashlib.md5(keyBasis).digest()
|
||||
keyHash = hashlib.md5(key).hexdigest()
|
||||
|
||||
if not keyHash.upper() == self.info['keyHash'].upper():
|
||||
raise AuthError('Invalid Hash')
|
||||
return True
|
||||
|
||||
def validate(self):
|
||||
"""Verify required headers"""
|
||||
for header in self._requiredHeaders:
|
||||
if not self.headers.get(header, False):
|
||||
raise ParseError('Missing Notification Header: ' + header)
|
||||
|
||||
def _format_info(self):
|
||||
"""Generate info line for GNTP Message
|
||||
|
||||
:return string:
|
||||
"""
|
||||
info = u'GNTP/%s %s' % (
|
||||
self.info.get('version'),
|
||||
self.info.get('messagetype'),
|
||||
)
|
||||
if self.info.get('encryptionAlgorithmID', None):
|
||||
info += ' %s:%s' % (
|
||||
self.info.get('encryptionAlgorithmID'),
|
||||
self.info.get('ivValue'),
|
||||
)
|
||||
else:
|
||||
info += ' NONE'
|
||||
|
||||
if self.info.get('keyHashAlgorithmID', None):
|
||||
info += ' %s:%s.%s' % (
|
||||
self.info.get('keyHashAlgorithmID'),
|
||||
self.info.get('keyHash'),
|
||||
self.info.get('salt')
|
||||
)
|
||||
|
||||
return info
|
||||
|
||||
def _parse_dict(self, data):
|
||||
"""Helper function to parse blocks of GNTP headers into a dictionary
|
||||
|
||||
:param string data:
|
||||
:return dict:
|
||||
"""
|
||||
dict = {}
|
||||
for line in data.split('\r\n'):
|
||||
match = GNTP_HEADER.match(line)
|
||||
if not match:
|
||||
continue
|
||||
|
||||
key = unicode(match.group(1).strip(), 'utf8', 'replace')
|
||||
val = unicode(match.group(2).strip(), 'utf8', 'replace')
|
||||
dict[key] = val
|
||||
return dict
|
||||
|
||||
def add_header(self, key, value):
|
||||
if isinstance(value, unicode):
|
||||
self.headers[key] = value
|
||||
else:
|
||||
self.headers[key] = unicode('%s' % value, 'utf8', 'replace')
|
||||
|
||||
def add_resource(self, data):
|
||||
"""Add binary resource
|
||||
|
||||
:param string data: Binary Data
|
||||
"""
|
||||
identifier = hashlib.md5(data).hexdigest()
|
||||
self.resources[identifier] = data
|
||||
return 'x-growl-resource://%s' % identifier
|
||||
|
||||
def decode(self, data, password = None):
|
||||
"""Decode GNTP Message
|
||||
|
||||
:param string data:
|
||||
"""
|
||||
self.password = password
|
||||
self.raw = data
|
||||
parts = self.raw.split('\r\n\r\n')
|
||||
self.info = self._parse_info(data)
|
||||
self.headers = self._parse_dict(parts[0])
|
||||
|
||||
def encode(self):
|
||||
"""Encode a generic GNTP Message
|
||||
|
||||
:return string: GNTP Message ready to be sent
|
||||
"""
|
||||
|
||||
buffer = _GNTPBuffer()
|
||||
|
||||
buffer.writefmt(self._format_info())
|
||||
|
||||
#Headers
|
||||
for k, v in self.headers.iteritems():
|
||||
buffer.writefmt('%s: %s', k, v)
|
||||
buffer.writefmt()
|
||||
|
||||
#Resources
|
||||
for resource, data in self.resources.iteritems():
|
||||
buffer.writefmt('Identifier: %s', resource)
|
||||
buffer.writefmt('Length: %d', len(data))
|
||||
buffer.writefmt()
|
||||
buffer.write(data)
|
||||
buffer.writefmt()
|
||||
buffer.writefmt()
|
||||
|
||||
return buffer.getvalue()
|
||||
|
||||
|
||||
class GNTPRegister(_GNTPBase):
|
||||
"""Represents a GNTP Registration Command
|
||||
|
||||
:param string data: (Optional) See decode()
|
||||
:param string password: (Optional) Password to use while encoding/decoding messages
|
||||
"""
|
||||
_requiredHeaders = [
|
||||
'Application-Name',
|
||||
'Notifications-Count'
|
||||
]
|
||||
_requiredNotificationHeaders = ['Notification-Name']
|
||||
|
||||
def __init__(self, data = None, password = None):
|
||||
_GNTPBase.__init__(self, 'REGISTER')
|
||||
self.notifications = []
|
||||
|
||||
if data:
|
||||
self.decode(data, password)
|
||||
else:
|
||||
self.set_password(password)
|
||||
self.add_header('Application-Name', 'pygntp')
|
||||
self.add_header('Notifications-Count', 0)
|
||||
|
||||
def validate(self):
|
||||
'''Validate required headers and validate notification headers'''
|
||||
for header in self._requiredHeaders:
|
||||
if not self.headers.get(header, False):
|
||||
raise ParseError('Missing Registration Header: ' + header)
|
||||
for notice in self.notifications:
|
||||
for header in self._requiredNotificationHeaders:
|
||||
if not notice.get(header, False):
|
||||
raise ParseError('Missing Notification Header: ' + header)
|
||||
|
||||
def decode(self, data, password):
|
||||
"""Decode existing GNTP Registration message
|
||||
|
||||
:param string data: Message to decode
|
||||
"""
|
||||
self.raw = data
|
||||
parts = self.raw.split('\r\n\r\n')
|
||||
self.info = self._parse_info(data)
|
||||
self._validate_password(password)
|
||||
self.headers = self._parse_dict(parts[0])
|
||||
|
||||
for i, part in enumerate(parts):
|
||||
if i == 0:
|
||||
continue # Skip Header
|
||||
if part.strip() == '':
|
||||
continue
|
||||
notice = self._parse_dict(part)
|
||||
if notice.get('Notification-Name', False):
|
||||
self.notifications.append(notice)
|
||||
elif notice.get('Identifier', False):
|
||||
notice['Data'] = self._decode_binary(part, notice)
|
||||
#open('register.png','wblol').write(notice['Data'])
|
||||
self.resources[notice.get('Identifier')] = notice
|
||||
|
||||
def add_notification(self, name, enabled = True):
|
||||
"""Add new Notification to Registration message
|
||||
|
||||
:param string name: Notification Name
|
||||
:param boolean enabled: Enable this notification by default
|
||||
"""
|
||||
notice = {}
|
||||
notice['Notification-Name'] = u'%s' % name
|
||||
notice['Notification-Enabled'] = u'%s' % enabled
|
||||
|
||||
self.notifications.append(notice)
|
||||
self.add_header('Notifications-Count', len(self.notifications))
|
||||
|
||||
def encode(self):
|
||||
"""Encode a GNTP Registration Message
|
||||
|
||||
:return string: Encoded GNTP Registration message
|
||||
"""
|
||||
|
||||
buffer = _GNTPBuffer()
|
||||
|
||||
buffer.writefmt(self._format_info())
|
||||
|
||||
#Headers
|
||||
for k, v in self.headers.iteritems():
|
||||
buffer.writefmt('%s: %s', k, v)
|
||||
buffer.writefmt()
|
||||
|
||||
#Notifications
|
||||
if len(self.notifications) > 0:
|
||||
for notice in self.notifications:
|
||||
for k, v in notice.iteritems():
|
||||
buffer.writefmt('%s: %s', k, v)
|
||||
buffer.writefmt()
|
||||
|
||||
#Resources
|
||||
for resource, data in self.resources.iteritems():
|
||||
buffer.writefmt('Identifier: %s', resource)
|
||||
buffer.writefmt('Length: %d', len(data))
|
||||
buffer.writefmt()
|
||||
buffer.write(data)
|
||||
buffer.writefmt()
|
||||
buffer.writefmt()
|
||||
|
||||
return buffer.getvalue()
|
||||
|
||||
|
||||
class GNTPNotice(_GNTPBase):
|
||||
"""Represents a GNTP Notification Command
|
||||
|
||||
:param string data: (Optional) See decode()
|
||||
:param string app: (Optional) Set Application-Name
|
||||
:param string name: (Optional) Set Notification-Name
|
||||
:param string title: (Optional) Set Notification Title
|
||||
:param string password: (Optional) Password to use while encoding/decoding messages
|
||||
"""
|
||||
_requiredHeaders = [
|
||||
'Application-Name',
|
||||
'Notification-Name',
|
||||
'Notification-Title'
|
||||
]
|
||||
|
||||
def __init__(self, data = None, app = None, name = None, title = None, password = None):
|
||||
_GNTPBase.__init__(self, 'NOTIFY')
|
||||
|
||||
if data:
|
||||
self.decode(data, password)
|
||||
else:
|
||||
self.set_password(password)
|
||||
if app:
|
||||
self.add_header('Application-Name', app)
|
||||
if name:
|
||||
self.add_header('Notification-Name', name)
|
||||
if title:
|
||||
self.add_header('Notification-Title', title)
|
||||
|
||||
def decode(self, data, password):
|
||||
"""Decode existing GNTP Notification message
|
||||
|
||||
:param string data: Message to decode.
|
||||
"""
|
||||
self.raw = data
|
||||
parts = self.raw.split('\r\n\r\n')
|
||||
self.info = self._parse_info(data)
|
||||
self._validate_password(password)
|
||||
self.headers = self._parse_dict(parts[0])
|
||||
|
||||
for i, part in enumerate(parts):
|
||||
if i == 0:
|
||||
continue # Skip Header
|
||||
if part.strip() == '':
|
||||
continue
|
||||
notice = self._parse_dict(part)
|
||||
if notice.get('Identifier', False):
|
||||
notice['Data'] = self._decode_binary(part, notice)
|
||||
#open('notice.png','wblol').write(notice['Data'])
|
||||
self.resources[notice.get('Identifier')] = notice
|
||||
|
||||
|
||||
class GNTPSubscribe(_GNTPBase):
|
||||
"""Represents a GNTP Subscribe Command
|
||||
|
||||
:param string data: (Optional) See decode()
|
||||
:param string password: (Optional) Password to use while encoding/decoding messages
|
||||
"""
|
||||
_requiredHeaders = [
|
||||
'Subscriber-ID',
|
||||
'Subscriber-Name',
|
||||
]
|
||||
|
||||
def __init__(self, data = None, password = None):
|
||||
_GNTPBase.__init__(self, 'SUBSCRIBE')
|
||||
if data:
|
||||
self.decode(data, password)
|
||||
else:
|
||||
self.set_password(password)
|
||||
|
||||
|
||||
class GNTPOK(_GNTPBase):
|
||||
"""Represents a GNTP OK Response
|
||||
|
||||
:param string data: (Optional) See _GNTPResponse.decode()
|
||||
:param string action: (Optional) Set type of action the OK Response is for
|
||||
"""
|
||||
_requiredHeaders = ['Response-Action']
|
||||
|
||||
def __init__(self, data = None, action = None):
|
||||
_GNTPBase.__init__(self, '-OK')
|
||||
if data:
|
||||
self.decode(data)
|
||||
if action:
|
||||
self.add_header('Response-Action', action)
|
||||
|
||||
|
||||
class GNTPError(_GNTPBase):
|
||||
"""Represents a GNTP Error response
|
||||
|
||||
:param string data: (Optional) See _GNTPResponse.decode()
|
||||
:param string errorcode: (Optional) Error code
|
||||
:param string errordesc: (Optional) Error Description
|
||||
"""
|
||||
_requiredHeaders = ['Error-Code', 'Error-Description']
|
||||
|
||||
def __init__(self, data = None, errorcode = None, errordesc = None):
|
||||
_GNTPBase.__init__(self, '-ERROR')
|
||||
if data:
|
||||
self.decode(data)
|
||||
if errorcode:
|
||||
self.add_header('Error-Code', errorcode)
|
||||
self.add_header('Error-Description', errordesc)
|
||||
|
||||
def error(self):
|
||||
return (self.headers.get('Error-Code', None),
|
||||
self.headers.get('Error-Description', None))
|
||||
|
||||
|
||||
def parse_gntp(data, password = None):
|
||||
"""Attempt to parse a message as a GNTP message
|
||||
|
||||
:param string data: Message to be parsed
|
||||
:param string password: Optional password to be used to verify the message
|
||||
"""
|
||||
match = GNTP_INFO_LINE_SHORT.match(data)
|
||||
if not match:
|
||||
raise ParseError('INVALID_GNTP_INFO')
|
||||
info = match.groupdict()
|
||||
if info['messagetype'] == 'REGISTER':
|
||||
return GNTPRegister(data, password = password)
|
||||
elif info['messagetype'] == 'NOTIFY':
|
||||
return GNTPNotice(data, password = password)
|
||||
elif info['messagetype'] == 'SUBSCRIBE':
|
||||
return GNTPSubscribe(data, password = password)
|
||||
elif info['messagetype'] == '-OK':
|
||||
return GNTPOK(data)
|
||||
elif info['messagetype'] == '-ERROR':
|
||||
return GNTPError(data)
|
||||
raise ParseError('INVALID_GNTP_MESSAGE')
|
||||
|
||||
libs/gntp/cli.py (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
# Copyright: 2013 Paul Traylor
|
||||
# These sources are released under the terms of the MIT license: see LICENSE
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from optparse import OptionParser, OptionGroup
|
||||
|
||||
from gntp.notifier import GrowlNotifier
|
||||
from gntp.shim import RawConfigParser
|
||||
from gntp.version import __version__
|
||||
|
||||
DEFAULT_CONFIG = os.path.expanduser('~/.gntp')
|
||||
|
||||
config = RawConfigParser({
|
||||
'hostname': 'localhost',
|
||||
'password': None,
|
||||
'port': 23053,
|
||||
})
|
||||
config.read([DEFAULT_CONFIG])
|
||||
if not config.has_section('gntp'):
|
||||
config.add_section('gntp')
|
||||
|
||||
|
||||
class ClientParser(OptionParser):
|
||||
def __init__(self):
|
||||
OptionParser.__init__(self, version="%%prog %s" % __version__)
|
||||
|
||||
group = OptionGroup(self, "Network Options")
|
||||
group.add_option("-H", "--host",
|
||||
dest="host", default=config.get('gntp', 'hostname'),
|
||||
help="Specify a hostname to which to send a remote notification. [%default]")
|
||||
group.add_option("--port",
|
||||
dest="port", default=config.getint('gntp', 'port'), type="int",
|
||||
help="port to listen on [%default]")
|
||||
group.add_option("-P", "--password",
|
||||
dest='password', default=config.get('gntp', 'password'),
|
||||
help="Network password")
|
||||
self.add_option_group(group)
|
||||
|
||||
group = OptionGroup(self, "Notification Options")
|
||||
group.add_option("-n", "--name",
|
||||
dest="app", default='Python GNTP Test Client',
|
||||
help="Set the name of the application [%default]")
|
||||
group.add_option("-s", "--sticky",
|
||||
dest='sticky', default=False, action="store_true",
|
||||
help="Make the notification sticky [%default]")
|
||||
group.add_option("--image",
|
||||
dest="icon", default=None,
|
||||
help="Icon for notification (URL or /path/to/file)")
|
||||
group.add_option("-m", "--message",
|
||||
dest="message", default=None,
|
||||
help="Sets the message instead of using stdin")
|
||||
group.add_option("-p", "--priority",
|
||||
dest="priority", default=0, type="int",
|
||||
help="-2 to 2 [%default]")
|
||||
group.add_option("-d", "--identifier",
|
||||
dest="identifier",
|
||||
help="Identifier for coalescing")
|
||||
group.add_option("-t", "--title",
|
||||
dest="title", default=None,
|
||||
help="Set the title of the notification [%default]")
|
||||
group.add_option("-N", "--notification",
|
||||
dest="name", default='Notification',
|
||||
help="Set the notification name [%default]")
|
||||
group.add_option("--callback",
|
||||
dest="callback",
|
||||
help="URL callback")
|
||||
self.add_option_group(group)
|
||||
|
||||
# Extra Options
|
||||
self.add_option('-v', '--verbose',
|
||||
dest='verbose', default=0, action='count',
|
||||
help="Verbosity levels")
|
||||
|
||||
def parse_args(self, args=None, values=None):
|
||||
values, args = OptionParser.parse_args(self, args, values)
|
||||
|
||||
if values.message is None:
|
||||
print('Enter a message followed by Ctrl-D')
|
||||
try:
|
||||
message = sys.stdin.read()
|
||||
except KeyboardInterrupt:
|
||||
exit()
|
||||
else:
|
||||
message = values.message
|
||||
|
||||
if values.title is None:
|
||||
values.title = ' '.join(args)
|
||||
|
||||
# If we still have an empty title, use the
|
||||
# first bit of the message as the title
|
||||
if values.title == '':
|
||||
values.title = message[:20]
|
||||
|
||||
values.verbose = logging.WARNING - values.verbose * 10
|
||||
|
||||
return values, message
|
||||
|
||||
|
||||
def main():
|
||||
(options, message) = ClientParser().parse_args()
|
||||
logging.basicConfig(level=options.verbose)
|
||||
if not os.path.exists(DEFAULT_CONFIG):
|
||||
logging.info('No config read found at %s', DEFAULT_CONFIG)
|
||||
|
||||
growl = GrowlNotifier(
|
||||
applicationName=options.app,
|
||||
notifications=[options.name],
|
||||
defaultNotifications=[options.name],
|
||||
hostname=options.host,
|
||||
password=options.password,
|
||||
port=options.port,
|
||||
)
|
||||
result = growl.register()
|
||||
if result is not True:
|
||||
exit(result)
|
||||
|
||||
# This would likely be better placed within the growl notifier
|
||||
# class but until I make _checkIcon smarter this is "easier"
|
||||
if options.icon is not None and not options.icon.startswith('http'):
|
||||
logging.info('Loading image %s', options.icon)
|
||||
f = open(options.icon)
|
||||
options.icon = f.read()
|
||||
f.close()
|
||||
|
||||
result = growl.notify(
|
||||
noteType=options.name,
|
||||
title=options.title,
|
||||
description=message,
|
||||
icon=options.icon,
|
||||
sticky=options.sticky,
|
||||
priority=options.priority,
|
||||
callback=options.callback,
|
||||
identifier=options.identifier,
|
||||
)
|
||||
if result is not True:
|
||||
exit(result)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
libs/gntp/config.py (new file, 77 lines)
@@ -0,0 +1,77 @@
# Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE

"""
The gntp.config module is provided as an extended GrowlNotifier object that takes
advantage of the ConfigParser module to allow us to set up some default values
(such as hostname, password, and port) in a more global way to be shared among
programs using gntp
"""
import logging
import os

import gntp.notifier
import gntp.shim

__all__ = [
	'mini',
	'GrowlNotifier'
]

logger = logging.getLogger(__name__)


class GrowlNotifier(gntp.notifier.GrowlNotifier):
	"""
	ConfigParser enhanced GrowlNotifier object

	For right now, we are only interested in letting users override certain
	values from ~/.gntp

	::

		[gntp]
		hostname = ?
		password = ?
		port = ?
	"""
	def __init__(self, *args, **kwargs):
		config = gntp.shim.RawConfigParser({
			'hostname': kwargs.get('hostname', 'localhost'),
			'password': kwargs.get('password'),
			'port': kwargs.get('port', 23053),
		})

		config.read([os.path.expanduser('~/.gntp')])

		# If the file does not exist, then there will be no gntp section defined
		# and the config.get() lines below will get confused. Since we are not
		# saving the config, it should be safe to just add it here so the
		# code below doesn't complain
		if not config.has_section('gntp'):
			logger.info('Error reading ~/.gntp config file')
			config.add_section('gntp')

		kwargs['password'] = config.get('gntp', 'password')
		kwargs['hostname'] = config.get('gntp', 'hostname')
		kwargs['port'] = config.getint('gntp', 'port')

		super(GrowlNotifier, self).__init__(*args, **kwargs)


def mini(description, **kwargs):
	"""Single notification function

	Simple notification function in one line. Has only one required parameter
	and attempts to use reasonable defaults for everything else
	:param string description: Notification message
	"""
	kwargs['notifierFactory'] = GrowlNotifier
	gntp.notifier.mini(description, **kwargs)


if __name__ == '__main__':
	# If we're running this module directly we're likely running it as a test
	# so extra debugging is useful
	logging.basicConfig(level=logging.INFO)
	mini('Testing mini notification')
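A short usage sketch for the config-aware notifier defined above; the ~/.gntp values shown are placeholders and assume a GNTP server is reachable at that address:

# Hypothetical ~/.gntp (read by RawConfigParser in GrowlNotifier.__init__):
#
#   [gntp]
#   hostname = 192.168.1.5
#   password = secret
#   port = 23053

import gntp.config

# mini() registers a one-off application and sends a single notification,
# picking up hostname/password/port from ~/.gntp when the file exists.
gntp.config.mini('Build finished', title='Example App')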
libs/gntp/core.py (new file, 511 lines)
@@ -0,0 +1,511 @@
|
||||
# Copyright: 2013 Paul Traylor
|
||||
# These sources are released under the terms of the MIT license: see LICENSE
|
||||
|
||||
import hashlib
|
||||
import re
|
||||
import time
|
||||
|
||||
import gntp.shim
|
||||
import gntp.errors as errors
|
||||
|
||||
__all__ = [
|
||||
'GNTPRegister',
|
||||
'GNTPNotice',
|
||||
'GNTPSubscribe',
|
||||
'GNTPOK',
|
||||
'GNTPError',
|
||||
'parse_gntp',
|
||||
]
|
||||
|
||||
#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
|
||||
GNTP_INFO_LINE = re.compile(
|
||||
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)' +
|
||||
' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?' +
|
||||
'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n',
|
||||
re.IGNORECASE
|
||||
)
|
||||
|
||||
GNTP_INFO_LINE_SHORT = re.compile(
|
||||
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',
|
||||
re.IGNORECASE
|
||||
)
|
||||
|
||||
GNTP_HEADER = re.compile('([\w-]+):(.+)')
|
||||
|
||||
GNTP_EOL = gntp.shim.b('\r\n')
|
||||
GNTP_SEP = gntp.shim.b(': ')
|
||||
|
||||
|
||||
class _GNTPBuffer(gntp.shim.StringIO):
|
||||
"""GNTP Buffer class"""
|
||||
def writeln(self, value=None):
|
||||
if value:
|
||||
self.write(gntp.shim.b(value))
|
||||
self.write(GNTP_EOL)
|
||||
|
||||
def writeheader(self, key, value):
|
||||
if not isinstance(value, str):
|
||||
value = str(value)
|
||||
self.write(gntp.shim.b(key))
|
||||
self.write(GNTP_SEP)
|
||||
self.write(gntp.shim.b(value))
|
||||
self.write(GNTP_EOL)
|
||||
|
||||
|
||||
class _GNTPBase(object):
|
||||
"""Base initilization
|
||||
|
||||
:param string messagetype: GNTP Message type
|
||||
:param string version: GNTP Protocol version
|
||||
:param string encryption: Encryption protocol
|
||||
"""
|
||||
def __init__(self, messagetype=None, version='1.0', encryption=None):
|
||||
self.info = {
|
||||
'version': version,
|
||||
'messagetype': messagetype,
|
||||
'encryptionAlgorithmID': encryption
|
||||
}
|
||||
self.hash_algo = {
|
||||
'MD5': hashlib.md5,
|
||||
'SHA1': hashlib.sha1,
|
||||
'SHA256': hashlib.sha256,
|
||||
'SHA512': hashlib.sha512,
|
||||
}
|
||||
self.headers = {}
|
||||
self.resources = {}
|
||||
|
||||
def __str__(self):
|
||||
return self.encode()
|
||||
|
||||
def _parse_info(self, data):
|
||||
"""Parse the first line of a GNTP message to get security and other info values
|
||||
|
||||
:param string data: GNTP Message
|
||||
:return dict: Parsed GNTP Info line
|
||||
"""
|
||||
|
||||
match = GNTP_INFO_LINE.match(data)
|
||||
|
||||
if not match:
|
||||
raise errors.ParseError('ERROR_PARSING_INFO_LINE')
|
||||
|
||||
info = match.groupdict()
|
||||
if info['encryptionAlgorithmID'] == 'NONE':
|
||||
info['encryptionAlgorithmID'] = None
|
||||
|
||||
return info
|
||||
|
||||
def set_password(self, password, encryptAlgo='MD5'):
|
||||
"""Set a password for a GNTP Message
|
||||
|
||||
:param string password: Null to clear password
|
||||
:param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
|
||||
"""
|
||||
if not password:
|
||||
self.info['encryptionAlgorithmID'] = None
|
||||
self.info['keyHashAlgorithm'] = None
|
||||
return
|
||||
|
||||
self.password = gntp.shim.b(password)
|
||||
self.encryptAlgo = encryptAlgo.upper()
|
||||
|
||||
if not self.encryptAlgo in self.hash_algo:
|
||||
raise errors.UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
|
||||
|
||||
hashfunction = self.hash_algo.get(self.encryptAlgo)
|
||||
|
||||
password = password.encode('utf8')
|
||||
seed = time.ctime().encode('utf8')
|
||||
salt = hashfunction(seed).hexdigest()
|
||||
saltHash = hashfunction(seed).digest()
|
||||
keyBasis = password + saltHash
|
||||
key = hashfunction(keyBasis).digest()
|
||||
keyHash = hashfunction(key).hexdigest()
|
||||
|
||||
self.info['keyHashAlgorithmID'] = self.encryptAlgo
|
||||
self.info['keyHash'] = keyHash.upper()
|
||||
self.info['salt'] = salt.upper()
|
||||
|
||||
def _decode_hex(self, value):
|
||||
"""Helper function to decode hex string to `proper` hex string
|
||||
|
||||
:param string value: Human readable hex string
|
||||
:return string: Hex string
|
||||
"""
|
||||
result = ''
|
||||
for i in range(0, len(value), 2):
|
||||
tmp = int(value[i:i + 2], 16)
|
||||
result += chr(tmp)
|
||||
return result
|
||||
|
||||
def _decode_binary(self, rawIdentifier, identifier):
|
||||
rawIdentifier += '\r\n\r\n'
|
||||
dataLength = int(identifier['Length'])
|
||||
pointerStart = self.raw.find(rawIdentifier) + len(rawIdentifier)
|
||||
pointerEnd = pointerStart + dataLength
|
||||
data = self.raw[pointerStart:pointerEnd]
|
||||
if not len(data) == dataLength:
|
||||
raise errors.ParseError('INVALID_DATA_LENGTH Expected: %s Received %s' % (dataLength, len(data)))
|
||||
return data
|
||||
|
||||
def _validate_password(self, password):
|
||||
"""Validate GNTP Message against stored password"""
|
||||
self.password = password
|
||||
if password is None:
|
||||
raise errors.AuthError('Missing password')
|
||||
keyHash = self.info.get('keyHash', None)
|
||||
if keyHash is None and self.password is None:
|
||||
return True
|
||||
if keyHash is None:
|
||||
raise errors.AuthError('Invalid keyHash')
|
||||
if self.password is None:
|
||||
raise errors.AuthError('Missing password')
|
||||
|
||||
keyHashAlgorithmID = self.info.get('keyHashAlgorithmID','MD5')
|
||||
|
||||
password = self.password.encode('utf8')
|
||||
saltHash = self._decode_hex(self.info['salt'])
|
||||
|
||||
keyBasis = password + saltHash
|
||||
self.key = self.hash_algo[keyHashAlgorithmID](keyBasis).digest()
|
||||
keyHash = self.hash_algo[keyHashAlgorithmID](self.key).hexdigest()
|
||||
|
||||
if not keyHash.upper() == self.info['keyHash'].upper():
|
||||
raise errors.AuthError('Invalid Hash')
|
||||
return True
|
||||
|
||||
def validate(self):
|
||||
"""Verify required headers"""
|
||||
for header in self._requiredHeaders:
|
||||
if not self.headers.get(header, False):
|
||||
raise errors.ParseError('Missing Notification Header: ' + header)
|
||||
|
||||
def _format_info(self):
|
||||
"""Generate info line for GNTP Message
|
||||
|
||||
:return string:
|
||||
"""
|
||||
info = 'GNTP/%s %s' % (
|
||||
self.info.get('version'),
|
||||
self.info.get('messagetype'),
|
||||
)
|
||||
if self.info.get('encryptionAlgorithmID', None):
|
||||
info += ' %s:%s' % (
|
||||
self.info.get('encryptionAlgorithmID'),
|
||||
self.info.get('ivValue'),
|
||||
)
|
||||
else:
|
||||
info += ' NONE'
|
||||
|
||||
if self.info.get('keyHashAlgorithmID', None):
|
||||
info += ' %s:%s.%s' % (
|
||||
self.info.get('keyHashAlgorithmID'),
|
||||
self.info.get('keyHash'),
|
||||
self.info.get('salt')
|
||||
)
|
||||
|
||||
return info
|
||||
|
||||
def _parse_dict(self, data):
|
||||
"""Helper function to parse blocks of GNTP headers into a dictionary
|
||||
|
||||
:param string data:
|
||||
:return dict: Dictionary of parsed GNTP Headers
|
||||
"""
|
||||
d = {}
|
||||
for line in data.split('\r\n'):
|
||||
match = GNTP_HEADER.match(line)
|
||||
if not match:
|
||||
continue
|
||||
|
||||
key = match.group(1).strip()
|
||||
val = match.group(2).strip()
|
||||
d[key] = val
|
||||
return d
|
||||
|
||||
def add_header(self, key, value):
|
||||
self.headers[key] = value
|
||||
|
||||
def add_resource(self, data):
|
||||
"""Add binary resource
|
||||
|
||||
:param string data: Binary Data
|
||||
"""
|
||||
data = gntp.shim.b(data)
|
||||
identifier = hashlib.md5(data).hexdigest()
|
||||
self.resources[identifier] = data
|
||||
return 'x-growl-resource://%s' % identifier
|
||||
|
||||
def decode(self, data, password=None):
|
||||
"""Decode GNTP Message
|
||||
|
||||
:param string data:
|
||||
"""
|
||||
self.password = password
|
||||
self.raw = gntp.shim.u(data)
|
||||
parts = self.raw.split('\r\n\r\n')
|
||||
self.info = self._parse_info(self.raw)
|
||||
self.headers = self._parse_dict(parts[0])
|
||||
|
||||
def encode(self):
|
||||
"""Encode a generic GNTP Message
|
||||
|
||||
:return string: GNTP Message ready to be sent. Returned as a byte string
|
||||
"""
|
||||
|
||||
buff = _GNTPBuffer()
|
||||
|
||||
buff.writeln(self._format_info())
|
||||
|
||||
#Headers
|
||||
for k, v in self.headers.items():
|
||||
buff.writeheader(k, v)
|
||||
buff.writeln()
|
||||
|
||||
#Resources
|
||||
for resource, data in self.resources.items():
|
||||
buff.writeheader('Identifier', resource)
|
||||
buff.writeheader('Length', len(data))
|
||||
buff.writeln()
|
||||
buff.write(data)
|
||||
buff.writeln()
|
||||
buff.writeln()
|
||||
|
||||
return buff.getvalue()
|
||||
|
||||
|
||||
class GNTPRegister(_GNTPBase):
|
||||
"""Represents a GNTP Registration Command
|
||||
|
||||
:param string data: (Optional) See decode()
|
||||
:param string password: (Optional) Password to use while encoding/decoding messages
|
||||
"""
|
||||
_requiredHeaders = [
|
||||
'Application-Name',
|
||||
'Notifications-Count'
|
||||
]
|
||||
_requiredNotificationHeaders = ['Notification-Name']
|
||||
|
||||
def __init__(self, data=None, password=None):
|
||||
_GNTPBase.__init__(self, 'REGISTER')
|
||||
self.notifications = []
|
||||
|
||||
if data:
|
||||
self.decode(data, password)
|
||||
else:
|
||||
self.set_password(password)
|
||||
self.add_header('Application-Name', 'pygntp')
|
||||
self.add_header('Notifications-Count', 0)
|
||||
|
||||
def validate(self):
|
||||
'''Validate required headers and validate notification headers'''
|
||||
for header in self._requiredHeaders:
|
||||
if not self.headers.get(header, False):
|
||||
raise errors.ParseError('Missing Registration Header: ' + header)
|
||||
for notice in self.notifications:
|
||||
for header in self._requiredNotificationHeaders:
|
||||
if not notice.get(header, False):
|
||||
raise errors.ParseError('Missing Notification Header: ' + header)
|
||||
|
||||
def decode(self, data, password):
|
||||
"""Decode existing GNTP Registration message
|
||||
|
||||
:param string data: Message to decode
|
||||
"""
|
||||
self.raw = gntp.shim.u(data)
|
||||
parts = self.raw.split('\r\n\r\n')
|
||||
self.info = self._parse_info(self.raw)
|
||||
self._validate_password(password)
|
||||
self.headers = self._parse_dict(parts[0])
|
||||
|
||||
for i, part in enumerate(parts):
|
||||
if i == 0:
|
||||
continue # Skip Header
|
||||
if part.strip() == '':
|
||||
continue
|
||||
notice = self._parse_dict(part)
|
||||
if notice.get('Notification-Name', False):
|
||||
self.notifications.append(notice)
|
||||
elif notice.get('Identifier', False):
|
||||
notice['Data'] = self._decode_binary(part, notice)
|
||||
#open('register.png','wblol').write(notice['Data'])
|
||||
self.resources[notice.get('Identifier')] = notice
|
||||
|
||||
def add_notification(self, name, enabled=True):
|
||||
"""Add new Notification to Registration message
|
||||
|
||||
:param string name: Notification Name
|
||||
:param boolean enabled: Enable this notification by default
|
||||
"""
|
||||
notice = {}
|
||||
notice['Notification-Name'] = name
|
||||
notice['Notification-Enabled'] = enabled
|
||||
|
||||
self.notifications.append(notice)
|
||||
self.add_header('Notifications-Count', len(self.notifications))
|
||||
|
||||
def encode(self):
|
||||
"""Encode a GNTP Registration Message
|
||||
|
||||
:return string: Encoded GNTP Registration message. Returned as a byte string
|
||||
"""
|
||||
|
||||
buff = _GNTPBuffer()
|
||||
|
||||
buff.writeln(self._format_info())
|
||||
|
||||
#Headers
|
||||
for k, v in self.headers.items():
|
||||
buff.writeheader(k, v)
|
||||
buff.writeln()
|
||||
|
||||
#Notifications
|
||||
if len(self.notifications) > 0:
|
||||
for notice in self.notifications:
|
||||
for k, v in notice.items():
|
||||
buff.writeheader(k, v)
|
||||
buff.writeln()
|
||||
|
||||
#Resources
|
||||
for resource, data in self.resources.items():
|
||||
buff.writeheader('Identifier', resource)
|
||||
buff.writeheader('Length', len(data))
|
||||
buff.writeln()
|
||||
buff.write(data)
|
||||
buff.writeln()
|
||||
buff.writeln()
|
||||
|
||||
return buff.getvalue()
|
||||
|
||||
|
||||
class GNTPNotice(_GNTPBase):
|
||||
"""Represents a GNTP Notification Command
|
||||
|
||||
:param string data: (Optional) See decode()
|
||||
:param string app: (Optional) Set Application-Name
|
||||
:param string name: (Optional) Set Notification-Name
|
||||
:param string title: (Optional) Set Notification Title
|
||||
:param string password: (Optional) Password to use while encoding/decoding messages
|
||||
"""
|
||||
_requiredHeaders = [
|
||||
'Application-Name',
|
||||
'Notification-Name',
|
||||
'Notification-Title'
|
||||
]
|
||||
|
||||
def __init__(self, data=None, app=None, name=None, title=None, password=None):
|
||||
_GNTPBase.__init__(self, 'NOTIFY')
|
||||
|
||||
if data:
|
||||
self.decode(data, password)
|
||||
else:
|
||||
self.set_password(password)
|
||||
if app:
|
||||
self.add_header('Application-Name', app)
|
||||
if name:
|
||||
self.add_header('Notification-Name', name)
|
||||
if title:
|
||||
self.add_header('Notification-Title', title)
|
||||
|
||||
def decode(self, data, password):
|
||||
"""Decode existing GNTP Notification message
|
||||
|
||||
:param string data: Message to decode.
|
||||
"""
|
||||
self.raw = gntp.shim.u(data)
|
||||
parts = self.raw.split('\r\n\r\n')
|
||||
self.info = self._parse_info(self.raw)
|
||||
self._validate_password(password)
|
||||
self.headers = self._parse_dict(parts[0])
|
||||
|
||||
for i, part in enumerate(parts):
|
||||
if i == 0:
|
||||
continue # Skip Header
|
||||
if part.strip() == '':
|
||||
continue
|
||||
notice = self._parse_dict(part)
|
||||
if notice.get('Identifier', False):
|
||||
notice['Data'] = self._decode_binary(part, notice)
|
||||
#open('notice.png','wblol').write(notice['Data'])
|
||||
self.resources[notice.get('Identifier')] = notice
|
||||
|
||||
|
||||
class GNTPSubscribe(_GNTPBase):
|
||||
"""Represents a GNTP Subscribe Command
|
||||
|
||||
:param string data: (Optional) See decode()
|
||||
:param string password: (Optional) Password to use while encoding/decoding messages
|
||||
"""
|
||||
_requiredHeaders = [
|
||||
'Subscriber-ID',
|
||||
'Subscriber-Name',
|
||||
]
|
||||
|
||||
def __init__(self, data=None, password=None):
|
||||
_GNTPBase.__init__(self, 'SUBSCRIBE')
|
||||
if data:
|
||||
self.decode(data, password)
|
||||
else:
|
||||
self.set_password(password)
|
||||
|
||||
|
||||
class GNTPOK(_GNTPBase):
|
||||
"""Represents a GNTP OK Response
|
||||
|
||||
:param string data: (Optional) See _GNTPResponse.decode()
|
||||
:param string action: (Optional) Set type of action the OK Response is for
|
||||
"""
|
||||
_requiredHeaders = ['Response-Action']
|
||||
|
||||
def __init__(self, data=None, action=None):
|
||||
_GNTPBase.__init__(self, '-OK')
|
||||
if data:
|
||||
self.decode(data)
|
||||
if action:
|
||||
self.add_header('Response-Action', action)
|
||||
|
||||
|
||||
class GNTPError(_GNTPBase):
|
||||
"""Represents a GNTP Error response
|
||||
|
||||
:param string data: (Optional) See _GNTPResponse.decode()
|
||||
:param string errorcode: (Optional) Error code
|
||||
:param string errordesc: (Optional) Error Description
|
||||
"""
|
||||
_requiredHeaders = ['Error-Code', 'Error-Description']
|
||||
|
||||
def __init__(self, data=None, errorcode=None, errordesc=None):
|
||||
_GNTPBase.__init__(self, '-ERROR')
|
||||
if data:
|
||||
self.decode(data)
|
||||
if errorcode:
|
||||
self.add_header('Error-Code', errorcode)
|
||||
self.add_header('Error-Description', errordesc)
|
||||
|
||||
def error(self):
|
||||
return (self.headers.get('Error-Code', None),
|
||||
self.headers.get('Error-Description', None))
|
||||
|
||||
|
||||
def parse_gntp(data, password=None):
|
||||
"""Attempt to parse a message as a GNTP message
|
||||
|
||||
:param string data: Message to be parsed
|
||||
:param string password: Optional password to be used to verify the message
|
||||
"""
|
||||
data = gntp.shim.u(data)
|
||||
match = GNTP_INFO_LINE_SHORT.match(data)
|
||||
if not match:
|
||||
raise errors.ParseError('INVALID_GNTP_INFO')
|
||||
info = match.groupdict()
|
||||
if info['messagetype'] == 'REGISTER':
|
||||
return GNTPRegister(data, password=password)
|
||||
elif info['messagetype'] == 'NOTIFY':
|
||||
return GNTPNotice(data, password=password)
|
||||
elif info['messagetype'] == 'SUBSCRIBE':
|
||||
return GNTPSubscribe(data, password=password)
|
||||
elif info['messagetype'] == '-OK':
|
||||
return GNTPOK(data)
|
||||
elif info['messagetype'] == '-ERROR':
|
||||
return GNTPError(data)
|
||||
raise errors.ParseError('INVALID_GNTP_MESSAGE')
|
||||
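A minimal sketch of how the message classes defined above fit together; the application and notification names are illustrative:

from gntp.core import GNTPRegister, GNTPNotice

# Register an application and one notification type.
register = GNTPRegister()
register.add_header('Application-Name', 'Example App')
register.add_notification('Build', enabled=True)

# Build a notification for that type.
notice = GNTPNotice(app='Example App', name='Build', title='Build finished')
notice.add_header('Notification-Text', 'All tests passed')

# encode() produces the CRLF-delimited wire format as a byte string; the
# first line looks like "GNTP/1.0 NOTIFY NONE".  parse_gntp() is the
# inverse, used on data read back from the socket.
payload = notice.encode()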
libs/gntp/errors.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE

class BaseError(Exception):
	pass


class ParseError(BaseError):
	errorcode = 500
	errordesc = 'Error parsing the message'


class AuthError(BaseError):
	errorcode = 400
	errordesc = 'Error with authorization'


class UnsupportedError(BaseError):
	errorcode = 500
	errordesc = 'Currently unsupported by gntp.py'


class NetworkError(BaseError):
	errorcode = 500
	errordesc = "Error connecting to growl server"
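Because the notifier below raises these typed exceptions rather than leaking raw socket errors, callers can degrade gracefully; a small sketch, with placeholder application and notification names:

import gntp.errors as errors
import gntp.notifier

growl = gntp.notifier.GrowlNotifier(
	applicationName='Example App',
	notifications=['Build'],
)
try:
	growl.register()
	growl.notify(noteType='Build', title='Build finished', description='All tests passed')
except errors.NetworkError:
	# Raised by GrowlNotifier._send() when the GNTP server cannot be reached.
	print('GNTP server unreachable, skipping notification')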
@@ -1,3 +1,6 @@
|
||||
# Copyright: 2013 Paul Traylor
|
||||
# These sources are released under the terms of the MIT license: see LICENSE
|
||||
|
||||
"""
|
||||
The gntp.notifier module is provided as a simple way to send notifications
|
||||
using GNTP
|
||||
@@ -9,10 +12,15 @@ using GNTP
|
||||
`Original Python bindings <http://code.google.com/p/growl/source/browse/Bindings/python/Growl.py>`_
|
||||
|
||||
"""
|
||||
import gntp
|
||||
import socket
|
||||
import logging
|
||||
import platform
|
||||
import socket
|
||||
import sys
|
||||
|
||||
from gntp.version import __version__
|
||||
import gntp.core
|
||||
import gntp.errors as errors
|
||||
import gntp.shim
|
||||
|
||||
__all__ = [
|
||||
'mini',
|
||||
@@ -37,9 +45,9 @@ class GrowlNotifier(object):
|
||||
passwordHash = 'MD5'
|
||||
socketTimeout = 3
|
||||
|
||||
def __init__(self, applicationName = 'Python GNTP', notifications = [],
|
||||
defaultNotifications = None, applicationIcon = None, hostname = 'localhost',
|
||||
password = None, port = 23053):
|
||||
def __init__(self, applicationName='Python GNTP', notifications=[],
|
||||
defaultNotifications=None, applicationIcon=None, hostname='localhost',
|
||||
password=None, port=23053):
|
||||
|
||||
self.applicationName = applicationName
|
||||
self.notifications = list(notifications)
|
||||
@@ -61,7 +69,7 @@ class GrowlNotifier(object):
|
||||
then we return False
|
||||
'''
|
||||
logger.info('Checking icon')
|
||||
return data.startswith('http')
|
||||
return gntp.shim.u(data).startswith('http')
|
||||
|
||||
def register(self):
|
||||
"""Send GNTP Registration
|
||||
@@ -71,7 +79,7 @@ class GrowlNotifier(object):
|
||||
sent a registration message at least once
|
||||
"""
|
||||
logger.info('Sending registration to %s:%s', self.hostname, self.port)
|
||||
register = gntp.GNTPRegister()
|
||||
register = gntp.core.GNTPRegister()
|
||||
register.add_header('Application-Name', self.applicationName)
|
||||
for notification in self.notifications:
|
||||
enabled = notification in self.defaultNotifications
|
||||
@@ -80,16 +88,16 @@ class GrowlNotifier(object):
|
||||
if self._checkIcon(self.applicationIcon):
|
||||
register.add_header('Application-Icon', self.applicationIcon)
|
||||
else:
|
||||
id = register.add_resource(self.applicationIcon)
|
||||
register.add_header('Application-Icon', id)
|
||||
resource = register.add_resource(self.applicationIcon)
|
||||
register.add_header('Application-Icon', resource)
|
||||
if self.password:
|
||||
register.set_password(self.password, self.passwordHash)
|
||||
self.add_origin_info(register)
|
||||
self.register_hook(register)
|
||||
return self._send('register', register)
|
||||
|
||||
def notify(self, noteType, title, description, icon = None, sticky = False,
|
||||
priority = None, callback = None, identifier = None):
|
||||
def notify(self, noteType, title, description, icon=None, sticky=False,
|
||||
priority=None, callback=None, identifier=None, custom={}):
|
||||
"""Send a GNTP notifications
|
||||
|
||||
.. warning::
|
||||
@@ -102,6 +110,8 @@ class GrowlNotifier(object):
|
||||
:param boolean sticky: Sticky notification
|
||||
:param integer priority: Message priority level from -2 to 2
|
||||
:param string callback: URL callback
|
||||
:param dict custom: Custom attributes. Key names should be prefixed with X-
|
||||
according to the spec but this is not enforced by this class
|
||||
|
||||
.. warning::
|
||||
For now, only URL callbacks are supported. In the future, the
|
||||
@@ -109,7 +119,7 @@ class GrowlNotifier(object):
|
||||
"""
|
||||
logger.info('Sending notification [%s] to %s:%s', noteType, self.hostname, self.port)
|
||||
assert noteType in self.notifications
|
||||
notice = gntp.GNTPNotice()
|
||||
notice = gntp.core.GNTPNotice()
|
||||
notice.add_header('Application-Name', self.applicationName)
|
||||
notice.add_header('Notification-Name', noteType)
|
||||
notice.add_header('Notification-Title', title)
|
||||
@@ -123,8 +133,8 @@ class GrowlNotifier(object):
|
||||
if self._checkIcon(icon):
|
||||
notice.add_header('Notification-Icon', icon)
|
||||
else:
|
||||
id = notice.add_resource(icon)
|
||||
notice.add_header('Notification-Icon', id)
|
||||
resource = notice.add_resource(icon)
|
||||
notice.add_header('Notification-Icon', resource)
|
||||
|
||||
if description:
|
||||
notice.add_header('Notification-Text', description)
|
||||
@@ -133,6 +143,9 @@ class GrowlNotifier(object):
|
||||
if identifier:
|
||||
notice.add_header('Notification-Coalescing-ID', identifier)
|
||||
|
||||
for key in custom:
|
||||
notice.add_header(key, custom[key])
|
||||
|
||||
self.add_origin_info(notice)
|
||||
self.notify_hook(notice)
|
||||
|
||||
@@ -140,7 +153,7 @@ class GrowlNotifier(object):
|
||||
|
||||
def subscribe(self, id, name, port):
|
||||
"""Send a Subscribe request to a remote machine"""
|
||||
sub = gntp.GNTPSubscribe()
|
||||
sub = gntp.core.GNTPSubscribe()
|
||||
sub.add_header('Subscriber-ID', id)
|
||||
sub.add_header('Subscriber-Name', name)
|
||||
sub.add_header('Subscriber-Port', port)
|
||||
@@ -156,7 +169,7 @@ class GrowlNotifier(object):
|
||||
"""Add optional Origin headers to message"""
|
||||
packet.add_header('Origin-Machine-Name', platform.node())
|
||||
packet.add_header('Origin-Software-Name', 'gntp.py')
|
||||
packet.add_header('Origin-Software-Version', gntp.__version__)
|
||||
packet.add_header('Origin-Software-Version', __version__)
|
||||
packet.add_header('Origin-Platform-Name', platform.system())
|
||||
packet.add_header('Origin-Platform-Version', platform.platform())
|
||||
|
||||
@@ -179,27 +192,33 @@ class GrowlNotifier(object):
|
||||
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
s.settimeout(self.socketTimeout)
|
||||
s.connect((self.hostname, self.port))
|
||||
s.send(data)
|
||||
recv_data = s.recv(1024)
|
||||
while not recv_data.endswith("\r\n\r\n"):
|
||||
recv_data += s.recv(1024)
|
||||
response = gntp.parse_gntp(recv_data)
|
||||
try:
|
||||
s.connect((self.hostname, self.port))
|
||||
s.send(data)
|
||||
recv_data = s.recv(1024)
|
||||
while not recv_data.endswith(gntp.shim.b("\r\n\r\n")):
|
||||
recv_data += s.recv(1024)
|
||||
except socket.error:
|
||||
# Python2.5 and Python3 compatible exception
|
||||
exc = sys.exc_info()[1]
|
||||
raise errors.NetworkError(exc)
|
||||
|
||||
response = gntp.core.parse_gntp(recv_data)
|
||||
s.close()
|
||||
|
||||
logger.debug('From : %s:%s <%s>\n%s', self.hostname, self.port, response.__class__, response)
|
||||
|
||||
if type(response) == gntp.GNTPOK:
|
||||
if type(response) == gntp.core.GNTPOK:
|
||||
return True
|
||||
logger.error('Invalid response: %s', response.error())
|
||||
return response.error()
|
||||
|
||||
|
||||
def mini(description, applicationName = 'PythonMini', noteType = "Message",
|
||||
title = "Mini Message", applicationIcon = None, hostname = 'localhost',
|
||||
password = None, port = 23053, sticky = False, priority = None,
|
||||
callback = None, notificationIcon = None, identifier = None,
|
||||
notifierFactory = GrowlNotifier):
|
||||
def mini(description, applicationName='PythonMini', noteType="Message",
|
||||
title="Mini Message", applicationIcon=None, hostname='localhost',
|
||||
password=None, port=23053, sticky=False, priority=None,
|
||||
callback=None, notificationIcon=None, identifier=None,
|
||||
notifierFactory=GrowlNotifier):
|
||||
"""Single notification function
|
||||
|
||||
Simple notification function in one line. Has only one required parameter
|
||||
@@ -210,32 +229,37 @@ def mini(description, applicationName = 'PythonMini', noteType = "Message",
|
||||
For now, only URL callbacks are supported. In the future, the
|
||||
callback argument will also support a function
|
||||
"""
|
||||
growl = notifierFactory(
|
||||
applicationName = applicationName,
|
||||
notifications = [noteType],
|
||||
defaultNotifications = [noteType],
|
||||
applicationIcon = applicationIcon,
|
||||
hostname = hostname,
|
||||
password = password,
|
||||
port = port,
|
||||
)
|
||||
result = growl.register()
|
||||
if result is not True:
|
||||
return result
|
||||
try:
|
||||
growl = notifierFactory(
|
||||
applicationName=applicationName,
|
||||
notifications=[noteType],
|
||||
defaultNotifications=[noteType],
|
||||
applicationIcon=applicationIcon,
|
||||
hostname=hostname,
|
||||
password=password,
|
||||
port=port,
|
||||
)
|
||||
result = growl.register()
|
||||
if result is not True:
|
||||
return result
|
||||
|
||||
return growl.notify(
|
||||
noteType = noteType,
|
||||
title = title,
|
||||
description = description,
|
||||
icon = notificationIcon,
|
||||
sticky = sticky,
|
||||
priority = priority,
|
||||
callback = callback,
|
||||
identifier = identifier,
|
||||
)
|
||||
return growl.notify(
|
||||
noteType=noteType,
|
||||
title=title,
|
||||
description=description,
|
||||
icon=notificationIcon,
|
||||
sticky=sticky,
|
||||
priority=priority,
|
||||
callback=callback,
|
||||
identifier=identifier,
|
||||
)
|
||||
except Exception:
|
||||
# We want the "mini" function to be simple and swallow Exceptions
|
||||
# in order to be less invasive
|
||||
logger.exception("Growl error")
|
||||
|
||||
if __name__ == '__main__':
|
||||
# If we're running this module directly we're likely running it as a test
|
||||
# so extra debugging is useful
|
||||
logging.basicConfig(level = logging.INFO)
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
mini('Testing mini notification')
|
||||
|
||||
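The notify() signature above gains a custom dict for extra GNTP headers; a brief sketch of how that parameter is meant to be used (the header name and values are illustrative):

import gntp.notifier

growl = gntp.notifier.GrowlNotifier(
	applicationName='Example App',
	notifications=['Download Complete'],
	hostname='localhost',
)
growl.register()

# Keys should be prefixed with X- per the GNTP spec, although the class
# does not enforce that.
growl.notify(
	noteType='Download Complete',
	title='Movie ready',
	description='Snatched and renamed',
	custom={'X-Example-Header': 'example value'},
)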
libs/gntp/shim.py (new file, 45 lines)
@@ -0,0 +1,45 @@
# Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE

"""
Python2.5 and Python3.3 compatibility shim

Heavily inspired by the "six" library.
https://pypi.python.org/pypi/six
"""

import sys

PY3 = sys.version_info[0] == 3

if PY3:
	def b(s):
		if isinstance(s, bytes):
			return s
		return s.encode('utf8', 'replace')

	def u(s):
		if isinstance(s, bytes):
			return s.decode('utf8', 'replace')
		return s

	from io import BytesIO as StringIO
	from configparser import RawConfigParser
else:
	def b(s):
		if isinstance(s, unicode):
			return s.encode('utf8', 'replace')
		return s

	def u(s):
		if isinstance(s, unicode):
			return s
		if isinstance(s, int):
			s = str(s)
		return unicode(s, "utf8", "replace")

	from StringIO import StringIO
	from ConfigParser import RawConfigParser

b.__doc__ = "Ensure we have a byte string"
u.__doc__ = "Ensure we have a unicode string"
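A short sketch of how the shim above is used by the rest of the package: b() normalises values to byte strings for the socket layer, u() to text for regex parsing, on both Python 2 and Python 3.

import gntp.shim

payload = gntp.shim.b('GNTP/1.0 NOTIFY NONE\r\n')  # always a byte string
text = gntp.shim.u(payload)                        # always a text string

buf = gntp.shim.StringIO()  # BytesIO on Python 3, StringIO on Python 2
buf.write(payload)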
libs/gntp/version.py (new file, 4 lines)
@@ -0,0 +1,4 @@
# Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE

__version__ = '1.0.2'
@@ -20,7 +20,7 @@
|
||||
|
||||
from __future__ import unicode_literals
|
||||
|
||||
__version__ = '0.7-dev'
|
||||
__version__ = '0.6.2'
|
||||
__all__ = ['Guess', 'Language',
|
||||
'guess_file_info', 'guess_video_info',
|
||||
'guess_movie_info', 'guess_episode_info']
|
||||
@@ -76,6 +76,7 @@ from guessit.language import Language
|
||||
from guessit.matcher import IterativeMatcher
|
||||
from guessit.textutils import clean_string
|
||||
import logging
|
||||
import json
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -105,17 +106,74 @@ def _guess_filename(filename, filetype):
|
||||
|
||||
mtree = IterativeMatcher(filename, filetype=filetype)
|
||||
|
||||
m = mtree.matched()
|
||||
|
||||
second_pass_opts = []
|
||||
second_pass_transfo_opts = {}
|
||||
|
||||
# if there are multiple possible years found, we assume the first one is
|
||||
# part of the title, reparse the tree taking this into account
|
||||
years = set(n.value for n in find_nodes(mtree.match_tree, 'year'))
|
||||
if len(years) >= 2:
|
||||
mtree = IterativeMatcher(filename, filetype=filetype,
|
||||
opts=['skip_first_year'])
|
||||
second_pass_opts.append('skip_first_year')
|
||||
|
||||
to_skip_language_nodes = []
|
||||
|
||||
title_nodes = set(n for n in find_nodes(mtree.match_tree, ['title', 'series']))
|
||||
title_spans = {}
|
||||
for title_node in title_nodes:
|
||||
title_spans[title_node.span[0]] = title_node
|
||||
title_spans[title_node.span[1]] = title_node
|
||||
|
||||
for lang_key in ('language', 'subtitleLanguage'):
|
||||
langs = {}
|
||||
lang_nodes = set(n for n in find_nodes(mtree.match_tree, lang_key))
|
||||
|
||||
for lang_node in lang_nodes:
|
||||
lang = lang_node.guess.get(lang_key, None)
|
||||
if len(lang_node.value) > 3 and (lang_node.span[0] in title_spans.keys() or lang_node.span[1] in title_spans.keys()):
|
||||
# Language is next to or before the title and is not a language code. Add it to the skip list for the 2nd pass.
# if filetype is subtitle and the language appears last, just before
|
||||
# the extension, then it is likely a subtitle language
|
||||
parts = clean_string(lang_node.root.value).split()
|
||||
if m['type'] in ['moviesubtitle', 'episodesubtitle'] and (parts.index(lang_node.value) == len(parts) - 2):
|
||||
continue
|
||||
|
||||
to_skip_language_nodes.append(lang_node)
|
||||
elif not lang in langs:
|
||||
langs[lang] = lang_node
|
||||
else:
|
||||
# The same language was found. Keep the more confident one, and add others to skip for 2nd pass.
|
||||
existing_lang_node = langs[lang]
|
||||
to_skip = None
|
||||
if existing_lang_node.guess.confidence('language') >= lang_node.guess.confidence('language'):
|
||||
# lang_node is to remove
|
||||
to_skip = lang_node
|
||||
else:
|
||||
# existing_lang_node is to remove
|
||||
langs[lang] = lang_node
|
||||
to_skip = existing_lang_node
|
||||
to_skip_language_nodes.append(to_skip)
if to_skip_language_nodes:
|
||||
second_pass_transfo_opts['guess_language'] = (
|
||||
((), { 'skip': [ { 'node_idx': node.parent.node_idx,
|
||||
'span': node.span }
|
||||
for node in to_skip_language_nodes ] }))
|
||||
|
||||
if second_pass_opts or second_pass_transfo_opts:
|
||||
# 2nd pass is needed
|
||||
log.info("Running 2nd pass with options: %s" % second_pass_opts)
|
||||
log.info("Transfo options: %s" % second_pass_transfo_opts)
|
||||
mtree = IterativeMatcher(filename, filetype=filetype,
|
||||
opts=second_pass_opts,
|
||||
transfo_opts=second_pass_transfo_opts)
|
||||
|
||||
m = mtree.matched()
|
||||
|
||||
if 'language' not in m and 'subtitleLanguage' not in m:
|
||||
if 'language' not in m and 'subtitleLanguage' not in m or 'title' not in m:
|
||||
return m
|
||||
|
||||
# if we found some language, make sure we didn't cut a title or sth...
|
||||
@@ -123,51 +181,10 @@ def _guess_filename(filename, filetype):
|
||||
opts=['nolanguage', 'nocountry'])
|
||||
m2 = mtree2.matched()
if m.get('title') is None:
|
||||
return m
|
||||
|
||||
if m.get('title') != m2.get('title'):
|
||||
title = next(find_nodes(mtree.match_tree, 'title'))
|
||||
title2 = next(find_nodes(mtree2.match_tree, 'title'))
|
||||
|
||||
langs = list(find_nodes(mtree.match_tree, ['language', 'subtitleLanguage']))
|
||||
if not langs:
|
||||
return warning('A weird error happened with language detection')
|
||||
|
||||
# find the language that is likely more relevant
|
||||
for lng in langs:
|
||||
if lng.value in title2.value:
|
||||
# if the language was detected as part of a potential title,
|
||||
# look at this one in particular
|
||||
lang = lng
|
||||
break
|
||||
else:
|
||||
# pick the first one if we don't have a better choice
|
||||
lang = langs[0]
|
||||
|
||||
|
||||
# language code are rarely part of a title, and those
|
||||
# should be handled by the Language exceptions anyway
|
||||
if len(lang.value) <= 3:
|
||||
return m
# if filetype is subtitle and the language appears last, just before
|
||||
# the extension, then it is likely a subtitle language
|
||||
parts = clean_string(title.root.value).split()
|
||||
if (m['type'] in ['moviesubtitle', 'episodesubtitle'] and
|
||||
parts.index(lang.value) == len(parts) - 2):
|
||||
return m
|
||||
|
||||
# if the language was in the middle of the other potential title,
|
||||
# keep the other title (eg: The Italian Job), except if it is at the
|
||||
# very beginning, in which case we consider it an error
|
||||
if m2['title'].startswith(lang.value):
|
||||
return m
|
||||
elif lang.value in title2.value:
|
||||
return m2
|
||||
|
||||
# if a node is in an explicit group, then the correct title is probably
|
||||
# the other one
|
||||
if title.root.node_at(title.node_idx[:2]).is_explicit():
|
||||
@@ -175,9 +192,6 @@ def _guess_filename(filename, filetype):
|
||||
elif title2.root.node_at(title2.node_idx[:2]).is_explicit():
|
||||
return m
|
||||
|
||||
return warning('Not sure of the title because of the language position')
|
||||
|
||||
|
||||
return m
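The hunk above triggers a second matching pass when the first pass finds duplicate years or language nodes overlapping the title. A sketch of the option structures it builds (filename, node indices and spans are made up for illustration):

    from guessit.matcher import IterativeMatcher

    filename = 'Dark.City.1998.720p.BDRip.DTS.x264-CHD.mkv'  # illustrative
    second_pass_opts = ['skip_first_year']
    second_pass_transfo_opts = {
        'guess_language': ((), {'skip': [{'node_idx': (0, 1), 'span': (4, 11)}]}),
    }
    mtree = IterativeMatcher(filename, filetype='movie',
                             opts=second_pass_opts,
                             transfo_opts=second_pass_transfo_opts)
    print(mtree.matched().nice_string())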
@@ -24,16 +24,19 @@ from guessit import u
|
||||
from guessit import slogging, guess_file_info
|
||||
from optparse import OptionParser
|
||||
import logging
|
||||
import sys
|
||||
import os
|
||||
import locale
def detect_filename(filename, filetype, info=['filename']):
|
||||
def detect_filename(filename, filetype, info=['filename'], advanced = False):
|
||||
filename = u(filename)
|
||||
|
||||
print('For:', filename)
|
||||
print('GuessIt found:', guess_file_info(filename, filetype, info).nice_string())
|
||||
print('GuessIt found:', guess_file_info(filename, filetype, info).nice_string(advanced))
def run_demo(episodes=True, movies=True):
|
||||
def run_demo(episodes=True, movies=True, advanced=False):
|
||||
# NOTE: tests should not be added here but rather in the tests/ folder
|
||||
# this is just intended as a quick example
|
||||
if episodes:
|
||||
@@ -50,7 +53,7 @@ def run_demo(episodes=True, movies=True):
|
||||
|
||||
for f in testeps:
|
||||
print('-'*80)
|
||||
detect_filename(f, filetype='episode')
|
||||
detect_filename(f, filetype='episode', advanced=advanced)
if movies:
|
||||
@@ -77,12 +80,17 @@ def run_demo(episodes=True, movies=True):
|
||||
|
||||
for f in testmovies:
|
||||
print('-'*80)
|
||||
detect_filename(f, filetype = 'movie')
|
||||
detect_filename(f, filetype = 'movie', advanced = advanced)
def main():
|
||||
slogging.setupLogging()
|
||||
|
||||
# see http://bugs.python.org/issue2128
|
||||
if sys.version_info.major < 3 and os.name == 'nt':
|
||||
for i, a in enumerate(sys.argv):
|
||||
sys.argv[i] = a.decode(locale.getpreferredencoding())
|
||||
|
||||
parser = OptionParser(usage = 'usage: %prog [options] file1 [file2...]')
|
||||
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
|
||||
help = 'display debug output')
|
||||
@@ -92,6 +100,8 @@ def main():
|
||||
'them, comma-separated')
|
||||
parser.add_option('-t', '--type', dest = 'filetype', default = 'autodetect',
|
||||
help = 'the suggested file type: movie, episode or autodetect')
|
||||
parser.add_option('-a', '--advanced', dest = 'advanced', action='store_true', default = False,
|
||||
help = 'display advanced information for filename guesses, as json output')
|
||||
parser.add_option('-d', '--demo', action='store_true', dest='demo', default=False,
|
||||
help = 'run a few builtin tests instead of analyzing a file')
|
||||
|
||||
@@ -100,13 +110,14 @@ def main():
|
||||
logging.getLogger('guessit').setLevel(logging.DEBUG)
|
||||
|
||||
if options.demo:
|
||||
run_demo(episodes=True, movies=True)
|
||||
run_demo(episodes=True, movies=True, advanced=options.advanced)
|
||||
else:
|
||||
if args:
|
||||
for filename in args:
|
||||
detect_filename(filename,
|
||||
filetype = options.filetype,
|
||||
info = options.info.split(','))
|
||||
info = options.info.split(','),
|
||||
advanced = options.advanced)
|
||||
|
||||
else:
|
||||
parser.print_help()
|
||||
|
||||
@@ -44,13 +44,14 @@ def split_path(path):
|
||||
result = []
|
||||
while True:
|
||||
head, tail = os.path.split(path)
|
||||
headlen = len(head)
|
||||
|
||||
# on Unix systems, the root folder is '/'
|
||||
if head == '/' and tail == '':
|
||||
if head and head == '/'*headlen and tail == '':
|
||||
return ['/'] + result
|
||||
|
||||
# on Windows, the root folder is a drive letter (eg: 'C:\') or for shares \\
|
||||
if ((len(head) == 3 and head[1:] == ':\\') or (len(head) == 2 and head == '\\\\')) and tail == '':
|
||||
if ((headlen == 3 and head[1:] == ':\\') or (headlen == 2 and head == '\\\\')) and tail == '':
|
||||
return [head] + result
|
||||
|
||||
if head == '' and tail == '':
|
||||
@@ -61,6 +62,7 @@ def split_path(path):
|
||||
path = head
|
||||
continue
|
||||
|
||||
# otherwise, add the last path fragment and keep splitting
|
||||
result = [tail] + result
|
||||
path = head
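A quick check of the root handling this hunk generalizes; the expected values are my reading of the code, not captured output:

    from guessit.fileutils import split_path

    print(split_path('/movies/The Movie (2008)/movie.avi'))
    # expected: ['/', 'movies', 'The Movie (2008)', 'movie.avi']
    print(split_path('C:\\movies\\movie.avi'))
    # expected on Windows: ['C:\\', 'movies', 'movie.avi']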
@@ -41,15 +41,21 @@ class Guess(UnicodeMixin, dict):
|
||||
confidence = kwargs.pop('confidence')
|
||||
except KeyError:
|
||||
confidence = 0
|
||||
|
||||
try:
|
||||
raw = kwargs.pop('raw')
|
||||
except KeyError:
|
||||
raw = None
|
||||
|
||||
dict.__init__(self, *args, **kwargs)
|
||||
|
||||
self._confidence = {}
|
||||
self._raw = {}
|
||||
for prop in self:
|
||||
self._confidence[prop] = confidence
|
||||
|
||||
|
||||
def to_dict(self):
|
||||
self._raw[prop] = raw
|
||||
|
||||
def to_dict(self, advanced=False):
|
||||
data = dict(self)
|
||||
for prop, value in data.items():
|
||||
if isinstance(value, datetime.date):
|
||||
@@ -58,46 +64,65 @@ class Guess(UnicodeMixin, dict):
|
||||
data[prop] = u(value)
|
||||
elif isinstance(value, list):
|
||||
data[prop] = [u(x) for x in value]
|
||||
if advanced:
|
||||
data[prop] = {"value": data[prop], "raw": self.raw(prop), "confidence": self.confidence(prop)}
|
||||
|
||||
return data
|
||||
|
||||
def nice_string(self):
|
||||
data = self.to_dict()
|
||||
|
||||
parts = json.dumps(data, indent=4).split('\n')
|
||||
for i, p in enumerate(parts):
|
||||
if p[:5] != ' "':
|
||||
continue
|
||||
|
||||
prop = p.split('"')[1]
|
||||
parts[i] = (' [%.2f] "' % self.confidence(prop)) + p[5:]
|
||||
|
||||
return '\n'.join(parts)
|
||||
def nice_string(self, advanced=False):
|
||||
if advanced:
|
||||
data = self.to_dict(advanced)
|
||||
return json.dumps(data, indent=4)
|
||||
else:
|
||||
data = self.to_dict()
|
||||
|
||||
parts = json.dumps(data, indent=4).split('\n')
|
||||
for i, p in enumerate(parts):
|
||||
if p[:5] != ' "':
|
||||
continue
|
||||
|
||||
prop = p.split('"')[1]
|
||||
parts[i] = (' [%.2f] "' % self.confidence(prop)) + p[5:]
|
||||
|
||||
return '\n'.join(parts)
|
||||
|
||||
def __unicode__(self):
|
||||
return u(self.to_dict())
|
||||
|
||||
def confidence(self, prop):
|
||||
return self._confidence.get(prop, -1)
|
||||
|
||||
def raw(self, prop):
|
||||
return self._raw.get(prop, None)
|
||||
|
||||
def set(self, prop, value, confidence=None):
|
||||
def set(self, prop, value, confidence=None, raw=None):
|
||||
self[prop] = value
|
||||
if confidence is not None:
|
||||
self._confidence[prop] = confidence
|
||||
if raw is not None:
|
||||
self._raw[prop] = raw
|
||||
|
||||
def set_confidence(self, prop, value):
|
||||
self._confidence[prop] = value
|
||||
|
||||
def set_raw(self, prop, value):
|
||||
self._raw[prop] = value
|
||||
|
||||
def update(self, other, confidence=None):
|
||||
def update(self, other, confidence=None, raw=None):
|
||||
dict.update(self, other)
|
||||
if isinstance(other, Guess):
|
||||
for prop in other:
|
||||
self._confidence[prop] = other.confidence(prop)
|
||||
self._raw[prop] = other.raw(prop)
|
||||
|
||||
if confidence is not None:
|
||||
for prop in other:
|
||||
self._confidence[prop] = confidence
|
||||
|
||||
if raw is not None:
|
||||
for prop in other:
|
||||
self._raw[prop] = raw
|
||||
|
||||
def update_highest_confidence(self, other):
|
||||
"""Update this guess with the values from the given one. In case
|
||||
there is a property present in both, only the one with the highest one
|
||||
@@ -110,6 +135,7 @@ class Guess(UnicodeMixin, dict):
|
||||
continue
|
||||
self[prop] = other[prop]
|
||||
self._confidence[prop] = other.confidence(prop)
|
||||
self._raw[prop] = other.raw(prop)
|
||||
|
||||
|
||||
def choose_int(g1, g2):
|
||||
@@ -181,7 +207,7 @@ def choose_string(g1, g2):
|
||||
elif v1l in v2l:
|
||||
return (v1, combined_prob)
|
||||
|
||||
# in case of conflict, return the one with highest priority
|
||||
# in case of conflict, return the one with highest confidence
|
||||
else:
|
||||
if c1 > c2:
|
||||
return (v1, c1 - c2)
|
||||
@@ -288,7 +314,8 @@ def merge_all(guesses, append=None):
|
||||
result.set(prop, result.get(prop, []) + [g[prop]],
|
||||
# TODO: what to do with confidence here? maybe an
|
||||
# arithmetic mean...
|
||||
confidence=g.confidence(prop))
|
||||
confidence=g.confidence(prop),
|
||||
raw=g.raw(prop))
|
||||
|
||||
del g[prop]
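With the new raw bookkeeping, every property remembers the exact substring it was parsed from, alongside its confidence. A small sketch (Guess imported as the package exposes it):

    from guessit import Guess

    g = Guess({'screenSize': '720p'}, confidence=0.8, raw='720p')
    g.set('videoCodec', 'XviD', confidence=1.0, raw='xvid')
    print(g.confidence('videoCodec'))    # 1.0
    print(g.raw('screenSize'))           # '720p'
    print(g.nice_string(advanced=True))  # JSON with value/raw/confidence per property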
@@ -296,7 +296,7 @@ UNDETERMINED = Language('und')
|
||||
ALL_LANGUAGES = frozenset(Language(lng) for lng in lng_all_names) - frozenset([UNDETERMINED])
|
||||
ALL_LANGUAGES_NAMES = lng_all_names
|
||||
|
||||
def search_language(string, lang_filter=None):
|
||||
def search_language(string, lang_filter=None, skip=None):
|
||||
"""Looks for language patterns, and if found return the language object,
|
||||
its group span and an associated confidence.
|
||||
|
||||
@@ -345,6 +345,16 @@ def search_language(string, lang_filter=None):
|
||||
|
||||
if pos != -1:
|
||||
end = pos + len(lang)
|
||||
|
||||
# skip if the span is in the skip list
|
||||
while skip and (pos - 1, end - 1) in skip:
|
||||
pos = slow.find(lang, end)
|
||||
if pos == -1:
|
||||
continue
|
||||
end = pos + len(lang)
|
||||
if pos == -1:
|
||||
continue
|
||||
|
||||
# make sure our word is always surrounded by separators
|
||||
if slow[pos - 1] not in sep or slow[end] not in sep:
|
||||
continue
|
||||
|
||||
@@ -21,14 +21,14 @@
|
||||
from __future__ import unicode_literals
|
||||
from guessit import PY3, u, base_text_type
|
||||
from guessit.matchtree import MatchTree
|
||||
from guessit.textutils import normalize_unicode
|
||||
from guessit.textutils import normalize_unicode, clean_string
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class IterativeMatcher(object):
|
||||
def __init__(self, filename, filetype='autodetect', opts=None):
|
||||
def __init__(self, filename, filetype='autodetect', opts=None, transfo_opts=None):
|
||||
"""An iterative matcher tries to match different patterns that appear
|
||||
in the filename.
|
||||
|
||||
@@ -38,7 +38,8 @@ class IterativeMatcher(object):
|
||||
a movie.
|
||||
|
||||
The recognized 'filetype' values are:
|
||||
[ autodetect, subtitle, movie, moviesubtitle, episode, episodesubtitle ]
|
||||
[ autodetect, subtitle, info, movie, moviesubtitle, movieinfo, episode,
|
||||
episodesubtitle, episodeinfo ]
|
||||
|
||||
|
||||
The IterativeMatcher works mainly in 2 steps:
|
||||
@@ -61,15 +62,20 @@ class IterativeMatcher(object):
|
||||
it corresponds to a video codec, denoted by the letter 'v' in the 4th line.
|
||||
(for more info, see guess.matchtree.to_string)
|
||||
|
||||
Second, it tries to merge all this information into a single object
|
||||
containing all the found properties, and does some (basic) conflict
|
||||
resolution when they arise.
|
||||
|
||||
Second, it tries to merge all this information into a single object
|
||||
containing all the found properties, and does some (basic) conflict
|
||||
resolution when they arise.
|
||||
|
||||
When you create the Matcher, you can pass it:
|
||||
- a list 'opts' of option names, that act as global flags
|
||||
- a dict 'transfo_opts' of { transfo_name: (transfo_args, transfo_kwargs) }
|
||||
with which to call the transfo.process() function.
|
||||
"""
|
||||
|
||||
valid_filetypes = ('autodetect', 'subtitle', 'video',
|
||||
'movie', 'moviesubtitle',
|
||||
'episode', 'episodesubtitle')
|
||||
valid_filetypes = ('autodetect', 'subtitle', 'info', 'video',
|
||||
'movie', 'moviesubtitle', 'movieinfo',
|
||||
'episode', 'episodesubtitle', 'episodeinfo')
|
||||
if filetype not in valid_filetypes:
|
||||
raise ValueError("filetype needs to be one of %s" % valid_filetypes)
|
||||
if not PY3 and not isinstance(filename, unicode):
|
||||
@@ -80,10 +86,22 @@ class IterativeMatcher(object):
|
||||
|
||||
if opts is None:
|
||||
opts = []
|
||||
elif isinstance(opts, base_text_type):
|
||||
opts = opts.split()
|
||||
if not isinstance(opts, list):
|
||||
raise ValueError('opts must be a list of option names! Received: type=%s val=%s',
|
||||
type(opts), opts)
|
||||
|
||||
if transfo_opts is None:
|
||||
transfo_opts = {}
|
||||
if not isinstance(transfo_opts, dict):
|
||||
raise ValueError('transfo_opts must be a dict of { transfo_name: (args, kwargs) }. '+
|
||||
'Received: type=%s val=%s', type(transfo_opts), transfo_opts)
|
||||
|
||||
self.match_tree = MatchTree(filename)
|
||||
|
||||
# sanity check: make sure we don't process a (mostly) empty string
|
||||
if clean_string(filename) == '':
|
||||
return
|
||||
|
||||
mtree = self.match_tree
|
||||
mtree.guess.set('type', filetype, confidence=1.0)
|
||||
|
||||
@@ -91,7 +109,11 @@ class IterativeMatcher(object):
|
||||
transfo = __import__('guessit.transfo.' + transfo_name,
|
||||
globals=globals(), locals=locals(),
|
||||
fromlist=['process'], level=0)
|
||||
transfo.process(mtree, *args, **kwargs)
|
||||
default_args, default_kwargs = transfo_opts.get(transfo_name, ((), {}))
|
||||
all_args = args or default_args
|
||||
all_kwargs = dict(default_kwargs)
|
||||
all_kwargs.update(kwargs) # keep all kwargs merged together
|
||||
transfo.process(mtree, *all_args, **all_kwargs)
|
||||
|
||||
# 1- first split our path into dirs + basename + ext
|
||||
apply_transfo('split_path_components')
|
||||
@@ -111,7 +133,7 @@ class IterativeMatcher(object):
|
||||
# - language before episodes_rexps
|
||||
# - properties before language (eg: he-aac vs hebrew)
|
||||
# - release_group before properties (eg: XviD-?? vs xvid)
|
||||
if mtree.guess['type'] in ('episode', 'episodesubtitle'):
|
||||
if mtree.guess['type'] in ('episode', 'episodesubtitle', 'episodeinfo'):
|
||||
strategy = [ 'guess_date', 'guess_website', 'guess_release_group',
|
||||
'guess_properties', 'guess_language',
|
||||
'guess_video_rexps',
|
||||
@@ -124,6 +146,7 @@ class IterativeMatcher(object):
|
||||
if 'nolanguage' in opts:
|
||||
strategy.remove('guess_language')
|
||||
|
||||
|
||||
for name in strategy:
|
||||
apply_transfo(name)
|
||||
|
||||
@@ -143,7 +166,7 @@ class IterativeMatcher(object):
|
||||
|
||||
# 5- try to identify the remaining unknown groups by looking at their
|
||||
# position relative to other known elements
|
||||
if mtree.guess['type'] in ('episode', 'episodesubtitle'):
|
||||
if mtree.guess['type'] in ('episode', 'episodesubtitle', 'episodeinfo'):
|
||||
apply_transfo('guess_episode_info_from_position')
|
||||
else:
|
||||
apply_transfo('guess_movie_title_from_position')
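Besides the transfo_opts dict, the constructor now also accepts opts as a single whitespace-separated string; a small sketch of that convenience path (filename is illustrative):

    from guessit.matcher import IterativeMatcher

    m = IterativeMatcher('Show.Name.2x13.HDTV.XviD-GRP.avi',
                         filetype='episode',
                         opts='nolanguage nocountry')  # split on whitespace internally
    print(m.matched().nice_string())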
@@ -25,6 +25,8 @@ import re
|
||||
|
||||
subtitle_exts = [ 'srt', 'idx', 'sub', 'ssa' ]
|
||||
|
||||
info_exts = [ 'nfo' ]
|
||||
|
||||
video_exts = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'm4v', 'mk2',
|
||||
'mka', 'mkv', 'mov', 'mp4', 'mp4a', 'mpeg', 'mpg', 'ogg', 'ogm',
|
||||
'ogv', 'qt', 'ra', 'ram', 'rm', 'ts', 'wav', 'webm', 'wma', 'wmv']
|
||||
@@ -32,7 +34,7 @@ video_exts = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'm4v', 'mk2',
|
||||
group_delimiters = [ '()', '[]', '{}' ]
|
||||
|
||||
# separator character regexp
|
||||
sep = r'[][)(}{+ /\._-]' # regexp art, hehe :D
|
||||
sep = r'[][,)(}{+ /\._-]' # regexp art, hehe :D
|
||||
|
||||
# character used to represent a deleted char (when matching groups)
|
||||
deleted = '_'
|
||||
@@ -49,7 +51,7 @@ episode_rexps = [ # ... Season 2 ...
|
||||
#(r'[Ss](?P<season>[0-9]{1,3})[^0-9]?(?P<bonusNumber>(?:-?[xX-][0-9]{1,3})+)[^0-9]', 1.0, (0, -1)),
|
||||
|
||||
# ... 2x13 ...
|
||||
(r'[^0-9](?P<season>[0-9]{1,2})[^0-9]?(?P<episodeNumber>(?:-?[xX][0-9]{1,3})+)[^0-9]', 1.0, (1, -1)),
|
||||
(r'[^0-9](?P<season>[0-9]{1,2})[^0-9 .-]?(?P<episodeNumber>(?:-?[xX][0-9]{1,3})+)[^0-9]', 1.0, (1, -1)),
|
||||
|
||||
# ... s02 ...
|
||||
#(sep + r's(?P<season>[0-9]{1,2})' + sep, 0.6, (1, -1)),
|
||||
@@ -122,9 +124,12 @@ prop_multi = { 'format': { 'DVD': [ 'DVD', 'DVD-Rip', 'VIDEO-TS', 'DVDivX' ],
|
||||
'VHS': [ 'VHS' ],
|
||||
'WEB-DL': [ 'WEB-DL' ] },
|
||||
|
||||
'is3D': { True: [ '3D' ] },
|
||||
|
||||
'screenSize': { '480p': [ '480[pi]?' ],
|
||||
'720p': [ '720[pi]?' ],
|
||||
'1080p': [ '1080[pi]?' ] },
|
||||
'1080i': [ '1080i' ],
|
||||
'1080p': [ '1080p', '1080[^i]' ] },
|
||||
|
||||
'videoCodec': { 'XviD': [ 'Xvid' ],
|
||||
'DivX': [ 'DVDivX', 'DivX' ],
|
||||
@@ -140,7 +145,7 @@ prop_multi = { 'format': { 'DVD': [ 'DVD', 'DVD-Rip', 'VIDEO-TS', 'DVDivX' ],
|
||||
'DTS': [ 'DTS' ],
|
||||
'AAC': [ 'He-AAC', 'AAC-He', 'AAC' ] },
|
||||
|
||||
'audioChannels': { '5.1': [ r'5\.1', 'DD5[\._ ]1', '5ch' ] },
|
||||
'audioChannels': { '5.1': [ r'5\.1', 'DD5[._ ]1', '5ch' ] },
|
||||
|
||||
'episodeFormat': { 'Minisode': [ 'Minisodes?' ] }
|
||||
|
||||
@@ -170,7 +175,7 @@ prop_single = { 'releaseGroup': [ 'ESiR', 'WAF', 'SEPTiC', r'\[XCT\]', 'iNT', 'P
|
||||
}
|
||||
|
||||
_dash = '-'
|
||||
_psep = '[-\. _]?'
|
||||
_psep = '[-. _]?'
|
||||
|
||||
def _to_rexp(prop):
|
||||
return re.compile(prop.replace(_dash, _psep), re.IGNORECASE)
|
||||
@@ -237,8 +242,9 @@ def canonical_form(string):
|
||||
def compute_canonical_form(property_name, value):
|
||||
"""Return the canonical form of a property given its type if it is a valid
|
||||
one, None otherwise."""
|
||||
for canonical_form, rexps in properties_rexps[property_name].items():
|
||||
for rexp in rexps:
|
||||
if rexp.match(value):
|
||||
return canonical_form
|
||||
if isinstance(value, basestring):
|
||||
for canonical_form, rexps in properties_rexps[property_name].items():
|
||||
for rexp in rexps:
|
||||
if rexp.match(value):
|
||||
return canonical_form
|
||||
return None
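To see what the tightened '2x13' pattern from episode_rexps (earlier in this file's hunks) accepts, a standalone check; the expected output is my reading of the regexp:

    import re

    rexp = r'[^0-9](?P<season>[0-9]{1,2})[^0-9 .-]?(?P<episodeNumber>(?:-?[xX][0-9]{1,3})+)[^0-9]'
    m = re.search(rexp, ' Show.Name.2x13.avi ', re.IGNORECASE)
    print(m.groupdict())  # expected: {'season': '2', 'episodeNumber': 'x13'}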
@@ -31,14 +31,15 @@ RED_FONT = "\x1B[0;31m"
|
||||
RESET_FONT = "\x1B[0m"
|
||||
|
||||
|
||||
def setupLogging(colored=True, with_time=False, with_thread=False, filename=None):
|
||||
def setupLogging(colored=True, with_time=False, with_thread=False, filename=None, with_lineno=False):
|
||||
"""Set up a nice colored logger as the main application logger."""
|
||||
|
||||
class SimpleFormatter(logging.Formatter):
|
||||
def __init__(self, with_time, with_thread):
|
||||
self.fmt = (('%(asctime)s ' if with_time else '') +
|
||||
'%(levelname)-8s ' +
|
||||
'[%(name)s:%(funcName)s]' +
|
||||
'[%(name)s:%(funcName)s' +
|
||||
(':%(lineno)s' if with_lineno else '') + ']' +
|
||||
('[%(threadName)s]' if with_thread else '') +
|
||||
' -- %(message)s')
|
||||
logging.Formatter.__init__(self, self.fmt)
|
||||
@@ -47,7 +48,8 @@ def setupLogging(colored=True, with_time=False, with_thread=False, filename=None
|
||||
def __init__(self, with_time, with_thread):
|
||||
self.fmt = (('%(asctime)s ' if with_time else '') +
|
||||
'-CC-%(levelname)-8s ' +
|
||||
BLUE_FONT + '[%(name)s:%(funcName)s]' +
|
||||
BLUE_FONT + '[%(name)s:%(funcName)s' +
|
||||
(':%(lineno)s' if with_lineno else '') + ']' +
|
||||
RESET_FONT + ('[%(threadName)s]' if with_thread else '') +
|
||||
' -- %(message)s')
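The new with_lineno flag threads through both formatters; enabling it is a one-liner (sketch):

    from guessit import slogging

    slogging.setupLogging(with_lineno=True)  # appends :<lineno> after the function name in every log line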
@@ -43,10 +43,13 @@ def strip_brackets(s):
|
||||
return s
|
||||
|
||||
|
||||
def clean_string(s):
|
||||
for c in sep[:-2]: # do not remove dashes ('-')
|
||||
s = s.replace(c, ' ')
|
||||
parts = s.split()
|
||||
def clean_string(st):
|
||||
for c in sep:
|
||||
# do not remove certain chars
|
||||
if c in ['-', ',']:
|
||||
continue
|
||||
st = st.replace(c, ' ')
|
||||
parts = st.split()
|
||||
result = ' '.join(p for p in parts if p != '')
|
||||
|
||||
# now also remove dashes on the outer part of the string
|
||||
|
||||
@@ -28,7 +28,7 @@ log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def found_property(node, name, confidence):
|
||||
node.guess = Guess({name: node.clean_value}, confidence=confidence)
|
||||
node.guess = Guess({name: node.clean_value}, confidence=confidence, raw=node.value)
|
||||
log.debug('Found with confidence %.2f: %s' % (confidence, node.guess))
|
||||
|
||||
|
||||
@@ -52,11 +52,17 @@ def format_guess(guess):
|
||||
|
||||
def find_and_split_node(node, strategy, logger):
|
||||
string = ' %s ' % node.value # add sentinels
|
||||
for matcher, confidence in strategy:
|
||||
for matcher, confidence, args, kwargs in strategy:
|
||||
all_args = [string]
|
||||
if getattr(matcher, 'use_node', False):
|
||||
result, span = matcher(string, node)
|
||||
all_args.append(node)
|
||||
if args:
|
||||
all_args.append(args)
|
||||
|
||||
if kwargs:
|
||||
result, span = matcher(*all_args, **kwargs)
|
||||
else:
|
||||
result, span = matcher(string)
|
||||
result, span = matcher(*all_args)
|
||||
|
||||
if result:
|
||||
# readjust span to compensate for sentinels
|
||||
@@ -69,7 +75,7 @@ def find_and_split_node(node, strategy, logger):
|
||||
if confidence is None:
|
||||
confidence = 1.0
|
||||
|
||||
guess = format_guess(Guess(result, confidence=confidence))
|
||||
guess = format_guess(Guess(result, confidence=confidence, raw=string[span[0] + 1:span[1] + 1]))
|
||||
msg = 'Found with confidence %.2f: %s' % (confidence, guess)
|
||||
(logger or log).debug(msg)
|
||||
|
||||
@@ -84,10 +90,12 @@ def find_and_split_node(node, strategy, logger):
|
||||
|
||||
|
||||
class SingleNodeGuesser(object):
|
||||
def __init__(self, guess_func, confidence, logger=None):
|
||||
def __init__(self, guess_func, confidence, logger, *args, **kwargs):
|
||||
self.guess_func = guess_func
|
||||
self.confidence = confidence
|
||||
self.logger = logger
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
|
||||
def process(self, mtree):
|
||||
# strategy is a list of pairs (guesser, confidence)
|
||||
@@ -95,7 +103,7 @@ class SingleNodeGuesser(object):
|
||||
# it will override it, otherwise it will leave the guess confidence
|
||||
# - if the guesser returns a simple dict as a guess and confidence is
|
||||
# specified, it will use it, or 1.0 otherwise
|
||||
strategy = [ (self.guess_func, self.confidence) ]
|
||||
strategy = [ (self.guess_func, self.confidence, self.args, self.kwargs) ]
|
||||
|
||||
for node in mtree.unidentified_leaves():
|
||||
find_and_split_node(node, strategy, self.logger)
|
||||
|
||||
@@ -45,4 +45,4 @@ def process(mtree):
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
node.guess = Guess(country=country, confidence=1.0)
|
||||
node.guess = Guess(country=country, confidence=1.0, raw=c)
|
||||
|
||||
@@ -40,27 +40,22 @@ def guess_episodes_rexps(string):
|
||||
for rexp, confidence, span_adjust in episode_rexps:
|
||||
match = re.search(rexp, string, re.IGNORECASE)
|
||||
if match:
|
||||
guess = Guess(match.groupdict(), confidence=confidence)
|
||||
span = (match.start() + span_adjust[0],
|
||||
span = (match.start() + span_adjust[0],
|
||||
match.end() + span_adjust[1])
|
||||
|
||||
# episodes which have a season > 30 are most likely errors
|
||||
# (Simpsons is at 24!)
|
||||
if int(guess.get('season', 0)) > 30:
|
||||
continue
|
||||
guess = Guess(match.groupdict(), confidence=confidence, raw=string[span[0]:span[1]])
|
||||
|
||||
# decide whether we have only a single episode number or an
|
||||
# episode list
|
||||
if guess.get('episodeNumber'):
|
||||
eplist = number_list(guess['episodeNumber'])
|
||||
guess.set('episodeNumber', eplist[0], confidence=confidence)
|
||||
guess.set('episodeNumber', eplist[0], confidence=confidence, raw=string[span[0]:span[1]])
|
||||
|
||||
if len(eplist) > 1:
|
||||
guess.set('episodeList', eplist, confidence=confidence)
|
||||
guess.set('episodeList', eplist, confidence=confidence, raw=string[span[0]:span[1]])
|
||||
|
||||
if guess.get('bonusNumber'):
|
||||
eplist = number_list(guess['bonusNumber'])
|
||||
guess.set('bonusNumber', eplist[0], confidence=confidence)
|
||||
guess.set('bonusNumber', eplist[0], confidence=confidence, raw=string[span[0]:span[1]])
|
||||
|
||||
return guess, span
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
|
||||
from __future__ import unicode_literals
|
||||
from guessit import Guess
|
||||
from guessit.patterns import (subtitle_exts, video_exts, episode_rexps,
|
||||
from guessit.patterns import (subtitle_exts, info_exts, video_exts, episode_rexps,
|
||||
find_properties, compute_canonical_form)
|
||||
from guessit.date import valid_year
|
||||
from guessit.textutils import clean_string
|
||||
@@ -53,12 +53,16 @@ def guess_filetype(mtree, filetype):
|
||||
filetype_container[0] = 'episode'
|
||||
elif filetype_container[0] == 'subtitle':
|
||||
filetype_container[0] = 'episodesubtitle'
|
||||
elif filetype_container[0] == 'info':
|
||||
filetype_container[0] = 'episodeinfo'
|
||||
|
||||
def upgrade_movie():
|
||||
if filetype_container[0] == 'video':
|
||||
filetype_container[0] = 'movie'
|
||||
elif filetype_container[0] == 'subtitle':
|
||||
filetype_container[0] = 'moviesubtitle'
|
||||
elif filetype_container[0] == 'info':
|
||||
filetype_container[0] = 'movieinfo'
|
||||
|
||||
def upgrade_subtitle():
|
||||
if 'movie' in filetype_container[0]:
|
||||
@@ -68,6 +72,14 @@ def guess_filetype(mtree, filetype):
|
||||
else:
|
||||
filetype_container[0] = 'subtitle'
|
||||
|
||||
def upgrade_info():
|
||||
if 'movie' in filetype_container[0]:
|
||||
filetype_container[0] = 'movieinfo'
|
||||
elif 'episode' in filetype_container[0]:
|
||||
filetype_container[0] = 'episodeinfo'
|
||||
else:
|
||||
filetype_container[0] = 'info'
|
||||
|
||||
def upgrade(type='unknown'):
|
||||
if filetype_container[0] == 'autodetect':
|
||||
filetype_container[0] = type
|
||||
@@ -78,6 +90,9 @@ def guess_filetype(mtree, filetype):
|
||||
if fileext in subtitle_exts:
|
||||
upgrade_subtitle()
|
||||
other = { 'container': fileext }
|
||||
elif fileext in info_exts:
|
||||
upgrade_info()
|
||||
other = { 'container': fileext }
|
||||
elif fileext in video_exts:
|
||||
upgrade(type='video')
|
||||
other = { 'container': fileext }
|
||||
@@ -104,17 +119,20 @@ def guess_filetype(mtree, filetype):
|
||||
fname = clean_string(filename).lower()
|
||||
for m in MOVIES:
|
||||
if m in fname:
|
||||
log.debug('Found in exception list of movies -> type = movie')
|
||||
upgrade_movie()
|
||||
for s in SERIES:
|
||||
if s in fname:
|
||||
log.debug('Found in exception list of series -> type = episode')
|
||||
upgrade_episode()
|
||||
|
||||
# now look whether there are some specific hints for episode vs movie
|
||||
if filetype_container[0] in ('video', 'subtitle'):
|
||||
if filetype_container[0] in ('video', 'subtitle', 'info'):
|
||||
# if we have an episode_rexp (eg: s02e13), it is an episode
|
||||
for rexp, _, _ in episode_rexps:
|
||||
match = re.search(rexp, filename, re.IGNORECASE)
|
||||
if match:
|
||||
log.debug('Found matching regexp: "%s" (string = "%s") -> type = episode', rexp, match.group())
|
||||
upgrade_episode()
|
||||
break
|
||||
|
||||
@@ -133,24 +151,29 @@ def guess_filetype(mtree, filetype):
|
||||
possible = False
|
||||
|
||||
if possible:
|
||||
log.debug('Found possible episode number: %s (from string "%s") -> type = episode', epnumber, match.group())
|
||||
upgrade_episode()
|
||||
|
||||
# if we have certain properties characteristic of episodes, it is an ep
|
||||
for prop, value, _, _ in find_properties(filename):
|
||||
log.debug('prop: %s = %s' % (prop, value))
|
||||
if prop == 'episodeFormat':
|
||||
log.debug('Found characteristic property of episodes: %s = "%s"', prop, value)
|
||||
upgrade_episode()
|
||||
break
|
||||
|
||||
elif compute_canonical_form('format', value) == 'DVB':
|
||||
log.debug('Found characteristic property of episodes: %s = "%s"', prop, value)
|
||||
upgrade_episode()
|
||||
break
|
||||
|
||||
# origin-specific type
|
||||
if 'tvu.org.ru' in filename:
|
||||
log.debug('Found characteristic property of episodes: %s = "%s"', prop, value)
|
||||
upgrade_episode()
|
||||
|
||||
# if no episode info found, assume it's a movie
|
||||
log.debug('Nothing characteristic found, assuming type = movie')
|
||||
upgrade_movie()
|
||||
|
||||
filetype = filetype_container[0]
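End to end, the new 'info' handling means a .nfo file is now classified as movieinfo/episodeinfo instead of falling back to plain movie. A quick sketch with the public helper; the expected values are my reading of the code:

    from guessit import guess_file_info

    g = guess_file_info('The.Movie.2008.DVDRip.XviD-GRP.nfo', 'autodetect')
    print(g['type'])           # expected: 'movieinfo'
    print(g.get('container'))  # expected: 'nfo'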
@@ -22,22 +22,34 @@ from __future__ import unicode_literals
|
||||
from guessit import Guess
|
||||
from guessit.transfo import SingleNodeGuesser
|
||||
from guessit.language import search_language
|
||||
from guessit.textutils import clean_string, find_words
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def guess_language(string):
|
||||
language, span, confidence = search_language(string)
|
||||
def guess_language(string, node, skip=None):
|
||||
if skip:
|
||||
relative_skip = []
|
||||
for entry in skip:
|
||||
node_idx = entry['node_idx']
|
||||
span = entry['span']
|
||||
if node_idx == node.node_idx[:len(node_idx)]:
|
||||
relative_span = (span[0] - node.offset + 1, span[1] - node.offset + 1)
|
||||
relative_skip.append(relative_span)
|
||||
skip = relative_skip
|
||||
|
||||
language, span, confidence = search_language(string, skip=skip)
|
||||
if language:
|
||||
return (Guess({'language': language},
|
||||
confidence=confidence),
|
||||
confidence=confidence,
|
||||
raw= string[span[0]:span[1]]),
|
||||
span)
|
||||
|
||||
return None, None
|
||||
|
||||
guess_language.use_node = True
|
||||
|
||||
def process(mtree):
|
||||
SingleNodeGuesser(guess_language, None, log).process(mtree)
|
||||
|
||||
def process(mtree, *args, **kwargs):
|
||||
SingleNodeGuesser(guess_language, None, log, *args, **kwargs).process(mtree)
|
||||
# Note: 'language' is promoted to 'subtitleLanguage' in the post_process transfo
@@ -29,7 +29,8 @@ log = logging.getLogger(__name__)
|
||||
def process(mtree):
|
||||
def found_property(node, name, value, confidence):
|
||||
node.guess = Guess({ name: value },
|
||||
confidence=confidence)
|
||||
confidence=confidence,
|
||||
raw=value)
|
||||
log.debug('Found with confidence %.2f: %s' % (confidence, node.guess))
|
||||
|
||||
def found_title(node, confidence):
|
||||
|
||||
@@ -38,9 +38,10 @@ def guess_video_rexps(string):
|
||||
# the soonest that we can catch it)
|
||||
if metadata.get('cdNumberTotal', -1) is None:
|
||||
del metadata['cdNumberTotal']
|
||||
return (Guess(metadata, confidence=confidence),
|
||||
(match.start() + span_adjust[0],
|
||||
match.end() + span_adjust[1] - 2))
|
||||
span = (match.start() + span_adjust[0],
|
||||
match.end() + span_adjust[1] - 2)
|
||||
return (Guess(metadata, confidence=confidence, raw=string[span[0]:span[1]]),
|
||||
span)
|
||||
|
||||
return None, None
|
||||
|
||||
|
||||
@@ -48,9 +48,9 @@ def guess_weak_episodes_rexps(string, node):
|
||||
continue
|
||||
return Guess({ 'season': season,
|
||||
'episodeNumber': epnum },
|
||||
confidence=0.6), span
|
||||
confidence=0.6, raw=string[span[0]:span[1]]), span
|
||||
else:
|
||||
return Guess(metadata, confidence=0.3), span
|
||||
return Guess(metadata, confidence=0.3, raw=string[span[0]:span[1]]), span
|
||||
|
||||
return None, None
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""
|
||||
"""
|
||||
HTML parsing library based on the WHATWG "HTML5"
|
||||
specification. The parser is designed to be compatible with existing
|
||||
HTML found in the wild and implements well-defined error recovery that
|
||||
@@ -8,10 +8,16 @@ Example usage:
|
||||
|
||||
import html5lib
|
||||
f = open("my_document.html")
|
||||
tree = html5lib.parse(f)
|
||||
tree = html5lib.parse(f)
|
||||
"""
|
||||
__version__ = "0.95-dev"
|
||||
from html5parser import HTMLParser, parse, parseFragment
|
||||
from treebuilders import getTreeBuilder
|
||||
from treewalkers import getTreeWalker
|
||||
from serializer import serialize
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from .html5parser import HTMLParser, parse, parseFragment
|
||||
from .treebuilders import getTreeBuilder
|
||||
from .treewalkers import getTreeWalker
|
||||
from .serializer import serialize
|
||||
|
||||
__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
|
||||
"getTreeWalker", "serialize"]
|
||||
__version__ = "0.99"
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,3 +1,5 @@
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
|
||||
class Filter(object):
|
||||
def __init__(self, source):
|
||||
|
||||
20  libs/html5lib/filters/alphabeticalattributes.py  Normal file
@@ -0,0 +1,20 @@
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from . import _base
|
||||
|
||||
try:
|
||||
from collections import OrderedDict
|
||||
except ImportError:
|
||||
from ordereddict import OrderedDict
|
||||
|
||||
|
||||
class Filter(_base.Filter):
|
||||
def __iter__(self):
|
||||
for token in _base.Filter.__iter__(self):
|
||||
if token["type"] in ("StartTag", "EmptyTag"):
|
||||
attrs = OrderedDict()
|
||||
for name, value in sorted(token["data"].items(),
|
||||
key=lambda x: x[0]):
|
||||
attrs[name] = value
|
||||
token["data"] = attrs
|
||||
yield token
|
|
||||
#
|
||||
# The goal is to finally have a form filler where you pass data for
|
||||
# each form, using the algorithm for "Seeding a form with initial values"
|
||||
# See http://www.whatwg.org/specs/web-forms/current-work/#seeding
|
||||
#
|
||||
|
||||
import _base
|
||||
|
||||
from html5lib.constants import spaceCharacters
|
||||
spaceCharacters = u"".join(spaceCharacters)
|
||||
|
||||
class SimpleFilter(_base.Filter):
|
||||
def __init__(self, source, fieldStorage):
|
||||
_base.Filter.__init__(self, source)
|
||||
self.fieldStorage = fieldStorage
|
||||
|
||||
def __iter__(self):
|
||||
field_indices = {}
|
||||
state = None
|
||||
field_name = None
|
||||
for token in _base.Filter.__iter__(self):
|
||||
type = token["type"]
|
||||
if type in ("StartTag", "EmptyTag"):
|
||||
name = token["name"].lower()
|
||||
if name == "input":
|
||||
field_name = None
|
||||
field_type = None
|
||||
input_value_index = -1
|
||||
input_checked_index = -1
|
||||
for i,(n,v) in enumerate(token["data"]):
|
||||
n = n.lower()
|
||||
if n == u"name":
|
||||
field_name = v.strip(spaceCharacters)
|
||||
elif n == u"type":
|
||||
field_type = v.strip(spaceCharacters)
|
||||
elif n == u"checked":
|
||||
input_checked_index = i
|
||||
elif n == u"value":
|
||||
input_value_index = i
|
||||
|
||||
value_list = self.fieldStorage.getlist(field_name)
|
||||
field_index = field_indices.setdefault(field_name, 0)
|
||||
if field_index < len(value_list):
|
||||
value = value_list[field_index]
|
||||
else:
|
||||
value = ""
|
||||
|
||||
if field_type in (u"checkbox", u"radio"):
|
||||
if value_list:
|
||||
if token["data"][input_value_index][1] == value:
|
||||
if input_checked_index < 0:
|
||||
token["data"].append((u"checked", u""))
|
||||
field_indices[field_name] = field_index + 1
|
||||
elif input_checked_index >= 0:
|
||||
del token["data"][input_checked_index]
|
||||
|
||||
elif field_type not in (u"button", u"submit", u"reset"):
|
||||
if input_value_index >= 0:
|
||||
token["data"][input_value_index] = (u"value", value)
|
||||
else:
|
||||
token["data"].append((u"value", value))
|
||||
field_indices[field_name] = field_index + 1
|
||||
|
||||
field_type = None
|
||||
field_name = None
|
||||
|
||||
elif name == "textarea":
|
||||
field_type = "textarea"
|
||||
field_name = dict((token["data"])[::-1])["name"]
|
||||
|
||||
elif name == "select":
|
||||
field_type = "select"
|
||||
attributes = dict(token["data"][::-1])
|
||||
field_name = attributes.get("name")
|
||||
is_select_multiple = "multiple" in attributes
|
||||
is_selected_option_found = False
|
||||
|
||||
elif field_type == "select" and field_name and name == "option":
|
||||
option_selected_index = -1
|
||||
option_value = None
|
||||
for i,(n,v) in enumerate(token["data"]):
|
||||
n = n.lower()
|
||||
if n == "selected":
|
||||
option_selected_index = i
|
||||
elif n == "value":
|
||||
option_value = v.strip(spaceCharacters)
|
||||
if option_value is None:
|
||||
raise NotImplementedError("<option>s without a value= attribute")
|
||||
else:
|
||||
value_list = self.fieldStorage.getlist(field_name)
|
||||
if value_list:
|
||||
field_index = field_indices.setdefault(field_name, 0)
|
||||
if field_index < len(value_list):
|
||||
value = value_list[field_index]
|
||||
else:
|
||||
value = ""
|
||||
if (is_select_multiple or not is_selected_option_found) and option_value == value:
|
||||
if option_selected_index < 0:
|
||||
token["data"].append((u"selected", u""))
|
||||
field_indices[field_name] = field_index + 1
|
||||
is_selected_option_found = True
|
||||
elif option_selected_index >= 0:
|
||||
del token["data"][option_selected_index]
|
||||
|
||||
elif field_type is not None and field_name and type == "EndTag":
|
||||
name = token["name"].lower()
|
||||
if name == field_type:
|
||||
if name == "textarea":
|
||||
value_list = self.fieldStorage.getlist(field_name)
|
||||
if value_list:
|
||||
field_index = field_indices.setdefault(field_name, 0)
|
||||
if field_index < len(value_list):
|
||||
value = value_list[field_index]
|
||||
else:
|
||||
value = ""
|
||||
yield {"type": "Characters", "data": value}
|
||||
field_indices[field_name] = field_index + 1
|
||||
|
||||
field_name = None
|
||||
|
||||
elif name == "option" and field_type == "select":
|
||||
pass # TODO: part of "option without value= attribute" processing
|
||||
|
||||
elif field_type == "textarea":
|
||||
continue # ignore token
|
||||
|
||||
yield token
|
||||
@@ -1,4 +1,7 @@
|
||||
import _base
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from . import _base
|
||||
|
||||
|
||||
class Filter(_base.Filter):
|
||||
def __init__(self, source, encoding):
|
||||
@@ -13,44 +16,44 @@ class Filter(_base.Filter):
|
||||
for token in _base.Filter.__iter__(self):
|
||||
type = token["type"]
|
||||
if type == "StartTag":
|
||||
if token["name"].lower() == u"head":
|
||||
if token["name"].lower() == "head":
|
||||
state = "in_head"
|
||||
|
||||
elif type == "EmptyTag":
|
||||
if token["name"].lower() == u"meta":
|
||||
# replace charset with actual encoding
|
||||
has_http_equiv_content_type = False
|
||||
for (namespace,name),value in token["data"].iteritems():
|
||||
if namespace != None:
|
||||
continue
|
||||
elif name.lower() == u'charset':
|
||||
token["data"][(namespace,name)] = self.encoding
|
||||
meta_found = True
|
||||
break
|
||||
elif name == u'http-equiv' and value.lower() == u'content-type':
|
||||
has_http_equiv_content_type = True
|
||||
else:
|
||||
if has_http_equiv_content_type and (None, u"content") in token["data"]:
|
||||
token["data"][(None, u"content")] = u'text/html; charset=%s' % self.encoding
|
||||
meta_found = True
|
||||
if token["name"].lower() == "meta":
|
||||
# replace charset with actual encoding
|
||||
has_http_equiv_content_type = False
|
||||
for (namespace, name), value in token["data"].items():
|
||||
if namespace is not None:
|
||||
continue
|
||||
elif name.lower() == 'charset':
|
||||
token["data"][(namespace, name)] = self.encoding
|
||||
meta_found = True
|
||||
break
|
||||
elif name == 'http-equiv' and value.lower() == 'content-type':
|
||||
has_http_equiv_content_type = True
|
||||
else:
|
||||
if has_http_equiv_content_type and (None, "content") in token["data"]:
|
||||
token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
|
||||
meta_found = True
|
||||
|
||||
elif token["name"].lower() == u"head" and not meta_found:
|
||||
elif token["name"].lower() == "head" and not meta_found:
|
||||
# insert meta into empty head
|
||||
yield {"type": "StartTag", "name": u"head",
|
||||
yield {"type": "StartTag", "name": "head",
|
||||
"data": token["data"]}
|
||||
yield {"type": "EmptyTag", "name": u"meta",
|
||||
"data": {(None, u"charset"): self.encoding}}
|
||||
yield {"type": "EndTag", "name": u"head"}
|
||||
yield {"type": "EmptyTag", "name": "meta",
|
||||
"data": {(None, "charset"): self.encoding}}
|
||||
yield {"type": "EndTag", "name": "head"}
|
||||
meta_found = True
|
||||
continue
|
||||
|
||||
elif type == "EndTag":
|
||||
if token["name"].lower() == u"head" and pending:
|
||||
if token["name"].lower() == "head" and pending:
|
||||
# insert meta into head (if necessary) and flush pending queue
|
||||
yield pending.pop(0)
|
||||
if not meta_found:
|
||||
yield {"type": "EmptyTag", "name": u"meta",
|
||||
"data": {(None, u"charset"): self.encoding}}
|
||||
yield {"type": "EmptyTag", "name": "meta",
|
||||
"data": {(None, "charset"): self.encoding}}
|
||||
while pending:
|
||||
yield pending.pop(0)
|
||||
meta_found = True
|
||||
|
||||
@@ -1,13 +1,18 @@
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from gettext import gettext
|
||||
_ = gettext
|
||||
|
||||
import _base
|
||||
from html5lib.constants import cdataElements, rcdataElements, voidElements
|
||||
from . import _base
|
||||
from ..constants import cdataElements, rcdataElements, voidElements
|
||||
|
||||
from html5lib.constants import spaceCharacters
|
||||
spaceCharacters = u"".join(spaceCharacters)
|
||||
from ..constants import spaceCharacters
|
||||
spaceCharacters = "".join(spaceCharacters)
|
||||
|
||||
|
||||
class LintError(Exception):
|
||||
pass
|
||||
|
||||
class LintError(Exception): pass
|
||||
|
||||
class Filter(_base.Filter):
|
||||
def __iter__(self):
|
||||
@@ -18,24 +23,24 @@ class Filter(_base.Filter):
|
||||
if type in ("StartTag", "EmptyTag"):
|
||||
name = token["name"]
|
||||
if contentModelFlag != "PCDATA":
|
||||
raise LintError(_("StartTag not in PCDATA content model flag: %s") % name)
|
||||
if not isinstance(name, unicode):
|
||||
raise LintError(_(u"Tag name is not a string: %r") % name)
|
||||
raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
|
||||
if not isinstance(name, str):
|
||||
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
|
||||
if not name:
|
||||
raise LintError(_(u"Empty tag name"))
|
||||
raise LintError(_("Empty tag name"))
|
||||
if type == "StartTag" and name in voidElements:
|
||||
raise LintError(_(u"Void element reported as StartTag token: %s") % name)
|
||||
raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
|
||||
elif type == "EmptyTag" and name not in voidElements:
|
||||
raise LintError(_(u"Non-void element reported as EmptyTag token: %s") % token["name"])
|
||||
raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
|
||||
if type == "StartTag":
|
||||
open_elements.append(name)
|
||||
for name, value in token["data"]:
|
||||
if not isinstance(name, unicode):
|
||||
raise LintError(_("Attribute name is not a string: %r") % name)
|
||||
if not isinstance(name, str):
|
||||
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name})
|
||||
if not name:
|
||||
raise LintError(_(u"Empty attribute name"))
|
||||
if not isinstance(value, unicode):
|
||||
raise LintError(_("Attribute value is not a string: %r") % value)
|
||||
raise LintError(_("Empty attribute name"))
|
||||
if not isinstance(value, str):
|
||||
raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value})
|
||||
if name in cdataElements:
|
||||
contentModelFlag = "CDATA"
|
||||
elif name in rcdataElements:
|
||||
@@ -45,15 +50,15 @@ class Filter(_base.Filter):
|
||||
|
||||
elif type == "EndTag":
|
||||
name = token["name"]
|
||||
if not isinstance(name, unicode):
|
||||
raise LintError(_(u"Tag name is not a string: %r") % name)
|
||||
if not isinstance(name, str):
|
||||
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
|
||||
if not name:
|
||||
raise LintError(_(u"Empty tag name"))
|
||||
raise LintError(_("Empty tag name"))
|
||||
if name in voidElements:
|
||||
raise LintError(_(u"Void element reported as EndTag token: %s") % name)
|
||||
raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
|
||||
start_name = open_elements.pop()
|
||||
if start_name != name:
|
||||
raise LintError(_(u"EndTag (%s) does not match StartTag (%s)") % (name, start_name))
|
||||
raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
|
||||
contentModelFlag = "PCDATA"
|
||||
|
||||
elif type == "Comment":
|
||||
@@ -62,27 +67,27 @@ class Filter(_base.Filter):
|
||||
|
||||
elif type in ("Characters", "SpaceCharacters"):
|
||||
data = token["data"]
|
||||
if not isinstance(data, unicode):
|
||||
raise LintError(_("Attribute name is not a string: %r") % data)
|
||||
if not isinstance(data, str):
|
||||
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data})
|
||||
if not data:
|
||||
raise LintError(_(u"%s token with empty data") % type)
|
||||
raise LintError(_("%(type)s token with empty data") % {"type": type})
|
||||
if type == "SpaceCharacters":
|
||||
data = data.strip(spaceCharacters)
|
||||
if data:
|
||||
raise LintError(_(u"Non-space character(s) found in SpaceCharacters token: ") % data)
|
||||
raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})
|
||||
|
||||
elif type == "Doctype":
|
||||
name = token["name"]
|
||||
if contentModelFlag != "PCDATA":
|
||||
raise LintError(_("Doctype not in PCDATA content model flag: %s") % name)
|
||||
if not isinstance(name, unicode):
|
||||
raise LintError(_(u"Tag name is not a string: %r") % name)
|
||||
raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
|
||||
if not isinstance(name, str):
|
||||
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
|
||||
# XXX: what to do with token["data"] ?
|
||||
|
||||
elif type in ("ParseError", "SerializeError"):
|
||||
pass
|
||||
|
||||
else:
|
||||
raise LintError(_(u"Unknown token type: %s") % type)
|
||||
raise LintError(_("Unknown token type: %(type)s") % {"type": type})
|
||||
|
||||
yield token
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
import _base
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from . import _base
|
||||
|
||||
|
||||
class Filter(_base.Filter):
|
||||
def slider(self):
|
||||
@@ -14,8 +17,8 @@ class Filter(_base.Filter):
|
||||
for previous, token, next in self.slider():
|
||||
type = token["type"]
|
||||
if type == "StartTag":
|
||||
if (token["data"] or
|
||||
not self.is_optional_start(token["name"], previous, next)):
|
||||
if (token["data"] or
|
||||
not self.is_optional_start(token["name"], previous, next)):
|
||||
yield token
|
||||
elif type == "EndTag":
|
||||
if not self.is_optional_end(token["name"], next):
|
||||
@@ -73,7 +76,7 @@ class Filter(_base.Filter):
|
||||
# omit the thead and tfoot elements' end tag when they are
|
||||
# immediately followed by a tbody element. See is_optional_end.
|
||||
if previous and previous['type'] == 'EndTag' and \
|
||||
previous['name'] in ('tbody','thead','tfoot'):
|
||||
previous['name'] in ('tbody', 'thead', 'tfoot'):
|
||||
return False
|
||||
return next["name"] == 'tr'
|
||||
else:
|
||||
@@ -121,10 +124,10 @@ class Filter(_base.Filter):
|
||||
# there is no more content in the parent element.
|
||||
if type in ("StartTag", "EmptyTag"):
|
||||
return next["name"] in ('address', 'article', 'aside',
|
||||
'blockquote', 'datagrid', 'dialog',
|
||||
'blockquote', 'datagrid', 'dialog',
|
||||
'dir', 'div', 'dl', 'fieldset', 'footer',
|
||||
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
|
||||
'header', 'hr', 'menu', 'nav', 'ol',
|
||||
'header', 'hr', 'menu', 'nav', 'ol',
|
||||
'p', 'pre', 'section', 'table', 'ul')
|
||||
else:
|
||||
return type == "EndTag" or type is None
|
||||
|
||||
@@ -1,8 +1,12 @@
|
||||
import _base
|
||||
from html5lib.sanitizer import HTMLSanitizerMixin
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from . import _base
|
||||
from ..sanitizer import HTMLSanitizerMixin
|
||||
|
||||
|
||||
class Filter(_base.Filter, HTMLSanitizerMixin):
|
||||
def __iter__(self):
|
||||
for token in _base.Filter.__iter__(self):
|
||||
token = self.sanitize_token(token)
|
||||
if token: yield token
|
||||
if token:
|
||||
yield token
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
try:
|
||||
frozenset
|
||||
except NameError:
|
||||
# Import from the sets module for python 2.3
|
||||
from sets import ImmutableSet as frozenset
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
import _base
|
||||
from html5lib.constants import rcdataElements, spaceCharacters
|
||||
spaceCharacters = u"".join(spaceCharacters)
|
||||
from . import _base
|
||||
from ..constants import rcdataElements, spaceCharacters
|
||||
spaceCharacters = "".join(spaceCharacters)
|
||||
|
||||
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
|
||||
|
||||
SPACES_REGEX = re.compile(u"[%s]+" % spaceCharacters)
|
||||
|
||||
class Filter(_base.Filter):
|
||||
|
||||
@@ -21,7 +18,7 @@ class Filter(_base.Filter):
|
||||
for token in _base.Filter.__iter__(self):
|
||||
type = token["type"]
|
||||
if type == "StartTag" \
|
||||
and (preserve or token["name"] in self.spacePreserveElements):
|
||||
and (preserve or token["name"] in self.spacePreserveElements):
|
||||
preserve += 1
|
||||
|
||||
elif type == "EndTag" and preserve:
|
||||
@@ -29,13 +26,13 @@ class Filter(_base.Filter):
|
||||
|
||||
elif not preserve and type == "SpaceCharacters" and token["data"]:
|
||||
# Test on token["data"] above to not introduce spaces where there were not
|
||||
token["data"] = u" "
|
||||
token["data"] = " "
|
||||
|
||||
elif not preserve and type == "Characters":
|
||||
token["data"] = collapse_spaces(token["data"])
|
||||
|
||||
yield token
|
||||
|
||||
|
||||
def collapse_spaces(text):
|
||||
return SPACES_REGEX.sub(' ', text)
File diff suppressed because it is too large
@@ -1,25 +1,105 @@
|
||||
import re
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
baseChar = """[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | [#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | [#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | [#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | [#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | [#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | [#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | [#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | [#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | [#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | [#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | [#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | [#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | [#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | [#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | [#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | [#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | [#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | [#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | [#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | [#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | [#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | [#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | [#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | [#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | [#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | [#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | [#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | [#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | #x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | #x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | #x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | [#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | [#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | #x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | [#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | [#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | [#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | [#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | [#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | #x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | [#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | [#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | [#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | [#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
import re
import warnings

from .constants import DataLossWarning

baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
|
||||
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
|
||||
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
|
||||
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
|
||||
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
|
||||
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
|
||||
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
|
||||
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
|
||||
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
|
||||
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
|
||||
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
|
||||
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
|
||||
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
|
||||
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
|
||||
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
|
||||
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
|
||||
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
|
||||
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
|
||||
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
|
||||
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
|
||||
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
|
||||
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
|
||||
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
|
||||
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
|
||||
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
|
||||
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
|
||||
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
|
||||
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
|
||||
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
|
||||
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
|
||||
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
|
||||
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
|
||||
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
|
||||
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
|
||||
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
|
||||
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
|
||||
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
|
||||
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
|
||||
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
|
||||
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
|
||||
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
|
||||
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
|
||||
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
|
||||
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
|
||||
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
|
||||
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""

ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""

combiningCharacter = """[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | [#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | [#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | [#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | #x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | [#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | [#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | #x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | [#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | [#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | #x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | [#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | [#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | [#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | [#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | [#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | #x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | [#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | #x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | [#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | [#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | #x3099 | #x309A"""
|
||||
combiningCharacter = """
|
||||
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
|
||||
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
|
||||
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
|
||||
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
|
||||
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
|
||||
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
|
||||
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
|
||||
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
|
||||
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
|
||||
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
|
||||
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
|
||||
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
|
||||
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
|
||||
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
|
||||
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
|
||||
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
|
||||
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
|
||||
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
|
||||
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
|
||||
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
|
||||
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
|
||||
#x3099 | #x309A"""
|
||||
|
||||
digit = """[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | [#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | [#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | [#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""

extender = """#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | [#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""

letter = " | ".join([baseChar, ideographic])

#Without the
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
# Without the
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])

reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")


def charStringToList(chars):
charRanges = [item.strip() for item in chars.split(" | ")]
rv = []
@@ -30,16 +110,17 @@ def charStringToList(chars):
if match is not None:
rv.append([hexToInt(item) for item in match.groups()])
if len(rv[-1]) == 1:
rv[-1] = rv[-1]*2
rv[-1] = rv[-1] * 2
foundMatch = True
break
if not foundMatch:
assert len(item) == 1


rv.append([ord(item)] * 2)
rv = normaliseCharList(rv)
return rv


def normaliseCharList(charList):
charList = sorted(charList)
for item in charList:
@@ -49,61 +130,69 @@ def normaliseCharList(charList):
while i < len(charList):
j = 1
rv.append(charList[i])
while i + j < len(charList) and charList[i+j][0] <= rv[-1][1] + 1:
rv[-1][1] = charList[i+j][1]
while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
rv[-1][1] = charList[i + j][1]
j += 1
i += j
return rv

#We don't really support characters above the BMP :(
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)



def missingRanges(charList):
rv = []
if charList[0] != 0:
rv.append([0, charList[0][0] - 1])
for i, item in enumerate(charList[:-1]):
rv.append([item[1]+1, charList[i+1][0] - 1])
rv.append([item[1] + 1, charList[i + 1][0] - 1])
if charList[-1][1] != max_unicode:
rv.append([charList[-1][1] + 1, max_unicode])
return rv


def listToRegexpStr(charList):
rv = []
for item in charList:
if item[0] == item[1]:
rv.append(escapeRegexp(unichr(item[0])))
rv.append(escapeRegexp(chr(item[0])))
else:
rv.append(escapeRegexp(unichr(item[0])) + "-" +
escapeRegexp(unichr(item[1])))
return "[%s]"%"".join(rv)
rv.append(escapeRegexp(chr(item[0])) + "-" +
escapeRegexp(chr(item[1])))
return "[%s]" % "".join(rv)


def hexToInt(hex_str):
return int(hex_str, 16)


def escapeRegexp(string):
specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
"[", "]", "|", "(", ")", "-")
"[", "]", "|", "(", ")", "-")
for char in specialCharacters:
string = string.replace(char, "\\" + char)
if char in string:
print string

return string

#output from the above
nonXmlNameBMPRegexp = re.compile(u'[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')

# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")

nonXmlNameFirstBMPRegexp = re.compile(u'[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')

class InfosetFilter(object):
replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
def __init__(self, replaceChars = None,
dropXmlnsLocalName = False,
dropXmlnsAttrNs = False,
preventDoubleDashComments = False,
preventDashAtCommentEnd = False,
replaceFormFeedCharacters = True):

def __init__(self, replaceChars=None,
dropXmlnsLocalName=False,
dropXmlnsAttrNs=False,
preventDoubleDashComments=False,
preventDashAtCommentEnd=False,
replaceFormFeedCharacters=True,
preventSingleQuotePubid=False):

self.dropXmlnsLocalName = dropXmlnsLocalName
self.dropXmlnsAttrNs = dropXmlnsAttrNs
@@ -113,14 +202,17 @@ class InfosetFilter(object):

self.replaceFormFeedCharacters = replaceFormFeedCharacters

self.preventSingleQuotePubid = preventSingleQuotePubid

self.replaceCache = {}

def coerceAttribute(self, name, namespace=None):
if self.dropXmlnsLocalName and name.startswith("xmlns:"):
#Need a datalosswarning here
warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
return None
elif (self.dropXmlnsAttrNs and
elif (self.dropXmlnsAttrNs and
namespace == "http://www.w3.org/2000/xmlns/"):
warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
return None
else:
return self.toXmlName(name)
@@ -131,20 +223,35 @@ class InfosetFilter(object):
def coerceComment(self, data):
if self.preventDoubleDashComments:
while "--" in data:
warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
data = data.replace("--", "- -")
return data


def coerceCharacters(self, data):
if self.replaceFormFeedCharacters:
for i in range(data.count("\x0C")):
warnings.warn("Text cannot contain U+000C", DataLossWarning)
data = data.replace("\x0C", " ")
#Other non-xml characters
# Other non-xml characters
return data

def coercePubid(self, data):
dataOutput = data
for char in nonPubidCharRegexp.findall(data):
warnings.warn("Coercing non-XML pubid", DataLossWarning)
replacement = self.getReplacementCharacter(char)
dataOutput = dataOutput.replace(char, replacement)
if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
warnings.warn("Pubid cannot contain single quote", DataLossWarning)
dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
return dataOutput

def toXmlName(self, name):
nameFirst = name[0]
nameRest = name[1:]
m = nonXmlNameFirstBMPRegexp.match(nameFirst)
if m:
warnings.warn("Coercing non-XML name", DataLossWarning)
nameFirstOutput = self.getReplacementCharacter(nameFirst)
else:
nameFirstOutput = nameFirst
@@ -152,10 +259,11 @@ class InfosetFilter(object):
nameRestOutput = nameRest
replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
for char in replaceChars:
warnings.warn("Coercing non-XML name", DataLossWarning)
replacement = self.getReplacementCharacter(char)
nameRestOutput = nameRestOutput.replace(char, replacement)
return nameFirstOutput + nameRestOutput


def getReplacementCharacter(self, char):
if char in self.replaceCache:
replacement = self.replaceCache[char]
@@ -169,9 +277,9 @@ class InfosetFilter(object):
return name

def escapeChar(self, char):
replacement = "U" + hex(ord(char))[2:].upper().rjust(5, "0")
replacement = "U%05X" % ord(char)
self.replaceCache[char] = replacement
return replacement

def unescapeChar(self, charcode):
return unichr(int(charcode[1:], 16))
return chr(int(charcode[1:], 16))

File diff suppressed because it is too large
@@ -1,142 +1,145 @@
from __future__ import absolute_import, division, unicode_literals

import re
from xml.sax.saxutils import escape, unescape

from tokenizer import HTMLTokenizer
from constants import tokenTypes
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes


class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""

acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
|
||||
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
|
||||
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
|
||||
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
|
||||
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
|
||||
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
|
||||
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
|
||||
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
|
||||
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
|
||||
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
|
||||
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
|
||||
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
|
||||
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
|
||||
|
||||
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
|
||||
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
|
||||
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
|
||||
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
|
||||
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
|
||||
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
|
||||
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
|
||||
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
|
||||
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
|
||||
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
|
||||
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
|
||||
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
|
||||
|
||||
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
|
||||
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
|
||||
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
|
||||
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
|
||||
'munderover', 'none']
|
||||
|
||||
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
|
||||
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
|
||||
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
|
||||
'munderover', 'none']
|
||||
|
||||
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
|
||||
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
|
||||
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
|
||||
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
|
||||
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
|
||||
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
|
||||
|
||||
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
|
||||
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
|
||||
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
|
||||
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
|
||||
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
|
||||
|
||||
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
|
||||
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
|
||||
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
|
||||
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
|
||||
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
|
||||
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
|
||||
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
|
||||
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
|
||||
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
|
||||
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
|
||||
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
|
||||
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
|
||||
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
|
||||
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
|
||||
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
|
||||
'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg',
|
||||
'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
|
||||
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
|
||||
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
|
||||
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
|
||||
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
|
||||
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
|
||||
'width', 'wrap', 'xml:lang']
|
||||
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
|
||||
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
|
||||
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
|
||||
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
|
||||
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
|
||||
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
|
||||
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
|
||||
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
|
||||
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
|
||||
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
|
||||
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
|
||||
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
|
||||
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
|
||||
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
|
||||
'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
|
||||
'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
|
||||
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
|
||||
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
|
||||
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
|
||||
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
|
||||
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
|
||||
'width', 'wrap', 'xml:lang']
|
||||
|
||||
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
|
||||
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
|
||||
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
|
||||
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
|
||||
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
|
||||
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
|
||||
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
|
||||
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
|
||||
'xlink:type', 'xmlns', 'xmlns:xlink']
|
||||
|
||||
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
|
||||
'arabic-form', 'ascent', 'attributeName', 'attributeType',
|
||||
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
|
||||
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
|
||||
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
|
||||
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
|
||||
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
|
||||
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
|
||||
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
|
||||
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
|
||||
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
|
||||
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
|
||||
'opacity', 'orient', 'origin', 'overline-position',
|
||||
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
|
||||
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
|
||||
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
|
||||
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
|
||||
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
|
||||
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
|
||||
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
|
||||
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
|
||||
'transform', 'type', 'u1', 'u2', 'underline-position',
|
||||
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
|
||||
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
|
||||
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
|
||||
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
|
||||
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
|
||||
'y1', 'y2', 'zoomAndPan']
|
||||
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
|
||||
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
|
||||
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
|
||||
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
|
||||
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
|
||||
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
|
||||
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
|
||||
'xlink:type', 'xmlns', 'xmlns:xlink']
|
||||
|
||||
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc',
|
||||
'xlink:href', 'xml:base']
|
||||
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
|
||||
'arabic-form', 'ascent', 'attributeName', 'attributeType',
|
||||
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
|
||||
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
|
||||
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
|
||||
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
|
||||
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
|
||||
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
|
||||
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
|
||||
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
|
||||
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
|
||||
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
|
||||
'opacity', 'orient', 'origin', 'overline-position',
|
||||
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
|
||||
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
|
||||
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
|
||||
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
|
||||
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
|
||||
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
|
||||
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
|
||||
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
|
||||
'transform', 'type', 'u1', 'u2', 'underline-position',
|
||||
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
|
||||
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
|
||||
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
|
||||
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
|
||||
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
|
||||
'y1', 'y2', 'zoomAndPan']
|
||||
|
||||
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
|
||||
'xlink:href', 'xml:base']
|
||||
|
||||
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
|
||||
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
|
||||
'mask', 'stroke']
|
||||
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
|
||||
'mask', 'stroke']
|
||||
|
||||
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
|
||||
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
|
||||
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
|
||||
'set', 'use']
|
||||
|
||||
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
|
||||
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
|
||||
'set', 'use']
|
||||
|
||||
acceptable_css_properties = ['azimuth', 'background-color',
|
||||
'border-bottom-color', 'border-collapse', 'border-color',
|
||||
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
|
||||
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
|
||||
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
|
||||
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
|
||||
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
|
||||
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
|
||||
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
|
||||
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
|
||||
'white-space', 'width']
|
||||
|
||||
'border-bottom-color', 'border-collapse', 'border-color',
|
||||
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
|
||||
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
|
||||
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
|
||||
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
|
||||
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
|
||||
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
|
||||
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
|
||||
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
|
||||
'white-space', 'width']
|
||||
|
||||
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
|
||||
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
|
||||
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
|
||||
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
|
||||
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
|
||||
'transparent', 'underline', 'white', 'yellow']
|
||||
|
||||
acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
|
||||
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
|
||||
'stroke-opacity']
|
||||
|
||||
acceptable_protocols = [ 'ed2k', 'ftp', 'http', 'https', 'irc',
|
||||
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
|
||||
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
|
||||
'ssh', 'sftp', 'rtsp', 'afs' ]
|
||||
|
||||
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
|
||||
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
|
||||
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
|
||||
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
|
||||
'transparent', 'underline', 'white', 'yellow']
|
||||
|
||||
acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
|
||||
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
|
||||
'stroke-opacity']
|
||||
|
||||
acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
|
||||
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
|
||||
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
|
||||
'ssh', 'sftp', 'rtsp', 'afs']
|
||||
|
||||
# subclasses may define their own versions of these constants
|
||||
allowed_elements = acceptable_elements + mathml_elements + svg_elements
|
||||
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
|
||||
@@ -160,94 +163,104 @@ class HTMLSanitizerMixin(object):
|
||||
|
||||
# accommodate filters which use token_type differently
|
||||
token_type = token["type"]
|
||||
if token_type in tokenTypes.keys():
|
||||
token_type = tokenTypes[token_type]
|
||||
if token_type in list(tokenTypes.keys()):
|
||||
token_type = tokenTypes[token_type]
|
||||
|
||||
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
|
||||
tokenTypes["EmptyTag"]):
|
||||
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
|
||||
tokenTypes["EmptyTag"]):
|
||||
if token["name"] in self.allowed_elements:
|
||||
if token.has_key("data"):
|
||||
attrs = dict([(name,val) for name,val in
|
||||
token["data"][::-1]
|
||||
if name in self.allowed_attributes])
|
||||
for attr in self.attr_val_is_uri:
|
||||
if not attrs.has_key(attr):
|
||||
continue
|
||||
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
|
||||
unescape(attrs[attr])).lower()
|
||||
#remove replacement characters from unescaped characters
|
||||
val_unescaped = val_unescaped.replace(u"\ufffd", "")
|
||||
if (re.match("^[a-z0-9][-+.a-z0-9]*:",val_unescaped) and
|
||||
(val_unescaped.split(':')[0] not in
|
||||
self.allowed_protocols)):
|
||||
del attrs[attr]
|
||||
for attr in self.svg_attr_val_allows_ref:
|
||||
if attr in attrs:
|
||||
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
|
||||
' ',
|
||||
unescape(attrs[attr]))
|
||||
if (token["name"] in self.svg_allow_local_href and
|
||||
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
|
||||
attrs['xlink:href'])):
|
||||
del attrs['xlink:href']
|
||||
if attrs.has_key('style'):
|
||||
attrs['style'] = self.sanitize_css(attrs['style'])
|
||||
token["data"] = [[name,val] for name,val in attrs.items()]
|
||||
return token
|
||||
return self.allowed_token(token, token_type)
|
||||
else:
|
||||
if token_type == tokenTypes["EndTag"]:
|
||||
token["data"] = "</%s>" % token["name"]
|
||||
elif token["data"]:
|
||||
attrs = ''.join([' %s="%s"' % (k,escape(v)) for k,v in token["data"]])
|
||||
token["data"] = "<%s%s>" % (token["name"],attrs)
|
||||
else:
|
||||
token["data"] = "<%s>" % token["name"]
|
||||
if token.get("selfClosing"):
|
||||
token["data"]=token["data"][:-1] + "/>"
|
||||
|
||||
if token["type"] in tokenTypes.keys():
|
||||
token["type"] = "Characters"
|
||||
else:
|
||||
token["type"] = tokenTypes["Characters"]
|
||||
|
||||
del token["name"]
|
||||
return token
|
||||
return self.disallowed_token(token, token_type)
|
||||
elif token_type == tokenTypes["Comment"]:
|
||||
pass
|
||||
else:
|
||||
return token
|
||||
|
||||
def allowed_token(self, token, token_type):
|
||||
if "data" in token:
|
||||
attrs = dict([(name, val) for name, val in
|
||||
token["data"][::-1]
|
||||
if name in self.allowed_attributes])
|
||||
for attr in self.attr_val_is_uri:
|
||||
if attr not in attrs:
|
||||
continue
|
||||
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
|
||||
unescape(attrs[attr])).lower()
|
||||
# remove replacement characters from unescaped characters
|
||||
val_unescaped = val_unescaped.replace("\ufffd", "")
|
||||
if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
|
||||
(val_unescaped.split(':')[0] not in
|
||||
self.allowed_protocols)):
|
||||
del attrs[attr]
|
||||
for attr in self.svg_attr_val_allows_ref:
|
||||
if attr in attrs:
|
||||
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
|
||||
' ',
|
||||
unescape(attrs[attr]))
|
||||
if (token["name"] in self.svg_allow_local_href and
|
||||
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
|
||||
attrs['xlink:href'])):
|
||||
del attrs['xlink:href']
|
||||
if 'style' in attrs:
|
||||
attrs['style'] = self.sanitize_css(attrs['style'])
|
||||
token["data"] = [[name, val] for name, val in list(attrs.items())]
|
||||
return token
|
||||
|
||||
def disallowed_token(self, token, token_type):
|
||||
if token_type == tokenTypes["EndTag"]:
|
||||
token["data"] = "</%s>" % token["name"]
|
||||
elif token["data"]:
|
||||
attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
|
||||
token["data"] = "<%s%s>" % (token["name"], attrs)
|
||||
else:
|
||||
token["data"] = "<%s>" % token["name"]
|
||||
if token.get("selfClosing"):
|
||||
token["data"] = token["data"][:-1] + "/>"
|
||||
|
||||
if token["type"] in list(tokenTypes.keys()):
|
||||
token["type"] = "Characters"
|
||||
else:
|
||||
token["type"] = tokenTypes["Characters"]
|
||||
|
||||
del token["name"]
|
||||
return token
|
||||
|
||||
def sanitize_css(self, style):
|
||||
# disallow urls
|
||||
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
|
||||
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
|
||||
|
||||
# gauntlet
|
||||
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
|
||||
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return ''
|
||||
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
|
||||
return ''
|
||||
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
|
||||
return ''
|
||||
|
||||
clean = []
|
||||
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
|
||||
if not value: continue
|
||||
if prop.lower() in self.allowed_css_properties:
|
||||
clean.append(prop + ': ' + value + ';')
|
||||
elif prop.split('-')[0].lower() in ['background','border','margin',
|
||||
'padding']:
|
||||
for keyword in value.split():
|
||||
if not keyword in self.acceptable_css_keywords and \
|
||||
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$",keyword):
|
||||
break
|
||||
else:
|
||||
clean.append(prop + ': ' + value + ';')
|
||||
elif prop.lower() in self.allowed_svg_properties:
|
||||
clean.append(prop + ': ' + value + ';')
|
||||
for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
|
||||
if not value:
|
||||
continue
|
||||
if prop.lower() in self.allowed_css_properties:
|
||||
clean.append(prop + ': ' + value + ';')
|
||||
elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
|
||||
'padding']:
|
||||
for keyword in value.split():
|
||||
if not keyword in self.acceptable_css_keywords and \
|
||||
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
|
||||
break
|
||||
else:
|
||||
clean.append(prop + ': ' + value + ';')
|
||||
elif prop.lower() in self.allowed_svg_properties:
|
||||
clean.append(prop + ': ' + value + ';')

return ' '.join(clean)


class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=False, lowercaseAttrName=False, parser=None):
#Change case matching defaults as we only output lowercase html anyway
#This solution doesn't seem ideal...
# Change case matching defaults as we only output lowercase html anyway
# This solution doesn't seem ideal...
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
lowercaseElementName, lowercaseAttrName, parser=parser)

@@ -1,17 +1,16 @@
from __future__ import absolute_import, division, unicode_literals

from html5lib import treewalkers
from .. import treewalkers

from htmlserializer import HTMLSerializer
from xhtmlserializer import XHTMLSerializer
from .htmlserializer import HTMLSerializer

def serialize(input, tree="simpletree", format="html", encoding=None,

def serialize(input, tree="etree", format="html", encoding=None,
**serializer_opts):
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
walker = treewalkers.getTreeWalker(tree)
if format == "html":
s = HTMLSerializer(**serializer_opts)
elif format == "xhtml":
s = XHTMLSerializer(**serializer_opts)
else:
raise ValueError, "type must be either html or xhtml"
raise ValueError("type must be html")
return s.render(walker(input), encoding)

@@ -1,18 +1,20 @@
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import ImmutableSet as frozenset
from __future__ import absolute_import, division, unicode_literals
from six import text_type

import gettext
_ = gettext.gettext

from html5lib.constants import voidElements, booleanAttributes, spaceCharacters
from html5lib.constants import rcdataElements, entities, xmlEntities
from html5lib import utils
try:
from functools import reduce
except ImportError:
pass

from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape

spaceCharacters = u"".join(spaceCharacters)
spaceCharacters = "".join(spaceCharacters)

try:
from codecs import register_error, xmlcharrefreplace_errors
@@ -21,24 +23,18 @@ except ImportError:
|
||||
else:
|
||||
unicode_encode_errors = "htmlentityreplace"
|
||||
|
||||
from html5lib.constants import entities
|
||||
|
||||
encode_entity_map = {}
|
||||
is_ucs4 = len(u"\U0010FFFF") == 1
|
||||
for k, v in entities.items():
|
||||
#skip multi-character entities
|
||||
is_ucs4 = len("\U0010FFFF") == 1
|
||||
for k, v in list(entities.items()):
|
||||
# skip multi-character entities
|
||||
if ((is_ucs4 and len(v) > 1) or
|
||||
(not is_ucs4 and len(v) > 2)):
|
||||
(not is_ucs4 and len(v) > 2)):
|
||||
continue
|
||||
if v != "&":
|
||||
if len(v) == 2:
|
||||
v = utils.surrogatePairToCodepoint(v)
|
||||
else:
|
||||
try:
|
||||
v = ord(v)
|
||||
except:
|
||||
print v
|
||||
raise
|
||||
v = ord(v)
|
||||
if not v in encode_entity_map or k.islower():
|
||||
# prefer < over < and similarly for &, >, etc.
|
||||
encode_entity_map[v] = k
|
||||
@@ -53,8 +49,8 @@ else:
|
||||
skip = False
|
||||
continue
|
||||
index = i + exc.start
|
||||
if utils.isSurrogatePair(exc.object[index:min([exc.end, index+2])]):
|
||||
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index+2])
|
||||
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
|
||||
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
|
||||
skip = True
|
||||
else:
|
||||
codepoint = ord(c)
|
||||
@@ -67,8 +63,8 @@ else:
|
||||
if not e.endswith(";"):
|
||||
res.append(";")
|
||||
else:
|
||||
res.append("&#x%s;"%(hex(cp)[2:]))
|
||||
return (u"".join(res), exc.end)
|
||||
res.append("&#x%s;" % (hex(cp)[2:]))
|
||||
return ("".join(res), exc.end)
|
||||
else:
|
||||
return xmlcharrefreplace_errors(exc)
|
||||
|
||||
@@ -81,7 +77,7 @@ class HTMLSerializer(object):
|
||||
|
||||
# attribute quoting options
|
||||
quote_attr_values = False
|
||||
quote_char = u'"'
|
||||
quote_char = '"'
|
||||
use_best_quote_char = True
|
||||
|
||||
# tag syntax options
|
||||
@@ -96,15 +92,17 @@ class HTMLSerializer(object):
|
||||
resolve_entities = True
|
||||
|
||||
# miscellaneous options
|
||||
alphabetical_attributes = False
|
||||
inject_meta_charset = True
|
||||
strip_whitespace = False
|
||||
sanitize = False
|
||||
|
||||
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
|
||||
"minimize_boolean_attributes", "use_trailing_solidus",
|
||||
"space_before_trailing_solidus", "omit_optional_tags",
|
||||
"strip_whitespace", "inject_meta_charset", "escape_lt_in_attrs",
|
||||
"escape_rcdata", "resolve_entities", "sanitize")
|
||||
"omit_optional_tags", "minimize_boolean_attributes",
|
||||
"use_trailing_solidus", "space_before_trailing_solidus",
|
||||
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
|
||||
"alphabetical_attributes", "inject_meta_charset",
|
||||
"strip_whitespace", "sanitize")
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
"""Initialize HTMLSerializer.
|
||||
@@ -147,10 +145,12 @@ class HTMLSerializer(object):
|
||||
See `html5lib user documentation`_
|
||||
omit_optional_tags=True|False
|
||||
Omit start/end tags that are optional.
|
||||
alphabetical_attributes=False|True
|
||||
Reorder attributes to be in alphabetical order.
|
||||
|
||||
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
|
||||
"""
|
||||
if kwargs.has_key('quote_char'):
|
||||
if 'quote_char' in kwargs:
|
||||
self.use_best_quote_char = False
|
||||
for attr in self.options:
|
||||
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
|
||||
@@ -158,14 +158,14 @@ class HTMLSerializer(object):
self.strict = False

def encode(self, string):
assert(isinstance(string, unicode))
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string

def encodeStrict(self, string):
assert(isinstance(string, unicode))
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
@@ -175,39 +175,46 @@ class HTMLSerializer(object):
|
||||
self.encoding = encoding
|
||||
in_cdata = False
|
||||
self.errors = []
|
||||
|
||||
if encoding and self.inject_meta_charset:
|
||||
from html5lib.filters.inject_meta_charset import Filter
|
||||
from ..filters.inject_meta_charset import Filter
|
||||
treewalker = Filter(treewalker, encoding)
|
||||
# XXX: WhitespaceFilter should be used before OptionalTagFilter
|
||||
# WhitespaceFilter should be used before OptionalTagFilter
|
||||
# for maximum efficiently of this latter filter
|
||||
if self.strip_whitespace:
|
||||
from html5lib.filters.whitespace import Filter
|
||||
from ..filters.whitespace import Filter
|
||||
treewalker = Filter(treewalker)
|
||||
if self.sanitize:
|
||||
from html5lib.filters.sanitizer import Filter
|
||||
from ..filters.sanitizer import Filter
|
||||
treewalker = Filter(treewalker)
|
||||
if self.omit_optional_tags:
|
||||
from html5lib.filters.optionaltags import Filter
|
||||
from ..filters.optionaltags import Filter
|
||||
treewalker = Filter(treewalker)
|
||||
# Alphabetical attributes must be last, as other filters
|
||||
# could add attributes and alter the order
|
||||
if self.alphabetical_attributes:
|
||||
from ..filters.alphabeticalattributes import Filter
|
||||
treewalker = Filter(treewalker)
|
||||
|
||||
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = u"<!DOCTYPE %s" % token["name"]

doctype = "<!DOCTYPE %s" % token["name"]

if token["publicId"]:
doctype += u' PUBLIC "%s"' % token["publicId"]
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += u" SYSTEM"
if token["systemId"]:
if token["systemId"].find(u'"') >= 0:
if token["systemId"].find(u"'") >= 0:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError(_("System identifer contains both single and double quote characters"))
quote_char = u"'"
quote_char = "'"
else:
quote_char = u'"'
doctype += u" %s%s%s" % (quote_char, token["systemId"], quote_char)

doctype += u">"
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

doctype += ">"
yield self.encodeStrict(doctype)

elif type in ("Characters", "SpaceCharacters"):
@@ -220,41 +227,41 @@ class HTMLSerializer(object):

elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict(u"<%s" % name)
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
attributes = []
for (attr_namespace,attr_name),attr_value in sorted(token["data"].items()):
#TODO: Add namespace support here
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(u' ')
yield self.encodeStrict(' ')

yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) \
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict(u"=")
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x,y: x or (y in v),
spaceCharacters + u">\"'=", False)
v = v.replace(u"&", u"&amp;")
if self.escape_lt_in_attrs: v = v.replace(u"<", u"&lt;")
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
v = v.replace("&", "&amp;")
if self.escape_lt_in_attrs:
v = v.replace("<", "&lt;")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if u"'" in v and u'"' not in v:
quote_char = u'"'
elif u'"' in v and u"'" not in v:
quote_char = u"'"
if quote_char == u"'":
v = v.replace(u"'", u"&#39;")
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "&#39;")
else:
v = v.replace(u'"', u"&quot;")
v = v.replace('"', "&quot;")
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
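
The quoting logic above prefers whichever quote character avoids escaping when use_best_quote_char is enabled. A small round-trip sketch of the assumed behaviour (the printed output is an expectation, not taken from this diff):

import html5lib
from html5lib.serializer import HTMLSerializer

doc = html5lib.parse('<p title="it&#39;s here">x</p>')
walker = html5lib.getTreeWalker("etree")

# The title value contains a single quote, so the serializer is expected
# to pick double quotes and emit something like: <p title="it's here">x
print(HTMLSerializer().render(walker(doc)))
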
@@ -262,10 +269,10 @@ class HTMLSerializer(object):
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(u" /")
yield self.encodeStrict(" /")
else:
yield self.encodeStrict(u"/")
yield self.encode(u">")
yield self.encodeStrict("/")
yield self.encode(">")

elif type == "EndTag":
|
||||
name = token["name"]
|
||||
@@ -273,13 +280,13 @@ class HTMLSerializer(object):
|
||||
in_cdata = False
|
||||
elif in_cdata:
|
||||
self.serializeError(_("Unexpected child element of a CDATA element"))
|
||||
yield self.encodeStrict(u"</%s>" % name)
|
||||
yield self.encodeStrict("</%s>" % name)
|
||||
|
||||
elif type == "Comment":
|
||||
data = token["data"]
|
||||
if data.find("--") >= 0:
|
||||
self.serializeError(_("Comment contains --"))
|
||||
yield self.encodeStrict(u"<!--%s-->" % token["data"])
|
||||
yield self.encodeStrict("<!--%s-->" % token["data"])
|
||||
|
||||
elif type == "Entity":
|
||||
name = token["name"]
|
||||
@@ -289,7 +296,7 @@ class HTMLSerializer(object):
|
||||
if self.resolve_entities and key not in xmlEntities:
|
||||
data = entities[key]
|
||||
else:
|
||||
data = u"&%s;" % name
|
||||
data = "&%s;" % name
|
||||
yield self.encodeStrict(data)
|
||||
|
||||
else:
|
||||
@@ -297,9 +304,9 @@ class HTMLSerializer(object):

def render(self, treewalker, encoding=None):
if encoding:
return "".join(list(self.serialize(treewalker, encoding)))
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return u"".join(list(self.serialize(treewalker)))
return "".join(list(self.serialize(treewalker)))

def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
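
With the render() change above, the method is expected to return bytes when an encoding is supplied (the b"".join branch) and native text otherwise, which is the distinction that matters on Python 3. A short sketch of that assumed behaviour:

import html5lib
from html5lib.serializer import HTMLSerializer

doc = html5lib.parse(u"<p>caf\u00e9</p>")
walker = html5lib.getTreeWalker("etree")
serializer = HTMLSerializer()

text_out = serializer.render(walker(doc))             # unicode / str
bytes_out = serializer.render(walker(doc), "utf-8")   # bytes on Python 3

# Expected (assumption): text_out == u"<p>caf\u00e9" and
# bytes_out == b"<p>caf\xc3\xa9", optional tags omitted by default.
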
@@ -307,6 +314,7 @@ class HTMLSerializer(object):
if self.strict:
raise SerializeError


def SerializeError(Exception):
"""Error in serialized tree"""
pass

Some files were not shown because too many files have changed in this diff