a",n=d.getElementsByTagName("*")||[],r=d.getElementsByTagName("a")[0],!r||!r.style||!n.length)return t;s=a.createElement("select"),u=s.appendChild(a.createElement("option")),o=d.getElementsByTagName("input")[0],r.style.cssText="top:1px;float:left;opacity:.5",t.getSetAttribute="t"!==d.className,t.leadingWhitespace=3===d.firstChild.nodeType,t.tbody=!d.getElementsByTagName("tbody").length,t.htmlSerialize=!!d.getElementsByTagName("link").length,t.style=/top/.test(r.getAttribute("style")),t.hrefNormalized="/a"===r.getAttribute("href"),t.opacity=/^0.5/.test(r.style.opacity),t.cssFloat=!!r.style.cssFloat,t.checkOn=!!o.value,t.optSelected=u.selected,t.enctype=!!a.createElement("form").enctype,t.html5Clone="<:nav>"!==a.createElement("nav").cloneNode(!0).outerHTML,t.inlineBlockNeedsLayout=!1,t.shrinkWrapBlocks=!1,t.pixelPosition=!1,t.deleteExpando=!0,t.noCloneEvent=!0,t.reliableMarginRight=!0,t.boxSizingReliable=!0,o.checked=!0,t.noCloneChecked=o.cloneNode(!0).checked,s.disabled=!0,t.optDisabled=!u.disabled;try{delete d.test}catch(h){t.deleteExpando=!1}o=a.createElement("input"),o.setAttribute("value",""),t.input=""===o.getAttribute("value"),o.value="t",o.setAttribute("type","radio"),t.radioValue="t"===o.value,o.setAttribute("checked","t"),o.setAttribute("name","t"),l=a.createDocumentFragment(),l.appendChild(o),t.appendChecked=o.checked,t.checkClone=l.cloneNode(!0).cloneNode(!0).lastChild.checked,d.attachEvent&&(d.attachEvent("onclick",function(){t.noCloneEvent=!1}),d.cloneNode(!0).click());for(f in{submit:!0,change:!0,focusin:!0})d.setAttribute(c="on"+f,"t"),t[f+"Bubbles"]=c in e||d.attributes[c].expando===!1;d.style.backgroundClip="content-box",d.cloneNode(!0).style.backgroundClip="",t.clearCloneStyle="content-box"===d.style.backgroundClip;for(f in x(t))break;return t.ownLast="0"!==f,x(function(){var n,r,o,s="padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;",l=a.getElementsByTagName("body")[0];l&&(n=a.createElement("div"),n.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",l.appendChild(n).appendChild(d),d.innerHTML="
").append(m.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,e||[a.responseText,b,a])}),this},m.expr.filters.animated=function(a){return m.grep(m.timers,function(b){return a===b.elem}).length};var cd=a.document.documentElement;function dd(a){return m.isWindow(a)?a:9===a.nodeType?a.defaultView||a.parentWindow:!1}m.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=m.css(a,"position"),l=m(a),n={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=m.css(a,"top"),i=m.css(a,"left"),j=("absolute"===k||"fixed"===k)&&m.inArray("auto",[f,i])>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),m.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(n.top=b.top-h.top+g),null!=b.left&&(n.left=b.left-h.left+e),"using"in b?b.using.call(a,n):l.css(n)}},m.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){m.offset.setOffset(this,a,b)});var b,c,d={top:0,left:0},e=this[0],f=e&&e.ownerDocument;if(f)return b=f.documentElement,m.contains(b,e)?(typeof e.getBoundingClientRect!==K&&(d=e.getBoundingClientRect()),c=dd(f),{top:d.top+(c.pageYOffset||b.scrollTop)-(b.clientTop||0),left:d.left+(c.pageXOffset||b.scrollLeft)-(b.clientLeft||0)}):d},position:function(){if(this[0]){var a,b,c={top:0,left:0},d=this[0];return"fixed"===m.css(d,"position")?b=d.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),m.nodeName(a[0],"html")||(c=a.offset()),c.top+=m.css(a[0],"borderTopWidth",!0),c.left+=m.css(a[0],"borderLeftWidth",!0)),{top:b.top-c.top-m.css(d,"marginTop",!0),left:b.left-c.left-m.css(d,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||cd;while(a&&!m.nodeName(a,"html")&&"static"===m.css(a,"position"))a=a.offsetParent;return a||cd})}}),m.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c=/Y/.test(b);m.fn[a]=function(d){return V(this,function(a,d,e){var f=dd(a);return void 0===e?f?b in f?f[b]:f.document.documentElement[d]:a[d]:void(f?f.scrollTo(c?m(f).scrollLeft():e,c?e:m(f).scrollTop()):a[d]=e)},a,d,arguments.length,null)}}),m.each(["top","left"],function(a,b){m.cssHooks[b]=Lb(k.pixelPosition,function(a,c){return c?(c=Jb(a,b),Hb.test(c)?m(a).position()[b]+"px":c):void 0})}),m.each({Height:"height",Width:"width"},function(a,b){m.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){m.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return V(this,function(b,c,d){var e;return m.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?m.css(b,c,g):m.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),m.fn.size=function(){return this.length},m.fn.andSelf=m.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return m});var ed=a.jQuery,fd=a.$;return m.noConflict=function(b){return a.$===m&&(a.$=fd),b&&a.jQuery===m&&(a.jQuery=ed),m},typeof b===K&&(a.jQuery=a.$=m),m});
diff --git a/applications/welcome/static/js/web2py.js b/applications/welcome/static/js/web2py.js
index e11610cb..4219060f 100644
--- a/applications/welcome/static/js/web2py.js
+++ b/applications/welcome/static/js/web2py.js
@@ -545,8 +545,11 @@
};
$('[data-show-trigger]', target).each(function () {
var name = $(this).attr('data-show-trigger');
- if(!triggers[name]) triggers[name] = [];
- triggers[name].push($(this).attr('id'));
+ // The field exists only when creating/editing a row
+ if ($('#' + name).length) {
+ if(!triggers[name]) triggers[name] = [];
+ triggers[name].push($(this).attr('id'));
+ }
});
for(var name in triggers) {
$('#' + name, target).change(show_if).keyup(show_if);
diff --git a/gluon/cache.py b/gluon/cache.py
index 9c85a962..8cbe14b2 100644
--- a/gluon/cache.py
+++ b/gluon/cache.py
@@ -1,628 +1,660 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-| This file is part of the web2py Web Framework
-| Copyrighted by Massimo Di Pierro
-| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
-
-Basic caching classes and methods
----------------------------------
-
-- Cache - The generic caching object interfacing with the others
-- CacheInRam - providing caching in ram
-- CacheOnDisk - provides caches on disk
-
-Memcache is also available via a different module (see gluon.contrib.memcache)
-
-When web2py is running on Google App Engine,
-caching will be provided by the GAE memcache
-(see gluon.contrib.gae_memcache)
-"""
-import time
-import portalocker
-import shelve
-import thread
-import os
-import logging
-import re
-import hashlib
-import datetime
-try:
- from gluon import settings
- have_settings = True
-except ImportError:
- have_settings = False
-
-logger = logging.getLogger("web2py.cache")
-
-__all__ = ['Cache', 'lazy_cache']
-
-
-DEFAULT_TIME_EXPIRE = 300
-
-
-
-class CacheAbstract(object):
- """
- Abstract class for cache implementations.
- Main function just provides referenced api documentation.
-
- Use CacheInRam or CacheOnDisk instead which are derived from this class.
-
- Note:
- Michele says: there are signatures inside gdbm files that are used
- directly by the python gdbm adapter that often are lagging behind in the
- detection code in python part.
- On every occasion that a gdbm store is probed by the python adapter,
- the probe fails, because gdbm file version is newer.
- Using gdbm directly from C would work, because there is backward
- compatibility, but not from python!
- The .shelve file is discarded and a new one created (with new
- signature) and it works until it is probed again...
- The possible consequences are memory leaks and broken sessions.
- """
-
- cache_stats_name = 'web2py_cache_statistics'
-
- def __init__(self, request=None):
- """Initializes the object
-
- Args:
- request: the global request object
- """
- raise NotImplementedError
-
- def __call__(self, key, f,
- time_expire=DEFAULT_TIME_EXPIRE):
- """
- Tries to retrieve the value corresponding to `key` from the cache if the
- object exists and if it did not expire, else it calls the function `f`
- and stores the output in the cache corresponding to `key`. It always
- returns the function that is returned.
-
- Args:
- key(str): the key of the object to be stored or retrieved
- f(function): the function whose output is to be cached.
-
- If `f` is `None` the cache is cleared.
- time_expire(int): expiration of the cache in seconds.
-
- It's used to compare the current time with the time
- when the requested object was last saved in cache. It does not
- affect future requests. Setting `time_expire` to 0 or negative
- value forces the cache to refresh.
- """
- raise NotImplementedError
-
- def clear(self, regex=None):
- """
- Clears the cache of all keys that match the provided regular expression.
- If no regular expression is provided, it clears all entries in cache.
-
- Args:
- regex: if provided, only keys matching the regex will be cleared,
- otherwise all keys are cleared.
- """
-
- raise NotImplementedError
-
- def increment(self, key, value=1):
- """
- Increments the cached value for the given key by the amount in value
-
- Args:
- key(str): key for the cached object to be incremeneted
- value(int): amount of the increment (defaults to 1, can be negative)
- """
- raise NotImplementedError
-
- def _clear(self, storage, regex):
- """
- Auxiliary function called by `clear` to search and clear cache entries
- """
- r = re.compile(regex)
- for (key, value) in storage.items():
- if r.match(str(key)):
- del storage[key]
- break
-
-
-class CacheInRam(CacheAbstract):
- """
- Ram based caching
-
- This is implemented as global (per process, shared by all threads)
- dictionary.
- A mutex-lock mechanism avoid conflicts.
- """
-
- locker = thread.allocate_lock()
- meta_storage = {}
-
- def __init__(self, request=None):
- self.initialized = False
- self.request = request
- self.storage = {}
-
- def initialize(self):
- if self.initialized:
- return
- else:
- self.initialized = True
- self.locker.acquire()
- request = self.request
- if request:
- app = request.application
- else:
- app = ''
- if not app in self.meta_storage:
- self.storage = self.meta_storage[app] = {
- CacheAbstract.cache_stats_name: {'hit_total': 0, 'misses': 0}}
- else:
- self.storage = self.meta_storage[app]
- self.locker.release()
-
- def clear(self, regex=None):
- self.initialize()
- self.locker.acquire()
- storage = self.storage
- if regex is None:
- storage.clear()
- else:
- self._clear(storage, regex)
-
- if not CacheAbstract.cache_stats_name in storage.keys():
- storage[CacheAbstract.cache_stats_name] = {
- 'hit_total': 0, 'misses': 0}
-
- self.locker.release()
-
- def __call__(self, key, f,
- time_expire=DEFAULT_TIME_EXPIRE,
- destroyer=None):
- """
- Attention! cache.ram does not copy the cached object.
- It just stores a reference to it. Turns out the deepcopying the object
- has some problems:
-
- - would break backward compatibility
- - would be limiting because people may want to cache live objects
- - would work unless we deepcopy no storage and retrival which would make
- things slow.
-
- Anyway. You can deepcopy explicitly in the function generating the value
- to be cached.
- """
- self.initialize()
-
- dt = time_expire
- now = time.time()
-
- self.locker.acquire()
- item = self.storage.get(key, None)
- if item and f is None:
- del self.storage[key]
- if destroyer:
- destroyer(item[1])
- self.storage[CacheAbstract.cache_stats_name]['hit_total'] += 1
- self.locker.release()
-
- if f is None:
- return None
- if item and (dt is None or item[0] > now - dt):
- return item[1]
- elif item and (item[0] < now - dt) and destroyer:
- destroyer(item[1])
- value = f()
-
- self.locker.acquire()
- self.storage[key] = (now, value)
- self.storage[CacheAbstract.cache_stats_name]['misses'] += 1
- self.locker.release()
- return value
-
- def increment(self, key, value=1):
- self.initialize()
- self.locker.acquire()
- try:
- if key in self.storage:
- value = self.storage[key][1] + value
- self.storage[key] = (time.time(), value)
- except BaseException, e:
- self.locker.release()
- raise e
- self.locker.release()
- return value
-
-
-class CacheOnDisk(CacheAbstract):
- """
- Disk based cache
-
- This is implemented as a shelve object and it is shared by multiple web2py
- processes (and threads) as long as they share the same filesystem.
- The file is locked when accessed.
-
- Disk cache provides persistance when web2py is started/stopped but it slower
- than `CacheInRam`
-
- Values stored in disk cache must be pickable.
- """
-
- def _close_shelve_and_unlock(self):
- try:
- if self.storage:
- self.storage.close()
- except ValueError:
- pass
- finally:
- self.storage = None
- if self.locker and self.locked:
- portalocker.unlock(self.locker)
- self.locker.close()
- self.locked = False
-
- def _open_shelve_and_lock(self):
- """Open and return a shelf object, obtaining an exclusive lock
- on self.locker first. Replaces the close method of the
- returned shelf instance with one that releases the lock upon
- closing."""
-
- storage = None
- locker = None
- locked = False
- try:
- locker = locker = open(self.locker_name, 'a')
- portalocker.lock(locker, portalocker.LOCK_EX)
- locked = True
- try:
- storage = shelve.open(self.shelve_name)
- except:
- logger.error('corrupted cache file %s, will try rebuild it'
- % self.shelve_name)
- storage = None
- if storage is None:
- if os.path.exists(self.shelve_name):
- os.unlink(self.shelve_name)
- storage = shelve.open(self.shelve_name)
- if not CacheAbstract.cache_stats_name in storage.keys():
- storage[CacheAbstract.cache_stats_name] = {
- 'hit_total': 0, 'misses': 0}
- storage.sync()
- except Exception, e:
- if storage:
- storage.close()
- storage = None
- if locked:
- portalocker.unlock(locker)
- locker.close()
- locked = False
- raise RuntimeError(
- 'unable to create/re-create cache file %s' % self.shelve_name)
- self.locker = locker
- self.locked = locked
- self.storage = storage
- return storage
-
- def __init__(self, request=None, folder=None):
- self.initialized = False
- self.request = request
- self.folder = folder
- self.storage = {}
-
- def initialize(self):
- if self.initialized:
- return
- else:
- self.initialized = True
- folder = self.folder
- request = self.request
-
- # Lets test if the cache folder exists, if not
- # we are going to create it
- folder = folder or os.path.join(request.folder, 'cache')
-
- if not os.path.exists(folder):
- os.mkdir(folder)
-
- ### we need this because of a possible bug in shelve that may
- ### or may not lock
- self.locker_name = os.path.join(folder, 'cache.lock')
- self.shelve_name = os.path.join(folder, 'cache.shelve')
-
- def clear(self, regex=None):
- self.initialize()
- storage = self._open_shelve_and_lock()
- try:
- if regex is None:
- storage.clear()
- else:
- self._clear(storage, regex)
- storage.sync()
- finally:
- self._close_shelve_and_unlock()
-
- def __call__(self, key, f,
- time_expire=DEFAULT_TIME_EXPIRE):
- self.initialize()
- dt = time_expire
- storage = self._open_shelve_and_lock()
- try:
- item = storage.get(key, None)
- storage[CacheAbstract.cache_stats_name]['hit_total'] += 1
- if item and f is None:
- del storage[key]
- storage.sync()
- now = time.time()
- if f is None:
- value = None
- elif item and (dt is None or item[0] > now - dt):
- value = item[1]
- else:
- value = f()
- storage[key] = (now, value)
- storage[CacheAbstract.cache_stats_name]['misses'] += 1
- storage.sync()
- finally:
- self._close_shelve_and_unlock()
-
- return value
-
- def increment(self, key, value=1):
- self.initialize()
- storage = self._open_shelve_and_lock()
- try:
- if key in storage:
- value = storage[key][1] + value
- storage[key] = (time.time(), value)
- storage.sync()
- finally:
- self._close_shelve_and_unlock()
- return value
-
-class CacheAction(object):
- def __init__(self, func, key, time_expire, cache, cache_model):
- self.__name__ = func.__name__
- self.__doc__ = func.__doc__
- self.func = func
- self.key = key
- self.time_expire = time_expire
- self.cache = cache
- self.cache_model = cache_model
-
- def __call__(self, *a, **b):
- if not self.key:
- key2 = self.__name__ + ':' + repr(a) + ':' + repr(b)
- else:
- key2 = self.key.replace('%(name)s', self.__name__)\
- .replace('%(args)s', str(a)).replace('%(vars)s', str(b))
- cache_model = self.cache_model
- if not cache_model or isinstance(cache_model, str):
- cache_model = getattr(self.cache, cache_model or 'ram')
- return cache_model(key2,
- lambda a=a, b=b: self.func(*a, **b),
- self.time_expire)
-
-
-class Cache(object):
- """
- Sets up generic caching, creating an instance of both CacheInRam and
- CacheOnDisk.
- In case of GAE will make use of gluon.contrib.gae_memcache.
-
- - self.ram is an instance of CacheInRam
- - self.disk is an instance of CacheOnDisk
- """
-
- autokey = ':%(name)s:%(args)s:%(vars)s'
-
- def __init__(self, request):
- """
- Args:
- request: the global request object
- """
- # GAE will have a special caching
- if have_settings and settings.global_settings.web2py_runtime_gae:
- from gluon.contrib.gae_memcache import MemcacheClient
- self.ram = self.disk = MemcacheClient(request)
- else:
- # Otherwise use ram (and try also disk)
- self.ram = CacheInRam(request)
- try:
- self.disk = CacheOnDisk(request)
- except IOError:
- logger.warning('no cache.disk (IOError)')
- except AttributeError:
- # normally not expected anymore, as GAE has already
- # been accounted for
- logger.warning('no cache.disk (AttributeError)')
-
- def action(self, time_expire=DEFAULT_TIME_EXPIRE, cache_model=None,
- prefix=None, session=False, vars=True, lang=True,
- user_agent=False, public=True, valid_statuses=None,
- quick=None):
- """Better fit for caching an action
-
- Warning:
- Experimental!
-
- Currently only HTTP 1.1 compliant
- reference : http://code.google.com/p/doctype-mirror/wiki/ArticleHttpCaching
-
- Args:
- time_expire(int): same as @cache
- cache_model(str): same as @cache
- prefix(str): add a prefix to the calculated key
- session(bool): adds response.session_id to the key
- vars(bool): adds request.env.query_string
- lang(bool): adds T.accepted_language
- user_agent(bool or dict): if True, adds is_mobile and is_tablet to the key.
- Pass a dict to use all the needed values (uses str(.items()))
- (e.g. user_agent=request.user_agent()). Used only if session is
- not True
- public(bool): if False forces the Cache-Control to be 'private'
- valid_statuses: by default only status codes starting with 1,2,3 will be cached.
- pass an explicit list of statuses on which turn the cache on
- quick: Session,Vars,Lang,User-agent,Public:
- fast overrides with initials, e.g. 'SVLP' or 'VLP', or 'VLP'
- """
- from gluon import current
- from gluon.http import HTTP
- def wrap(func):
- def wrapped_f():
- if current.request.env.request_method != 'GET':
- return func()
- if time_expire:
- cache_control = 'max-age=%(time_expire)s, s-maxage=%(time_expire)s' % dict(time_expire=time_expire)
- if quick:
- session_ = True if 'S' in quick else False
- vars_ = True if 'V' in quick else False
- lang_ = True if 'L' in quick else False
- user_agent_ = True if 'U' in quick else False
- public_ = True if 'P' in quick else False
- else:
- session_, vars_, lang_, user_agent_, public_ = session, vars, lang, user_agent, public
- if not session_ and public_:
- cache_control += ', public'
- expires = (current.request.utcnow + datetime.timedelta(seconds=time_expire)).strftime('%a, %d %b %Y %H:%M:%S GMT')
- else:
- cache_control += ', private'
- expires = 'Fri, 01 Jan 1990 00:00:00 GMT'
- if cache_model:
- #figure out the correct cache key
- cache_key = [current.request.env.path_info, current.response.view]
- if session_:
- cache_key.append(current.response.session_id)
- elif user_agent_:
- if user_agent_ is True:
- cache_key.append("%(is_mobile)s_%(is_tablet)s" % current.request.user_agent())
- else:
- cache_key.append(str(user_agent_.items()))
- if vars_:
- cache_key.append(current.request.env.query_string)
- if lang_:
- cache_key.append(current.T.accepted_language)
- cache_key = hashlib.md5('__'.join(cache_key)).hexdigest()
- if prefix:
- cache_key = prefix + cache_key
- try:
- #action returns something
- rtn = cache_model(cache_key, lambda : func(), time_expire=time_expire)
- http, status = None, current.response.status
- except HTTP, e:
- #action raises HTTP (can still be valid)
- rtn = cache_model(cache_key, lambda : e.body, time_expire=time_expire)
- http, status = HTTP(e.status, rtn, **e.headers), e.status
- else:
- #action raised a generic exception
- http = None
- else:
- #no server-cache side involved
- try:
- #action returns something
- rtn = func()
- http, status = None, current.response.status
- except HTTP, e:
- #action raises HTTP (can still be valid)
- status = e.status
- http = HTTP(e.status, e.body, **e.headers)
- else:
- #action raised a generic exception
- http = None
- send_headers = False
- if http and isinstance(valid_statuses, list):
- if status in valid_statuses:
- send_headers = True
- elif valid_statuses is None:
- if str(status)[0] in '123':
- send_headers = True
- if send_headers:
- headers = {
- 'Pragma' : None,
- 'Expires' : expires,
- 'Cache-Control' : cache_control
- }
- current.response.headers.update(headers)
- if cache_model and not send_headers:
- #we cached already the value, but the status is not valid
- #so we need to delete the cached value
- cache_model(cache_key, None)
- if http:
- if send_headers:
- http.headers.update(current.response.headers)
- raise http
- return rtn
- wrapped_f.__name__ = func.__name__
- wrapped_f.__doc__ = func.__doc__
- return wrapped_f
- return wrap
-
- def __call__(self,
- key=None,
- time_expire=DEFAULT_TIME_EXPIRE,
- cache_model=None):
- """
- Decorator function that can be used to cache any function/method.
-
- Args:
- key(str) : the key of the object to be store or retrieved
- time_expire(int) : expiration of the cache in seconds
- `time_expire` is used to compare the current time with the time
- when the requested object was last saved in cache.
- It does not affect future requests.
- Setting `time_expire` to 0 or negative value forces the cache to
- refresh.
- cache_model(str): can be "ram", "disk" or other (like "memcache").
- Defaults to "ram"
-
- When the function `f` is called, web2py tries to retrieve
- the value corresponding to `key` from the cache if the
- object exists and if it did not expire, else it calles the function `f`
- and stores the output in the cache corresponding to `key`. In the case
- the output of the function is returned.
-
- Example: ::
-
- @cache('key', 5000, cache.ram)
- def f():
- return time.ctime()
-
- Note:
- If the function `f` is an action, we suggest using
- @cache.action instead
- """
-
- def tmp(func, cache=self, cache_model=cache_model):
- return CacheAction(func, key, time_expire, self, cache_model)
- return tmp
-
- @staticmethod
- def with_prefix(cache_model, prefix):
- """
- allow replacing cache.ram with cache.with_prefix(cache.ram,'prefix')
- it will add prefix to all the cache keys used.
- """
- return lambda key, f, time_expire=DEFAULT_TIME_EXPIRE, prefix=prefix:\
- cache_model(prefix + key, f, time_expire)
-
-
-def lazy_cache(key=None, time_expire=None, cache_model='ram'):
- """
- Can be used to cache any function including ones in modules,
- as long as the cached function is only called within a web2py request
-
- If a key is not provided, one is generated from the function name
- `time_expire` defaults to None (no cache expiration)
-
- If cache_model is "ram" then the model is current.cache.ram, etc.
- """
- def decorator(f, key=key, time_expire=time_expire, cache_model=cache_model):
- key = key or repr(f)
-
- def g(*c, **d):
- from gluon import current
- return current.cache(key, time_expire, cache_model)(f)(*c, **d)
- g.__name__ = f.__name__
- return g
- return decorator
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+| This file is part of the web2py Web Framework
+| Copyrighted by Massimo Di Pierro
+| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
+
+Basic caching classes and methods
+---------------------------------
+
+- Cache - The generic caching object interfacing with the others
+- CacheInRam - providing caching in ram
+- CacheOnDisk - provides caches on disk
+
+Memcache is also available via a different module (see gluon.contrib.memcache)
+
+When web2py is running on Google App Engine,
+caching will be provided by the GAE memcache
+(see gluon.contrib.gae_memcache)
+"""
+import time
+import thread
+import os
+import sys
+import logging
+import re
+import hashlib
+import datetime
+import tempfile
+from gluon import recfile
+try:
+ from gluon import settings
+ have_settings = True
+except ImportError:
+ have_settings = False
+
+try:
+ import cPickle as pickle
+except:
+ import pickle
+
+logger = logging.getLogger("web2py.cache")
+
+__all__ = ['Cache', 'lazy_cache']
+
+
+DEFAULT_TIME_EXPIRE = 300
+
+
+
+class CacheAbstract(object):
+ """
+ Abstract class for cache implementations.
+    The main functions just provide reference API documentation.
+
+ Use CacheInRam or CacheOnDisk instead which are derived from this class.
+
+ Note:
+        Michele says: there are signatures inside gdbm files that are used
+        directly by the python gdbm adapter, and the detection code in the
+        Python part often lags behind them.
+ On every occasion that a gdbm store is probed by the python adapter,
+ the probe fails, because gdbm file version is newer.
+ Using gdbm directly from C would work, because there is backward
+ compatibility, but not from python!
+ The .shelve file is discarded and a new one created (with new
+ signature) and it works until it is probed again...
+ The possible consequences are memory leaks and broken sessions.
+ """
+
+ cache_stats_name = 'web2py_cache_statistics'
+
+ def __init__(self, request=None):
+ """Initializes the object
+
+ Args:
+ request: the global request object
+ """
+ raise NotImplementedError
+
+ def __call__(self, key, f,
+ time_expire=DEFAULT_TIME_EXPIRE):
+ """
+ Tries to retrieve the value corresponding to `key` from the cache if the
+ object exists and if it did not expire, else it calls the function `f`
+ and stores the output in the cache corresponding to `key`. It always
+        returns the resulting value.
+
+ Args:
+ key(str): the key of the object to be stored or retrieved
+ f(function): the function whose output is to be cached.
+
+ If `f` is `None` the cache is cleared.
+ time_expire(int): expiration of the cache in seconds.
+
+ It's used to compare the current time with the time
+ when the requested object was last saved in cache. It does not
+ affect future requests. Setting `time_expire` to 0 or negative
+ value forces the cache to refresh.
+ """
+ raise NotImplementedError
+
+ def clear(self, regex=None):
+ """
+ Clears the cache of all keys that match the provided regular expression.
+ If no regular expression is provided, it clears all entries in cache.
+
+ Args:
+ regex: if provided, only keys matching the regex will be cleared,
+ otherwise all keys are cleared.
+ """
+
+ raise NotImplementedError
+
+ def increment(self, key, value=1):
+ """
+ Increments the cached value for the given key by the amount in value
+
+ Args:
+            key(str): key for the cached object to be incremented
+ value(int): amount of the increment (defaults to 1, can be negative)
+ """
+ raise NotImplementedError
+
+ def _clear(self, storage, regex):
+ """
+ Auxiliary function called by `clear` to search and clear cache entries
+ """
+ r = re.compile(regex)
+ for key in storage:
+ if r.match(str(key)):
+ del storage[key]
+ break
+
+
+class CacheInRam(CacheAbstract):
+ """
+ Ram based caching
+
+ This is implemented as global (per process, shared by all threads)
+ dictionary.
+    A mutex-lock mechanism avoids conflicts.
+ """
+
+ locker = thread.allocate_lock()
+ meta_storage = {}
+
+ def __init__(self, request=None):
+ self.initialized = False
+ self.request = request
+ self.storage = {}
+
+ def initialize(self):
+ if self.initialized:
+ return
+ else:
+ self.initialized = True
+ self.locker.acquire()
+ request = self.request
+ if request:
+ app = request.application
+ else:
+ app = ''
+ if not app in self.meta_storage:
+ self.storage = self.meta_storage[app] = {
+ CacheAbstract.cache_stats_name: {'hit_total': 0, 'misses': 0}}
+ else:
+ self.storage = self.meta_storage[app]
+ self.locker.release()
+
+ def clear(self, regex=None):
+ self.initialize()
+ self.locker.acquire()
+ storage = self.storage
+ if regex is None:
+ storage.clear()
+ else:
+ self._clear(storage, regex)
+
+ if not CacheAbstract.cache_stats_name in storage.keys():
+ storage[CacheAbstract.cache_stats_name] = {
+ 'hit_total': 0, 'misses': 0}
+
+ self.locker.release()
+
+ def __call__(self, key, f,
+ time_expire=DEFAULT_TIME_EXPIRE,
+ destroyer=None):
+ """
+ Attention! cache.ram does not copy the cached object.
+        It just stores a reference to it. It turns out that deepcopying the
+        object has some problems:
+
+ - would break backward compatibility
+ - would be limiting because people may want to cache live objects
+        - would only work if we deepcopied on both storage and retrieval,
+          which would make things slow.
+
+ Anyway. You can deepcopy explicitly in the function generating the value
+ to be cached.
+ """
+ self.initialize()
+
+ dt = time_expire
+ now = time.time()
+
+ self.locker.acquire()
+ item = self.storage.get(key, None)
+ if item and f is None:
+ del self.storage[key]
+ if destroyer:
+ destroyer(item[1])
+ self.storage[CacheAbstract.cache_stats_name]['hit_total'] += 1
+ self.locker.release()
+
+ if f is None:
+ return None
+ if item and (dt is None or item[0] > now - dt):
+ return item[1]
+ elif item and (item[0] < now - dt) and destroyer:
+ destroyer(item[1])
+ value = f()
+
+ self.locker.acquire()
+ self.storage[key] = (now, value)
+ self.storage[CacheAbstract.cache_stats_name]['misses'] += 1
+ self.locker.release()
+ return value
+
+ def increment(self, key, value=1):
+ self.initialize()
+ self.locker.acquire()
+ try:
+ if key in self.storage:
+ value = self.storage[key][1] + value
+ self.storage[key] = (time.time(), value)
+ except BaseException, e:
+ self.locker.release()
+ raise e
+ self.locker.release()
+ return value
+
+
+class CacheOnDisk(CacheAbstract):
+ """
+ Disk based cache
+
+    This is implemented as a key/value store where each key maps to a single
+    file on disk that is atomically replaced when the value changes. It is
+    shared by multiple web2py processes (and threads) as long as they share
+    the same filesystem.
+
+    Disk cache provides persistence when web2py is started/stopped, but it is
+    slower than `CacheInRam`.
+
+    Values stored in the disk cache must be picklable.
+ """
+
+ class PersistentStorage(object):
+ """
+        Implements a key-based storage on disk.
+ """
+ def __init__(self, folder):
+ self.folder = folder
+ # Check the best way to do atomic file replacement.
+ if sys.version_info >= (3, 3):
+ self.replace = os.replace
+ elif sys.platform == "win32":
+ import ctypes
+ from ctypes import wintypes
+ ReplaceFile = ctypes.windll.kernel32.ReplaceFileW
+ ReplaceFile.restype = wintypes.BOOL
+ ReplaceFile.argtypes = [
+ wintypes.LPWSTR,
+ wintypes.LPWSTR,
+ wintypes.LPWSTR,
+ wintypes.DWORD,
+ wintypes.LPVOID,
+ wintypes.LPVOID,
+ ]
+
+ def replace_windows(src, dst):
+ if not ReplaceFile(dst, src, None, 0, 0, 0):
+ os.rename(src, dst)
+
+ self.replace = replace_windows
+ else:
+ # POSIX rename() is always atomic
+ self.replace = os.rename
+
+
+ def __setitem__(self, key, value):
+            # mkstemp returns an OS-level file descriptor and a path
+            tmp_fd, tmp_path = tempfile.mkstemp(dir=self.folder)
+            tmp = os.fdopen(tmp_fd, 'wb')
+ try:
+ pickle.dump((time.time(), value), tmp, pickle.HIGHEST_PROTOCOL)
+ finally:
+ tmp.close()
+ fullfilename = os.path.join(self.folder, recfile.generate(key))
+ if not os.path.exists(os.path.dirname(fullfilename)):
+ os.makedirs(os.path.dirname(fullfilename))
+ self.replace(tmp_path, fullfilename)
+
+
+ def __getitem__(self, key):
+ if recfile.exists(key, path=self.folder):
+ timestamp, value = pickle.load(recfile.open(key, 'rb', path=self.folder))
+ return value
+ else:
+ raise KeyError
+
+ def __contains__(self, key):
+ return recfile.exists(key, path=self.folder)
+
+
+ def __delitem__(self, key):
+ recfile.remove(key, path=self.folder)
+
+
+ def __iter__(self):
+ for dirpath, dirnames, filenames in os.walk(self.folder):
+ for filename in filenames:
+ yield filename
+
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+
+ def clear(self):
+ for key in self:
+ del self[key]
+
+ def __init__(self, request=None, folder=None):
+ self.initialized = False
+ self.request = request
+ self.folder = folder
+ self.storage = None
+
+
+ def initialize(self):
+ if self.initialized:
+ return
+ else:
+ self.initialized = True
+
+ folder = self.folder
+ request = self.request
+
+        # Check if the cache folder exists; if not, create it
+ folder = os.path.join(folder or request.folder, 'cache')
+
+ if not os.path.exists(folder):
+ os.mkdir(folder)
+
+ self.storage = CacheOnDisk.PersistentStorage(folder)
+
+ if not CacheAbstract.cache_stats_name in self.storage:
+ self.storage[CacheAbstract.cache_stats_name] = {'hit_total': 0, 'misses': 0}
+
+
+ def __call__(self, key, f,
+ time_expire=DEFAULT_TIME_EXPIRE):
+ self.initialize()
+
+ dt = time_expire
+ item = self.storage.get(key)
+ self.storage[CacheAbstract.cache_stats_name]['hit_total'] += 1
+
+ if item and f is None:
+ del self.storage[key]
+
+ if f is None:
+ return None
+
+ now = time.time()
+
+ if item and ((dt is None) or (item[0] > now - dt)):
+ value = item[1]
+ else:
+ value = f()
+ self.storage[key] = (now, value)
+ self.storage[CacheAbstract.cache_stats_name]['misses'] += 1
+
+ return value
+
+ def clear(self, regex=None):
+ self.initialize()
+ storage = self.storage
+ if regex is None:
+ storage.clear()
+ else:
+ self._clear(storage, regex)
+
+ if not CacheAbstract.cache_stats_name in storage:
+ storage[CacheAbstract.cache_stats_name] = {
+ 'hit_total': 0, 'misses': 0}
+
+
+ def increment(self, key, value=1):
+ self.initialize()
+ storage = self.storage
+        try:
+            if key in storage:
+                value = storage[key][1] + value
+            storage[key] = (time.time(), value)
+        except Exception:
+            pass
+ return value
+
+
+
+class CacheAction(object):
+ def __init__(self, func, key, time_expire, cache, cache_model):
+ self.__name__ = func.__name__
+ self.__doc__ = func.__doc__
+ self.func = func
+ self.key = key
+ self.time_expire = time_expire
+ self.cache = cache
+ self.cache_model = cache_model
+
+ def __call__(self, *a, **b):
+ if not self.key:
+ key2 = self.__name__ + ':' + repr(a) + ':' + repr(b)
+ else:
+ key2 = self.key.replace('%(name)s', self.__name__)\
+ .replace('%(args)s', str(a)).replace('%(vars)s', str(b))
+ cache_model = self.cache_model
+ if not cache_model or isinstance(cache_model, str):
+ cache_model = getattr(self.cache, cache_model or 'ram')
+ return cache_model(key2,
+ lambda a=a, b=b: self.func(*a, **b),
+ self.time_expire)
+
+
+class Cache(object):
+ """
+ Sets up generic caching, creating an instance of both CacheInRam and
+ CacheOnDisk.
+ In case of GAE will make use of gluon.contrib.gae_memcache.
+
+ - self.ram is an instance of CacheInRam
+ - self.disk is an instance of CacheOnDisk
+ """
+
+ autokey = ':%(name)s:%(args)s:%(vars)s'
+
+ def __init__(self, request):
+ """
+ Args:
+ request: the global request object
+ """
+ # GAE will have a special caching
+ if have_settings and settings.global_settings.web2py_runtime_gae:
+ from gluon.contrib.gae_memcache import MemcacheClient
+ self.ram = self.disk = MemcacheClient(request)
+ else:
+ # Otherwise use ram (and try also disk)
+ self.ram = CacheInRam(request)
+ try:
+ self.disk = CacheOnDisk(request)
+ except IOError:
+ logger.warning('no cache.disk (IOError)')
+ except AttributeError:
+ # normally not expected anymore, as GAE has already
+ # been accounted for
+ logger.warning('no cache.disk (AttributeError)')
+
+ def action(self, time_expire=DEFAULT_TIME_EXPIRE, cache_model=None,
+ prefix=None, session=False, vars=True, lang=True,
+ user_agent=False, public=True, valid_statuses=None,
+ quick=None):
+ """Better fit for caching an action
+
+ Warning:
+ Experimental!
+
+ Currently only HTTP 1.1 compliant
+ reference : http://code.google.com/p/doctype-mirror/wiki/ArticleHttpCaching
+
+ Args:
+ time_expire(int): same as @cache
+ cache_model(str): same as @cache
+ prefix(str): add a prefix to the calculated key
+ session(bool): adds response.session_id to the key
+ vars(bool): adds request.env.query_string
+ lang(bool): adds T.accepted_language
+ user_agent(bool or dict): if True, adds is_mobile and is_tablet to the key.
+ Pass a dict to use all the needed values (uses str(.items()))
+ (e.g. user_agent=request.user_agent()). Used only if session is
+ not True
+ public(bool): if False forces the Cache-Control to be 'private'
+            valid_statuses: by default only status codes starting with 1, 2 or 3
+                are cached. Pass an explicit list of statuses for which the
+                cache should be turned on
+            quick: fast override for session, vars, lang, user_agent and public,
+                given as a string of initials, e.g. 'SVLP' or 'VLP'
+ """
+ from gluon import current
+ from gluon.http import HTTP
+ def wrap(func):
+ def wrapped_f():
+ if current.request.env.request_method != 'GET':
+ return func()
+ if time_expire:
+ cache_control = 'max-age=%(time_expire)s, s-maxage=%(time_expire)s' % dict(time_expire=time_expire)
+ if quick:
+ session_ = True if 'S' in quick else False
+ vars_ = True if 'V' in quick else False
+ lang_ = True if 'L' in quick else False
+ user_agent_ = True if 'U' in quick else False
+ public_ = True if 'P' in quick else False
+ else:
+ session_, vars_, lang_, user_agent_, public_ = session, vars, lang, user_agent, public
+ if not session_ and public_:
+ cache_control += ', public'
+ expires = (current.request.utcnow + datetime.timedelta(seconds=time_expire)).strftime('%a, %d %b %Y %H:%M:%S GMT')
+ else:
+ cache_control += ', private'
+ expires = 'Fri, 01 Jan 1990 00:00:00 GMT'
+ if cache_model:
+ #figure out the correct cache key
+ cache_key = [current.request.env.path_info, current.response.view]
+ if session_:
+ cache_key.append(current.response.session_id)
+ elif user_agent_:
+ if user_agent_ is True:
+ cache_key.append("%(is_mobile)s_%(is_tablet)s" % current.request.user_agent())
+ else:
+ cache_key.append(str(user_agent_.items()))
+ if vars_:
+ cache_key.append(current.request.env.query_string)
+ if lang_:
+ cache_key.append(current.T.accepted_language)
+ cache_key = hashlib.md5('__'.join(cache_key)).hexdigest()
+ if prefix:
+ cache_key = prefix + cache_key
+ try:
+ #action returns something
+ rtn = cache_model(cache_key, lambda : func(), time_expire=time_expire)
+ http, status = None, current.response.status
+ except HTTP, e:
+ #action raises HTTP (can still be valid)
+ rtn = cache_model(cache_key, lambda : e.body, time_expire=time_expire)
+ http, status = HTTP(e.status, rtn, **e.headers), e.status
+ else:
+ #action raised a generic exception
+ http = None
+ else:
+ #no server-cache side involved
+ try:
+ #action returns something
+ rtn = func()
+ http, status = None, current.response.status
+ except HTTP, e:
+ #action raises HTTP (can still be valid)
+ status = e.status
+ http = HTTP(e.status, e.body, **e.headers)
+ else:
+ #action raised a generic exception
+ http = None
+ send_headers = False
+ if http and isinstance(valid_statuses, list):
+ if status in valid_statuses:
+ send_headers = True
+ elif valid_statuses is None:
+ if str(status)[0] in '123':
+ send_headers = True
+ if send_headers:
+ headers = {
+ 'Pragma' : None,
+ 'Expires' : expires,
+ 'Cache-Control' : cache_control
+ }
+ current.response.headers.update(headers)
+ if cache_model and not send_headers:
+ #we cached already the value, but the status is not valid
+ #so we need to delete the cached value
+ cache_model(cache_key, None)
+ if http:
+ if send_headers:
+ http.headers.update(current.response.headers)
+ raise http
+ return rtn
+ wrapped_f.__name__ = func.__name__
+ wrapped_f.__doc__ = func.__doc__
+ return wrapped_f
+ return wrap
+
+ def __call__(self,
+ key=None,
+ time_expire=DEFAULT_TIME_EXPIRE,
+ cache_model=None):
+ """
+ Decorator function that can be used to cache any function/method.
+
+ Args:
+            key(str) : the key of the object to be stored or retrieved
+ time_expire(int) : expiration of the cache in seconds
+ `time_expire` is used to compare the current time with the time
+ when the requested object was last saved in cache.
+ It does not affect future requests.
+ Setting `time_expire` to 0 or negative value forces the cache to
+ refresh.
+ cache_model(str): can be "ram", "disk" or other (like "memcache").
+ Defaults to "ram"
+
+ When the function `f` is called, web2py tries to retrieve
+ the value corresponding to `key` from the cache if the
+        object exists and if it did not expire, else it calls the function `f`
+        and stores the output in the cache corresponding to `key`. In either
+        case, the output of the function is returned.
+
+ Example: ::
+
+ @cache('key', 5000, cache.ram)
+ def f():
+ return time.ctime()
+
+ Note:
+ If the function `f` is an action, we suggest using
+ @cache.action instead
+ """
+
+ def tmp(func, cache=self, cache_model=cache_model):
+ return CacheAction(func, key, time_expire, self, cache_model)
+ return tmp
+
+ @staticmethod
+ def with_prefix(cache_model, prefix):
+ """
+        Allows replacing cache.ram with cache.with_prefix(cache.ram, 'prefix');
+        it will add the prefix to all the cache keys used.
+ """
+ return lambda key, f, time_expire=DEFAULT_TIME_EXPIRE, prefix=prefix:\
+ cache_model(prefix + key, f, time_expire)
+
+
+def lazy_cache(key=None, time_expire=None, cache_model='ram'):
+ """
+ Can be used to cache any function including ones in modules,
+ as long as the cached function is only called within a web2py request
+
+ If a key is not provided, one is generated from the function name
+ `time_expire` defaults to None (no cache expiration)
+
+ If cache_model is "ram" then the model is current.cache.ram, etc.
+ """
+ def decorator(f, key=key, time_expire=time_expire, cache_model=cache_model):
+ key = key or repr(f)
+
+ def g(*c, **d):
+ from gluon import current
+ return current.cache(key, time_expire, cache_model)(f)(*c, **d)
+ g.__name__ = f.__name__
+ return g
+ return decorator
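The heart of the new CacheOnDisk above is PersistentStorage's write path: pickle the (timestamp, value) pair to a temporary file in the cache folder, then atomically swap it into place so a concurrent reader never sees a half-written entry. A minimal standalone sketch of that pattern, assuming a plain filename rather than web2py's recfile key hashing (store_value and its arguments are illustrative, not web2py API):

    import os
    import pickle
    import tempfile
    import time

    def store_value(folder, filename, value):
        # Write the pickled (timestamp, value) pair to a temp file that
        # lives in the same folder (hence on the same filesystem).
        fd, tmp_path = tempfile.mkstemp(dir=folder)
        tmp = os.fdopen(fd, 'wb')
        try:
            pickle.dump((time.time(), value), tmp, pickle.HIGHEST_PROTOCOL)
        finally:
            tmp.close()
        # Atomic swap: os.rename is atomic on POSIX, and os.replace
        # (Python 3.3+) gives the same guarantee on Windows, which is why
        # the class above selects an implementation per platform.
        replace = getattr(os, 'replace', os.rename)
        replace(tmp_path, os.path.join(folder, filename))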
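For context, here is how the action decorator defined above is typically applied in a controller. The quick='VLP' shorthand is decoded in wrapped_f to vars + lang + public; the controller function is illustrative and relies on the cache object web2py injects, so this is a sketch rather than a standalone script:

    # In a web2py controller (cache is provided by the framework).
    @cache.action(time_expire=300, cache_model=cache.ram, quick='VLP')
    def index():
        # Cached per query string and accepted language, and served with
        # public Cache-Control/Expires headers for 300 seconds.
        return dict(message='hello world')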
diff --git a/gluon/compileapp.py b/gluon/compileapp.py
index a9c4b621..e12bb200 100644
--- a/gluon/compileapp.py
+++ b/gluon/compileapp.py
@@ -39,6 +39,7 @@ import marshal
import shutil
import imp
import logging
+import types
logger = logging.getLogger("web2py")
from gluon import rewrite
from custom_import import custom_import_install
@@ -212,7 +213,7 @@ def LOAD(c=None, f='index', args=None, vars=None,
request.env.path_info
other_request.cid = target
other_request.env.http_web2py_component_element = target
- other_request.restful = request.restful # Needed when you call LOAD() on a controller who has some actions decorates with @request.restful()
+    other_request.restful = types.MethodType(request.restful.im_func, other_request) # A bit nasty, but needed to use LOAD on actions decorated with @request.restful()
other_response.view = '%s/%s.%s' % (c, f, other_request.extension)
other_environment = copy.copy(current.globalenv) # NASTY
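The compileapp.py change rebinds the restful method to the freshly built request instead of reusing the method bound to the original request. Stripped of web2py specifics, the Python 2 idiom looks like this (im_func is spelled __func__ on Python 3; the class here is made up for illustration):

    import types

    class Request(object):
        def __init__(self, name):
            self.name = name
        def restful(self):
            return self.name

    outer = Request('outer')
    inner = Request('inner')

    # Copying outer.restful directly would keep it bound to `outer`.
    # Rebinding the underlying function targets `inner` instead.
    rebound = types.MethodType(outer.restful.im_func, inner)
    print(rebound())  # prints 'inner', not 'outer'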
diff --git a/gluon/contrib/generics.py b/gluon/contrib/generics.py
index e4fdd70b..abaa95f6 100644
--- a/gluon/contrib/generics.py
+++ b/gluon/contrib/generics.py
@@ -1,12 +1,8 @@
# fix response
-import re
import os
-import cPickle
-import gluon.serializers
from gluon import current, HTTP
from gluon.html import markmin_serializer, TAG, HTML, BODY, UL, XML, H1
-from gluon.contenttype import contenttype
from gluon.contrib.fpdf import FPDF, HTMLMixin
from gluon.sanitizer import sanitize
from gluon.contrib.markmin.markmin2latex import markmin2latex
diff --git a/gluon/contrib/markdown/markdown2.py b/gluon/contrib/markdown/markdown2.py
index 175d4864..c03eb111 100644
--- a/gluon/contrib/markdown/markdown2.py
+++ b/gluon/contrib/markdown/markdown2.py
@@ -1,7 +1,10 @@
#!/usr/bin/env python
+# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
+from __future__ import generators
+
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
@@ -30,22 +33,57 @@ Module usage:
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
-<http://code.google.com/p/python-markdown2/wiki/Extras>.
+<https://github.com/trentm/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
+
+Supported extra syntax options (see -x|--extras option below and
+see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
+
+* code-friendly: Disable _ and __ for em and strong.
+* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
+* fenced-code-blocks: Allows a code block to not have to be indented
+ by fencing it with '```' on a line before and after. Based on
+  <http://github.github.com/github-flavored-markdown/> with support for
+ syntax highlighting.
+* footnotes: Support footnotes as in use on daringfireball.net and
+ implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
+* header-ids: Adds "id" attributes to headers. The id value is a slug of
+ the header text.
+* html-classes: Takes a dict mapping html tag names (lowercase) to a
+ string to use for a "class" tag attribute. Currently only supports
+ "pre" and "code" tags. Add an issue if you require this for other tags.
+* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
+ have markdown processing be done on its contents. Similar to
+  <http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
+ some limitations.
+* metadata: Extract metadata from a leading '---'-fenced block.
+  See <https://github.com/trentm/python-markdown2/wiki/metadata> for details.
+* nofollow: Add `rel="nofollow"` to all `<a>` tags with an href. See
+  <http://en.wikipedia.org/wiki/Nofollow>.
+* pyshell: Treats unindented Python interactive shell sessions as
+  <code> blocks.
+* link-patterns: Auto-link given regex patterns in text (e.g. bug number
+ references, revision number references).
+* smarty-pants: Replaces ' and " with curly quotation marks or curly
+ apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
+ and ellipses.
+* toc: The returned HTML string gets a new "toc_html" attribute which is
+ a Table of Contents for the document. (experimental)
+* xml: Passes one-liner processing instructions and namespaced XML tags.
+* wiki-tables: Google Code Wiki-style tables. See
+  <http://code.google.com/p/python-markdown2/wiki/WikiTables>.
"""
# Dev Notes:
-# - There is already a Python markdown processor
-# (http://www.freewisdom.org/projects/python-markdown/).
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
# not yet sure if there implications with this. Compare 'pydoc sre'
# and 'perldoc perlre'.
-__version_info__ = (1, 0, 1, 16) # first three nums match Markdown.pl
-__version__ = '1.0.1.16'
+__version_info__ = (2, 2, 4)
+__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"
import os
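The extras listed in the docstring above are switched on through the extras argument of the module-level markdown() helper. A small usage sketch (the input text is arbitrary):

    import markdown2

    text = "## Heading\n\nA fenced block:\n\n```\nprint('hi')\n```\n"
    html = markdown2.markdown(text, extras=["fenced-code-blocks", "header-ids"])
    print(html)  # the heading gains an id slug; the fence needs no indenting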
@@ -60,22 +98,34 @@ except ImportError:
import optparse
from random import random, randint
import codecs
-from urllib import quote
-
#---- Python version compat
+try:
+ from urllib.parse import quote # python3
+except ImportError:
+ from urllib import quote # python2
+
if sys.version_info[:2] < (2,4):
from sets import Set as set
def reversed(sequence):
for i in sequence[::-1]:
yield i
- def _unicode_decode(s, encoding, errors='xmlcharrefreplace'):
- return unicode(s, encoding, errors)
-else:
- def _unicode_decode(s, encoding, errors='strict'):
- return s.decode(encoding, errors)
+
+# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
+if sys.version_info[0] <= 2:
+ py3 = False
+ try:
+ bytes
+ except NameError:
+ bytes = str
+ base_string_type = basestring
+elif sys.version_info[0] >= 3:
+ py3 = True
+ unicode = str
+ base_string_type = str
+
#---- globals
@@ -86,21 +136,13 @@ log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
-try:
- import uuid
-except ImportError:
- SECRET_SALT = str(randint(0, 1000000))
-else:
- SECRET_SALT = str(uuid.uuid4())
-def _hash_ascii(s):
- #return md5(s).hexdigest() # Markdown.pl effectively does this.
- return 'md5-' + md5(SECRET_SALT + s).hexdigest()
+SECRET_SALT = bytes(randint(0, 1000000))
def _hash_text(s):
return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
# Table of hash values for escaped characters:
-g_escape_table = dict([(ch, _hash_ascii(ch))
- for ch in '\\`*_{}[]()>#+-.!'])
+g_escape_table = dict([(ch, _hash_text(ch))
+ for ch in '\\`*_{}[]()>#+-.!'])
@@ -118,10 +160,8 @@ def markdown_path(path, encoding="utf-8",
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
fp = codecs.open(path, 'r', encoding)
- try:
- text = fp.read()
- finally:
- fp.close()
+ text = fp.read()
+ fp.close()
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
@@ -172,6 +212,7 @@ class Markdown(object):
else:
self.safe_mode = safe_mode
+ # Massaging and building the "extras" info.
if self.extras is None:
self.extras = {}
elif not isinstance(self.extras, dict):
@@ -181,11 +222,19 @@ class Markdown(object):
extras = dict([(e, None) for e in extras])
self.extras.update(extras)
assert isinstance(self.extras, dict)
+ if "toc" in self.extras and not "header-ids" in self.extras:
+ self.extras["header-ids"] = None # "toc" implies "header-ids"
self._instance_extras = self.extras.copy()
+
self.link_patterns = link_patterns
self.use_file_vars = use_file_vars
self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
+ self._escape_table = g_escape_table.copy()
+ if "smarty-pants" in self.extras:
+ self._escape_table['"'] = _hash_text('"')
+ self._escape_table["'"] = _hash_text("'")
+
def reset(self):
self.urls = {}
self.titles = {}
@@ -196,6 +245,14 @@ class Markdown(object):
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
+ if "header-ids" in self.extras:
+ self._count_from_header_id = {} # no `defaultdict` in Python 2.4
+ if "metadata" in self.extras:
+ self.metadata = {}
+
+    # Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
+    # should only be used in <a> tags with an "href" attribute.
+ _a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
def convert(self, text):
"""Convert the given text."""
@@ -245,12 +302,24 @@ class Markdown(object):
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
+ # strip metadata from head and extract
+ if "metadata" in self.extras:
+ text = self._extract_metadata(text)
+
+ text = self.preprocess(text)
+
+ if "fenced-code-blocks" in self.extras and not self.safe_mode:
+ text = self._do_fenced_code_blocks(text)
+
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
+ if "fenced-code-blocks" in self.extras and self.safe_mode:
+ text = self._do_fenced_code_blocks(text)
+
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
@@ -264,14 +333,64 @@ class Markdown(object):
if "footnotes" in self.extras:
text = self._add_footnotes(text)
+ text = self.postprocess(text)
+
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
+ if "nofollow" in self.extras:
+ text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
+
text += "\n"
+
+ rv = UnicodeWithAttrs(text)
+ if "toc" in self.extras:
+ rv._toc = self._toc
+ if "metadata" in self.extras:
+ rv.metadata = self.metadata
+ return rv
+
+ def postprocess(self, text):
+ """A hook for subclasses to do some postprocessing of the html, if
+ desired. This is called before unescaping of special chars and
+ unhashing of raw HTML spans.
+ """
return text
+ def preprocess(self, text):
+ """A hook for subclasses to do some preprocessing of the Markdown, if
+ desired. This is called after basic formatting of the text, but prior
+ to any extras, safe mode, etc. processing.
+ """
+ return text
+
+ # Is metadata if the content starts with '---'-fenced `key: value`
+ # pairs. E.g. (indented for presentation):
+ # ---
+ # foo: bar
+ # another-var: blah blah
+ # ---
+ _metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")
+
+ def _extract_metadata(self, text):
+ # fast test
+ if not text.startswith("---"):
+ return text
+ match = self._metadata_pat.match(text)
+ if not match:
+ return text
+
+ tail = text[len(match.group(0)):]
+ metadata_str = match.group(1).strip()
+ for line in metadata_str.split('\n'):
+ key, value = line.split(':', 1)
+ self.metadata[key.strip()] = value.strip()
+
+ return tail
+
+
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
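The _metadata_pat/_extract_metadata pair added above strips a leading '---'-fenced block of key: value lines before conversion and exposes it as the metadata attribute of the returned UnicodeWithAttrs string. A brief usage sketch:

    import markdown2

    doc = "---\ntitle: Release notes\nauthor: web2py\n---\n\n# Changes\n"
    html = markdown2.markdown(doc, extras=["metadata"])
    print(html.metadata)  # {'title': 'Release notes', 'author': 'web2py'} (key order may vary)
    print(html)           # only '# Changes' was converted to HTML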
@@ -382,7 +501,7 @@ class Markdown(object):
emacs_vars[variable] = value
# Unquote values.
- for var, val in emacs_vars.items():
+ for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith('"') and val.endswith('"')
or val.startswith('"') and val.endswith('"')):
emacs_vars[var] = val[1:-1]
@@ -414,7 +533,13 @@ class Markdown(object):
return text
return self._detab_re.subn(self._detab_sub, text)[0]
+ # I broke out the html5 tags here and add them to _block_tags_a and
+ # _block_tags_b. This way html5 tags are easy to keep track of.
+ _html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
+
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
+ _block_tags_a += _html5tags
+
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
@@ -429,6 +554,8 @@ class Markdown(object):
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
+ _block_tags_b += _html5tags
+
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
@@ -442,10 +569,27 @@ class Markdown(object):
""" % _block_tags_b,
re.X | re.M)
+ _html_markdown_attr_re = re.compile(
+ r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
+ elif 'markdown-in-html' in self.extras and 'markdown=' in html:
+ first_line = html.split('\n', 1)[0]
+ m = self._html_markdown_attr_re.search(first_line)
+ if m:
+ lines = html.split('\n')
+ middle = '\n'.join(lines[1:-1])
+ last_line = lines[-1]
+ first_line = first_line[:m.start()] + first_line[m.end():]
+ f_key = _hash_text(first_line)
+ self.html_blocks[f_key] = first_line
+ l_key = _hash_text(last_line)
+ self.html_blocks[l_key] = last_line
+ return ''.join(["\n\n", f_key,
+ "\n\n", middle, "\n\n",
+ l_key, "\n\n"])
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
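# Sketch of the markdown-in-html branch above: a block tag carrying
# markdown="1" keeps its opening/closing lines as raw HTML while the body
# still goes through Markdown processing. The sample input is illustrative.
import markdown2

src = '<div markdown="1">\n**bold** inside a div\n</div>'
print(markdown2.markdown(src, extras=["markdown-in-html"]))
# roughly: <div>\n\n<p><strong>bold</strong> inside a div</p>\n\n</div>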
@@ -497,11 +641,11 @@ class Markdown(object):
# Delimiters for next comment block.
            try:
                start_idx = text.index("<!--", start_idx)
-            except ValueError, ex:
+            except ValueError:
                break
            try:
                end_idx = text.index("-->", start_idx) + 3
-            except ValueError, ex:
+            except ValueError:
                break
# Start position for next comment block search.
@@ -590,7 +734,7 @@ class Markdown(object):
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
-            self.titles[key] = title.replace('"', '&quot;')
+ self.titles[key] = title
return ""
def _extract_footnote_def_sub(self, match):
@@ -635,28 +779,31 @@ class Markdown(object):
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
-
- _hr_res = [
- re.compile(r"^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$", re.M),
- re.compile(r"^[ ]{0,2}([ ]?\-[ ]?){3,}[ \t]*$", re.M),
- re.compile(r"^[ ]{0,2}([ ]?\_[ ]?){3,}[ \t]*$", re.M),
- ]
+ _hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
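# Quick check of the consolidated rule above (pattern copied from _hr_re):
import re
hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
assert hr_re.match('* * *')
assert hr_re.match('  ---')
assert not hr_re.match('    ---')  # four leading spaces means a code block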
def _run_block_gamut(self, text):
# These are all the transformations that form block-level
# tags like paragraphs, headers, and list items.
+ if "fenced-code-blocks" in self.extras:
+ text = self._do_fenced_code_blocks(text)
+
text = self._do_headers(text)
# Do Horizontal Rules:
+ # On the number of spaces in horizontal rules: The spec is fuzzy: "If
+ # you wish, you may use spaces between the hyphens or asterisks."
+ # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
+ # hr chars to one or two. We'll reproduce that limit here.
        hr = "\n<hr"+self.empty_element_suffix+"\n"
+        for row in rows:
+            hrow = ['<tr>']
+                    footer.append("\n<p>%s</p>" % backlink)
            footer.append('</li>')
@@ -1426,7 +1816,7 @@ class Markdown(object):
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
-    _naked_gt_re = re.compile(r'''(?<![a-z?!/'"-])>''', re.I)
+    _naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
@@ -1443,7 +1833,7 @@ class Markdown(object):
return text
def _encode_backslash_escapes(self, text):
- for ch, escape in g_escape_table.items():
+ for ch, escape in list(self._escape_table.items()):
text = text.replace("\\"+ch, escape)
return text
@@ -1512,19 +1902,19 @@ class Markdown(object):
escaped_href = (
                        href.replace('"', '&quot;')  # b/c of attr quote
                        # To avoid markdown <em> and <strong>:
- .replace('*', g_escape_table['*'])
- .replace('_', g_escape_table['_']))
+ .replace('*', self._escape_table['*'])
+ .replace('_', self._escape_table['_']))
                    link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
- for hash, link in link_from_hash.items():
+ for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
- for ch, hash in g_escape_table.items():
+ for ch, hash in list(self._escape_table.items()):
text = text.replace(hash, ch)
return text
@@ -1550,6 +1940,64 @@ class MarkdownWithExtras(Markdown):
#---- internal support functions
+class UnicodeWithAttrs(unicode):
+ """A subclass of unicode used for the return value of conversion to
+ possibly attach some attributes. E.g. the "toc_html" attribute when
+ the "toc" extra is used.
+ """
+ metadata = None
+ _toc = None
+ def toc_html(self):
+ """Return the HTML for the current TOC.
+
+ This expects the `_toc` attribute to have been set on this instance.
+ """
+ if self._toc is None:
+ return None
+
+        def indent():
+            return '  ' * (len(h_stack) - 1)
+        lines = []
+        h_stack = [0]   # stack of header-level numbers
+        for level, id, name in self._toc:
+            if level > h_stack[-1]:
+                lines.append("%s<ul>" % indent())
+                h_stack.append(level)
+            elif level == h_stack[-1]:
+                lines[-1] += "</li>"
+            else:
+                while level < h_stack[-1]:
+                    h_stack.pop()
+                    if not lines[-1].endswith("</li>"):
+                        lines[-1] += "</li>"
+                    lines.append("%s</ul></li>" % indent())
+            lines.append('%s<li><a href="#%s">%s</a>' % (
+                indent(), id, name))
+        while len(h_stack) > 1:
+            h_stack.pop()
+            if not lines[-1].endswith("</li>"):
+                lines[-1] += "</li>"
+            lines.append("%s</ul>" % indent())
+        return '\n'.join(lines) + '\n'
+ toc_html = property(toc_html)
+
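# Sketch of the "toc" extra that produces the annotated string above:
import markdown2

html = markdown2.markdown("# One\n\n## Two\n", extras=["toc"])
print(html.toc_html)  # nested <ul>/<li> markup built by the property above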
+## {{{ http://code.activestate.com/recipes/577257/ (r1)
+_slugify_strip_re = re.compile(r'[^\w\s-]')
+_slugify_hyphenate_re = re.compile(r'[-\s]+')
+def _slugify(value):
+ """
+ Normalizes string, converts to lowercase, removes non-alpha characters,
+ and converts spaces to hyphens.
+
+ From Django's "django/template/defaultfilters.py".
+ """
+ import unicodedata
+ value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
+ value = _slugify_strip_re.sub('', value).strip().lower()
+ return _slugify_hyphenate_re.sub('-', value)
+## end of http://code.activestate.com/recipes/577257/ }}}
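# Behavior check for the _slugify helper above (module-private; the "toc"
# extra uses it to derive anchor ids), assuming the module imports as
# `markdown2`:
from markdown2 import _slugify

assert _slugify(u'H\xe9llo, W\xf6rld!') == 'hello-world'
assert _slugify('Spaces   and---dashes') == 'spaces-and-dashes'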
+
+
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
@@ -1583,7 +2031,7 @@ def _regex_from_encoded_pattern(s):
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
- % (char, s, ''.join(flag_from_char.keys())))
+ % (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
@@ -1603,8 +2051,8 @@ def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""
DEBUG = False
if DEBUG:
- print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
- % (tabsize, skip_first_line)
+ print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
+ % (tabsize, skip_first_line))
indents = []
margin = None
for i, line in enumerate(lines):
@@ -1621,12 +2069,12 @@ def _dedentlines(lines, tabsize=8, skip_first_line=False):
break
else:
continue # skip all-whitespace lines
- if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
+ if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
if margin is None:
margin = indent
else:
margin = min(margin, indent)
- if DEBUG: print "dedent: margin=%r" % margin
+ if DEBUG: print("dedent: margin=%r" % margin)
if margin is not None and margin > 0:
for i, line in enumerate(lines):
@@ -1638,7 +2086,7 @@ def _dedentlines(lines, tabsize=8, skip_first_line=False):
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
- if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
+ if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
lines[i] = lines[i][j:]
break
else:
@@ -1646,8 +2094,8 @@ def _dedentlines(lines, tabsize=8, skip_first_line=False):
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
- print "dedent: %r: %r -> removed %d/%d"\
- % (line, ch, removed, margin)
+ print("dedent: %r: %r -> removed %d/%d"\
+ % (line, ch, removed, margin))
if removed == margin:
lines[i] = lines[i][j+1:]
break
@@ -1741,6 +2189,22 @@ def _hr_tag_re_from_tab_width(tab_width):
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
+def _xml_escape_attr(attr, skip_single_quote=True):
+    """Escape the given string for use in an HTML/XML tag attribute.
+
+    By default this doesn't bother with escaping `'` to `&#39;`, presuming that
+    the tag attribute is surrounded by double quotes.
+    """
+    escaped = (attr
+        .replace('&', '&amp;')
+        .replace('"', '&quot;')
+        .replace('<', '&lt;')
+        .replace('>', '&gt;'))
+    if not skip_single_quote:
+        escaped = escaped.replace("'", "&#39;")
+    return escaped
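# Quick check of _xml_escape_attr above (used for emitted title/href
# attribute values):
assert _xml_escape_attr('a "b" & <c>') == 'a &quot;b&quot; &amp; &lt;c&gt;'
assert _xml_escape_attr("it's", skip_single_quote=False) == "it&#39;s"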
+
+
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
@@ -1791,17 +2255,11 @@ def main(argv=None):
"[HTML_REMOVED] note")
parser.add_option("-x", "--extras", action="append",
help="Turn on specific extra features (not part of "
- "the core Markdown spec). Supported values: "
- "'code-friendly' disables _/__ for emphasis; "
- "'code-color' adds code-block syntax coloring; "
- "'link-patterns' adds auto-linking based on patterns; "
- "'footnotes' adds the footnotes syntax;"
- "'xml' passes one-liner processing instructions and namespaced XML tags;"
- "'pyshell' to put unindented Python interactive shell sessions in a block.")
+ "the core Markdown spec). See above.")
parser.add_option("--use-file-vars",
help="Look for and use Emacs-style 'markdown-extras' "
"file var to turn on extras. See "
-                      "<http://code.google.com/p/python-markdown2/wiki/FileVars>.")
+                      "<https://github.com/trentm/python-markdown2/wiki/file-vars>")
parser.add_option("--link-patterns-file",
help="path to a link pattern file")
parser.add_option("--self-test", action="store_true",
@@ -1855,22 +2313,41 @@ def main(argv=None):
from os.path import join, dirname, abspath, exists
markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
"Markdown.pl")
+ if not paths:
+ paths = ['-']
for path in paths:
+ if path == '-':
+ text = sys.stdin.read()
+ else:
+ fp = codecs.open(path, 'r', opts.encoding)
+ text = fp.read()
+ fp.close()
if opts.compare:
- print "==== Markdown.pl ===="
- perl_cmd = 'perl %s "%s"' % (markdown_pl, path)
- o = os.popen(perl_cmd)
- perl_html = o.read()
- o.close()
- sys.stdout.write(perl_html)
- print "==== markdown2.py ===="
- html = markdown_path(path, encoding=opts.encoding,
- html4tags=opts.html4tags,
- safe_mode=opts.safe_mode,
- extras=extras, link_patterns=link_patterns,
- use_file_vars=opts.use_file_vars)
- sys.stdout.write(
- html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+ from subprocess import Popen, PIPE
+ print("==== Markdown.pl ====")
+ p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
+ p.stdin.write(text.encode('utf-8'))
+ p.stdin.close()
+ perl_html = p.stdout.read().decode('utf-8')
+ if py3:
+ sys.stdout.write(perl_html)
+ else:
+ sys.stdout.write(perl_html.encode(
+ sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+ print("==== markdown2.py ====")
+ html = markdown(text,
+ html4tags=opts.html4tags,
+ safe_mode=opts.safe_mode,
+ extras=extras, link_patterns=link_patterns,
+ use_file_vars=opts.use_file_vars)
+ if py3:
+ sys.stdout.write(html)
+ else:
+ sys.stdout.write(html.encode(
+ sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+ if extras and "toc" in extras:
+ log.debug("toc_html: " +
+ html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if opts.compare:
test_dir = join(dirname(dirname(abspath(__file__))), "test")
if exists(join(test_dir, "test_markdown2.py")):
@@ -1881,10 +2358,8 @@ def main(argv=None):
else:
norm_html = html
norm_perl_html = perl_html
- print "==== match? %r ====" % (norm_perl_html == norm_html)
+ print("==== match? %r ====" % (norm_perl_html == norm_html))
if __name__ == "__main__":
sys.exit( main(sys.argv) )
-
-
diff --git a/gluon/contrib/memcache/memcache.py b/gluon/contrib/memcache/memcache.py
index 706f5448..9bc61682 100644
--- a/gluon/contrib/memcache/memcache.py
+++ b/gluon/contrib/memcache/memcache.py
@@ -1,12 +1,12 @@
#!/usr/bin/env python
-"""
-client module for memcached (memory cache daemon)
+"""client module for memcached (memory cache daemon)
Overview
========
-See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
+See U{the MemCached homepage<http://www.danga.com/memcached>} for more
+about memcached.
Usage summary
=============
@@ -22,11 +22,12 @@ This should give you a feel for how this module operates::
mc.set("another_key", 3)
mc.delete("another_key")
- mc.set("key", "1") # note that the key used for incr/decr must be a string.
+ mc.set("key", "1") # note that the key used for incr/decr must be
+ # a string.
mc.incr("key")
mc.decr("key")
-The standard way to use memcache with a database is like this::
+The standard way to use memcache with a database is like this:
key = derive_key(obj)
obj = mc.get(key)
@@ -41,27 +42,35 @@ Detailed Documentation
======================
More detailed documentation is available in the L{Client} class.
+
"""
-import sys
-import socket
-import time
-import os
-import re
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
+from __future__ import print_function
+
+import binascii
+import os
+import pickle
+import re
+import socket
+import sys
+import threading
+import time
+import zlib
+
+import six
+
-from binascii import crc32 # zlib version is not cross-platform
def cmemcache_hash(key):
- return((((crc32(key) & 0xffffffff) >> 16) & 0x7fff) or 1)
+ return (
+ (((binascii.crc32(key.encode('ascii')) & 0xffffffff)
+ >> 16) & 0x7fff) or 1)
serverHashFunction = cmemcache_hash
+
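# Worked example of the hash above: CRC32 of the key, folded down to a
# 15-bit bucket index, with 0 mapped to 1 so a zero hash never occurs.
import binascii

key = 'some_key'
h = (((binascii.crc32(key.encode('ascii')) & 0xffffffff) >> 16) & 0x7fff) or 1
# h is later used to pick self.buckets[h % len(self.buckets)]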
def useOldServerHashFunction():
"""Use the old python-memcache server hash function."""
global serverHashFunction
- serverHashFunction = crc32
+ serverHashFunction = binascii.crc32
try:
from zlib import compress, decompress
@@ -69,27 +78,40 @@ try:
except ImportError:
_supports_compress = False
# quickly define a decompress just in case we recv compressed data.
+
    def decompress(val):
- raise _Error("received compressed data but I don't support compression (import error)")
+ raise _Error(
+ "Received compressed data but I don't support "
+ "compression (import error)")
+
+from io import BytesIO
+try:
+ unicode
+except NameError:
+ _has_unicode = False
+else:
+ _has_unicode = True
try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
+ _str_cls = basestring
+except NameError:
+ _str_cls = str
+
+valid_key_chars_re = re.compile('[\x21-\x7e\x80-\xff]+$')
# Original author: Evan Martin of Danga Interactive
-__author__ = "Sean Reifschneider <jafo-memcached@tummy.com>"
-__version__ = "1.48"
+__author__ = "Sean Reifschneider <jafo-memcached@tummy.com>"
+__version__ = "1.53"
__copyright__ = "Copyright (C) 2003 Danga Interactive"
# http://en.wikipedia.org/wiki/Python_Software_Foundation_License
-__license__ = "Python Software Foundation License"
+__license__ = "Python Software Foundation License"
SERVER_MAX_KEY_LENGTH = 250
-# Storing values larger than 1MB requires recompiling memcached. If you do,
-# this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N"
-# after importing this module.
-SERVER_MAX_VALUE_LENGTH = 1024*1024
+# Storing values larger than 1MB requires recompiling memcached. If
+# you do, this value can be changed by doing
+# "memcache.SERVER_MAX_VALUE_LENGTH = N" after importing this module.
+SERVER_MAX_VALUE_LENGTH = 1024 * 1024
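# Minimal end-to-end sketch matching the usage summary in the module
# docstring, assuming this module imports as `memcache` and a memcached
# instance is listening on 127.0.0.1:11211:
import memcache

mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.set("key", "1")   # incr/decr require a string integer value
mc.incr("key")
mc.delete("some_key")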
class _Error(Exception):
@@ -100,102 +122,119 @@ class _ConnectionDeadError(Exception):
pass
-try:
- # Only exists in Python 2.4+
- from threading import local
-except ImportError:
- # TODO: add the pure-python local implementation
- class local(object):
- pass
-
-
_DEAD_RETRY = 30 # number of seconds before retrying a dead server.
-_SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout.
+_SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout.
-class Client(local):
- """
- Object representing a pool of memcache servers.
+class Client(threading.local):
+ """Object representing a pool of memcache servers.
See L{memcache} for an overview.
In all cases where a key is used, the key can be either:
1. A simple hashable type (string, integer, etc.).
- 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
- making this module calculate a hash value. You may prefer, for
- example, to keep all of a given user's objects on the same memcache
- server, so you could use the user's unique id as the hash value.
+ 2. A tuple of C{(hashvalue, key)}. This is useful if you want
+ to avoid making this module calculate a hash value. You may
+ prefer, for example, to keep all of a given user's objects on
+ the same memcache server, so you could use the user's unique
+ id as the hash value.
- @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
+
+ @group Setup: __init__, set_servers, forget_dead_hosts,
+ disconnect_all, debuglog
@group Insertion: set, add, replace, set_multi
@group Retrieval: get, get_multi
@group Integers: incr, decr
@group Removal: delete, delete_multi
- @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
- set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi
+ @sort: __init__, set_servers, forget_dead_hosts, disconnect_all,
+ debuglog,\ set, set_multi, add, replace, get, get_multi,
+ incr, decr, delete, delete_multi
"""
- _FLAG_PICKLE = 1<<0
- _FLAG_INTEGER = 1<<1
- _FLAG_LONG = 1<<2
- _FLAG_COMPRESSED = 1<<3
+ _FLAG_PICKLE = 1 << 0
+ _FLAG_INTEGER = 1 << 1
+ _FLAG_LONG = 1 << 2
+ _FLAG_COMPRESSED = 1 << 3
_SERVER_RETRIES = 10 # how many times to try finding a free server.
# exceptions for Client
class MemcachedKeyError(Exception):
pass
+
class MemcachedKeyLengthError(MemcachedKeyError):
pass
+
class MemcachedKeyCharacterError(MemcachedKeyError):
pass
+
class MemcachedKeyNoneError(MemcachedKeyError):
pass
+
class MemcachedKeyTypeError(MemcachedKeyError):
pass
+
class MemcachedStringEncodingError(Exception):
pass
def __init__(self, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None,
- server_max_key_length=SERVER_MAX_KEY_LENGTH,
- server_max_value_length=SERVER_MAX_VALUE_LENGTH,
+ server_max_key_length=None, server_max_value_length=None,
dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT,
- cache_cas = False):
- """
- Create a new Client object with the given list of servers.
+ cache_cas=False, flush_on_reconnect=0, check_keys=True):
+ """Create a new Client object with the given list of servers.
@param servers: C{servers} is passed to L{set_servers}.
- @param debug: whether to display error messages when a server can't be
- contacted.
- @param pickleProtocol: number to mandate protocol used by (c)Pickle.
- @param pickler: optional override of default Pickler to allow subclassing.
- @param unpickler: optional override of default Unpickler to allow subclassing.
- @param pload: optional persistent_load function to call on pickle loading.
- Useful for cPickle since subclassing isn't allowed.
- @param pid: optional persistent_id function to call on pickle storing.
- Useful for cPickle since subclassing isn't allowed.
- @param dead_retry: number of seconds before retrying a blacklisted
- server. Default to 30 s.
- @param socket_timeout: timeout in seconds for all calls to a server. Defaults
- to 3 seconds.
- @param cache_cas: (default False) If true, cas operations will be
- cached. WARNING: This cache is not expired internally, if you have
- a long-running process you will need to expire it manually via
- "client.reset_cas(), or the cache can grow unlimited.
+ @param debug: whether to display error messages when a server
+ can't be contacted.
+ @param pickleProtocol: number to mandate protocol used by
+ (c)Pickle.
+ @param pickler: optional override of default Pickler to allow
+ subclassing.
+ @param unpickler: optional override of default Unpickler to
+ allow subclassing.
+ @param pload: optional persistent_load function to call on
+ pickle loading. Useful for cPickle since subclassing isn't
+ allowed.
+ @param pid: optional persistent_id function to call on pickle
+ storing. Useful for cPickle since subclassing isn't allowed.
+ @param dead_retry: number of seconds before retrying a
+ blacklisted server. Default to 30 s.
+ @param socket_timeout: timeout in seconds for all calls to a
+ server. Defaults to 3 seconds.
+ @param cache_cas: (default False) If true, cas operations will
+ be cached. WARNING: This cache is not expired internally, if
+ you have a long-running process you will need to expire it
+ manually via client.reset_cas(), or the cache can grow
+ unlimited.
@param server_max_key_length: (default SERVER_MAX_KEY_LENGTH)
Data that is larger than this will not be sent to the server.
- @param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH)
- Data that is larger than this will not be sent to the server.
+ @param server_max_value_length: (default
+ SERVER_MAX_VALUE_LENGTH) Data that is larger than this will
+ not be sent to the server.
+ @param flush_on_reconnect: optional flag which prevents a
+ scenario that can cause stale data to be read: If there's more
+ than one memcached server and the connection to one is
+ interrupted, keys that mapped to that server will get
+ reassigned to another. If the first server comes back, those
+ keys will map to it again. If it still has its data, get()s
+ can read stale data that was overwritten on another
+ server. This flag is off by default for backwards
+ compatibility.
+ @param check_keys: (default True) If True, the key is checked
+ to ensure it is the correct length and composed of the right
+ characters.
"""
- local.__init__(self)
+ super(Client, self).__init__()
self.debug = debug
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
+ self.flush_on_reconnect = flush_on_reconnect
self.set_servers(servers)
self.stats = {}
self.cache_cas = cache_cas
self.reset_cas()
+ self.do_check_key = check_keys
# Allow users to modify pickling/unpickling behavior
self.pickleProtocol = pickleProtocol
@@ -204,69 +243,80 @@ class Client(local):
self.persistent_load = pload
self.persistent_id = pid
self.server_max_key_length = server_max_key_length
+ if self.server_max_key_length is None:
+ self.server_max_key_length = SERVER_MAX_KEY_LENGTH
self.server_max_value_length = server_max_value_length
+ if self.server_max_value_length is None:
+ self.server_max_value_length = SERVER_MAX_VALUE_LENGTH
# figure out the pickler style
- file = StringIO()
+ file = BytesIO()
try:
- pickler = self.pickler(file, protocol = self.pickleProtocol)
+ pickler = self.pickler(file, protocol=self.pickleProtocol)
self.picklerIsKeyword = True
except TypeError:
self.picklerIsKeyword = False
def reset_cas(self):
- """
- Reset the cas cache. This is only used if the Client() object
- was created with "cache_cas=True". If used, this cache does not
- expire internally, so it can grow unbounded if you do not clear it
+ """Reset the cas cache.
+
+ This is only used if the Client() object was created with
+ "cache_cas=True". If used, this cache does not expire
+ internally, so it can grow unbounded if you do not clear it
yourself.
"""
self.cas_ids = {}
-
def set_servers(self, servers):
- """
- Set the pool of servers used by this client.
+ """Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
- 1. Strings of the form C{"host:port"}, which implies a default weight of 1.
- 2. Tuples of the form C{("host:port", weight)}, where C{weight} is
- an integer weight value.
+ 1. Strings of the form C{"host:port"}, which implies a
+ default weight of 1.
+ 2. Tuples of the form C{("host:port", weight)}, where
+ C{weight} is an integer weight value.
+
"""
self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,
- socket_timeout=self.socket_timeout)
+ socket_timeout=self.socket_timeout,
+ flush_on_reconnect=self.flush_on_reconnect)
for s in servers]
self._init_buckets()
- def get_stats(self, stat_args = None):
- '''Get statistics from each of the servers.
+ def get_stats(self, stat_args=None):
+ """Get statistics from each of the servers.
@param stat_args: Additional arguments to pass to the memcache
"stats" command.
- @return: A list of tuples ( server_identifier, stats_dictionary ).
- The dictionary contains a number of name/value pairs specifying
- the name of the status field and the string value associated with
- it. The values are not converted from strings.
- '''
+ @return: A list of tuples ( server_identifier,
+ stats_dictionary ). The dictionary contains a number of
+ name/value pairs specifying the name of the status field
+ and the string value associated with it. The values are
+ not converted from strings.
+ """
data = []
for s in self.servers:
- if not s.connect(): continue
+ if not s.connect():
+ continue
if s.family == socket.AF_INET:
- name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
+ name = '%s:%s (%s)' % (s.ip, s.port, s.weight)
+ elif s.family == socket.AF_INET6:
+ name = '[%s]:%s (%s)' % (s.ip, s.port, s.weight)
else:
- name = 'unix:%s (%s)' % ( s.address, s.weight )
+ name = 'unix:%s (%s)' % (s.address, s.weight)
if not stat_args:
s.send_cmd('stats')
else:
s.send_cmd('stats ' + stat_args)
serverData = {}
- data.append(( name, serverData ))
+ data.append((name, serverData))
readline = s.readline
while 1:
line = readline()
- if not line or line.strip() == 'END': break
+ if not line or line.strip() == 'END':
+ break
stats = line.split(' ', 2)
serverData[stats[1]] = stats[2]
@@ -275,33 +325,37 @@ class Client(local):
def get_slabs(self):
data = []
for s in self.servers:
- if not s.connect(): continue
+ if not s.connect():
+ continue
if s.family == socket.AF_INET:
- name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
+ name = '%s:%s (%s)' % (s.ip, s.port, s.weight)
+ elif s.family == socket.AF_INET6:
+ name = '[%s]:%s (%s)' % (s.ip, s.port, s.weight)
else:
- name = 'unix:%s (%s)' % ( s.address, s.weight )
+ name = 'unix:%s (%s)' % (s.address, s.weight)
serverData = {}
- data.append(( name, serverData ))
+ data.append((name, serverData))
s.send_cmd('stats items')
readline = s.readline
while 1:
line = readline()
- if not line or line.strip() == 'END': break
+ if not line or line.strip() == 'END':
+ break
item = line.split(' ', 2)
- #0 = STAT, 1 = ITEM, 2 = Value
+ # 0 = STAT, 1 = ITEM, 2 = Value
slab = item[1].split(':', 2)
- #0 = items, 1 = Slab #, 2 = Name
+ # 0 = items, 1 = Slab #, 2 = Name
if slab[1] not in serverData:
serverData[slab[1]] = {}
serverData[slab[1]][slab[2]] = item[2]
return data
def flush_all(self):
- 'Expire all data currently in the memcache servers.'
+ """Expire all data in memcache servers that are reachable."""
for s in self.servers:
- if not s.connect(): continue
- s.send_cmd('flush_all')
- s.expect("OK")
+ if not s.connect():
+ continue
+ s.flush()
def debuglog(self, str):
if self.debug:
@@ -314,9 +368,7 @@ class Client(local):
self.stats[func] += 1
def forget_dead_hosts(self):
- """
- Reset every host in the pool to an "alive" state.
- """
+ """Reset every host in the pool to an "alive" state."""
for s in self.servers:
s.deaduntil = 0
@@ -332,10 +384,13 @@ class Client(local):
else:
serverhash = serverHashFunction(key)
+ if not self.buckets:
+ return None, None
+
for i in range(Client._SERVER_RETRIES):
server = self.buckets[serverhash % len(self.buckets)]
if server.connect():
- #print "(using server %s)" % server,
+ # print("(using server %s)" % server,)
return server, key
serverhash = serverHashFunction(str(serverhash) + str(i))
return None, None
@@ -345,54 +400,54 @@ class Client(local):
s.close_socket()
def delete_multi(self, keys, time=0, key_prefix=''):
- '''
- Delete multiple keys in the memcache doing just one query.
+ """Delete multiple keys in the memcache doing just one query.
- >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
- >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
+ >>> notset_keys = mc.set_multi({'a1' : 'val1', 'a2' : 'val2'})
+ >>> mc.get_multi(['a1', 'a2']) == {'a1' : 'val1','a2' : 'val2'}
1
>>> mc.delete_multi(['key1', 'key2'])
1
>>> mc.get_multi(['key1', 'key2']) == {}
1
-
- This method is recommended over iterated regular L{delete}s as it reduces total latency, since
- your app doesn't have to wait for each round-trip of L{delete} before sending
- the next one.
+ This method is recommended over iterated regular L{delete}s as
+ it reduces total latency, since your app doesn't have to wait
+ for each round-trip of L{delete} before sending the next one.
@param keys: An iterable of keys to clear
- @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
- @param key_prefix: Optional string to prepend to each key when sending to memcache.
- See docs for L{get_multi} and L{set_multi}.
-
+ @param time: number of seconds any subsequent set / update
+ commands should fail. Defaults to 0 for no delay.
+ @param key_prefix: Optional string to prepend to each key when
+ sending to memcache. See docs for L{get_multi} and
+ L{set_multi}.
@return: 1 if no failure in communication with any memcacheds.
@rtype: int
-
- '''
+ """
self._statlog('delete_multi')
- server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
+ server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
+ keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
rc = 1
- for server in server_keys.iterkeys():
+ for server in six.iterkeys(server_keys):
bigcmd = []
write = bigcmd.append
- if time != None:
- for key in server_keys[server]: # These are mangled keys
- write("delete %s %d\r\n" % (key, time))
+ if time is not None:
+ for key in server_keys[server]: # These are mangled keys
+ write("delete %s %d\r\n" % (key, time))
else:
- for key in server_keys[server]: # These are mangled keys
- write("delete %s\r\n" % key)
+ for key in server_keys[server]: # These are mangled keys
+ write("delete %s\r\n" % key)
try:
server.send_cmds(''.join(bigcmd))
- except socket.error, msg:
+ except socket.error as msg:
rc = 0
- if isinstance(msg, tuple): msg = msg[1]
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
@@ -400,12 +455,13 @@ class Client(local):
for server in dead_servers:
del server_keys[server]
- for server, keys in server_keys.iteritems():
+ for server, keys in six.iteritems(server_keys):
try:
for key in keys:
server.expect("DELETED")
- except socket.error, msg:
- if isinstance(msg, tuple): msg = msg[1]
+ except socket.error as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
rc = 0
return rc
@@ -418,36 +474,57 @@ class Client(local):
should fail. Defaults to None for no delay.
@rtype: int
'''
- self.check_key(key)
+ return self._deletetouch(['DELETED', 'NOT_FOUND'], "delete", key, time)
+
+ def touch(self, key, time=0):
+ '''Updates the expiration time of a key in memcache.
+
+ @return: Nonzero on success.
+ @param time: Tells memcached the time which this value should
+ expire, either as a delta number of seconds, or an absolute
+ unix time-since-the-epoch value. See the memcached protocol
+        docs section "Storage Commands" for more info on <exptime>. We
+ default to 0 == cache forever.
+ @rtype: int
+ '''
+ return self._deletetouch(['TOUCHED'], "touch", key, time)
+
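# Usage sketch for the new touch() above, reusing a Client `mc` as in the
# module docstring; the key name is illustrative:
mc.set('session', 'blob', time=300)
mc.touch('session', time=600)  # extend the TTL without resending the value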
+ def _deletetouch(self, expected, cmd, key, time=0):
+ if self.do_check_key:
+ self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
- self._statlog('delete')
- if time != None and time != 0:
- cmd = "delete %s %d" % (key, time)
+ self._statlog(cmd)
+ if time is not None and time != 0:
+ cmd = "%s %s %d" % (cmd, key, time)
else:
- cmd = "delete %s" % key
+ cmd = "%s %s" % (cmd, key)
try:
server.send_cmd(cmd)
line = server.readline()
- if line and line.strip() in ['DELETED', 'NOT_FOUND']: return 1
- self.debuglog('Delete expected DELETED or NOT_FOUND, got: %s'
- % repr(line))
- except socket.error, msg:
- if isinstance(msg, tuple): msg = msg[1]
+ if line and line.strip() in expected:
+ return 1
+ self.debuglog('%s expected %s, got: %r'
+ % (cmd, ' or '.join(expected), line))
+ except socket.error as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
return 0
def incr(self, key, delta=1):
- """
- Sends a command to the server to atomically increment the value
- for C{key} by C{delta}, or by 1 if C{delta} is unspecified.
- Returns None if C{key} doesn't exist on server, otherwise it
- returns the new value after incrementing.
+ """Increment value for C{key} by C{delta}
- Note that the value for C{key} must already exist in the memcache,
- and it must be the string representation of an integer.
+ Sends a command to the server to atomically increment the
+ value for C{key} by C{delta}, or by 1 if C{delta} is
+ unspecified. Returns None if C{key} doesn't exist on server,
+ otherwise it returns the new value after incrementing.
+
+ Note that the value for C{key} must already exist in the
+ memcache, and it must be the string representation of an
+ integer.
>>> mc.set("counter", "20") # returns 1, indicating success
1
@@ -456,49 +533,57 @@ class Client(local):
>>> mc.incr("counter")
22
- Overflow on server is not checked. Be aware of values approaching
- 2**32. See L{decr}.
+ Overflow on server is not checked. Be aware of values
+ approaching 2**32. See L{decr}.
+
+ @param delta: Integer amount to increment by (should be zero
+ or greater).
- @param delta: Integer amount to increment by (should be zero or greater).
@return: New value after incrementing.
@rtype: int
"""
return self._incrdecr("incr", key, delta)
def decr(self, key, delta=1):
- """
- Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
- new values are capped at 0. If server value is 1, a decrement of 2
- returns 0, not -1.
+ """Decrement value for C{key} by C{delta}
- @param delta: Integer amount to decrement by (should be zero or greater).
- @return: New value after decrementing.
+ Like L{incr}, but decrements. Unlike L{incr}, underflow is
+ checked and new values are capped at 0. If server value is 1,
+ a decrement of 2 returns 0, not -1.
+
+ @param delta: Integer amount to decrement by (should be zero
+ or greater).
+
+ @return: New value after decrementing or None on error.
@rtype: int
"""
return self._incrdecr("decr", key, delta)
def _incrdecr(self, cmd, key, delta):
- self.check_key(key)
+ if self.do_check_key:
+ self.check_key(key)
server, key = self._get_server(key)
if not server:
- return 0
+ return None
self._statlog(cmd)
cmd = "%s %s %d" % (cmd, key, delta)
try:
server.send_cmd(cmd)
line = server.readline()
- if line == None or line.strip() =='NOT_FOUND': return None
+ if line is None or line.strip() == 'NOT_FOUND':
+ return None
return int(line)
- except socket.error, msg:
- if isinstance(msg, tuple): msg = msg[1]
+ except socket.error as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
return None
- def add(self, key, val, time = 0, min_compress_len = 0):
- '''
- Add new key with value.
+ def add(self, key, val, time=0, min_compress_len=0):
+ '''Add new key with value.
- Like L{set}, but only stores in memcache if the key doesn't already exist.
+ Like L{set}, but only stores in memcache if the key doesn't
+ already exist.
@return: Nonzero on success.
@rtype: int
@@ -542,68 +627,76 @@ class Client(local):
'''Unconditionally sets a key to a given value in the memcache.
The C{key} can optionally be an tuple, with the first element
- being the server hash value and the second being the key.
- If you want to avoid making this module calculate a hash value.
- You may prefer, for example, to keep all of a given user's objects
- on the same memcache server, so you could use the user's unique
- id as the hash value.
+ being the server hash value and the second being the key. If
+ you want to avoid making this module calculate a hash value.
+ You may prefer, for example, to keep all of a given user's
+ objects on the same memcache server, so you could use the
+ user's unique id as the hash value.
@return: Nonzero on success.
@rtype: int
- @param time: Tells memcached the time which this value should expire, either
- as a delta number of seconds, or an absolute unix time-since-the-epoch
- value. See the memcached protocol docs section "Storage Commands"
-          for more info on <exptime>. We default to 0 == cache forever.
- @param min_compress_len: The threshold length to kick in auto-compression
- of the value using the zlib.compress() routine. If the value being cached is
- a string, then the length of the string is measured, else if the value is an
- object, then the length of the pickle result is measured. If the resulting
- attempt at compression yeilds a larger string than the input, then it is
- discarded. For backwards compatability, this parameter defaults to 0,
- indicating don't ever try to compress.
+
+ @param time: Tells memcached the time which this value should
+ expire, either as a delta number of seconds, or an absolute
+ unix time-since-the-epoch value. See the memcached protocol
+        docs section "Storage Commands" for more info on <exptime>. We
+ default to 0 == cache forever.
+
+ @param min_compress_len: The threshold length to kick in
+ auto-compression of the value using the zlib.compress()
+ routine. If the value being cached is a string, then the
+ length of the string is measured, else if the value is an
+ object, then the length of the pickle result is measured. If
+        the resulting attempt at compression yields a larger string
+        than the input, then it is discarded. For backwards
+        compatibility, this parameter defaults to 0, indicating don't
+ ever try to compress.
+
'''
return self._set("set", key, val, time, min_compress_len)
-
def cas(self, key, val, time=0, min_compress_len=0):
- '''Sets a key to a given value in the memcache if it hasn't been
+ '''Check and set (CAS)
+
+ Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
The C{key} can optionally be an tuple, with the first element
- being the server hash value and the second being the key.
- If you want to avoid making this module calculate a hash value.
- You may prefer, for example, to keep all of a given user's objects
- on the same memcache server, so you could use the user's unique
- id as the hash value.
+ being the server hash value and the second being the key. If
+ you want to avoid making this module calculate a hash value.
+ You may prefer, for example, to keep all of a given user's
+ objects on the same memcache server, so you could use the
+ user's unique id as the hash value.
@return: Nonzero on success.
@rtype: int
- @param time: Tells memcached the time which this value should expire,
- either as a delta number of seconds, or an absolute unix
- time-since-the-epoch value. See the memcached protocol docs section
-        "Storage Commands" for more info on <exptime>. We default to
- 0 == cache forever.
+
+ @param time: Tells memcached the time which this value should
+ expire, either as a delta number of seconds, or an absolute
+ unix time-since-the-epoch value. See the memcached protocol
+        docs section "Storage Commands" for more info on <exptime>. We
+ default to 0 == cache forever.
+
@param min_compress_len: The threshold length to kick in
- auto-compression of the value using the zlib.compress() routine. If
- the value being cached is a string, then the length of the string is
- measured, else if the value is an object, then the length of the
- pickle result is measured. If the resulting attempt at compression
- yeilds a larger string than the input, then it is discarded. For
- backwards compatability, this parameter defaults to 0, indicating
- don't ever try to compress.
+ auto-compression of the value using the zlib.compress()
+ routine. If the value being cached is a string, then the
+ length of the string is measured, else if the value is an
+ object, then the length of the pickle result is measured. If
+        the resulting attempt at compression yields a larger string
+        than the input, then it is discarded. For backwards
+        compatibility, this parameter defaults to 0, indicating don't
+ ever try to compress.
'''
return self._set("cas", key, val, time, min_compress_len)
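# CAS sketch per the docstring above: gets() records the cas id (only when
# the client was created with cache_cas=True) and cas() stores only if the
# server-side value is unchanged in the meantime. Key and value are
# illustrative.
mc = memcache.Client(['127.0.0.1:11211'], cache_cas=True)
val = mc.gets('counter')
if val is not None and not mc.cas('counter', str(int(val) + 1)):
    pass  # lost the race: another client updated 'counter' first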
-
def _map_and_prefix_keys(self, key_iterable, key_prefix):
- """Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of
- prefixed key -> original key.
-
-
+ """Compute the mapping of server (_Host instance) -> list of keys to
+ stuff onto that server, as well as the mapping of prefixed key
+ -> original key.
"""
# Check it just once ...
- key_extra_len=len(key_prefix)
- if key_prefix:
+ key_extra_len = len(key_prefix)
+ if key_prefix and self.do_check_key:
self.check_key(key_prefix)
# server (_Host) -> list of unprefixed server keys in mapping
@@ -613,16 +706,23 @@ class Client(local):
# build up a list for each server of all the keys we want.
for orig_key in key_iterable:
if isinstance(orig_key, tuple):
- # Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on.
+ # Tuple of hashvalue, key ala _get_server(). Caller is
+ # essentially telling us what server to stuff this on.
# Ensure call to _get_server gets a Tuple as well.
str_orig_key = str(orig_key[1])
- server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key.
+
+ # Gotta pre-mangle key before hashing to a
+ # server. Returns the mangled key.
+ server, key = self._get_server(
+ (orig_key[0], key_prefix + str_orig_key))
else:
- str_orig_key = str(orig_key) # set_multi supports int / long keys.
+ # set_multi supports int / long keys.
+ str_orig_key = str(orig_key)
server, key = self._get_server(key_prefix + str_orig_key)
# Now check to make sure key length is proper ...
- self.check_key(str_orig_key, key_extra_len=key_extra_len)
+ if self.do_check_key:
+ self.check_key(str_orig_key, key_extra_len=key_extra_len)
if not server:
continue
@@ -635,70 +735,92 @@ class Client(local):
return (server_keys, prefixed_to_orig_key)
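# Shape of the mapping computed above, assuming 'k1' and 'k2' hash to two
# different hosts:
#   _map_and_prefix_keys(['k1', 'k2'], 'pfx_')
#   -> ({<_Host a>: ['pfx_k1'], <_Host b>: ['pfx_k2']},
#       {'pfx_k1': 'k1', 'pfx_k2': 'k2'})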
def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
- '''
- Sets multiple keys in the memcache doing just one query.
+ '''Sets multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
- >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
+ >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1',
+ ... 'key2' : 'val2'}
1
- This method is recommended over regular L{set} as it lowers the number of
- total packets flying around your network, reducing total latency, since
- your app doesn't have to wait for each round-trip of L{set} before sending
- the next one.
+ This method is recommended over regular L{set} as it lowers
+ the number of total packets flying around your network,
+ reducing total latency, since your app doesn't have to wait
+ for each round-trip of L{set} before sending the next one.
@param mapping: A dict of key/value pairs to set.
- @param time: Tells memcached the time which this value should expire, either
- as a delta number of seconds, or an absolute unix time-since-the-epoch
- value. See the memcached protocol docs section "Storage Commands"
-          for more info on <exptime>. We default to 0 == cache forever.
- @param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache:
- >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
+
+ @param time: Tells memcached the time which this value should
+ expire, either as a delta number of seconds, or an
+ absolute unix time-since-the-epoch value. See the
+ memcached protocol docs section "Storage Commands" for
+        more info on <exptime>. We default to 0 == cache forever.
+
+ @param key_prefix: Optional string to prepend to each key when
+ sending to memcache. Allows you to efficiently stuff these
+ keys into a pseudo-namespace in memcache:
+
+ >>> notset_keys = mc.set_multi(
+ ... {'key1' : 'val1', 'key2' : 'val2'},
+ ... key_prefix='subspace_')
>>> len(notset_keys) == 0
True
- >>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
+ >>> mc.get_multi(['subspace_key1',
+ ... 'subspace_key2']) == {'subspace_key1': 'val1',
+ ... 'subspace_key2' : 'val2'}
True
- Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache.
- In this case, the return result would be the list of notset original keys, prefix not applied.
+ Causes key 'subspace_key1' and 'subspace_key2' to be
+ set. Useful in conjunction with a higher-level layer which
+ applies namespaces to data in memcache. In this case, the
+ return result would be the list of notset original keys,
+ prefix not applied.
+
+ @param min_compress_len: The threshold length to kick in
+ auto-compression of the value using the zlib.compress()
+ routine. If the value being cached is a string, then the
+ length of the string is measured, else if the value is an
+ object, then the length of the pickle result is
+            measured. If the resulting attempt at compression yields a
+            larger string than the input, then it is discarded. For
+            backwards compatibility, this parameter defaults to 0,
+ indicating don't ever try to compress.
+
+ @return: List of keys which failed to be stored [ memcache out
+ of memory, etc. ].
- @param min_compress_len: The threshold length to kick in auto-compression
- of the value using the zlib.compress() routine. If the value being cached is
- a string, then the length of the string is measured, else if the value is an
- object, then the length of the pickle result is measured. If the resulting
- attempt at compression yeilds a larger string than the input, then it is
- discarded. For backwards compatability, this parameter defaults to 0,
- indicating don't ever try to compress.
- @return: List of keys which failed to be stored [ memcache out of memory, etc. ].
@rtype: list
-
'''
-
self._statlog('set_multi')
- server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix)
+ server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
+ six.iterkeys(mapping), key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
- notstored = [] # original keys.
+ notstored = [] # original keys.
- for server in server_keys.iterkeys():
+ for server in six.iterkeys(server_keys):
bigcmd = []
write = bigcmd.append
try:
- for key in server_keys[server]: # These are mangled keys
+ for key in server_keys[server]: # These are mangled keys
store_info = self._val_to_store_info(
- mapping[prefixed_to_orig_key[key]],
- min_compress_len)
+ mapping[prefixed_to_orig_key[key]],
+ min_compress_len)
if store_info:
- write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0],
- time, store_info[1], store_info[2]))
+ msg = "set %s %d %d %d\r\n%s\r\n"
+ write(msg % (key,
+ store_info[0],
+ time,
+ store_info[1],
+ store_info[2]))
else:
notstored.append(prefixed_to_orig_key[key])
server.send_cmds(''.join(bigcmd))
- except socket.error, msg:
- if isinstance(msg, tuple): msg = msg[1]
+ except socket.error as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
@@ -707,24 +829,28 @@ class Client(local):
del server_keys[server]
# short-circuit if there are no servers, just return all keys
- if not server_keys: return(mapping.keys())
+ if not server_keys:
+ return(mapping.keys())
- for server, keys in server_keys.iteritems():
+ for server, keys in six.iteritems(server_keys):
try:
for key in keys:
- line = server.readline()
- if line == 'STORED':
+ if server.readline() == 'STORED':
continue
else:
- notstored.append(prefixed_to_orig_key[key]) #un-mangle.
- except (_Error, socket.error), msg:
- if isinstance(msg, tuple): msg = msg[1]
+ # un-mangle.
+ notstored.append(prefixed_to_orig_key[key])
+ except (_Error, socket.error) as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
return notstored
def _val_to_store_info(self, val, min_compress_len):
- """
- Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.
+ """Transform val to a storable representation.
+
+ Returns a tuple of the flags, the length of the new value, and
+ the new value itself.
"""
flags = 0
if isinstance(val, str):
@@ -741,9 +867,9 @@ class Client(local):
min_compress_len = 0
else:
flags |= Client._FLAG_PICKLE
- file = StringIO()
+ file = BytesIO()
if self.picklerIsKeyword:
- pickler = self.pickler(file, protocol = self.pickleProtocol)
+ pickler = self.pickler(file, protocol=self.pickleProtocol)
else:
pickler = self.pickler(file, self.pickleProtocol)
if self.persistent_id:
@@ -752,10 +878,11 @@ class Client(local):
val = file.getvalue()
lv = len(val)
- # We should try to compress if min_compress_len > 0 and we could
- # import zlib and this string is longer than our min threshold.
- if min_compress_len and _supports_compress and lv > min_compress_len:
- comp_val = compress(val)
+ # We should try to compress if min_compress_len > 0 and we
+ # could import zlib and this string is longer than our min
+ # threshold.
+ if min_compress_len and lv > min_compress_len:
+ comp_val = zlib.compress(val)
# Only retain the result if the compression result is smaller
# than the original.
if len(comp_val) < lv:
@@ -763,13 +890,15 @@ class Client(local):
val = comp_val
# silently do not store if value length exceeds maximum
- if self.server_max_value_length != 0 and \
- len(val) > self.server_max_value_length: return(0)
+ if (self.server_max_value_length != 0 and
+ len(val) > self.server_max_value_length):
+ return(0)
return (flags, len(val), val)
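# Worked example of the flag logic above, reusing a Client `mc` (and, under
# Python 2, where str is the bytes type): a highly compressible 2000-byte
# string with min_compress_len=1000 is stored zlib-compressed because the
# compressed form is smaller than the input.
flags, length, data = mc._val_to_store_info('x' * 2000, 1000)
# here flags & Client._FLAG_COMPRESSED is set and length < 2000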
- def _set(self, cmd, key, val, time, min_compress_len = 0):
- self.check_key(key)
+ def _set(self, cmd, key, val, time, min_compress_len=0):
+ if self.do_check_key:
+ self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
@@ -778,23 +907,28 @@ class Client(local):
self._statlog(cmd)
store_info = self._val_to_store_info(val, min_compress_len)
- if not store_info: return(0)
+ if not store_info:
+ return(0)
if cmd == 'cas':
if key not in self.cas_ids:
return self._set('set', key, val, time, min_compress_len)
fullcmd = "%s %s %d %d %d %d\r\n%s" % (
- cmd, key, store_info[0], time, store_info[1],
- self.cas_ids[key], store_info[2])
+ cmd, key, store_info[0], time, store_info[1],
+ self.cas_ids[key], store_info[2])
else:
fullcmd = "%s %s %d %d %d\r\n%s" % (
- cmd, key, store_info[0], time, store_info[1], store_info[2])
+ cmd, key, store_info[0],
+ time, store_info[1], store_info[2]
+ )
try:
server.send_cmd(fullcmd)
- return(server.expect("STORED") == "STORED")
- except socket.error, msg:
- if isinstance(msg, tuple): msg = msg[1]
+ return(server.expect("STORED", raise_exception=True)
+ == "STORED")
+ except socket.error as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
return 0
@@ -803,14 +937,15 @@ class Client(local):
except _ConnectionDeadError:
# retry once
try:
- server._get_socket()
- return _unsafe_set()
- except (_ConnectionDeadError, socket.error), msg:
+ if server._get_socket():
+ return _unsafe_set()
+ except (_ConnectionDeadError, socket.error) as msg:
server.mark_dead(msg)
return 0
def _get(self, cmd, key):
- self.check_key(key)
+ if self.do_check_key:
+ self.check_key(key)
server, key = self._get_server(key)
if not server:
return None
@@ -823,20 +958,25 @@ class Client(local):
rkey = flags = rlen = cas_id = None
if cmd == 'gets':
- rkey, flags, rlen, cas_id, = self._expect_cas_value(server)
+ rkey, flags, rlen, cas_id, = self._expect_cas_value(
+ server, raise_exception=True
+ )
if rkey and self.cache_cas:
self.cas_ids[rkey] = cas_id
else:
- rkey, flags, rlen, = self._expectvalue(server)
+ rkey, flags, rlen, = self._expectvalue(
+ server, raise_exception=True
+ )
if not rkey:
return None
try:
value = self._recv_value(server, flags, rlen)
finally:
- server.expect("END")
- except (_Error, socket.error), msg:
- if isinstance(msg, tuple): msg = msg[1]
+ server.expect("END", raise_exception=True)
+ except (_Error, socket.error) as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
return None
@@ -850,7 +990,7 @@ class Client(local):
if server.connect():
return _unsafe_get()
return None
- except (_ConnectionDeadError, socket.error), msg:
+ except (_ConnectionDeadError, socket.error) as msg:
server.mark_dead(msg)
return None
@@ -869,54 +1009,73 @@ class Client(local):
return self._get('gets', key)
def get_multi(self, keys, key_prefix=''):
- '''
- Retrieves multiple keys from the memcache doing just one query.
+ '''Retrieves multiple keys from the memcache doing just one query.
>>> success = mc.set("foo", "bar")
>>> success = mc.set("baz", 42)
- >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
+ >>> mc.get_multi(["foo", "baz", "foobar"]) == {
+ ... "foo": "bar", "baz": 42
+ ... }
1
>>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
1
- This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
- >>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
+ This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict
+ will just have unprefixed keys 'k1', 'k2'.
+
+ >>> mc.get_multi(['k1', 'k2', 'nonexist'],
+ ... key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
1
- get_mult [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields.
- They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix.
- In this mode, the key_prefix could be a table name, and the key itself a db primary key number.
+ get_mult [ and L{set_multi} ] can take str()-ables like ints /
+ longs as keys too. Such as your db pri key fields. They're
+ rotored through str() before being passed off to memcache,
+ with or without the use of a key_prefix. In this mode, the
+ key_prefix could be a table name, and the key itself a db
+ primary key number.
- >>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
+ >>> mc.set_multi({42: 'douglass adams',
+ ... 46: 'and 2 just ahead of me'},
+ ... key_prefix='numkeys_') == []
1
- >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
+ >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {
+ ... 42: 'douglass adams',
+ ... 46: 'and 2 just ahead of me'
+ ... }
1
- This method is recommended over regular L{get} as it lowers the number of
- total packets flying around your network, reducing total latency, since
- your app doesn't have to wait for each round-trip of L{get} before sending
- the next one.
+ This method is recommended over regular L{get} as it lowers
+ the number of total packets flying around your network,
+ reducing total latency, since your app doesn't have to wait
+ for each round-trip of L{get} before sending the next one.
See also L{set_multi}.
@param keys: An array of keys.
- @param key_prefix: A string to prefix each key when we communicate with memcache.
- Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
- @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the retured dictionary will not have it present.
+ @param key_prefix: A string to prefix each key when we
+ communicate with memcache. Facilitates pseudo-namespaces
+ within memcache. Returned dictionary keys will not have this
+ prefix.
+
+ @return: A dictionary of key/value pairs that were
+        available. If key_prefix was provided, the keys in the returned
+ dictionary will not have it present.
'''
self._statlog('get_multi')
- server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
+ server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
+ keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
- for server in server_keys.iterkeys():
+ for server in six.iterkeys(server_keys):
try:
server.send_cmd("get %s" % " ".join(server_keys[server]))
- except socket.error, msg:
- if isinstance(msg, tuple): msg = msg[1]
+ except socket.error as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
@@ -925,7 +1084,7 @@ class Client(local):
del server_keys[server]
retvals = {}
- for server in server_keys.iterkeys():
+ for server in six.iterkeys(server_keys):
try:
line = server.readline()
while line and line != 'END':
@@ -933,16 +1092,18 @@ class Client(local):
# Bo Yang reports that this can sometimes be None
if rkey is not None:
val = self._recv_value(server, flags, rlen)
- retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key.
+ # un-prefix returned key.
+ retvals[prefixed_to_orig_key[rkey]] = val
line = server.readline()
- except (_Error, socket.error), msg:
- if isinstance(msg, tuple): msg = msg[1]
+ except (_Error, socket.error) as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
server.mark_dead(msg)
return retvals
- def _expect_cas_value(self, server, line=None):
+ def _expect_cas_value(self, server, line=None, raise_exception=False):
if not line:
- line = server.readline()
+ line = server.readline(raise_exception)
if line and line[:5] == 'VALUE':
resp, rkey, flags, len, cas_id = line.split()
@@ -950,9 +1111,9 @@ class Client(local):
else:
return (None, None, None, None)
- def _expectvalue(self, server, line=None):
+ def _expectvalue(self, server, line=None, raise_exception=False):
if not line:
- line = server.readline()
+ line = server.readline(raise_exception)
if line and line[:5] == 'VALUE':
resp, rkey, flags, len = line.split()
@@ -963,19 +1124,19 @@ class Client(local):
return (None, None, None)
def _recv_value(self, server, flags, rlen):
- rlen += 2 # include \r\n
+ rlen += 2 # include \r\n
buf = server.recv(rlen)
if len(buf) != rlen:
raise _Error("received %d bytes when expecting %d"
- % (len(buf), rlen))
+ % (len(buf), rlen))
if len(buf) == rlen:
buf = buf[:-2] # strip \r\n
if flags & Client._FLAG_COMPRESSED:
- buf = decompress(buf)
+ buf = zlib.decompress(buf)
- if flags == 0 or flags == Client._FLAG_COMPRESSED:
+ if flags == 0 or flags == Client._FLAG_COMPRESSED:
# Either a bare string or a compressed string now decompressed...
val = buf
elif flags & Client._FLAG_INTEGER:
@@ -984,21 +1145,25 @@ class Client(local):
val = long(buf)
elif flags & Client._FLAG_PICKLE:
try:
- file = StringIO(buf)
+ file = BytesIO(buf)
unpickler = self.unpickler(file)
if self.persistent_load:
unpickler.persistent_load = self.persistent_load
val = unpickler.load()
- except Exception, e:
+ except Exception as e:
self.debuglog('Pickle error: %s\n' % e)
return None
else:
self.debuglog("unknown flags on get: %x\n" % flags)
+ raise ValueError('Unknown flags on get: %x' % flags)
return val
def check_key(self, key, key_extra_len=0):
- """Checks sanity of key. Fails if:
+ """Checks sanity of key.
+
+ Fails if:
+
        Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLengthError).
Contains control characters (Raises MemcachedKeyCharacterError).
Is not a string (Raises MemcachedStringEncodingError)
@@ -1006,34 +1171,39 @@ class Client(local):
Is not a string (Raises MemcachedKeyError)
Is None (Raises MemcachedKeyError)
"""
- if isinstance(key, tuple): key = key[1]
+ if isinstance(key, tuple):
+ key = key[1]
if not key:
raise Client.MemcachedKeyNoneError("Key is None")
- if isinstance(key, unicode):
+
+        # Make sure the key is not the distinct unicode type, on Python
+        # versions old enough to have it as a separate type from str.
+ if _has_unicode is True and isinstance(key, unicode):
raise Client.MemcachedStringEncodingError(
- "Keys must be str()'s, not unicode. Convert your unicode "
- "strings using mystring.encode(charset)!")
+ "Keys must be str()'s, not unicode. Convert your unicode "
+ "strings using mystring.encode(charset)!")
if not isinstance(key, str):
raise Client.MemcachedKeyTypeError("Key must be str()'s")
- if isinstance(key, basestring):
- if self.server_max_key_length != 0 and \
- len(key) + key_extra_len > self.server_max_key_length:
- raise Client.MemcachedKeyLengthError("Key length is > %s"
- % self.server_max_key_length)
- for char in key:
- if ord(char) < 33 or ord(char) == 127:
- raise Client.MemcachedKeyCharacterError(
- "Control characters not allowed")
+ if isinstance(key, _str_cls):
+ if (self.server_max_key_length != 0 and
+ len(key) + key_extra_len > self.server_max_key_length):
+ raise Client.MemcachedKeyLengthError(
+ "Key length is > %s" % self.server_max_key_length
+ )
+ if not valid_key_chars_re.match(key):
+ raise Client.MemcachedKeyCharacterError(
+ "Control characters not allowed")
class _Host(object):
def __init__(self, host, debug=0, dead_retry=_DEAD_RETRY,
- socket_timeout=_SOCKET_TIMEOUT):
+ socket_timeout=_SOCKET_TIMEOUT, flush_on_reconnect=0):
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.debug = debug
+ self.flush_on_reconnect = flush_on_reconnect
if isinstance(host, tuple):
host, self.weight = host
else:
@@ -1041,10 +1211,14 @@ class _Host(object):
# parse the connection string
        m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
+        if not m:
+            m = re.match(r'^(?P<proto>inet6):'
+                         r'\[(?P<host>[^\[\]]+)\](:(?P<port>[0-9]+))?$', host)
        if not m:
            m = re.match(r'^(?P<proto>inet):'
-                     r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
-        if not m: m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
+                         r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
+        if not m:
+            m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m:
raise ValueError('Unable to parse connection string: "%s"' % host)
@@ -1052,14 +1226,20 @@ class _Host(object):
if hostData.get('proto') == 'unix':
self.family = socket.AF_UNIX
self.address = hostData['path']
+ elif hostData.get('proto') == 'inet6':
+ self.family = socket.AF_INET6
+ self.ip = hostData['host']
+ self.port = int(hostData.get('port') or 11211)
+ self.address = (self.ip, self.port)
else:
self.family = socket.AF_INET
self.ip = hostData['host']
- self.port = int(hostData.get('port', 11211))
- self.address = ( self.ip, self.port )
+ self.port = int(hostData.get('port') or 11211)
+ self.address = (self.ip, self.port)
self.deaduntil = 0
self.socket = None
+ self.flush_on_next_connect = 0
self.buffer = ''
@@ -1081,6 +1261,8 @@ class _Host(object):
def mark_dead(self, reason):
self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
self.deaduntil = time.time() + self.dead_retry
+ if self.flush_on_reconnect:
+ self.flush_on_next_connect = 1
self.close_socket()
def _get_socket(self):
@@ -1089,18 +1271,23 @@ class _Host(object):
if self.socket:
return self.socket
s = socket.socket(self.family, socket.SOCK_STREAM)
- if hasattr(s, 'settimeout'): s.settimeout(self.socket_timeout)
+ if hasattr(s, 'settimeout'):
+ s.settimeout(self.socket_timeout)
try:
s.connect(self.address)
- except socket.timeout, msg:
+ except socket.timeout as msg:
self.mark_dead("connect: %s" % msg)
return None
- except socket.error, msg:
- if isinstance(msg, tuple): msg = msg[1]
- self.mark_dead("connect: %s" % msg[1])
+ except socket.error as msg:
+ if isinstance(msg, tuple):
+ msg = msg[1]
+ self.mark_dead("connect: %s" % msg)
return None
self.socket = s
self.buffer = ''
+ if self.flush_on_next_connect:
+ self.flush()
+ self.flush_on_next_connect = 0
return s
def close_socket(self):
@@ -1112,12 +1299,21 @@ class _Host(object):
self.socket.sendall(cmd + '\r\n')
def send_cmds(self, cmds):
- """ cmds already has trailing \r\n's applied """
+ """cmds already has trailing \r\n's applied."""
self.socket.sendall(cmds)
- def readline(self):
+ def readline(self, raise_exception=False):
+ """Read a line and return it.
+
+ If "raise_exception" is set, raise _ConnectionDeadError if the
+ read fails, otherwise return an empty string.
+ """
buf = self.buffer
- recv = self.socket.recv
+ if self.socket:
+ recv = self.socket.recv
+ else:
+ recv = lambda bufsize: ''
+
while True:
index = buf.find('\r\n')
if index >= 0:
@@ -1125,18 +1321,21 @@ class _Host(object):
data = recv(4096)
if not data:
# connection close, let's kill it and raise
- self.close_socket()
- raise _ConnectionDeadError()
+ self.mark_dead('connection closed in readline()')
+ if raise_exception:
+ raise _ConnectionDeadError()
+ else:
+ return ''
buf += data
- self.buffer = buf[index+2:]
+ self.buffer = buf[index + 2:]
return buf[:index]
- def expect(self, text):
- line = self.readline()
+ def expect(self, text, raise_exception=False):
+ line = self.readline(raise_exception)
if line != text:
self.debuglog("while expecting '%s', got unexpected response '%s'"
- % (text, line))
+ % (text, line))
return line
def recv(self, rlen):
@@ -1146,11 +1345,15 @@ class _Host(object):
foo = self_socket_recv(max(rlen - len(buf), 4096))
buf += foo
if not foo:
- raise _Error( 'Read %d bytes, expecting %d, '
- 'read returned 0 length bytes' % ( len(buf), rlen ))
+ raise _Error('Read %d bytes, expecting %d, '
+ 'read returned 0 length bytes' % (len(buf), rlen))
self.buffer = buf[rlen:]
return buf[:rlen]
+ def flush(self):
+ self.send_cmd('flush_all')
+ self.expect('OK')
+
def __str__(self):
d = ''
if self.deaduntil:
@@ -1158,12 +1361,15 @@ class _Host(object):
if self.family == socket.AF_INET:
return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
+ elif self.family == socket.AF_INET6:
+ return "inet6:[%s]:%d%s" % (self.address[0], self.address[1], d)
else:
return "unix:%s%s" % (self.address, d)
def _doctest():
- import doctest, memcache
+ import doctest
+ import memcache
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
globs = {"mc": mc}
@@ -1171,10 +1377,10 @@ def _doctest():
if __name__ == "__main__":
failures = 0
- print "Testing docstrings..."
+ print("Testing docstrings...")
_doctest()
- print "Running tests:"
- print
+ print("Running tests:")
+ print()
serverList = [["127.0.0.1:11211"]]
if '--do-unix' in sys.argv:
serverList.append([os.path.join(os.getcwd(), 'memcached.socket')])
@@ -1183,27 +1389,32 @@ if __name__ == "__main__":
mc = Client(servers, debug=1)
def to_s(val):
- if not isinstance(val, basestring):
+ if not isinstance(val, _str_cls):
return "%s (%s)" % (val, type(val))
return "%s" % val
+
def test_setget(key, val):
global failures
- print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
+ print("Testing set/get {'%s': %s} ..."
+ % (to_s(key), to_s(val)), end=" ")
mc.set(key, val)
newval = mc.get(key)
if newval == val:
- print "OK"
+ print("OK")
return 1
else:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
return 0
-
class FooStruct(object):
+
def __init__(self):
self.bar = "baz"
+
def __str__(self):
return "A FooStruct"
+
def __eq__(self, other):
if isinstance(other, FooStruct):
return self.bar == other.bar
@@ -1211,138 +1422,155 @@ if __name__ == "__main__":
test_setget("a_string", "some random string")
test_setget("an_integer", 42)
- if test_setget("long", long(1<<30)):
- print "Testing delete ...",
+ if test_setget("long", long(1 << 30)):
+ print("Testing delete ...", end=" ")
if mc.delete("long"):
- print "OK"
+ print("OK")
else:
- print "FAIL"; failures = failures + 1
- print "Checking results of delete ..."
- if mc.get("long") == None:
- print "OK"
+ print("FAIL")
+ failures += 1
+ print("Checking results of delete ...", end=" ")
+ if mc.get("long") is None:
+ print("OK")
else:
- print "FAIL"; failures = failures + 1
- print "Testing get_multi ...",
- print mc.get_multi(["a_string", "an_integer"])
+ print("FAIL")
+ failures += 1
+    print("Testing get_multi ...", end=" ")
+ print(mc.get_multi(["a_string", "an_integer"]))
# removed from the protocol
- #if test_setget("timed_delete", 'foo'):
- # print "Testing timed delete ...",
- # if mc.delete("timed_delete", 1):
- # print "OK"
- # else:
- # print "FAIL"; failures = failures + 1
- # print "Checking results of timed delete ..."
- # if mc.get("timed_delete") == None:
- # print "OK"
- # else:
- # print "FAIL"; failures = failures + 1
+ # if test_setget("timed_delete", 'foo'):
+ # print "Testing timed delete ...",
+ # if mc.delete("timed_delete", 1):
+ # print("OK")
+ # else:
+ # print("FAIL")
+ # failures += 1
+ # print "Checking results of timed delete ..."
+ # if mc.get("timed_delete") is None:
+ # print("OK")
+ # else:
+ # print("FAIL")
+ # failures += 1
- print "Testing get(unknown value) ...",
- print to_s(mc.get("unknown_value"))
+ print("Testing get(unknown value) ...", end=" ")
+ print(to_s(mc.get("unknown_value")))
f = FooStruct()
test_setget("foostruct", f)
- print "Testing incr ...",
+ print("Testing incr ...", end=" ")
x = mc.incr("an_integer", 1)
if x == 43:
- print "OK"
+ print("OK")
else:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
- print "Testing decr ...",
+ print("Testing decr ...", end=" ")
x = mc.decr("an_integer", 1)
if x == 42:
- print "OK"
+ print("OK")
else:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
sys.stdout.flush()
# sanity tests
- print "Testing sending spaces...",
+ print("Testing sending spaces...", end=" ")
sys.stdout.flush()
try:
x = mc.set("this has spaces", 1)
- except Client.MemcachedKeyCharacterError, msg:
- print "OK"
+ except Client.MemcachedKeyCharacterError as msg:
+ print("OK")
else:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
- print "Testing sending control characters...",
+ print("Testing sending control characters...", end=" ")
try:
x = mc.set("this\x10has\x11control characters\x02", 1)
- except Client.MemcachedKeyCharacterError, msg:
- print "OK"
+ except Client.MemcachedKeyCharacterError as msg:
+ print("OK")
else:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
- print "Testing using insanely long key...",
+ print("Testing using insanely long key...", end=" ")
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH, 1)
- except Client.MemcachedKeyLengthError, msg:
- print "FAIL"; failures = failures + 1
+ except Client.MemcachedKeyLengthError as msg:
+ print("FAIL")
+ failures += 1
else:
- print "OK"
+ print("OK")
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'a', 1)
- except Client.MemcachedKeyLengthError, msg:
- print "OK"
+ except Client.MemcachedKeyLengthError as msg:
+ print("OK")
else:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
- print "Testing sending a unicode-string key...",
+ print("Testing sending a unicode-string key...", end=" ")
try:
- x = mc.set(u'keyhere', 1)
- except Client.MemcachedStringEncodingError, msg:
- print "OK",
+ x = mc.set(unicode('keyhere'), 1)
+ except Client.MemcachedStringEncodingError as msg:
+ print("OK", end=" ")
else:
- print "FAIL",; failures = failures + 1
+ print("FAIL", end=" ")
+ failures += 1
try:
- x = mc.set((u'a'*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
- except:
- print "FAIL",; failures = failures + 1
+ x = mc.set((unicode('a')*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
+ except Client.MemcachedKeyError:
+ print("FAIL", end=" ")
+ failures += 1
else:
- print "OK",
- import pickle
+ print("OK", end=" ")
s = pickle.loads('V\\u4f1a\np0\n.')
try:
- x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
+ x = mc.set((s * SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except Client.MemcachedKeyLengthError:
- print "OK"
+ print("OK")
else:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
- print "Testing using a value larger than the memcached value limit...",
+ print("Testing using a value larger than the memcached value limit...")
+ print('NOTE: "MemCached: while expecting[...]" is normal...')
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
- if mc.get('keyhere') == None:
- print "OK",
+ if mc.get('keyhere') is None:
+ print("OK", end=" ")
else:
- print "FAIL",; failures = failures + 1
+ print("FAIL", end=" ")
+ failures += 1
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
- if mc.get('keyhere') == None:
- print "OK"
+ if mc.get('keyhere') is None:
+ print("OK")
else:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
- print "Testing set_multi() with no memcacheds running",
+ print("Testing set_multi() with no memcacheds running", end=" ")
mc.disconnect_all()
- errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'})
+ errors = mc.set_multi({'keyhere': 'a', 'keythere': 'b'})
if errors != []:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
else:
- print "OK"
+ print("OK")
- print "Testing delete_multi() with no memcacheds running",
+ print("Testing delete_multi() with no memcacheds running", end=" ")
mc.disconnect_all()
- ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'})
+ ret = mc.delete_multi({'keyhere': 'a', 'keythere': 'b'})
if ret != 1:
- print "FAIL"; failures = failures + 1
+ print("FAIL")
+ failures += 1
else:
- print "OK"
+ print("OK")
if failures > 0:
- print '*** THERE WERE FAILED TESTS'
+ print('*** THERE WERE FAILED TESTS')
sys.exit(1)
sys.exit(0)
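
For reference, a minimal usage sketch of the address formats the reworked
_Host parser now accepts, and of the get_multi()/key_prefix behaviour
documented above (server addresses are illustrative, not part of the patch):

    import memcache

    mc = memcache.Client(['127.0.0.1:11211'])            # plain host:port
    mc6 = memcache.Client(['inet6:[::1]:11211'])         # new IPv6 form
    mcu = memcache.Client(['unix:/tmp/memcached.sock'])  # unix domain socket

    # With key_prefix, keys are stored as 'user:1' and 'user:2', but the
    # dictionary returned by get_multi() is keyed by the unprefixed originals.
    mc.set_multi({'1': 'alice', '2': 'bob'}, key_prefix='user:')
    mc.get_multi(['1', '2'], key_prefix='user:')  # {'1': 'alice', '2': 'bob'}
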
diff --git a/gluon/contrib/pypyodbc.py b/gluon/contrib/pypyodbc.py
index 7960a5bb..7d359ccb 100644
--- a/gluon/contrib/pypyodbc.py
+++ b/gluon/contrib/pypyodbc.py
@@ -4,7 +4,7 @@
# The MIT License (MIT)
#
-# Copyright (c) 2013 Henry Zhou and PyPyODBC contributors
+# Copyright (c) 2014 Henry Zhou and PyPyODBC contributors
# Copyright (c) 2004 Michele Petrazzo
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
@@ -16,7 +16,7 @@
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO #EVENT SHALL THE
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
@@ -25,7 +25,7 @@ pooling = True
apilevel = '2.0'
paramstyle = 'qmark'
threadsafety = 1
-version = '1.2.0'
+version = '1.3.0'
lowercase=True
DEBUG = 0
@@ -592,38 +592,38 @@ if sys.platform == 'cli':
# http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.sdk_12.5.1.aseodbc/html/aseodbc/CACFDIGH.htm
SQL_data_type_dict = { \
-#SQL Data TYPE 0.Python Data Type 1.Default Output Converter 2.Buffer Type 3.Buffer Allocator 4.Default Buffer Size
-SQL_TYPE_NULL : (None, lambda x: None, SQL_C_CHAR, create_buffer, 2 ),
-SQL_CHAR : (str, lambda x: x, SQL_C_CHAR, create_buffer, 2048 ),
-SQL_NUMERIC : (Decimal, Decimal_cvt, SQL_C_CHAR, create_buffer, 150 ),
-SQL_DECIMAL : (Decimal, Decimal_cvt, SQL_C_CHAR, create_buffer, 150 ),
-SQL_INTEGER : (int, int, SQL_C_CHAR, create_buffer, 150 ),
-SQL_SMALLINT : (int, int, SQL_C_CHAR, create_buffer, 150 ),
-SQL_FLOAT : (float, float, SQL_C_CHAR, create_buffer, 150 ),
-SQL_REAL : (float, float, SQL_C_CHAR, create_buffer, 150 ),
-SQL_DOUBLE : (float, float, SQL_C_CHAR, create_buffer, 200 ),
-SQL_DATE : (datetime.date, dt_cvt, SQL_C_CHAR , create_buffer, 30 ),
-SQL_TIME : (datetime.time, tm_cvt, SQL_C_CHAR, create_buffer, 20 ),
-SQL_SS_TIME2 : (datetime.time, tm_cvt, SQL_C_CHAR, create_buffer, 20 ),
-SQL_TIMESTAMP : (datetime.datetime, dttm_cvt, SQL_C_CHAR, create_buffer, 30 ),
-SQL_VARCHAR : (str, lambda x: x, SQL_C_CHAR, create_buffer, 2048 ),
-SQL_LONGVARCHAR : (str, lambda x: x, SQL_C_CHAR, create_buffer, 20500 ),
-SQL_BINARY : (bytearray, bytearray_cvt, SQL_C_BINARY, create_buffer, 5120 ),
-SQL_VARBINARY : (bytearray, bytearray_cvt, SQL_C_BINARY, create_buffer, 5120 ),
-SQL_LONGVARBINARY : (bytearray, bytearray_cvt, SQL_C_BINARY, create_buffer, 20500 ),
-SQL_BIGINT : (long, long, SQL_C_CHAR, create_buffer, 150 ),
-SQL_TINYINT : (int, int, SQL_C_CHAR, create_buffer, 150 ),
-SQL_BIT : (bool, lambda x:x == BYTE_1, SQL_C_CHAR, create_buffer, 2 ),
-SQL_WCHAR : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 2048 ),
-SQL_WVARCHAR : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 2048 ),
-SQL_GUID : (str, str, SQL_C_CHAR, create_buffer, 50 ),
-SQL_WLONGVARCHAR : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 20500 ),
-SQL_TYPE_DATE : (datetime.date, dt_cvt, SQL_C_CHAR, create_buffer, 30 ),
-SQL_TYPE_TIME : (datetime.time, tm_cvt, SQL_C_CHAR, create_buffer, 20 ),
-SQL_TYPE_TIMESTAMP : (datetime.datetime, dttm_cvt, SQL_C_CHAR, create_buffer, 30 ),
-SQL_SS_VARIANT : (str, lambda x: x, SQL_C_CHAR, create_buffer, 2048 ),
-SQL_SS_XML : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 20500 ),
-SQL_SS_UDT : (bytearray, bytearray_cvt, SQL_C_BINARY, create_buffer, 5120 ),
+#SQL Data TYPE 0.Python Data Type 1.Default Output Converter 2.Buffer Type 3.Buffer Allocator 4.Default Size 5.Variable Length
+SQL_TYPE_NULL : (None, lambda x: None, SQL_C_CHAR, create_buffer, 2 , False ),
+SQL_CHAR : (str, lambda x: x, SQL_C_CHAR, create_buffer, 2048 , False ),
+SQL_NUMERIC : (Decimal, Decimal_cvt, SQL_C_CHAR, create_buffer, 150 , False ),
+SQL_DECIMAL : (Decimal, Decimal_cvt, SQL_C_CHAR, create_buffer, 150 , False ),
+SQL_INTEGER : (int, int, SQL_C_CHAR, create_buffer, 150 , False ),
+SQL_SMALLINT : (int, int, SQL_C_CHAR, create_buffer, 150 , False ),
+SQL_FLOAT : (float, float, SQL_C_CHAR, create_buffer, 150 , False ),
+SQL_REAL : (float, float, SQL_C_CHAR, create_buffer, 150 , False ),
+SQL_DOUBLE : (float, float, SQL_C_CHAR, create_buffer, 200 , False ),
+SQL_DATE : (datetime.date, dt_cvt, SQL_C_CHAR, create_buffer, 30 , False ),
+SQL_TIME : (datetime.time, tm_cvt, SQL_C_CHAR, create_buffer, 20 , False ),
+SQL_SS_TIME2 : (datetime.time, tm_cvt, SQL_C_CHAR, create_buffer, 20 , False ),
+SQL_TIMESTAMP : (datetime.datetime, dttm_cvt, SQL_C_CHAR, create_buffer, 30 , False ),
+SQL_VARCHAR : (str, lambda x: x, SQL_C_CHAR, create_buffer, 2048 , False ),
+SQL_LONGVARCHAR : (str, lambda x: x, SQL_C_CHAR, create_buffer, 20500 , True ),
+SQL_BINARY : (bytearray, bytearray_cvt, SQL_C_BINARY, create_buffer, 5120 , True ),
+SQL_VARBINARY : (bytearray, bytearray_cvt, SQL_C_BINARY, create_buffer, 5120 , True ),
+SQL_LONGVARBINARY : (bytearray, bytearray_cvt, SQL_C_BINARY, create_buffer, 20500 , True ),
+SQL_BIGINT : (long, long, SQL_C_CHAR, create_buffer, 150 , False ),
+SQL_TINYINT : (int, int, SQL_C_CHAR, create_buffer, 150 , False ),
+SQL_BIT : (bool, lambda x:x == BYTE_1, SQL_C_CHAR, create_buffer, 2 , False ),
+SQL_WCHAR : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 2048 , False ),
+SQL_WVARCHAR : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 2048 , False ),
+SQL_GUID : (str, str, SQL_C_CHAR, create_buffer, 2048 , False ),
+SQL_WLONGVARCHAR : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 20500 , True ),
+SQL_TYPE_DATE : (datetime.date, dt_cvt, SQL_C_CHAR, create_buffer, 30 , False ),
+SQL_TYPE_TIME : (datetime.time, tm_cvt, SQL_C_CHAR, create_buffer, 20 , False ),
+SQL_TYPE_TIMESTAMP : (datetime.datetime, dttm_cvt, SQL_C_CHAR, create_buffer, 30 , False ),
+SQL_SS_VARIANT : (str, lambda x: x, SQL_C_CHAR, create_buffer, 2048 , True ),
+SQL_SS_XML : (unicode, lambda x: x, SQL_C_WCHAR, create_buffer_u, 20500 , True ),
+SQL_SS_UDT : (bytearray, bytearray_cvt, SQL_C_BINARY, create_buffer, 5120 , True ),
}
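
Each entry in the table above now carries a sixth element, the variable-length
flag that _CreateColBuf() (further down in this patch) uses to choose between
binding a fixed buffer with SQLBindCol and fetching in parts with SQLGetData.
A sketch of unpacking one entry; the local names are chosen for this example:

    (py_type, converter, buf_type, allocator,
     default_size, var_len) = SQL_data_type_dict[SQL_LONGVARCHAR]
    # var_len is True here, so this column type is fetched with SQLGetData
    # instead of being bound to a fixed 20500-byte buffer.
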
@@ -645,6 +645,7 @@ SQLRETURN -> ctypes.c_short
funcs_with_ret = [
"SQLAllocHandle",
"SQLBindParameter",
+ "SQLBindCol",
"SQLCloseCursor",
"SQLColAttribute",
"SQLColumns",
@@ -1175,7 +1176,9 @@ class Cursor:
"""prepare a query"""
#self._free_results(FREE_STATEMENT)
-
+ if not self.connection:
+ self.close()
+
if type(query_string) == unicode:
c_query_string = wchar_pointer(UCS_buf(query_string))
ret = ODBC_API.SQLPrepareW(self.stmt_h, c_query_string, len(query_string))
@@ -1231,6 +1234,8 @@ class Cursor:
def _BindParams(self, param_types, pram_io_list = []):
"""Create parameter buffers based on param types, and bind them to the statement"""
# Clear the old Parameters
+ if not self.connection:
+ self.close()
#self._free_results(NO_FREE_STATEMENT)
# Get the number of query parameters judged by database.
@@ -1414,7 +1419,7 @@ class Cursor:
dec_num, ADDR(ParameterBuffer), BufferLen,ADDR(LenOrIndBuf))
if ret != SQL_SUCCESS:
check_success(self, ret)
- # Append the value buffer and the lenth buffer to the array
+ # Append the value buffer and the length buffer to the array
ParamBufferList.append((ParameterBuffer,LenOrIndBuf,sql_type))
self._last_param_types = param_types
@@ -1426,6 +1431,9 @@ class Cursor:
If parameters are provided, the query would first be prepared, then executed with parameters;
        If parameters are not provided, the query string alone is executed directly
"""
+ if not self.connection:
+ self.close()
+
self._free_stmt(SQL_CLOSE)
if params:
# If parameters exist, first prepare the query then executed with parameters
@@ -1549,7 +1557,7 @@ class Cursor:
c_buf_len = len(c_char_buf)
elif param_types[col_num][0] == 'bi':
- c_char_buf = str(param_val)
+ c_char_buf = str_8b(param_val)
c_buf_len = len(c_char_buf)
else:
@@ -1587,6 +1595,8 @@ class Cursor:
def _SQLExecute(self):
+ if not self.connection:
+ self.close()
ret = SQLExecute(self.stmt_h)
if ret != SQL_SUCCESS:
check_success(self, ret)
@@ -1594,6 +1604,9 @@ class Cursor:
def execdirect(self, query_string):
"""Execute a query directly"""
+ if not self.connection:
+ self.close()
+
self._free_stmt()
self._last_param_types = None
self.statement = None
@@ -1611,6 +1624,8 @@ class Cursor:
def callproc(self, procname, args):
+ if not self.connection:
+ self.close()
raise Warning('', 'Still not fully implemented')
self._pram_io_list = [row[4] for row in self.procedurecolumns(procedure = procname).fetchall() if row[4] not in (SQL_RESULT_COL, SQL_RETURN_VALUE)]
@@ -1637,6 +1652,9 @@ class Cursor:
def executemany(self, query_string, params_list = [None]):
+ if not self.connection:
+ self.close()
+
for params in params_list:
self.execute(query_string, params, many_mode = True)
self._NumOfRows()
@@ -1647,28 +1665,38 @@ class Cursor:
def _CreateColBuf(self):
+ if not self.connection:
+ self.close()
self._free_stmt(SQL_UNBIND)
NOC = self._NumOfCols()
self._ColBufferList = []
+ bind_data = True
for col_num in range(NOC):
- col_name = self.description[col_num][0]
-
- col_sql_data_type = self._ColTypeCodeList[col_num]
+ col_name = self.description[col_num][0]
+ col_size = self.description[col_num][2]
+ col_sql_data_type = self._ColTypeCodeList[col_num]
+ target_type = SQL_data_type_dict[col_sql_data_type][2]
+ dynamic_length = SQL_data_type_dict[col_sql_data_type][5]
# set default size base on the column's sql data type
total_buf_len = SQL_data_type_dict[col_sql_data_type][4]
- # over-write if there's preset size value for "large columns"
- if total_buf_len >= 20500:
+
+            # over-write if there's a pre-set size value for "large columns"
+ if total_buf_len > 20500:
total_buf_len = self._outputsize.get(None,total_buf_len)
- # over-write if there's preset size value for the "col_num" column
+            # over-write if there's a pre-set size value for the "col_num" column
total_buf_len = self._outputsize.get(col_num, total_buf_len)
+            # If the column is very large, do not bind it: a large buffer
+            # decreases performance, and sometimes you only get a NULL value.
+            # In that case use SQLGetData instead.
+ if col_size >= 1024:
+ dynamic_length = True
alloc_buffer = SQL_data_type_dict[col_sql_data_type][3](total_buf_len)
used_buf_len = c_ssize_t()
- target_type = SQL_data_type_dict[col_sql_data_type][2]
force_unicode = self.connection.unicode_results
if force_unicode and col_sql_data_type in (SQL_CHAR,SQL_VARCHAR,SQL_LONGVARCHAR):
@@ -1676,14 +1704,22 @@ class Cursor:
alloc_buffer = create_buffer_u(total_buf_len)
buf_cvt_func = self.connection.output_converter[self._ColTypeCodeList[col_num]]
- ADDR(alloc_buffer)
- ADDR(used_buf_len)
- self._ColBufferList.append([col_name, target_type, used_buf_len, ADDR(used_buf_len), alloc_buffer, ADDR(alloc_buffer), total_buf_len, buf_cvt_func])
-
-
+
+ if bind_data:
+ if dynamic_length:
+ bind_data = False
+ self._ColBufferList.append([col_name, target_type, used_buf_len, ADDR(used_buf_len), alloc_buffer, ADDR(alloc_buffer), total_buf_len, buf_cvt_func, bind_data])
+
+ if bind_data:
+ ret = ODBC_API.SQLBindCol(self.stmt_h, col_num + 1, target_type, ADDR(alloc_buffer), total_buf_len, ADDR(used_buf_len))
+ if ret != SQL_SUCCESS:
+ check_success(self, ret)
def _UpdateDesc(self):
"Get the information of (name, type_code, display_size, internal_size, col_precision, scale, null_ok)"
+ if not self.connection:
+ self.close()
+
force_unicode = self.connection.unicode_results
if force_unicode:
Cname = create_buffer_u(1024)
@@ -1739,6 +1775,9 @@ class Cursor:
def _NumOfRows(self):
"""Get the number of rows"""
+ if not self.connection:
+ self.close()
+
NOR = c_ssize_t()
ret = SQLRowCount(self.stmt_h, ADDR(NOR))
if ret != SQL_SUCCESS:
@@ -1749,6 +1788,9 @@ class Cursor:
def _NumOfCols(self):
"""Get the number of cols"""
+ if not self.connection:
+ self.close()
+
NOC = c_short()
ret = SQLNumResultCols(self.stmt_h, ADDR(NOC))
if ret != SQL_SUCCESS:
@@ -1757,6 +1799,9 @@ class Cursor:
def fetchall(self):
+ if not self.connection:
+ self.close()
+
rows = []
while True:
row = self.fetchone()
@@ -1767,6 +1812,9 @@ class Cursor:
def fetchmany(self, num = None):
+ if not self.connection:
+ self.close()
+
if num is None:
num = self.arraysize
rows = []
@@ -1780,74 +1828,83 @@ class Cursor:
def fetchone(self):
+ if not self.connection:
+ self.close()
+
ret = SQLFetch(self.stmt_h)
- if ret == SQL_SUCCESS:
+
+ if ret in (SQL_SUCCESS,SQL_SUCCESS_WITH_INFO):
'''Bind buffers for the record set columns'''
value_list = []
col_num = 1
- for col_name, target_type, used_buf_len, ADDR_used_buf_len, alloc_buffer, ADDR_alloc_buffer, total_buf_len, buf_cvt_func in self._ColBufferList:
-
- blocks = []
+ for col_name, target_type, used_buf_len, ADDR_used_buf_len, alloc_buffer, ADDR_alloc_buffer, total_buf_len, buf_cvt_func, bind_data in self._ColBufferList:
+ raw_data_parts = []
while 1:
- ret = SQLGetData(self.stmt_h, col_num, target_type, ADDR_alloc_buffer, total_buf_len, ADDR_used_buf_len)
+ if bind_data:
+ ret = SQL_SUCCESS
+ else:
+ ret = SQLGetData(self.stmt_h, col_num, target_type, ADDR_alloc_buffer, total_buf_len, ADDR_used_buf_len)
if ret == SQL_SUCCESS:
if used_buf_len.value == SQL_NULL_DATA:
value_list.append(None)
else:
- if blocks == []:
+ if raw_data_parts == []:
+ # Means no previous data, no need to combine
if target_type == SQL_C_BINARY:
value_list.append(buf_cvt_func(alloc_buffer.raw[:used_buf_len.value]))
elif target_type == SQL_C_WCHAR:
value_list.append(buf_cvt_func(from_buffer_u(alloc_buffer)))
else:
- #print col_name, target_type, alloc_buffer.value
value_list.append(buf_cvt_func(alloc_buffer.value))
else:
+                            # There is previously fetched raw data to combine
if target_type == SQL_C_BINARY:
- blocks.append(alloc_buffer.raw[:used_buf_len.value])
+ raw_data_parts.append(alloc_buffer.raw[:used_buf_len.value])
elif target_type == SQL_C_WCHAR:
- blocks.append(from_buffer_u(alloc_buffer))
+ raw_data_parts.append(from_buffer_u(alloc_buffer))
else:
- #print col_name, target_type, alloc_buffer.value
- blocks.append(alloc_buffer.value)
+ raw_data_parts.append(alloc_buffer.value)
break
elif ret == SQL_SUCCESS_WITH_INFO:
+ # Means the data is only partial
if target_type == SQL_C_BINARY:
- blocks.append(alloc_buffer.raw)
+ raw_data_parts.append(alloc_buffer.raw)
else:
- blocks.append(alloc_buffer.value)
+ raw_data_parts.append(alloc_buffer.value)
elif ret == SQL_NO_DATA:
+ # Means all data has been transmitted
break
else:
check_success(self, ret)
- if blocks != []:
+ if raw_data_parts != []:
if py_v3:
if target_type != SQL_C_BINARY:
- raw_value = ''.join(blocks)
+ raw_value = ''.join(raw_data_parts)
else:
- raw_value = BLANK_BYTE.join(blocks)
+ raw_value = BLANK_BYTE.join(raw_data_parts)
else:
- raw_value = ''.join(blocks)
+ raw_value = ''.join(raw_data_parts)
value_list.append(buf_cvt_func(raw_value))
col_num += 1
-
+
return self._row_type(value_list)
else:
if ret == SQL_NO_DATA_FOUND:
+
return None
else:
check_success(self, ret)
def __next__(self):
- self.next()
+ return self.next()
- def next(self):
+ def next(self):
row = self.fetchone()
if row is None:
raise(StopIteration)
@@ -1858,6 +1915,9 @@ class Cursor:
def skip(self, count = 0):
+ if not self.connection:
+ self.close()
+
for i in range(count):
ret = ODBC_API.SQLFetchScroll(self.stmt_h, SQL_FETCH_NEXT, 0)
if ret != SQL_SUCCESS:
@@ -1867,6 +1927,9 @@ class Cursor:
def nextset(self):
+ if not self.connection:
+ self.close()
+
ret = ODBC_API.SQLMoreResults(self.stmt_h)
if ret not in (SQL_SUCCESS, SQL_NO_DATA):
check_success(self, ret)
@@ -1882,6 +1945,9 @@ class Cursor:
def _free_stmt(self, free_type = None):
+ if not self.connection:
+ self.close()
+
if not self.connection.connected:
raise ProgrammingError('HY000','Attempt to use a closed connection.')
@@ -1903,6 +1969,9 @@ class Cursor:
def getTypeInfo(self, sqlType = None):
+ if not self.connection:
+ self.close()
+
if sqlType is None:
type = SQL_ALL_TYPES
else:
@@ -1917,6 +1986,9 @@ class Cursor:
def tables(self, table=None, catalog=None, schema=None, tableType=None):
"""Return a list with all tables"""
+ if not self.connection:
+ self.close()
+
l_catalog = l_schema = l_table = l_tableType = 0
if unicode in [type(x) for x in (table, catalog, schema,tableType)]:
@@ -1961,7 +2033,10 @@ class Cursor:
def columns(self, table=None, catalog=None, schema=None, column=None):
- """Return a list with all columns"""
+ """Return a list with all columns"""
+ if not self.connection:
+ self.close()
+
l_catalog = l_schema = l_table = l_column = 0
if unicode in [type(x) for x in (table, catalog, schema,column)]:
@@ -2004,6 +2079,9 @@ class Cursor:
def primaryKeys(self, table=None, catalog=None, schema=None):
+ if not self.connection:
+ self.close()
+
l_catalog = l_schema = l_table = 0
if unicode in [type(x) for x in (table, catalog, schema)]:
@@ -2044,6 +2122,9 @@ class Cursor:
def foreignKeys(self, table=None, catalog=None, schema=None, foreignTable=None, foreignCatalog=None, foreignSchema=None):
+ if not self.connection:
+ self.close()
+
l_catalog = l_schema = l_table = l_foreignTable = l_foreignCatalog = l_foreignSchema = 0
if unicode in [type(x) for x in (table, catalog, schema,foreignTable,foreignCatalog,foreignSchema)]:
@@ -2092,6 +2173,9 @@ class Cursor:
def procedurecolumns(self, procedure=None, catalog=None, schema=None, column=None):
+ if not self.connection:
+ self.close()
+
l_catalog = l_schema = l_procedure = l_column = 0
if unicode in [type(x) for x in (procedure, catalog, schema,column)]:
string_p = lambda x:wchar_pointer(UCS_buf(x))
@@ -2132,6 +2216,9 @@ class Cursor:
def procedures(self, procedure=None, catalog=None, schema=None):
+ if not self.connection:
+ self.close()
+
l_catalog = l_schema = l_procedure = 0
if unicode in [type(x) for x in (procedure, catalog, schema)]:
@@ -2170,6 +2257,9 @@ class Cursor:
def statistics(self, table, catalog=None, schema=None, unique=False, quick=True):
+ if not self.connection:
+ self.close()
+
l_table = l_catalog = l_schema = 0
if unicode in [type(x) for x in (table, catalog, schema)]:
@@ -2217,15 +2307,23 @@ class Cursor:
def commit(self):
+ if not self.connection:
+ self.close()
self.connection.commit()
def rollback(self):
+ if not self.connection:
+ self.close()
self.connection.rollback()
def setoutputsize(self, size, column = None):
+ if not self.connection:
+ self.close()
self._outputsize[column] = size
def setinputsizes(self, sizes):
+ if not self.connection:
+ self.close()
self._inputsizers = [size for size in sizes]
@@ -2234,35 +2332,31 @@ class Cursor:
# ret = ODBC_API.SQLCloseCursor(self.stmt_h)
# check_success(self, ret)
#
- ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_CLOSE)
- check_success(self, ret)
+ if self.connection.connected:
+ ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_CLOSE)
+ check_success(self, ret)
- ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_UNBIND)
- check_success(self, ret)
+ ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_UNBIND)
+ check_success(self, ret)
- ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_RESET_PARAMS)
- check_success(self, ret)
+ ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_RESET_PARAMS)
+ check_success(self, ret)
- ret = ODBC_API.SQLFreeHandle(SQL_HANDLE_STMT, self.stmt_h)
- check_success(self, ret)
+ ret = ODBC_API.SQLFreeHandle(SQL_HANDLE_STMT, self.stmt_h)
+ check_success(self, ret)
+
self.closed = True
-
def __del__(self):
if not self.closed:
- #if DEBUG:print 'auto closing cursor: ',
- try:
- self.close()
- except:
- #if DEBUG:print 'failed'
- pass
- else:
- #if DEBUG:print 'succeed'
- pass
+ self.close()
def __exit__(self, type, value, traceback):
+ if not self.connection:
+ self.close()
+
if value:
self.rollback()
else:
@@ -2291,7 +2385,7 @@ class Connection:
self.autocommit = autocommit
self.readonly = False
self.timeout = 0
- self._cursors = []
+ # self._cursors = []
for key, value in list(kargs.items()):
connectString = connectString + key + '=' + value + ';'
self.connectString = connectString
@@ -2425,7 +2519,7 @@ class Connection:
if not self.connected:
raise ProgrammingError('HY000','Attempt to use a closed connection.')
cur = Cursor(self, row_type_callable=row_type_callable)
- self._cursors.append(cur)
+ # self._cursors.append(cur)
return cur
def update_db_special_info(self):
@@ -2436,6 +2530,7 @@ class Connection:
SQL_SS_TIME2,
):
cur = Cursor(self)
+
try:
info_tuple = cur.getTypeInfo(sql_type)
if info_tuple is not None:
@@ -2534,10 +2629,10 @@ class Connection:
def close(self):
if not self.connected:
raise ProgrammingError('HY000','Attempt to close a closed connection.')
- for cur in self._cursors:
- if not cur is None:
- if not cur.closed:
- cur.close()
+ # for cur in self._cursors:
+ # if not cur is None:
+ # if not cur.closed:
+ # cur.close()
if self.connected:
#if DEBUG:print 'disconnect'
@@ -2680,4 +2775,18 @@ def dataSources():
ctrl_err(SQL_HANDLE_ENV, shared_env_h, ret)
else:
dsn_list[dsn.value] = desc.value
- return dsn_list
\ No newline at end of file
+ return dsn_list
+
+
+def monkey_patch_for_gevent():
+ import functools, gevent
+ apply_e = gevent.get_hub().threadpool.apply_e
+ def monkey_patch(func):
+ @functools.wraps(func)
+ def wrap(*args, **kwargs):
+ #if DEBUG:print('%s called with %s %s' % (func, args, kwargs))
+ return apply_e(Exception, func, args, kwargs)
+ return wrap
+ for attr in dir(ODBC_API):
+ if attr.startswith('SQL') and hasattr(getattr(ODBC_API, attr), 'argtypes'):
+ setattr(ODBC_API, attr, monkey_patch(getattr(ODBC_API, attr)))
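
monkey_patch_for_gevent() wraps every SQL* entry point so each blocking ODBC
call runs on gevent's threadpool (via the apply_e API this patch relies on)
instead of stalling the event loop. The same wrap-everything pattern in
isolation, against a hypothetical module object:

    import functools
    import gevent

    def _wrap(func, apply_e):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # run the blocking call on the threadpool, propagating exceptions
            return apply_e(Exception, func, args, kwargs)
        return wrapper

    def patch_module(mod, prefix='SQL'):
        apply_e = gevent.get_hub().threadpool.apply_e
        for attr in dir(mod):
            func = getattr(mod, attr)
            if attr.startswith(prefix) and callable(func):
                setattr(mod, attr, _wrap(func, apply_e))
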
diff --git a/gluon/contrib/redis_cache.py b/gluon/contrib/redis_cache.py
index 4475a82b..4d4d4664 100644
--- a/gluon/contrib/redis_cache.py
+++ b/gluon/contrib/redis_cache.py
@@ -6,7 +6,10 @@ import redis
from redis.exceptions import ConnectionError
from gluon import current
from gluon.cache import CacheAbstract
-import cPickle as pickle
+try:
+ import cPickle as pickle
+except:
+ import pickle
import time
import re
import logging
@@ -165,7 +168,7 @@ class RedisClient(object):
expireat = int(time.time() + time_expire) + 120
bucket_key = "%s:%s" % (cache_set_key, expireat / 60)
value = f()
- value_ = pickle.dumps(value)
+ value_ = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
if time_expire == 0:
time_expire = 1
self.r_server.setex(key, value_, time_expire)
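
The try/except import of cPickle introduced here recurs through the rest of
this patch (shell.py, globals.py, html.py, newcron.py, restricted.py,
storage.py): it keeps the module importable where the C accelerator is
missing, notably on Python 3, and pickle.HIGHEST_PROTOCOL switches dumps to
the binary protocol, which is faster and more compact than the old default
protocol 0. A minimal sketch of the idiom:

    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    data = pickle.dumps({'key': 'value'}, pickle.HIGHEST_PROTOCOL)
    assert pickle.loads(data) == {'key': 'value'}
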
diff --git a/gluon/contrib/redis_session.py b/gluon/contrib/redis_session.py
index d7bc419d..2789af33 100644
--- a/gluon/contrib/redis_session.py
+++ b/gluon/contrib/redis_session.py
@@ -3,12 +3,9 @@ Developed by niphlod@gmail.com
"""
import redis
-from redis.exceptions import ConnectionError
from gluon import current
from gluon.storage import Storage
-import cPickle as pickle
import time
-import re
import logging
import thread
diff --git a/gluon/contrib/shell.py b/gluon/contrib/shell.py
index 0a224100..8a571e42 100755
--- a/gluon/contrib/shell.py
+++ b/gluon/contrib/shell.py
@@ -31,12 +31,13 @@ An interactive, stateful AJAX shell that runs Python code on the server.
import logging
import new
-import os
-import cPickle
+try:
+ import cPickle as pickle
+except:
+ import pickle
import sys
import traceback
import types
-import wsgiref.handlers
import StringIO
import threading
locker = threading.RLock()
@@ -100,7 +101,7 @@ class History:
name: the name of the global to remove
value: any picklable value
"""
- blob = cPickle.dumps(value)
+ blob = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
if name in self.global_names:
index = self.global_names.index(name)
@@ -159,7 +160,7 @@ def represent(obj):
code below to determine whether the object changes over time.
"""
try:
- return cPickle.dumps(obj)
+ return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
except:
return repr(obj)
@@ -258,7 +259,7 @@ def run(history, statement, env={}):
if not name.startswith('__'):
try:
history.set_global(name, val)
- except (TypeError, cPickle.PicklingError), ex:
+ except (TypeError, pickle.PicklingError), ex:
UNPICKLABLE_TYPES.append(type(val))
history.add_unpicklable(statement, new_globals.keys())
diff --git a/gluon/fileutils.py b/gluon/fileutils.py
index 46f4611c..121ad99e 100644
--- a/gluon/fileutils.py
+++ b/gluon/fileutils.py
@@ -20,7 +20,7 @@ import datetime
import logging
from http import HTTP
from gzip import open as gzopen
-
+from recfile import generate
__all__ = [
'parse_version',
@@ -400,6 +400,8 @@ def get_session(request, other_application='admin'):
session_id = request.cookies['session_id_' + other_application].value
session_filename = os.path.join(
up(request.folder), other_application, 'sessions', session_id)
+ if not os.path.exists(session_filename):
+ session_filename = generate(session_filename)
osession = storage.load_storage(session_filename)
except Exception, e:
osession = storage.Storage()
diff --git a/gluon/globals.py b/gluon/globals.py
index 2a19e208..5285b83f 100644
--- a/gluon/globals.py
+++ b/gluon/globals.py
@@ -25,15 +25,19 @@ from gluon.serializers import json, custom_json
import gluon.settings as settings
from gluon.utils import web2py_uuid, secure_dumps, secure_loads
from gluon.settings import global_settings
-from gluon.dal import Field
+from gluon import recfile
import hashlib
import portalocker
-import cPickle
+try:
+ import cPickle as pickle
+except:
+ import pickle
from pickle import Pickler, MARK, DICT, EMPTY_DICT
from types import DictionaryType
import cStringIO
import datetime
import re
+import copy_reg
import Cookie
import os
import sys
@@ -166,7 +170,6 @@ class Request(Storage):
- is_local
- is_https
- restful()
- - settings
"""
def __init__(self, env):
@@ -826,11 +829,11 @@ class Session(Storage):
'sessions', response.session_id)
try:
response.session_file = \
- open(response.session_filename, 'rb+')
+ recfile.open(response.session_filename, 'rb+')
portalocker.lock(response.session_file,
portalocker.LOCK_EX)
response.session_locked = True
- self.update(cPickle.load(response.session_file))
+ self.update(pickle.load(response.session_file))
response.session_file.seek(0)
oc = response.session_filename.split('/')[-1].split('-')[0]
if check_client and response.session_client != oc:
@@ -895,7 +898,7 @@ class Session(Storage):
if row:
# rows[0].update_record(locked=True)
# Unpickle the data
- session_data = cPickle.loads(row.session_data)
+ session_data = pickle.loads(row.session_data)
self.update(session_data)
response.session_new = False
else:
@@ -907,7 +910,7 @@ class Session(Storage):
else:
response.session_id = None
response.session_new = True
- # if there is no session id yet, we'll need to create a
+ # if there is no session id yet, we'll need to create a
# new session
else:
response.session_new = True
@@ -925,7 +928,7 @@ class Session(Storage):
response.cookies[response.session_id_name]['expires'] = \
cookie_expires.strftime(FMT)
- session_pickled = cPickle.dumps(self)
+ session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
response.session_hash = hashlib.md5(session_pickled).hexdigest()
if self.flash:
@@ -1084,7 +1087,7 @@ class Session(Storage):
return True
def _unchanged(self, response):
- session_pickled = cPickle.dumps(self)
+ session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
response.session_pickled = session_pickled
session_hash = hashlib.md5(session_pickled).hexdigest()
return response.session_hash == session_hash
@@ -1111,7 +1114,7 @@ class Session(Storage):
else:
unique_key = response.session_db_unique_key
- session_pickled = response.session_pickled or cPickle.dumps(self)
+ session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
dd = dict(locked=False,
client_ip=response.session_client,
@@ -1148,11 +1151,11 @@ class Session(Storage):
session_folder = os.path.dirname(response.session_filename)
if not os.path.exists(session_folder):
os.mkdir(session_folder)
- response.session_file = open(response.session_filename, 'wb')
+ response.session_file = recfile.open(response.session_filename, 'wb')
portalocker.lock(response.session_file, portalocker.LOCK_EX)
response.session_locked = True
if response.session_file:
- session_pickled = response.session_pickled or cPickle.dumps(self)
+ session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
response.session_file.write(session_pickled)
response.session_file.truncate()
finally:
@@ -1177,3 +1180,8 @@ class Session(Storage):
del response.session_file
except:
pass
+
+def pickle_session(s):
+ return Session, (dict(s),)
+
+copy_reg.pickle(Session, pickle_session)
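
The copy_reg registration above makes Session instances pickle as a plain
dict plus the Session constructor, sidestepping Storage's attribute magic
during unpickling (storage.py below registers the same reducer for Storage).
A minimal sketch of the mechanism with a stand-alone dict subclass; the
names are illustrative:

    import copy_reg
    import pickle

    class Bag(dict):
        pass

    def pickle_bag(b):
        # reduce to (callable, args): unpickling calls Bag(dict_contents)
        return Bag, (dict(b),)

    copy_reg.pickle(Bag, pickle_bag)

    b = Bag(x=1)
    assert pickle.loads(pickle.dumps(b, pickle.HIGHEST_PROTOCOL)) == {'x': 1}
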
diff --git a/gluon/html.py b/gluon/html.py
index 50ca9054..ee491479 100644
--- a/gluon/html.py
+++ b/gluon/html.py
@@ -21,7 +21,10 @@ import sanitizer
import itertools
import decoder
import copy_reg
-import cPickle
+try:
+ import cPickle as pickle
+except:
+ import pickle
import marshal
from HTMLParser import HTMLParser
@@ -31,6 +34,7 @@ from gluon.storage import Storage
from gluon.utils import web2py_uuid, simple_hash, compare
from gluon.highlight import highlight
+
regex_crlf = re.compile('\r|\n')
join = ''.join
@@ -43,6 +47,7 @@ entitydefs.setdefault('apos', u"'".encode('utf-8'))
__all__ = [
'A',
+ 'ASSIGNJS',
'B',
'BEAUTIFY',
'BODY',
@@ -1240,13 +1245,13 @@ class CAT(DIV):
def TAG_unpickler(data):
- return cPickle.loads(data)
+ return pickle.loads(data)
def TAG_pickler(data):
d = DIV()
d.__dict__ = data.__dict__
- marshal_dump = cPickle.dumps(d)
+ marshal_dump = pickle.dumps(d, pickle.HIGHEST_PROTOCOL)
return (TAG_unpickler, (marshal_dump,))
@@ -2825,6 +2830,14 @@ class MARKMIN(XmlComponent):
def __str__(self):
return self.xml()
+def ASSIGNJS(**kargs):
+ from gluon.serializers import json
+ s = ""
+ for key, value in kargs.items():
+ s+='var %s = %s;\n' % (key, json(value))
+ return XML(s)
+
+
if __name__ == '__main__':
import doctest
doctest.testmod()
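
A usage sketch for the new ASSIGNJS helper: each keyword argument becomes a
JavaScript variable assignment with the value JSON-encoded, wrapped in XML()
so the template engine does not escape it:

    ASSIGNJS(timeout=30)
    # -> XML('var timeout = 30;\n'), typically embedded in a view to hand
    # server-side values to client-side scripts.
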
diff --git a/gluon/newcron.py b/gluon/newcron.py
index 96e1e917..8e6497b9 100644
--- a/gluon/newcron.py
+++ b/gluon/newcron.py
@@ -21,7 +21,10 @@ import datetime
import platform
import portalocker
import fileutils
-import cPickle
+try:
+ import cPickle as pickle
+except:
+ import pickle
from gluon.settings import global_settings
logger = logging.getLogger("web2py.cron")
@@ -139,7 +142,7 @@ class Token(object):
ret = None
portalocker.lock(self.master, portalocker.LOCK_EX)
try:
- (start, stop) = cPickle.load(self.master)
+ (start, stop) = pickle.load(self.master)
except:
(start, stop) = (0, 1)
if startup or self.now - start > locktime:
@@ -149,7 +152,7 @@ class Token(object):
logger.warning('WEB2PY CRON: Stale cron.master detected')
logger.debug('WEB2PY CRON: Acquiring lock')
self.master.seek(0)
- cPickle.dump((self.now, 0), self.master)
+ pickle.dump((self.now, 0), self.master)
self.master.flush()
finally:
portalocker.unlock(self.master)
@@ -166,10 +169,10 @@ class Token(object):
portalocker.lock(self.master, portalocker.LOCK_EX)
logger.debug('WEB2PY CRON: Releasing cron lock')
self.master.seek(0)
- (start, stop) = cPickle.load(self.master)
+ (start, stop) = pickle.load(self.master)
if start == self.now: # if this is my lock
self.master.seek(0)
- cPickle.dump((self.now, time.time()), self.master)
+ pickle.dump((self.now, time.time()), self.master)
portalocker.unlock(self.master)
self.master.close()
diff --git a/gluon/recfile.py b/gluon/recfile.py
new file mode 100755
index 00000000..e2865d34
--- /dev/null
+++ b/gluon/recfile.py
@@ -0,0 +1,63 @@
+import os, uuid
+
+def generate(filename, depth=2, base=512):
+ if os.path.sep in filename:
+ path, filename = os.path.split(filename)
+ else:
+ path = None
+ dummyhash = sum(ord(c)*256**(i % 4) for i,c in enumerate(filename)) % base**depth
+ folders = []
+ for level in range(depth-1,-1,-1):
+ code, dummyhash = divmod(dummyhash, base**level)
+ folders.append("%03x" % code)
+ folders.append(filename)
+ if path:
+ folders.insert(0,path)
+ return os.path.join(*folders)
+
+def exists(filename, path=None):
+ if os.path.exists(filename):
+ return True
+ if path is None:
+ path, filename = os.path.split(filename)
+ fullfilename = os.path.join(path, generate(filename))
+ if os.path.exists(fullfilename):
+ return True
+ return False
+
+def remove(filename, path=None):
+ if os.path.exists(filename):
+ return os.unlink(filename)
+ if path is None:
+ path, filename = os.path.split(filename)
+ fullfilename = os.path.join(path, generate(filename))
+ if os.path.exists(fullfilename):
+ return os.unlink(fullfilename)
+ raise IOError
+
+def open(filename, mode="r", path=None):
+ if not path:
+ path, filename = os.path.split(filename)
+ fullfilename = None
+ if not mode.startswith('w'):
+ fullfilename = os.path.join(path, filename)
+ if not os.path.exists(fullfilename):
+ fullfilename = None
+ if not fullfilename:
+ fullfilename = os.path.join(path, generate(filename))
+ if mode.startswith('w') and not os.path.exists(os.path.dirname(fullfilename)):
+ os.makedirs(os.path.dirname(fullfilename))
+ return file(fullfilename, mode)
+
+def test():
+ if not os.path.exists('tests'):
+ os.mkdir('tests')
+ for k in range(20):
+ filename = os.path.join('tests',str(uuid.uuid4())+'.test')
+ open(filename, "w").write('test')
+ assert open(filename, "r").read()=='test'
+ if exists(filename):
+ remove(filename)
+
+if __name__ == '__main__':
+ test()
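
The new recfile module spreads what used to be flat folders full of session
files into a hashed directory tree: generate() derives depth levels of up to
base subfolders from the filename, while open()/exists()/remove()
transparently try both the flat and the hashed location. An illustrative
call; the actual hex path components depend on the filename hash:

    from gluon import recfile

    recfile.generate('sessions/127-0-0-1-abc.w2p', depth=2, base=512)
    # -> something like 'sessions/0f3/1a2/127-0-0-1-abc.w2p'
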
diff --git a/gluon/restricted.py b/gluon/restricted.py
index bcb78163..de7d8afb 100644
--- a/gluon/restricted.py
+++ b/gluon/restricted.py
@@ -11,7 +11,10 @@ Restricted environment to execute application's code
"""
import sys
-import cPickle
+try:
+ import cPickle as pickle
+except:
+ import pickle
import traceback
import types
import os
@@ -55,7 +58,7 @@ class TicketStorage(Storage):
try:
table = self._get_table(self.db, self.tablename, request.application)
table.insert(ticket_id=ticket_id,
- ticket_data=cPickle.dumps(ticket_data),
+ ticket_data=pickle.dumps(ticket_data, pickle.HIGHEST_PROTOCOL),
created_datetime=request.now)
self.db.commit()
message = 'In FILE: %(layer)s\n\n%(traceback)s\n'
@@ -68,7 +71,7 @@ class TicketStorage(Storage):
def _store_on_disk(self, request, ticket_id, ticket_data):
ef = self._error_file(request, ticket_id, 'wb')
try:
- cPickle.dump(ticket_data, ef)
+ pickle.dump(ticket_data, ef)
finally:
ef.close()
@@ -103,13 +106,13 @@ class TicketStorage(Storage):
except IOError:
return {}
try:
- return cPickle.load(ef)
+ return pickle.load(ef)
finally:
ef.close()
else:
table = self._get_table(self.db, self.tablename, app)
rows = self.db(table.ticket_id == ticket_id).select()
- return cPickle.loads(rows[0].ticket_data) if rows else {}
+ return pickle.loads(rows[0].ticket_data) if rows else {}
class RestrictedError(Exception):
diff --git a/gluon/scheduler.py b/gluon/scheduler.py
index 66b0becb..6a72dda5 100644
--- a/gluon/scheduler.py
+++ b/gluon/scheduler.py
@@ -96,7 +96,7 @@ IDENTIFIER = "%s#%s" % (socket.gethostname(),os.getpid())
logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER)
from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET, IS_NOT_IN_DB
-from gluon import IS_INT_IN_RANGE, IS_DATETIME
+from gluon import IS_INT_IN_RANGE, IS_DATETIME, IS_IN_DB
from gluon.utils import web2py_uuid
from gluon.storage import Storage
@@ -671,7 +671,10 @@ class Scheduler(MetaScheduler):
db.define_table(
'scheduler_task_deps',
Field('job_name', default='job_0'),
- Field('task_parent', 'reference scheduler_task'),
+ Field('task_parent', 'integer',
+ requires=IS_IN_DB(db, 'scheduler_task.id',
+ '%(task_name)s')
+ ),
Field('task_child', 'reference scheduler_task'),
Field('can_visit', 'boolean', default=False),
migrate=self.__get_migrate('scheduler_task_deps', migrate)
@@ -1311,7 +1314,7 @@ class Scheduler(MetaScheduler):
"""
from gluon.dal import Query
sr, st = self.db.scheduler_run, self.db.scheduler_task
- if isinstance(ref, int):
+ if isinstance(ref, (int, long)):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
@@ -1362,7 +1365,7 @@ class Scheduler(MetaScheduler):
Experimental
"""
st, sw = self.db.scheduler_task, self.db.scheduler_worker
- if isinstance(ref, int):
+ if isinstance(ref, (int, long)):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
diff --git a/gluon/serializers.py b/gluon/serializers.py
index 0323aa80..563a3c0b 100644
--- a/gluon/serializers.py
+++ b/gluon/serializers.py
@@ -163,15 +163,18 @@ def ics(events, title=None, link=None, timeshift=0, calname=True,
def rss(feed):
if not 'entries' in feed and 'items' in feed:
feed['entries'] = feed['items']
+ def safestr(obj, key, default=''):
+ return str(obj[key]).encode('utf-8', 'replace') if key in obj else default
+
now = datetime.datetime.now()
- rss = rss2.RSS2(title=str(feed.get('title', '(notitle)').encode('utf-8', 'replace')),
- link=str(feed.get('link', None).encode('utf-8', 'replace')),
- description=str(feed.get('description', '').encode('utf-8', 'replace')),
+ rss = rss2.RSS2(title=safestr(feed,'title'),
+ link=safestr(feed,'link'),
+ description=safestr(feed,'description'),
lastBuildDate=feed.get('created_on', now),
items=[rss2.RSSItem(
- title=str(entry.get('title', '(notitle)').encode('utf-8', 'replace')),
- link=str(entry.get('link', None).encode('utf-8', 'replace')),
- description=str(entry.get('description', '').encode('utf-8', 'replace')),
+ title=safestr(entry,'title','(notitle)'),
+ link=safestr(entry,'link'),
+ description=safestr(entry,'description'),
pubDate=entry.get('created_on', now)
) for entry in feed.get('entries', [])])
return rss.to_xml(encoding='utf-8')
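
Besides centralizing the encoding, safestr() fixes a crash: the old code
called .encode() on feed.get('link', None), which raised AttributeError
whenever 'link' was absent, since None has no encode method. A sketch of
the repaired behaviour:

    feed = {'title': 'My Feed'}         # no 'link' key
    # safestr(feed, 'title')  -> 'My Feed'
    # safestr(feed, 'link')   -> ''     (previously: AttributeError)
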
diff --git a/gluon/sqlhtml.py b/gluon/sqlhtml.py
index 3bdd4e39..98d4395a 100644
--- a/gluon/sqlhtml.py
+++ b/gluon/sqlhtml.py
@@ -655,7 +655,7 @@ class AutocompleteWidget(object):
if settings and settings.global_settings.web2py_runtime_gae:
rows = self.db(field.__ge__(self.request.vars[self.keyword]) & field.__lt__(self.request.vars[self.keyword] + u'\ufffd')).select(orderby=self.orderby, limitby=self.limitby, *(self.fields+self.help_fields))
else:
- rows = self.db(field.like(self.request.vars[self.keyword] + '%')).select(orderby=self.orderby, limitby=self.limitby, distinct=self.distinct, *(self.fields+self.help_fields))
+ rows = self.db(field.like(self.request.vars[self.keyword] + '%', case_sensitive=False)).select(orderby=self.orderby, limitby=self.limitby, distinct=self.distinct, *(self.fields+self.help_fields))
if rows:
if self.is_reference:
id_field = self.fields[1]
@@ -1292,7 +1292,7 @@ class SQLFORM(FORM):
xfields.append(
(self.FIELDKEY_DELETE_RECORD + SQLFORM.ID_ROW_SUFFIX,
LABEL(
- T(delete_label), separator,
+ T(delete_label), sep,
_for=self.FIELDKEY_DELETE_RECORD,
_id=self.FIELDKEY_DELETE_RECORD + \
SQLFORM.ID_LABEL_SUFFIX),
@@ -2114,6 +2114,8 @@ class SQLFORM(FORM):
field_id = groupby #take the field passed as groupby
elif groupby and isinstance(groupby, Expression):
field_id = groupby.first #take the first groupby field
+ while not(isinstance(field_id, Field)): # Navigate to the first Field of the expression
+ field_id = field_id.first
table = field_id.table
tablename = table._tablename
if not any(str(f) == str(field_id) for f in fields):
diff --git a/gluon/storage.py b/gluon/storage.py
index 89c88dca..2c94079c 100644
--- a/gluon/storage.py
+++ b/gluon/storage.py
@@ -12,7 +12,11 @@ Provides:
- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']`
"""
-import cPickle
+try:
+ import cPickle as pickle
+except:
+ import pickle
+import copy_reg
import gluon.portalocker as portalocker
__all__ = ['List', 'Storage', 'Settings', 'Messages',
@@ -129,6 +133,12 @@ class Storage(dict):
values = self.getlist(key)
return values[-1] if values else default
+
+def pickle_storage(s):
+ return Storage, (dict(s),)
+
+copy_reg.pickle(Storage, pickle_storage)
+
PICKABLE = (str, int, long, float, bool, list, dict, tuple, set)
@@ -141,10 +151,10 @@ class StorageList(Storage):
def __getattr__(self, key):
if key in self:
- return getattr(self, key)
+ return self.get(key)
else:
r = []
- setattr(self, key, r)
+ self[key] = r
return r
@@ -152,7 +162,7 @@ def load_storage(filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'rb')
- storage = cPickle.load(fp)
+ storage = pickle.load(fp)
finally:
if fp:
fp.close()
@@ -163,7 +173,7 @@ def save_storage(storage, filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'wb')
- cPickle.dump(dict(storage), fp)
+ pickle.dump(dict(storage), fp)
finally:
if fp:
fp.close()
diff --git a/gluon/template.py b/gluon/template.py
index 131cb927..7a18e7ea 100644
--- a/gluon/template.py
+++ b/gluon/template.py
@@ -279,15 +279,19 @@ class TemplateParser(object):
self.context = context
# allow optional alternative delimiters
- if delimiters is None:
- delimiters = context.get('response', {})\
- .get('app_settings',{}).get('template_delimiters')
+
if delimiters != self.default_delimiters:
- escaped_delimiters = (escape(elimiters[0]),
+ escaped_delimiters = (escape(delimiters[0]),
escape(delimiters[1]))
self.r_tag = compile(r'(%s.*?%s)' % escaped_delimiters, DOTALL)
- else:
- delimiters = self.default_delimiters
+ elif hasattr(context.get('response', None), 'delimiters'):
+ if context['response'].delimiters != self.default_delimiters:
+ delimiters = context['response'].delimiters
+ escaped_delimiters = (
+ escape(delimiters[0]),
+ escape(delimiters[1]))
+ self.r_tag = compile(r'(%s.*?%s)' % escaped_delimiters,
+ DOTALL)
self.delimiters = delimiters
# Create a root level Content that everything will go into.
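
The net effect of this hunk: template delimiters can now be overridden per
response instead of through app_settings. A hedged usage sketch in a web2py
controller, using the response attribute the code above checks for:

    # switch this response's template delimiters away from the default {{ }}
    response.delimiters = ('<%', '%>')
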
diff --git a/gluon/tests/__init__.py b/gluon/tests/__init__.py
index fbc12288..8aedb385 100644
--- a/gluon/tests/__init__.py
+++ b/gluon/tests/__init__.py
@@ -2,14 +2,8 @@ import os, sys
from test_http import *
from test_cache import *
-
-NOSQL = any([name in (os.getenv("DB") or "")
- for name in ("datastore", "mongodb", "imap")])
-if NOSQL:
- from test_dal_nosql import *
-else:
- from test_dal import *
-
+from test_contenttype import *
+from test_fileutils import *
from test_html import *
from test_is_url import *
from test_languages import *
@@ -25,3 +19,12 @@ from test_web import *
if sys.version[:3] == '2.7':
from test_old_doctests import *
+
+
+NOSQL = any([name in (os.getenv("DB") or "")
+ for name in ("datastore", "mongodb", "imap")])
+
+if NOSQL:
+ from test_dal_nosql import *
+else:
+ from test_dal import *
\ No newline at end of file
diff --git a/gluon/tests/fix_path.py b/gluon/tests/fix_path.py
new file mode 100644
index 00000000..f7acf8bc
--- /dev/null
+++ b/gluon/tests/fix_path.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import os
+import sys
+
+def fix_sys_path(current_path):
+ """
+ ensure sys.path always contains, in order:
+ '', web2py/gluon, web2py/site-packages, web2py/ ...
+ """
+
+ def add_path_first(path):
+ sys.path = [path] + [p for p in sys.path if (
+ not p == path and not p == (path + '/'))]
+
+ path = os.path.dirname(os.path.abspath(current_path))
+
+ if not os.path.isfile(os.path.join(path,'web2py.py')):
+ i = 0
+ while i<10:
+ i += 1
+ if os.path.exists(os.path.join(path,'web2py.py')):
+ break
+ path = os.path.abspath(os.path.join(path, '..'))
+
+ paths = [path,
+ os.path.abspath(os.path.join(path, 'site-packages')),
+ os.path.abspath(os.path.join(path, 'gluon')),
+ '']
+ [add_path_first(path) for path in paths]
\ No newline at end of file
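
The helper's core move, isolated: prepend a path while dropping any earlier
occurrence (with or without a trailing slash), so repeated calls from every
test module cannot grow sys.path.

    import sys

    def add_path_first(path):
        sys.path = [path] + [p for p in sys.path if (
            not p == path and not p == (path + '/'))]

    sys.path = ['/a', '/b', '/b/']
    add_path_first('/b')
    assert sys.path == ['/b', '/a']
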
diff --git a/gluon/tests/test_cache.py b/gluon/tests/test_cache.py
index be50e8ef..39b0605a 100644
--- a/gluon/tests/test_cache.py
+++ b/gluon/tests/test_cache.py
@@ -4,43 +4,15 @@
"""
Unit tests for gluon.cache
"""
-
-import sys
import os
import unittest
+from fix_path import fix_sys_path
-
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
from storage import Storage
-from cache import CacheInRam, CacheOnDisk
+from cache import CacheInRam, CacheOnDisk, Cache
oldcwd = None
@@ -76,6 +48,20 @@ class TestCache(unittest.TestCase):
cache.clear()
self.assertEqual(cache('a', lambda: 3, 100), 3)
self.assertEqual(cache('a', lambda: 4, 0), 4)
+ #test singleton behaviour
+ cache = CacheInRam()
+ cache.clear()
+ self.assertEqual(cache('a', lambda: 3, 100), 3)
+ self.assertEqual(cache('a', lambda: 4, 0), 4)
+ #test key deletion
+ cache('a', None)
+ self.assertEqual(cache('a', lambda: 5, 100), 5)
+ #test increment
+ self.assertEqual(cache.increment('a'), 6)
+ self.assertEqual(cache('a', lambda: 1, 100), 6)
+ cache.increment('b')
+ self.assertEqual(cache('b', lambda: 'x', 100), 1)
+
def testCacheOnDisk(self):
@@ -93,6 +79,30 @@ class TestCache(unittest.TestCase):
cache.clear()
self.assertEqual(cache('a', lambda: 3, 100), 3)
self.assertEqual(cache('a', lambda: 4, 0), 4)
+ #test singleton behaviour
+ cache = CacheOnDisk(s)
+ cache.clear()
+ self.assertEqual(cache('a', lambda: 3, 100), 3)
+ self.assertEqual(cache('a', lambda: 4, 0), 4)
+ #test key deletion
+ cache('a', None)
+ self.assertEqual(cache('a', lambda: 5, 100), 5)
+ #test increment
+ self.assertEqual(cache.increment('a'), 6)
+ self.assertEqual(cache('a', lambda: 1, 100), 6)
+ cache.increment('b')
+ self.assertEqual(cache('b', lambda: 'x', 100), 1)
+
+ def testCacheWithPrefix(self):
+ s = Storage({'application': 'admin',
+ 'folder': 'applications/admin'})
+ cache = Cache(s)
+ prefix = cache.with_prefix(cache.ram,'prefix')
+ self.assertEqual(prefix('a', lambda: 1, 0), 1)
+ self.assertEqual(prefix('a', lambda: 2, 100), 1)
+ self.assertEqual(cache.ram('prefixa', lambda: 2, 100), 1)
+
+
if __name__ == '__main__':
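
A rough stand-in for the prefixing behaviour testCacheWithPrefix exercises
(not gluon's implementation; the toy cache model below ignores expiration):
with_prefix namespaces every key before delegating to the wrapped cache
model, so 'a' and 'prefixa' hit the same entry.

    def with_prefix(cache_model, prefix):
        # return a cache callable that namespaces every key
        return lambda key, f, time_expire: cache_model(prefix + key, f, time_expire)

    store = {}
    def ram(key, f, time_expire):
        # toy model: compute once, then keep returning the stored value
        if key not in store and f is not None:
            store[key] = f()
        return store.get(key)

    prefixed = with_prefix(ram, 'prefix')
    assert prefixed('a', lambda: 1, 0) == 1
    assert ram('prefixa', lambda: 2, 100) == 1  # same underlying entry
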
diff --git a/gluon/tests/test_contenttype.py b/gluon/tests/test_contenttype.py
new file mode 100644
index 00000000..1b40065d
--- /dev/null
+++ b/gluon/tests/test_contenttype.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+ Unit tests for gluon.contenttype
+"""
+
+import unittest
+from fix_path import fix_sys_path
+
+fix_sys_path(__file__)
+
+from contenttype import contenttype
+
+
+class TestContentType(unittest.TestCase):
+
+ def testTypeRecognition(self):
+ rtn = contenttype('.png')
+ self.assertEqual(rtn, 'image/png')
+ rtn = contenttype('.gif')
+ self.assertEqual(rtn, 'image/gif')
+ rtn = contenttype('.tar.bz2')
+ self.assertEqual(rtn, 'application/x-bzip-compressed-tar')
+ # test overrides and additions
+ mapping = {
+ '.load': 'text/html; charset=utf-8',
+ '.json': 'application/json',
+ '.jsonp': 'application/jsonp',
+ '.pickle': 'application/python-pickle',
+ '.w2p': 'application/w2p',
+ '.md': 'text/x-markdown; charset=utf-8'
+ }
+ for k, v in mapping.iteritems():
+ self.assertEqual(contenttype(k), v)
+
+ # test without dot extension
+ rtn = contenttype('png')
+ self.assertEqual(rtn, 'text/plain; charset=utf-8')
+
+
+if __name__ == '__main__':
+ unittest.main()
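
A sketch of the lookup pattern this test pins down, with assumed names (the
OVERRIDES dict and fallback are illustrative, not gluon's exact table): start
from the stdlib mimetypes registry, layer web2py-specific overrides on top,
and fall back to text/plain when the extension is unknown or lacks its dot.

    import mimetypes

    OVERRIDES = {'.load': 'text/html; charset=utf-8',
                 '.json': 'application/json'}  # subset for illustration

    def contenttype(filename, default='text/plain; charset=utf-8'):
        ext = '.' + filename.split('.')[-1] if '.' in filename else ''
        if ext in OVERRIDES:
            return OVERRIDES[ext]
        return mimetypes.guess_type('x' + ext)[0] or default

    assert contenttype('.png') == 'image/png'
    assert contenttype('.load') == 'text/html; charset=utf-8'
    assert contenttype('png') == 'text/plain; charset=utf-8'
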
diff --git a/gluon/tests/test_contribs.py b/gluon/tests/test_contribs.py
index 2e8a15e6..22f3978d 100644
--- a/gluon/tests/test_contribs.py
+++ b/gluon/tests/test_contribs.py
@@ -3,37 +3,10 @@
""" Unit tests for contribs """
-import sys
-import os
import unittest
+from fix_path import fix_sys_path
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
from utils import md5_hash
diff --git a/gluon/tests/test_dal.py b/gluon/tests/test_dal.py
index 5a2d87b1..0486281f 100644
--- a/gluon/tests/test_dal.py
+++ b/gluon/tests/test_dal.py
@@ -15,42 +15,20 @@ try:
except:
from io import StringIO
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
+from fix_path import fix_sys_path
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
#for travis-ci
-DEFAULT_URI = os.environ.get('DB', 'sqlite:memory')
+DEFAULT_URI = os.getenv('DB', 'sqlite:memory')
print 'Testing against %s engine (%s)' % (DEFAULT_URI.partition(':')[0], DEFAULT_URI)
from dal import DAL, Field
from dal.objects import Table
from dal.helpers.classes import SQLALL
+from gluon.cache import CacheInRam
ALLOWED_DATATYPES = [
'string',
@@ -131,6 +109,7 @@ class TestFields(unittest.TestCase):
isinstance(f.formatter(datetime.datetime.now()), str)
def testRun(self):
+ """Test all field types and their return values"""
db = DAL(DEFAULT_URI, check_reserved=['all'])
for ft in ['string', 'text', 'password', 'upload', 'blob']:
db.define_table('tt', Field('aa', ft, default=''))
@@ -150,8 +129,22 @@ class TestFields(unittest.TestCase):
self.assertEqual(db().select(db.tt.aa)[0].aa, True)
db.tt.drop()
db.define_table('tt', Field('aa', 'json', default={}))
- self.assertEqual(db.tt.insert(aa={}), 1)
- self.assertEqual(db().select(db.tt.aa)[0].aa, {})
+ # test different python objects for correct serialization in json
+ objs = [
+ {'a' : 1, 'b' : 2},
+ [1, 2, 3],
+ 'abc',
+ True,
+ False,
+ None,
+ 11,
+ 14.3,
+ long(11)
+ ]
+ for obj in objs:
+ rtn_id = db.tt.insert(aa=obj)
+ rtn = db(db.tt.id == rtn_id).select().first().aa
+ self.assertEqual(obj, rtn)
db.tt.drop()
db.define_table('tt', Field('aa', 'date',
default=datetime.date.today()))
@@ -551,9 +544,8 @@ class TestMinMaxSumAvg(unittest.TestCase):
db.tt.drop()
-class TestCache(unittest.TestCase):
+class TestCacheSelect(unittest.TestCase):
def testRun(self):
- from cache import CacheInRam
cache = CacheInRam()
db = DAL(DEFAULT_URI, check_reserved=['all'])
db.define_table('tt', Field('aa'))
@@ -1446,7 +1438,6 @@ class TestQuoting(unittest.TestCase):
db._adapter.types[key]=db._adapter.types[key].replace(
'%(on_delete_action)s','NO ACTION')
-
t0 = db.define_table('t0',
Field('f', 'string'))
t1 = db.define_table('b',
diff --git a/gluon/tests/test_dal_nosql.py b/gluon/tests/test_dal_nosql.py
index 0ac1506e..980ac3cc 100644
--- a/gluon/tests/test_dal_nosql.py
+++ b/gluon/tests/test_dal_nosql.py
@@ -19,34 +19,9 @@ try:
except:
from io import StringIO
+from fix_path import fix_sys_path
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
#for travis-ci
DEFAULT_URI = os.environ.get('DB', 'sqlite:memory')
diff --git a/gluon/tests/test_fileutils.py b/gluon/tests/test_fileutils.py
new file mode 100644
index 00000000..02b25421
--- /dev/null
+++ b/gluon/tests/test_fileutils.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import unittest
+import datetime
+from fix_path import fix_sys_path
+
+fix_sys_path(__file__)
+
+from fileutils import parse_version
+
+
+class TestFileUtils(unittest.TestCase):
+
+ def testParseVersion(self):
+ rtn = parse_version('Version 1.99.0-rc.1+timestamp.2011.09.19.08.23.26')
+ self.assertEqual(rtn, (1, 99, 0, 'rc.1', datetime.datetime(2011, 9, 19, 8, 23, 26)))
+ rtn = parse_version('Version 2.9.11-stable+timestamp.2014.09.15.18.31.17')
+ self.assertEqual(rtn, (2, 9, 11, 'stable', datetime.datetime(2014, 9, 15, 18, 31, 17)))
+ rtn = parse_version('Version 1.99.0 (2011-09-19 08:23:26)')
+ self.assertEqual(rtn, (1, 99, 0, 'dev', datetime.datetime(2011, 9, 19, 8, 23, 26)))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/gluon/tests/test_html.py b/gluon/tests/test_html.py
index bce172fb..bdc35558 100644
--- a/gluon/tests/test_html.py
+++ b/gluon/tests/test_html.py
@@ -5,37 +5,10 @@
Unit tests for gluon.html
"""
-import sys
-import os
import unittest
+from fix_path import fix_sys_path
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
from html import *
from storage import Storage
diff --git a/gluon/tests/test_http.py b/gluon/tests/test_http.py
index 5257b6fa..ee46dc48 100644
--- a/gluon/tests/test_http.py
+++ b/gluon/tests/test_http.py
@@ -3,38 +3,10 @@
"""Unit tests for http.py """
-import sys
-import os
import unittest
+from fix_path import fix_sys_path
-
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
from http import HTTP, defined_status
@@ -68,8 +40,5 @@ class TestHTTP(unittest.TestCase):
# test wrong call detection
-
-
-
if __name__ == '__main__':
unittest.main()
diff --git a/gluon/tests/test_is_url.py b/gluon/tests/test_is_url.py
index abdc6b3c..79448afb 100644
--- a/gluon/tests/test_is_url.py
+++ b/gluon/tests/test_is_url.py
@@ -4,42 +4,14 @@
Unit tests for IS_URL()
"""
-import sys
-import os
import unittest
+from fix_path import fix_sys_path
+
+fix_sys_path(__file__)
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
-
-
-from validators import IS_URL, IS_HTTP_URL, IS_GENERIC_URL, \
- unicode_to_ascii_authority
+from validators import IS_URL, IS_HTTP_URL, IS_GENERIC_URL
+from validators import unicode_to_ascii_authority
class TestIsUrl(unittest.TestCase):
diff --git a/gluon/tests/test_languages.py b/gluon/tests/test_languages.py
index 91bc3762..13f13a9d 100644
--- a/gluon/tests/test_languages.py
+++ b/gluon/tests/test_languages.py
@@ -7,38 +7,11 @@
import sys
import os
-import unittest
import tempfile
-import threading
-import logging
+import unittest
+from fix_path import fix_sys_path
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
#support skipif also in python 2.6
def _skipIf(cond, message=''):
@@ -55,7 +28,6 @@ else:
skipIf = _skipIf
import languages
-from storage import Storage
MP_WORKING = 0
try:
import multiprocessing
@@ -76,6 +48,7 @@ def read_write(args):
languages.write_dict(filename, content)
return True
+
class TestLanguagesParallel(unittest.TestCase):
def setUp(self):
@@ -91,7 +64,7 @@ class TestLanguagesParallel(unittest.TestCase):
os.remove(self.filename)
except:
pass
-
+
@skipIf(MP_WORKING == 0, 'multiprocessing tests unavailable')
def test_reads_and_writes(self):
readwriters = 10
@@ -99,7 +72,7 @@ class TestLanguagesParallel(unittest.TestCase):
results = pool.map(read_write, [[self.filename, 10]] * readwriters)
for result in results:
self.assertTrue(result)
-
+
@skipIf(MP_WORKING == 1, 'multiprocessing tests available')
def test_reads_and_writes_no_mp(self):
results = []
@@ -108,6 +81,7 @@ class TestLanguagesParallel(unittest.TestCase):
for result in results:
self.assertTrue(result)
+
class TestTranslations(unittest.TestCase):
def setUp(self):
@@ -144,7 +118,6 @@ class TestTranslations(unittest.TestCase):
T.force('it')
self.assertEqual(str(T('Hello World')),
'Salve Mondo')
-
+
if __name__ == '__main__':
unittest.main()
-
\ No newline at end of file
diff --git a/gluon/tests/test_old_doctests.py b/gluon/tests/test_old_doctests.py
index 7a2c7fcd..292c92b2 100644
--- a/gluon/tests/test_old_doctests.py
+++ b/gluon/tests/test_old_doctests.py
@@ -7,41 +7,15 @@
"""
import sys
import os
-import unittest
import doctest
+import unittest
+from fix_path import fix_sys_path
+fix_sys_path(__file__)
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
def load_tests(loader, tests, ignore):
-
tests.addTests(
doctest.DocTestSuite('html')
)
diff --git a/gluon/tests/test_storage.py b/gluon/tests/test_storage.py
index ea9a6698..e046506b 100644
--- a/gluon/tests/test_storage.py
+++ b/gluon/tests/test_storage.py
@@ -3,40 +3,14 @@
""" Unit tests for storage.py """
-import sys
-import os
import unittest
+from fix_path import fix_sys_path
+fix_sys_path(__file__)
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
-
-from storage import Storage
+from storage import Storage, StorageList, List
+from http import HTTP
+import pickle
class TestStorage(unittest.TestCase):
@@ -97,5 +71,70 @@ class TestStorage(unittest.TestCase):
self.assertEquals(s['a'], None)
self.assertTrue('a' in s)
+ def test_pickling(self):
+ """ Test storage pickling """
+ s = Storage(a=1)
+ sd = pickle.dumps(s, pickle.HIGHEST_PROTOCOL)
+ news = pickle.loads(sd)
+ self.assertEqual(news.a, 1)
+
+ def test_getlist(self):
+ # usually used with request.vars
+ a = Storage()
+ a.x = 'abc'
+ a.y = ['abc', 'def']
+ self.assertEqual(a.getlist('x'), ['abc'])
+ self.assertEqual(a.getlist('y'), ['abc', 'def'])
+ self.assertEqual(a.getlist('z'), [])
+
+ def test_getfirst(self):
+ # usually with request.vars
+ a = Storage()
+ a.x = 'abc'
+ a.y = ['abc', 'def']
+ self.assertEqual(a.getfirst('x'), 'abc')
+ self.assertEqual(a.getfirst('y'), 'abc')
+ self.assertEqual(a.getfirst('z'), None)
+
+ def test_getlast(self):
+ # usually with request.vars
+ a = Storage()
+ a.x = 'abc'
+ a.y = ['abc', 'def']
+ self.assertEqual(a.getlast('x'), 'abc')
+ self.assertEqual(a.getlast('y'), 'def')
+ self.assertEqual(a.getlast('z'), None)
+
+
+class TestStorageList(unittest.TestCase):
+ """ Tests storage.StorageList """
+
+ def test_attribute(self):
+ s = StorageList(a=1)
+
+ self.assertEqual(s.a, 1)
+ self.assertEqual(s['a'], 1)
+ self.assertEqual(s.b, [])
+ s.b.append(1)
+ self.assertEqual(s.b, [1])
+
+
+class TestList(unittest.TestCase):
+ """ Tests Storage.List (fast-check for request.args()) """
+
+ def test_listcall(self):
+ a = List((1, 2, 3))
+ self.assertEqual(a(1), 2)
+ self.assertEqual(a(-1), 3)
+ self.assertEqual(a(-5), None)
+ self.assertEqual(a(-5, default='x'), 'x')
+ self.assertEqual(a(-3, cast=str), '1')
+ a.append('1234')
+ self.assertEqual(a(3), '1234')
+ self.assertEqual(a(3, cast=int), 1234)
+ a.append('x')
+ self.assertRaises(HTTP, a, 4, cast=int)
+
+
if __name__ == '__main__':
unittest.main()
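
The List.__call__ contract those assertions rely on, as a simplified
stand-in (gluon's List wraps a failed cast in an HTTP error rather than
letting the ValueError propagate):

    class List(list):
        def __call__(self, i, default=None, cast=None):
            n = len(self)
            value = self[i] if -n <= i < n else default
            if cast is not None and value is not None:
                value = cast(value)  # gluon converts errors here into HTTP
            return value

    a = List((1, 2, 3))
    assert a(1) == 2 and a(-1) == 3
    assert a(-5) is None and a(-5, default='x') == 'x'
    assert a(-3, cast=str) == '1'
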
diff --git a/gluon/tests/test_template.py b/gluon/tests/test_template.py
index 85758339..021a30a1 100644
--- a/gluon/tests/test_template.py
+++ b/gluon/tests/test_template.py
@@ -4,38 +4,10 @@
Unit tests for gluon.template
"""
-import sys
-import os
import unittest
+from fix_path import fix_sys_path
-
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
from template import render
diff --git a/gluon/tests/test_utils.py b/gluon/tests/test_utils.py
index 77fd9e4e..3372a73d 100644
--- a/gluon/tests/test_utils.py
+++ b/gluon/tests/test_utils.py
@@ -3,38 +3,10 @@
""" Unit tests for utils.py """
-import sys
-import os
import unittest
+from fix_path import fix_sys_path
-
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
from utils import md5_hash
diff --git a/gluon/tests/test_validators.py b/gluon/tests/test_validators.py
index cbafa0ac..ef3ce7cc 100644
--- a/gluon/tests/test_validators.py
+++ b/gluon/tests/test_validators.py
@@ -3,43 +3,17 @@
"""Unit tests for http.py """
-import sys
-import os
import unittest
-
-
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
-
import datetime
import decimal
-from gluon.validators import *
import re
+from fix_path import fix_sys_path
+
+fix_sys_path(__file__)
+
+
+from gluon.validators import *
+
class TestValidators(unittest.TestCase):
@@ -114,6 +88,19 @@ class TestValidators(unittest.TestCase):
self.assertEqual(rtn, (datetime.date(2008, 3, 3), None))
rtn = v(datetime.date(2010,3,3))
self.assertEqual(rtn, (datetime.date(2010, 3, 3), 'oops'))
+ v = IS_DATE_IN_RANGE(maximum=datetime.date(2009,12,31),
+ format="%m/%d/%Y")
+ rtn = v('03/03/2010')
+ self.assertEqual(rtn, ('03/03/2010', 'Enter date on or before 12/31/2009'))
+ v = IS_DATE_IN_RANGE(minimum=datetime.date(2008,1,1),
+ format="%m/%d/%Y")
+ rtn = v('03/03/2007')
+ self.assertEqual(rtn, ('03/03/2007', 'Enter date on or after 01/01/2008'))
+ v = IS_DATE_IN_RANGE(minimum=datetime.date(2008,1,1),
+ maximum=datetime.date(2009,12,31),
+ format="%m/%d/%Y")
+ rtn = v('03/03/2007')
+ self.assertEqual(rtn, ('03/03/2007', 'Enter date in range 01/01/2008 12/31/2009'))
def test_IS_DATE(self):
v = IS_DATE(format="%m/%d/%Y",error_message="oops")
@@ -135,6 +122,19 @@ class TestValidators(unittest.TestCase):
self.assertEquals(rtn, (datetime.datetime(2008, 3, 3, 0, 0), None))
rtn = v(datetime.datetime(2010,3,3,0,0))
self.assertEquals(rtn, (datetime.datetime(2010, 3, 3, 0, 0), 'oops'))
+ v = IS_DATETIME_IN_RANGE(maximum=datetime.datetime(2009,12,31,12,20),
+ format='%m/%d/%Y %H:%M:%S')
+ rtn = v('03/03/2010 12:20:00')
+ self.assertEqual(rtn, ('03/03/2010 12:20:00', 'Enter date and time on or before 12/31/2009 12:20:00'))
+ v = IS_DATETIME_IN_RANGE(minimum=datetime.datetime(2008,1,1,12,20),
+ format='%m/%d/%Y %H:%M:%S')
+ rtn = v('03/03/2007 12:20:00')
+ self.assertEqual(rtn, ('03/03/2007 12:20:00', 'Enter date and time on or after 01/01/2008 12:20:00'))
+ v = IS_DATETIME_IN_RANGE(minimum=datetime.datetime(2008,1,1,12,20),
+ maximum=datetime.datetime(2009,12,31,12,20),
+ format='%m/%d/%Y %H:%M:%S')
+ rtn = v('03/03/2007 12:20:00')
+ self.assertEqual(rtn, ('03/03/2007 12:20:00', 'Enter date and time in range 01/01/2008 12:20:00 12/31/2009 12:20:00'))
def test_IS_DATETIME(self):
v = IS_DATETIME(format="%m/%d/%Y %H:%M",error_message="oops")
@@ -186,6 +186,8 @@ class TestValidators(unittest.TestCase):
self.assertEqual(rtn, ('6,5', 'Enter a number'))
rtn = IS_DECIMAL_IN_RANGE(dot=',')('6.5')
self.assertEqual(rtn, (decimal.Decimal('6.5'), None))
+ rtn = IS_DECIMAL_IN_RANGE(1,5)(decimal.Decimal('4'))
+ self.assertEqual(rtn, (decimal.Decimal('4'), None))
def test_IS_EMAIL(self):
rtn = IS_EMAIL()('a@b.com')
@@ -242,6 +244,16 @@ class TestValidators(unittest.TestCase):
self.assertEqual(rtn, ('Ima Fool@example.com', 'Enter a valid email address'))
rtn = IS_EMAIL()('localguy@localhost') # localhost as domain
self.assertEqual(rtn, ('localguy@localhost', None))
+ # test for banned
+ rtn = IS_EMAIL(banned='^.*\.com(|\..*)$')('localguy@localhost') # localhost as domain
+ self.assertEqual(rtn, ('localguy@localhost', None))
+ rtn = IS_EMAIL(banned='^.*\.com(|\..*)$')('abc@example.com')
+ self.assertEqual(rtn, ('abc@example.com', 'Enter a valid email address'))
+ # test for forced
+ rtn = IS_EMAIL(forced='^.*\.edu(|\..*)$')('localguy@localhost')
+ self.assertEqual(rtn, ('localguy@localhost', 'Enter a valid email address'))
+ rtn = IS_EMAIL(forced='^.*\.edu(|\..*)$')('localguy@example.edu')
+ self.assertEqual(rtn, ('localguy@example.edu', None))
def test_IS_LIST_OF_EMAILS(self):
emails = ['localguy@localhost', '_Yosemite.Sam@example.com']
@@ -255,6 +267,19 @@ class TestValidators(unittest.TestCase):
rtn = IS_LIST_OF_EMAILS()(';'.join(emails))
self.assertEqual(rtn, ('localguy@localhost;_Yosemite.Sam@example.com;a', 'Invalid emails: a'))
+ def test_IS_LIST_OF(self):
+ values = [0,1,2,3,4]
+ rtn = IS_LIST_OF(IS_INT_IN_RANGE(0, 10))(values)
+ self.assertEqual(rtn, (values, None))
+ values.append(11)
+ rtn = IS_LIST_OF(IS_INT_IN_RANGE(0, 10))(values)
+ self.assertEqual(rtn, (values, 'Enter an integer between 0 and 9'))
+ rtn = IS_LIST_OF(IS_INT_IN_RANGE(0, 10))(1)
+ self.assertEqual(rtn, ([1], None))
+ rtn = IS_LIST_OF(IS_INT_IN_RANGE(0, 10), minimum=10)([1,2])
+ self.assertEqual(rtn, ([1, 2], 'Enter between 10 and 100 values'))
+ rtn = IS_LIST_OF(IS_INT_IN_RANGE(0, 10), maximum=2)([1,2,3])
+ self.assertEqual(rtn, ([1, 2, 3], 'Enter between 0 and 2 values'))
def test_IS_EMPTY_OR(self):
rtn = IS_EMPTY_OR(IS_EMAIL())('abc@def.com')
@@ -515,8 +540,46 @@ class TestValidators(unittest.TestCase):
self.assertEqual(rtn, (None, 'Enter from 1 to 255 characters'))
rtn = IS_LENGTH(minsize=1)([])
self.assertEqual(rtn, ([], 'Enter from 1 to 255 characters'))
+ rtn = IS_LENGTH(minsize=1)([1, 2])
+ self.assertEqual(rtn, ([1, 2], None))
rtn = IS_LENGTH(minsize=1)([1])
self.assertEqual(rtn, ([1], None))
+ # test unicode
+ rtn = IS_LENGTH(2)(u'°2')
+ self.assertEqual(rtn, ('\xc2\xb02', None))
+ rtn = IS_LENGTH(2)(u'°12')
+ self.assertEqual(rtn, (u'\xb012', 'Enter from 0 to 2 characters'))
+ # test automatic str()
+ rtn = IS_LENGTH(minsize=1)(1)
+ self.assertEqual(rtn, ('1', None))
+ rtn = IS_LENGTH(minsize=2)(1)
+ self.assertEqual(rtn, (1, 'Enter from 2 to 255 characters'))
+ # test FieldStorage
+ import cgi
+ from StringIO import StringIO
+ a = cgi.FieldStorage()
+ a.file = StringIO('abc')
+ rtn = IS_LENGTH(minsize=4)(a)
+ self.assertEqual(rtn, (a, 'Enter from 4 to 255 characters'))
+ urlencode_data = "key2=value2x&key3=value3&key4=value4"
+ urlencode_environ = {
+ 'CONTENT_LENGTH': str(len(urlencode_data)),
+ 'CONTENT_TYPE': 'application/x-www-form-urlencoded',
+ 'QUERY_STRING': 'key1=value1&key2=value2y',
+ 'REQUEST_METHOD': 'POST',
+ }
+ fake_stdin = StringIO(urlencode_data)
+ fake_stdin.seek(0)
+ a = cgi.FieldStorage(fp=fake_stdin, environ=urlencode_environ)
+ rtn = IS_LENGTH(minsize=6)(a)
+ self.assertEqual(rtn, (a, 'Enter from 6 to 255 characters'))
+ a = cgi.FieldStorage()
+ rtn = IS_LENGTH(minsize=6)(a)
+ self.assertEqual(rtn, (a, 'Enter from 6 to 255 characters'))
+ rtn = IS_LENGTH(6)(a)
+ self.assertEqual(rtn, (a, None))
+
def test_IS_LOWER(self):
rtn = IS_LOWER()('ABC')
@@ -545,10 +608,14 @@ class TestValidators(unittest.TestCase):
self.assertEqual(rtn, ('hellas', 'Invalid expression'))
rtn = IS_MATCH('hell$', strict=True)('hellas')
self.assertEqual(rtn, ('hellas', 'Invalid expression'))
+ rtn = IS_MATCH('^.hell$', strict=True)('shell')
+ self.assertEqual(rtn, ('shell', None))
rtn = IS_MATCH(u'hell', is_unicode=True)('àòè')
self.assertEqual(rtn, ('\xc3\xa0\xc3\xb2\xc3\xa8', 'Invalid expression'))
rtn = IS_MATCH(u'hell', is_unicode=True)(u'hell')
self.assertEqual(rtn, (u'hell', None))
+ rtn = IS_MATCH('hell', is_unicode=True)(u'hell')
+ self.assertEqual(rtn, (u'hell', None))
def test_IS_EQUAL_TO(self):
@@ -718,6 +785,57 @@ class TestValidators(unittest.TestCase):
rtn = IS_JSON()('spam1234')
self.assertEqual(rtn, ('spam1234', 'Invalid json'))
+ def test_IS_UPLOAD_FILENAME(self):
+ import cgi
+ from StringIO import StringIO
+ def gen_fake(filename):
+ formdata_file_data = """
+---123
+Content-Disposition: form-data; name="key2"
+
+value2y
+---123
+Content-Disposition: form-data; name="file_attach"; filename="%s"
+Content-Type: text/plain
+
+this is the content of the fake file
+
+---123--
+""" % filename
+ formdata_file_environ = {
+ 'CONTENT_LENGTH': str(len(formdata_file_data)),
+ 'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
+ 'QUERY_STRING': 'key1=value1&key2=value2x',
+ 'REQUEST_METHOD': 'POST',
+ }
+ return cgi.FieldStorage(fp=StringIO(formdata_file_data), environ=formdata_file_environ)['file_attach']
+
+ fake = gen_fake('example.pdf')
+ rtn = IS_UPLOAD_FILENAME(extension='pdf')(fake)
+ self.assertEqual(rtn, (fake, None))
+ fake = gen_fake('example.gif')
+ rtn = IS_UPLOAD_FILENAME(extension='pdf')(fake)
+ self.assertEqual(rtn, (fake, 'Enter valid filename'))
+ fake = gen_fake('backup2014.tar.gz')
+ rtn = IS_UPLOAD_FILENAME(filename='backup.*', extension='tar.gz', lastdot=False)(fake)
+ self.assertEqual(rtn, (fake, None))
+ fake = gen_fake('README')
+ rtn = IS_UPLOAD_FILENAME(filename='^README$', extension='^$', case=0)(fake)
+ self.assertEqual(rtn, (fake, None))
+ fake = gen_fake('readme')
+ rtn = IS_UPLOAD_FILENAME(filename='^README$', extension='^$', case=0)(fake)
+ self.assertEqual(rtn, (fake, 'Enter valid filename'))
+ fake = gen_fake('readme')
+ rtn = IS_UPLOAD_FILENAME(filename='README', case=2)(fake)
+ self.assertEqual(rtn, (fake, None))
+ fake = gen_fake('README')
+ rtn = IS_UPLOAD_FILENAME(filename='README', case=2)(fake)
+ self.assertEqual(rtn, (fake, None))
+ rtn = IS_UPLOAD_FILENAME(extension='pdf')('example.pdf')
+ self.assertEqual(rtn, ('example.pdf', 'Enter valid filename'))
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/gluon/tests/test_web.py b/gluon/tests/test_web.py
index e39011cf..97ccc542 100644
--- a/gluon/tests/test_web.py
+++ b/gluon/tests/test_web.py
@@ -13,34 +13,9 @@ import subprocess
import time
import signal
+from fix_path import fix_sys_path
-def fix_sys_path():
- """
- logic to have always the correct sys.path
- '', web2py/gluon, web2py/site-packages, web2py/ ...
- """
-
- def add_path_first(path):
- sys.path = [path] + [p for p in sys.path if (
- not p == path and not p == (path + '/'))]
-
- path = os.path.dirname(os.path.abspath(__file__))
-
- if not os.path.isfile(os.path.join(path,'web2py.py')):
- i = 0
- while i<10:
- i += 1
- if os.path.exists(os.path.join(path,'web2py.py')):
- break
- path = os.path.abspath(os.path.join(path, '..'))
-
- paths = [path,
- os.path.abspath(os.path.join(path, 'site-packages')),
- os.path.abspath(os.path.join(path, 'gluon')),
- '']
- [add_path_first(path) for path in paths]
-
-fix_sys_path()
+fix_sys_path(__file__)
from contrib.webclient import WebClient
from urllib2 import HTTPError
diff --git a/gluon/tools.py b/gluon/tools.py
index ecd342ad..a5ec6ed7 100644
--- a/gluon/tools.py
+++ b/gluon/tools.py
@@ -11,7 +11,10 @@ Auth, Mail, PluginManager and various utilities
"""
import base64
-import cPickle
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
import datetime
import thread
import logging
@@ -2710,7 +2713,8 @@ class Auth(object):
extra_fields = [
Field("password_two", "password", requires=IS_EQUAL_TO(
request.post_vars.get(passfield,None),
- error_message=self.messages.mismatched_password))]
+ error_message=self.messages.mismatched_password),
+ label=current.T("Confirm Password"))]
else:
extra_fields = []
form = SQLFORM(table_user,
@@ -3187,11 +3191,14 @@ class Auth(object):
if log is DEFAULT:
log = self.messages['change_password_log']
passfield = self.settings.password_field
- is_crypt = copy.copy([t for t in table_user[passfield].requires
- if isinstance(t,CRYPT)][0])
- is_crypt.min_length = 0
+ requires = table_user[passfield].requires
+ if not isinstance(requires, (list, tuple)):
+ requires = [requires]
+ requires = filter(lambda t: isinstance(t, CRYPT), requires)
+ if requires:
+ requires[0].min_length = 0
form = SQLFORM.factory(
- Field('old_password', 'password', requires=[is_crypt],
+ Field('old_password', 'password', requires=requires,
label=self.messages.old_password),
Field('new_password', 'password',
label=self.messages.new_password,
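
A sketch of the normalization above, with a stand-in class: a field's
requires may be a single validator or a list, so it is wrapped before
filtering out the CRYPT instances whose minimum length must be relaxed
for the old-password field.

    class CRYPT(object):  # stand-in for gluon.validators.CRYPT
        min_length = 4

    def find_crypt(requires):
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        return filter(lambda t: isinstance(t, CRYPT), requires)

    requires = find_crypt(CRYPT())  # single validator, not a list
    if requires:
        requires[0].min_length = 0
    assert requires and requires[0].min_length == 0
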
@@ -3326,7 +3333,7 @@ class Auth(object):
user = table_user(user_id)
if not user:
raise HTTP(401, "Not Authorized")
- auth.impersonator = cPickle.dumps(session)
+ auth.impersonator = pickle.dumps(session, pickle.HIGHEST_PROTOCOL)
auth.user.update(
table_user._filter_fields(user, True))
self.user = auth.user
@@ -3337,7 +3344,7 @@ class Auth(object):
elif user_id in (0, '0'):
if self.is_impersonating():
session.clear()
- session.update(cPickle.loads(auth.impersonator))
+ session.update(pickle.loads(auth.impersonator))
self.user = session.auth.user
self.update_groups()
self.run_login_onaccept()
diff --git a/gluon/utils.py b/gluon/utils.py
index 60a4ff02..914cb0cf 100644
--- a/gluon/utils.py
+++ b/gluon/utils.py
@@ -23,7 +23,6 @@ import logging
import socket
import base64
import zlib
-import types
_struct_2_long_long = struct.Struct('=QQ')
@@ -160,7 +159,7 @@ def pad(s, n=32, padchar=' '):
def secure_dumps(data, encryption_key, hash_key=None, compression_level=None):
if not hash_key:
hash_key = sha1(encryption_key).hexdigest()
- dump = pickle.dumps(data)
+ dump = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)
if compression_level:
dump = zlib.compress(dump, compression_level)
key = pad(encryption_key[:32])
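
Why HIGHEST_PROTOCOL matters for secure_dumps, as a quick check: protocol 2
emits a compact binary stream where Python 2's default protocol 0 emits a
larger ASCII one, so the dump shrinks before compression and encryption.

    import pickle

    data = {'session_id': 123, 'flash': 'ok'}
    p0 = pickle.dumps(data)                           # protocol 0 default
    p2 = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)  # protocol 2
    assert len(p2) < len(p0)
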
diff --git a/gluon/validators.py b/gluon/validators.py
index 4e35c6ba..f98c6c0c 100644
--- a/gluon/validators.py
+++ b/gluon/validators.py
@@ -372,14 +372,18 @@ class IS_JSON(Validator):
if self.native_json:
simplejson.loads(value) # raises error in case of malformed json
return (value, None) # the serialized value is not passed
- return (simplejson.loads(value), None)
+ else:
+ return (simplejson.loads(value), None)
except JSONErrors:
return (value, translate(self.error_message))
def formatter(self,value):
if value is None:
return None
- return simplejson.dumps(value)
+ if self.native_json:
+ return value
+ else:
+ return simplejson.dumps(value)
class IS_IN_SET(Validator):
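
A condensed sketch of the native_json contract above (module-level functions
standing in for the IS_JSON methods): with native_json the validator checks
the string but passes it through unparsed, and the formatter returns it
untouched instead of re-serializing.

    import json as simplejson  # stand-in for gluon's json module

    def validate(value, native_json=False):
        parsed = simplejson.loads(value)  # raises on malformed json
        return value if native_json else parsed

    def formatter(value, native_json=False):
        if value is None or native_json:
            return value  # None, or an already-serialized string
        return simplejson.dumps(value)

    assert validate('{"a": 1}', native_json=True) == '{"a": 1}'
    assert formatter('{"a": 1}', native_json=True) == '{"a": 1}'
    assert formatter({'a': 1}) == '{"a": 1}'
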
@@ -1164,11 +1168,8 @@ class IS_LIST_OF_EMAILS(object):
def __call__(self, value):
bad_emails = []
- emails = []
f = IS_EMAIL()
for email in self.split_emails.findall(value):
- if not email in emails:
- emails.append(email)
error = f(email)[1]
if error and not email in bad_emails:
bad_emails.append(email)
@@ -2516,7 +2517,7 @@ class IS_LIST_OF(Validator):
if not isinstance(other, (list,tuple)):
other = [other]
for item in ivalue:
- if item.strip():
+ if str(item).strip():
v = item
for validator in other:
(v, e) = validator(v)
diff --git a/scripts/sessions2trash.py b/scripts/sessions2trash.py
index 6e11b2a0..e07e731b 100755
--- a/scripts/sessions2trash.py
+++ b/scripts/sessions2trash.py
@@ -29,6 +29,12 @@ Typical usage:
"""
from __future__ import with_statement
+
+import sys
+import os
+sys.path.append(os.path.join(*__file__.split(os.sep)[:-2] or ['.']))
+
from gluon import current
from gluon.storage import Storage
from optparse import OptionParser
@@ -37,6 +43,7 @@ import datetime
import os
import stat
import time
+import glob
EXPIRATION_MINUTES = 60
SLEEP_MINUTES = 5
@@ -157,6 +164,9 @@ class SessionFile(object):
def delete(self):
try:
os.unlink(self.filename)
+ path = os.path.dirname(self.filename)
+ if not path.endswith('sessions') and len(os.listdir(path)) == 0:
+ os.rmdir(path)
except:
pass
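
The cleanup added above, isolated: after unlinking a session file, remove
its parent directory when it is an empty per-prefix subfolder rather than
the top-level sessions directory itself.

    import os

    def remove_empty_parent(filename):
        path = os.path.dirname(filename)
        if not path.endswith('sessions') and len(os.listdir(path)) == 0:
            os.rmdir(path)
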
@@ -191,10 +201,11 @@ def single_loop(expiration=None, force=False, verbose=False):
except:
expiration = EXPIRATION_MINUTES * 60
- set_db = SessionSetDb(expiration, force, verbose)
set_files = SessionSetFiles(expiration, force, verbose)
- set_db.trash()
set_files.trash()
+ set_db = SessionSetDb(expiration, force, verbose)
+ set_db.trash()
+
def main():
"""Main processing."""