Compare commits
37 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 2b50cf27e2 | |
| | a38c7c2aa8 | |
| | f0461ae84a | |
| | 0120ad5b95 | |
| | e38565345c | |
| | b1180f483c | |
| | 05a92e86f7 | |
| | 75f2b90151 | |
| | 6a75132d54 | |
| | 3d9ebb5720 | |
| | ec62375f6d | |
| | f1ac5126a1 | |
| | e4e9038ab6 | |
| | ce897b3569 | |
| | c4a4ee27c4 | |
| | ecc6e5dc5e | |
| | b991f2926f | |
| | da195b787d | |
| | d81f4606db | |
| | d29a108ad7 | |
| | 94a0b1aec5 | |
| | 0a571338b6 | |
| | c2b1547802 | |
| | cd2920c2cf | |
| | 0863bb3eca | |
| | 04c1064ab8 | |
| | 5c9796ac72 | |
| | cef7202781 | |
| | c3478f660a | |
| | 09a44d41ae | |
| | 4740dc7cce | |
| | 463c5deac4 | |
| | 6403878f95 | |
| | bd87879cde | |
| | 61d5a776d4 | |
| | c3cd32cc4f | |
| | b98687239c | |
@@ -1,4 +1,4 @@
-## 2.9.6 - 2.9.8
+## 2.9.6 - 2.9.10

- fixed support of GAE + SQL
- fixed a typo in the license of some login_methods code. It is now LGPL consistently with the rest of the web2py code. This change applied to all previous web2py versions.
@@ -6,7 +6,7 @@
- Sphinx documentation (thanks Niphlod)
- improved scheduler (thanks Niphlod)
- increased security
-- better cache.dick (thanks Leonel)
+- better cache.disk (thanks Leonel)
- sessions are stored in subfolders for speed
- postgres support for "INSERT ... RETURNING ..."
- ldap support for Certificate Authority (thanks Maggs and Shane)
@@ -15,6 +15,8 @@
- support for Collection+JSON Hypermedia API (RESTful self documenting API)
- jQuery 1.11
- codemirror 4.0.3
- markdown2 2.2.3
- memcache 1.53
- support for the new janrain API
- new "web2py.py -G config" to make GAE configuration easier
- many small bug fixes
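The changelog's `INSERT ... RETURNING` entry refers to letting PostgreSQL report a new row's id in the same round trip as the insert, instead of issuing a follow-up `lastval()` query. A minimal sketch of the idea in plain psycopg2 terms (the `person` table and its columns are invented for illustration; web2py's DAL performs this internally):

```python
# Hedged illustration of the INSERT ... RETURNING pattern the changelog
# mentions; 'person' and its columns are invented names.
import psycopg2

conn = psycopg2.connect("dbname=test")
cur = conn.cursor()
cur.execute(
    "INSERT INTO person (name) VALUES (%s) RETURNING id;",  # id comes back directly
    ("Alice",),
)
new_id = cur.fetchone()[0]  # no second query such as SELECT lastval() is needed
conn.commit()
```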
Makefile

@@ -30,7 +30,7 @@ update:
	echo "remember that pymysql was tweaked"
src:
	### Use semantic versioning
-	echo 'Version 2.9.9-stable+timestamp.'`date +%Y.%m.%d.%H.%M.%S` > VERSION
+	echo 'Version 2.9.10-stable+timestamp.'`date +%Y.%m.%d.%H.%M.%S` > VERSION
	### rm -f all junk files
	make clean
	### clean up basic apps
@@ -39,11 +39,10 @@ That's it!!!
VERSION        > this web2py version
web2py.py      > the startup script
anyserver.py   > to run with third party servers
-wsgihandler.py > handler to connect to WSGI
...            > other handlers and example files
gluon/         > the core libraries
contrib/       > third party libraries
tests/         > unittests
applications/  > are the apps
  admin/       > web based IDE
  ...
@@ -67,6 +66,9 @@ That's it!!!
examples/      > example config files, mv .. and customize
extras/        > other files which are required for building web2py
scripts/       > utility and installation scripts
+handlers/
+  wsgihandler.py > handler to connect to WSGI
+  ...            > handlers for Fast-CGI, SCGI, Gevent, etc
site-packages/ > additional optional modules
logs/          > log files will go in there
deposit/       > a place where web2py stores apps temporarily
VERSION

@@ -1 +1 @@
-Version 2.9.9-stable+timestamp.2014.09.08.08.12.34
+Version 2.9.10-stable+timestamp.2014.09.15.08.05.10
@@ -582,7 +582,7 @@ def bg_graph_model():
        if hasattr(db[tablename],'_meta_graphmodel'):
            meta_graphmodel = db[tablename]._meta_graphmodel
        else:
-            meta_graphmodel = dict(group='Undefined', color='#ECECEC')
+            meta_graphmodel = dict(group=request.application, color='#ECECEC')

        group = meta_graphmodel['group'].replace(' ', '')
        if not subgraphs.has_key(group):

applications/admin/static/js/jquery.js (vendored)

File diff suppressed because one or more lines are too long
applications/examples/static/js/jquery.js (vendored)

File diff suppressed because one or more lines are too long
applications/welcome/static/js/jquery.js (vendored)

File diff suppressed because one or more lines are too long
@@ -20,7 +20,6 @@ caching will be provided by the GAE memcache
(see gluon.contrib.gae_memcache)
"""
import time
import shutil
import thread
import os
import sys
@@ -1,12 +1,8 @@
# fix response

import re
import os
import cPickle
import gluon.serializers
from gluon import current, HTTP
from gluon.html import markmin_serializer, TAG, HTML, BODY, UL, XML, H1
from gluon.contenttype import contenttype
from gluon.contrib.fpdf import FPDF, HTMLMixin
from gluon.sanitizer import sanitize
from gluon.contrib.markmin.markmin2latex import markmin2latex

File diff suppressed because it is too large

File diff suppressed because it is too large
@@ -4,7 +4,7 @@

# The MIT License (MIT)
#
-# Copyright (c) 2013 Henry Zhou <jiangwen365@gmail.com> and PyPyODBC contributors
+# Copyright (c) 2014 Henry Zhou <jiangwen365@gmail.com> and PyPyODBC contributors
# Copyright (c) 2004 Michele Petrazzo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
@@ -16,7 +16,7 @@
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO #EVENT SHALL THE
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
@@ -25,7 +25,7 @@ pooling = True
apilevel = '2.0'
paramstyle = 'qmark'
threadsafety = 1
-version = '1.2.0'
+version = '1.3.0'
lowercase=True

DEBUG = 0
@@ -592,38 +592,38 @@ if sys.platform == 'cli':
# http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.sdk_12.5.1.aseodbc/html/aseodbc/CACFDIGH.htm

SQL_data_type_dict = { \
-#SQL Data TYPE       0.Python Data Type   1.Default Output Converter  2.Buffer Type  3.Buffer Allocator  4.Default Buffer Size
-SQL_TYPE_NULL      : (None,               lambda x: None,             SQL_C_CHAR,    create_buffer,      2     ),
-SQL_CHAR           : (str,                lambda x: x,                SQL_C_CHAR,    create_buffer,      2048  ),
-SQL_NUMERIC        : (Decimal,            Decimal_cvt,                SQL_C_CHAR,    create_buffer,      150   ),
-SQL_DECIMAL        : (Decimal,            Decimal_cvt,                SQL_C_CHAR,    create_buffer,      150   ),
-SQL_INTEGER        : (int,                int,                        SQL_C_CHAR,    create_buffer,      150   ),
-SQL_SMALLINT       : (int,                int,                        SQL_C_CHAR,    create_buffer,      150   ),
-SQL_FLOAT          : (float,              float,                      SQL_C_CHAR,    create_buffer,      150   ),
-SQL_REAL           : (float,              float,                      SQL_C_CHAR,    create_buffer,      150   ),
-SQL_DOUBLE         : (float,              float,                      SQL_C_CHAR,    create_buffer,      200   ),
-SQL_DATE           : (datetime.date,      dt_cvt,                     SQL_C_CHAR,    create_buffer,      30    ),
-SQL_TIME           : (datetime.time,      tm_cvt,                     SQL_C_CHAR,    create_buffer,      20    ),
-SQL_SS_TIME2       : (datetime.time,      tm_cvt,                     SQL_C_CHAR,    create_buffer,      20    ),
-SQL_TIMESTAMP      : (datetime.datetime,  dttm_cvt,                   SQL_C_CHAR,    create_buffer,      30    ),
-SQL_VARCHAR        : (str,                lambda x: x,                SQL_C_CHAR,    create_buffer,      2048  ),
-SQL_LONGVARCHAR    : (str,                lambda x: x,                SQL_C_CHAR,    create_buffer,      20500 ),
-SQL_BINARY         : (bytearray,          bytearray_cvt,              SQL_C_BINARY,  create_buffer,      5120  ),
-SQL_VARBINARY      : (bytearray,          bytearray_cvt,              SQL_C_BINARY,  create_buffer,      5120  ),
-SQL_LONGVARBINARY  : (bytearray,          bytearray_cvt,              SQL_C_BINARY,  create_buffer,      20500 ),
-SQL_BIGINT         : (long,               long,                       SQL_C_CHAR,    create_buffer,      150   ),
-SQL_TINYINT        : (int,                int,                        SQL_C_CHAR,    create_buffer,      150   ),
-SQL_BIT            : (bool,               lambda x:x == BYTE_1,       SQL_C_CHAR,    create_buffer,      2     ),
-SQL_WCHAR          : (unicode,            lambda x: x,                SQL_C_WCHAR,   create_buffer_u,    2048  ),
-SQL_WVARCHAR       : (unicode,            lambda x: x,                SQL_C_WCHAR,   create_buffer_u,    2048  ),
-SQL_GUID           : (str,                str,                        SQL_C_CHAR,    create_buffer,      50    ),
-SQL_WLONGVARCHAR   : (unicode,            lambda x: x,                SQL_C_WCHAR,   create_buffer_u,    20500 ),
-SQL_TYPE_DATE      : (datetime.date,      dt_cvt,                     SQL_C_CHAR,    create_buffer,      30    ),
-SQL_TYPE_TIME      : (datetime.time,      tm_cvt,                     SQL_C_CHAR,    create_buffer,      20    ),
-SQL_TYPE_TIMESTAMP : (datetime.datetime,  dttm_cvt,                   SQL_C_CHAR,    create_buffer,      30    ),
-SQL_SS_VARIANT     : (str,                lambda x: x,                SQL_C_CHAR,    create_buffer,      2048  ),
-SQL_SS_XML         : (unicode,            lambda x: x,                SQL_C_WCHAR,   create_buffer_u,    20500 ),
-SQL_SS_UDT         : (bytearray,          bytearray_cvt,              SQL_C_BINARY,  create_buffer,      5120  ),
+#SQL Data TYPE       0.Python Data Type   1.Default Output Converter  2.Buffer Type  3.Buffer Allocator  4.Default Size  5.Variable Length
+SQL_TYPE_NULL      : (None,               lambda x: None,             SQL_C_CHAR,    create_buffer,      2     , False ),
+SQL_CHAR           : (str,                lambda x: x,                SQL_C_CHAR,    create_buffer,      2048  , False ),
+SQL_NUMERIC        : (Decimal,            Decimal_cvt,                SQL_C_CHAR,    create_buffer,      150   , False ),
+SQL_DECIMAL        : (Decimal,            Decimal_cvt,                SQL_C_CHAR,    create_buffer,      150   , False ),
+SQL_INTEGER        : (int,                int,                        SQL_C_CHAR,    create_buffer,      150   , False ),
+SQL_SMALLINT       : (int,                int,                        SQL_C_CHAR,    create_buffer,      150   , False ),
+SQL_FLOAT          : (float,              float,                      SQL_C_CHAR,    create_buffer,      150   , False ),
+SQL_REAL           : (float,              float,                      SQL_C_CHAR,    create_buffer,      150   , False ),
+SQL_DOUBLE         : (float,              float,                      SQL_C_CHAR,    create_buffer,      200   , False ),
+SQL_DATE           : (datetime.date,      dt_cvt,                     SQL_C_CHAR,    create_buffer,      30    , False ),
+SQL_TIME           : (datetime.time,      tm_cvt,                     SQL_C_CHAR,    create_buffer,      20    , False ),
+SQL_SS_TIME2       : (datetime.time,      tm_cvt,                     SQL_C_CHAR,    create_buffer,      20    , False ),
+SQL_TIMESTAMP      : (datetime.datetime,  dttm_cvt,                   SQL_C_CHAR,    create_buffer,      30    , False ),
+SQL_VARCHAR        : (str,                lambda x: x,                SQL_C_CHAR,    create_buffer,      2048  , False ),
+SQL_LONGVARCHAR    : (str,                lambda x: x,                SQL_C_CHAR,    create_buffer,      20500 , True  ),
+SQL_BINARY         : (bytearray,          bytearray_cvt,              SQL_C_BINARY,  create_buffer,      5120  , True  ),
+SQL_VARBINARY      : (bytearray,          bytearray_cvt,              SQL_C_BINARY,  create_buffer,      5120  , True  ),
+SQL_LONGVARBINARY  : (bytearray,          bytearray_cvt,              SQL_C_BINARY,  create_buffer,      20500 , True  ),
+SQL_BIGINT         : (long,               long,                       SQL_C_CHAR,    create_buffer,      150   , False ),
+SQL_TINYINT        : (int,                int,                        SQL_C_CHAR,    create_buffer,      150   , False ),
+SQL_BIT            : (bool,               lambda x:x == BYTE_1,       SQL_C_CHAR,    create_buffer,      2     , False ),
+SQL_WCHAR          : (unicode,            lambda x: x,                SQL_C_WCHAR,   create_buffer_u,    2048  , False ),
+SQL_WVARCHAR       : (unicode,            lambda x: x,                SQL_C_WCHAR,   create_buffer_u,    2048  , False ),
+SQL_GUID           : (str,                str,                        SQL_C_CHAR,    create_buffer,      2048  , False ),
+SQL_WLONGVARCHAR   : (unicode,            lambda x: x,                SQL_C_WCHAR,   create_buffer_u,    20500 , True  ),
+SQL_TYPE_DATE      : (datetime.date,      dt_cvt,                     SQL_C_CHAR,    create_buffer,      30    , False ),
+SQL_TYPE_TIME      : (datetime.time,      tm_cvt,                     SQL_C_CHAR,    create_buffer,      20    , False ),
+SQL_TYPE_TIMESTAMP : (datetime.datetime,  dttm_cvt,                   SQL_C_CHAR,    create_buffer,      30    , False ),
+SQL_SS_VARIANT     : (str,                lambda x: x,                SQL_C_CHAR,    create_buffer,      2048  , True  ),
+SQL_SS_XML         : (unicode,            lambda x: x,                SQL_C_WCHAR,   create_buffer_u,    20500 , True  ),
+SQL_SS_UDT         : (bytearray,          bytearray_cvt,              SQL_C_BINARY,  create_buffer,      5120  , True  ),
}
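The new sixth tuple element flags variable-length types, so the cursor can decide between binding a fixed buffer once (`SQLBindCol`) and fetching in chunks (`SQLGetData`). A simplified standalone model of that decision, not pypyodbc's actual internals:

```python
# Sketch of how a "variable length" flag per SQL type can drive the
# fetch strategy; the type codes are the standard ODBC values.
SQL_VARCHAR, SQL_LONGVARCHAR = 12, -1

type_info = {
    #                (default buffer size, variable length?)
    SQL_VARCHAR:     (2048,  False),
    SQL_LONGVARCHAR: (20500, True),
}

def choose_fetch_strategy(sql_type, col_size):
    buf_len, dynamic = type_info[sql_type]
    # very wide columns fall back to chunked retrieval, mirroring the
    # "if col_size >= 1024: dynamic_length = True" change in this diff
    if col_size >= 1024:
        dynamic = True
    return "SQLGetData (chunked)" if dynamic else "SQLBindCol (bound buffer)"

print(choose_fetch_strategy(SQL_VARCHAR, 80))     # bound buffer
print(choose_fetch_strategy(SQL_LONGVARCHAR, 1))  # chunked
```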
@@ -645,6 +645,7 @@ SQLRETURN -> ctypes.c_short
funcs_with_ret = [
    "SQLAllocHandle",
    "SQLBindParameter",
+    "SQLBindCol",
    "SQLCloseCursor",
    "SQLColAttribute",
    "SQLColumns",
@@ -1175,7 +1176,9 @@ class Cursor:
        """prepare a query"""

        #self._free_results(FREE_STATEMENT)
+        if not self.connection:
+            self.close()

        if type(query_string) == unicode:
            c_query_string = wchar_pointer(UCS_buf(query_string))
            ret = ODBC_API.SQLPrepareW(self.stmt_h, c_query_string, len(query_string))
@@ -1231,6 +1234,8 @@ class Cursor:
    def _BindParams(self, param_types, pram_io_list = []):
        """Create parameter buffers based on param types, and bind them to the statement"""
        # Clear the old Parameters
+        if not self.connection:
+            self.close()
        #self._free_results(NO_FREE_STATEMENT)

        # Get the number of query parameters judged by database.
@@ -1414,7 +1419,7 @@ class Cursor:
                dec_num, ADDR(ParameterBuffer), BufferLen,ADDR(LenOrIndBuf))
            if ret != SQL_SUCCESS:
                check_success(self, ret)
-           # Append the value buffer and the lenth buffer to the array
+           # Append the value buffer and the length buffer to the array
            ParamBufferList.append((ParameterBuffer,LenOrIndBuf,sql_type))

        self._last_param_types = param_types
@@ -1426,6 +1431,9 @@ class Cursor:
        If parameters are provided, the query would first be prepared, then executed with parameters;
        If parameters are not provided (only the query string), it would be executed directly
        """
+        if not self.connection:
+            self.close()
+
        self._free_stmt(SQL_CLOSE)
        if params:
            # If parameters exist, first prepare the query then execute with parameters
@@ -1549,7 +1557,7 @@ class Cursor:
                c_buf_len = len(c_char_buf)

            elif param_types[col_num][0] == 'bi':
-               c_char_buf = str(param_val)
+               c_char_buf = str_8b(param_val)
                c_buf_len = len(c_char_buf)

            else:
@@ -1587,6 +1595,8 @@ class Cursor:

    def _SQLExecute(self):
+        if not self.connection:
+            self.close()
        ret = SQLExecute(self.stmt_h)
        if ret != SQL_SUCCESS:
            check_success(self, ret)
@@ -1594,6 +1604,9 @@ class Cursor:

    def execdirect(self, query_string):
        """Execute a query directly"""
+        if not self.connection:
+            self.close()
+
        self._free_stmt()
        self._last_param_types = None
        self.statement = None
@@ -1611,6 +1624,8 @@ class Cursor:

    def callproc(self, procname, args):
+        if not self.connection:
+            self.close()
        raise Warning('', 'Still not fully implemented')
        self._pram_io_list = [row[4] for row in self.procedurecolumns(procedure = procname).fetchall() if row[4] not in (SQL_RESULT_COL, SQL_RETURN_VALUE)]
@@ -1637,6 +1652,9 @@ class Cursor:

    def executemany(self, query_string, params_list = [None]):
+        if not self.connection:
+            self.close()
+
        for params in params_list:
            self.execute(query_string, params, many_mode = True)
        self._NumOfRows()
@@ -1647,28 +1665,38 @@ class Cursor:

    def _CreateColBuf(self):
+        if not self.connection:
+            self.close()
        self._free_stmt(SQL_UNBIND)
        NOC = self._NumOfCols()
        self._ColBufferList = []
+        bind_data = True
        for col_num in range(NOC):
-            col_name = self.description[col_num][0]
-
-            col_sql_data_type = self._ColTypeCodeList[col_num]
+            col_name = self.description[col_num][0]
+            col_size = self.description[col_num][2]
+            col_sql_data_type = self._ColTypeCodeList[col_num]

            target_type = SQL_data_type_dict[col_sql_data_type][2]
+            dynamic_length = SQL_data_type_dict[col_sql_data_type][5]
            # set default size base on the column's sql data type
            total_buf_len = SQL_data_type_dict[col_sql_data_type][4]

-            # over-write if there's preset size value for "large columns"
-            if total_buf_len >= 20500:
+            # over-write if there's pre-set size value for "large columns"
+            if total_buf_len > 20500:
                total_buf_len = self._outputsize.get(None,total_buf_len)
-            # over-write if there's preset size value for the "col_num" column
+            # over-write if there's pre-set size value for the "col_num" column
            total_buf_len = self._outputsize.get(col_num, total_buf_len)

+            # if the size of the buffer is very long, do not bind
+            # because a large buffer decrease performance, and sometimes you only get a NULL value.
+            # in that case use sqlgetdata instead.
+            if col_size >= 1024:
+                dynamic_length = True
+
            alloc_buffer = SQL_data_type_dict[col_sql_data_type][3](total_buf_len)

            used_buf_len = c_ssize_t()

-            target_type = SQL_data_type_dict[col_sql_data_type][2]
            force_unicode = self.connection.unicode_results

            if force_unicode and col_sql_data_type in (SQL_CHAR,SQL_VARCHAR,SQL_LONGVARCHAR):
@@ -1676,14 +1704,22 @@ class Cursor:
                alloc_buffer = create_buffer_u(total_buf_len)

            buf_cvt_func = self.connection.output_converter[self._ColTypeCodeList[col_num]]
-            self._ColBufferList.append([col_name, target_type, used_buf_len, ADDR(used_buf_len), alloc_buffer, ADDR(alloc_buffer), total_buf_len, buf_cvt_func])
+
+            if bind_data:
+                if dynamic_length:
+                    bind_data = False
+            self._ColBufferList.append([col_name, target_type, used_buf_len, ADDR(used_buf_len), alloc_buffer, ADDR(alloc_buffer), total_buf_len, buf_cvt_func, bind_data])
+
+            if bind_data:
+                ret = ODBC_API.SQLBindCol(self.stmt_h, col_num + 1, target_type, ADDR(alloc_buffer), total_buf_len, ADDR(used_buf_len))
+                if ret != SQL_SUCCESS:
+                    check_success(self, ret)

    def _UpdateDesc(self):
        "Get the information of (name, type_code, display_size, internal_size, col_precision, scale, null_ok)"
+        if not self.connection:
+            self.close()
+
        force_unicode = self.connection.unicode_results
        if force_unicode:
            Cname = create_buffer_u(1024)
@@ -1739,6 +1775,9 @@ class Cursor:

    def _NumOfRows(self):
        """Get the number of rows"""
+        if not self.connection:
+            self.close()
+
        NOR = c_ssize_t()
        ret = SQLRowCount(self.stmt_h, ADDR(NOR))
        if ret != SQL_SUCCESS:
@@ -1749,6 +1788,9 @@ class Cursor:

    def _NumOfCols(self):
        """Get the number of cols"""
+        if not self.connection:
+            self.close()
+
        NOC = c_short()
        ret = SQLNumResultCols(self.stmt_h, ADDR(NOC))
        if ret != SQL_SUCCESS:
@@ -1757,6 +1799,9 @@ class Cursor:

    def fetchall(self):
+        if not self.connection:
+            self.close()
+
        rows = []
        while True:
            row = self.fetchone()
@@ -1767,6 +1812,9 @@ class Cursor:

    def fetchmany(self, num = None):
+        if not self.connection:
+            self.close()
+
        if num is None:
            num = self.arraysize
        rows = []
@@ -1780,74 +1828,83 @@ class Cursor:

    def fetchone(self):
+        if not self.connection:
+            self.close()
+
        ret = SQLFetch(self.stmt_h)
-        if ret == SQL_SUCCESS:
+        if ret in (SQL_SUCCESS,SQL_SUCCESS_WITH_INFO):
            '''Bind buffers for the record set columns'''

            value_list = []
            col_num = 1
-            for col_name, target_type, used_buf_len, ADDR_used_buf_len, alloc_buffer, ADDR_alloc_buffer, total_buf_len, buf_cvt_func in self._ColBufferList:
-                blocks = []
+            for col_name, target_type, used_buf_len, ADDR_used_buf_len, alloc_buffer, ADDR_alloc_buffer, total_buf_len, buf_cvt_func, bind_data in self._ColBufferList:
+                raw_data_parts = []
                while 1:
-                    ret = SQLGetData(self.stmt_h, col_num, target_type, ADDR_alloc_buffer, total_buf_len, ADDR_used_buf_len)
+                    if bind_data:
+                        ret = SQL_SUCCESS
+                    else:
+                        ret = SQLGetData(self.stmt_h, col_num, target_type, ADDR_alloc_buffer, total_buf_len, ADDR_used_buf_len)
                    if ret == SQL_SUCCESS:
                        if used_buf_len.value == SQL_NULL_DATA:
                            value_list.append(None)
                        else:
-                            if blocks == []:
+                            if raw_data_parts == []:
                                # Means no previous data, no need to combine
                                if target_type == SQL_C_BINARY:
                                    value_list.append(buf_cvt_func(alloc_buffer.raw[:used_buf_len.value]))
                                elif target_type == SQL_C_WCHAR:
                                    value_list.append(buf_cvt_func(from_buffer_u(alloc_buffer)))
                                else:
                                    #print col_name, target_type, alloc_buffer.value
                                    value_list.append(buf_cvt_func(alloc_buffer.value))
                            else:
                                # There are previous fetched raw data to combine
                                if target_type == SQL_C_BINARY:
-                                    blocks.append(alloc_buffer.raw[:used_buf_len.value])
+                                    raw_data_parts.append(alloc_buffer.raw[:used_buf_len.value])
                                elif target_type == SQL_C_WCHAR:
-                                    blocks.append(from_buffer_u(alloc_buffer))
+                                    raw_data_parts.append(from_buffer_u(alloc_buffer))
                                else:
                                    #print col_name, target_type, alloc_buffer.value
-                                    blocks.append(alloc_buffer.value)
+                                    raw_data_parts.append(alloc_buffer.value)
                        break

                    elif ret == SQL_SUCCESS_WITH_INFO:
                        # Means the data is only partial
                        if target_type == SQL_C_BINARY:
-                            blocks.append(alloc_buffer.raw)
+                            raw_data_parts.append(alloc_buffer.raw)
                        else:
-                            blocks.append(alloc_buffer.value)
+                            raw_data_parts.append(alloc_buffer.value)

                    elif ret == SQL_NO_DATA:
                        # Means all data has been transmitted
                        break
                    else:
                        check_success(self, ret)

-                if blocks != []:
+                if raw_data_parts != []:
                    if py_v3:
                        if target_type != SQL_C_BINARY:
-                            raw_value = ''.join(blocks)
+                            raw_value = ''.join(raw_data_parts)
                        else:
-                            raw_value = BLANK_BYTE.join(blocks)
+                            raw_value = BLANK_BYTE.join(raw_data_parts)
                    else:
-                        raw_value = ''.join(blocks)
+                        raw_value = ''.join(raw_data_parts)

                    value_list.append(buf_cvt_func(raw_value))
                col_num += 1

            return self._row_type(value_list)

        else:
            if ret == SQL_NO_DATA_FOUND:
                return None
            else:
                check_success(self, ret)

    def __next__(self):
-        self.next()
+        return self.next()

    def next(self):
        row = self.fetchone()
        if row is None:
            raise(StopIteration)
@@ -1858,6 +1915,9 @@ class Cursor:

    def skip(self, count = 0):
+        if not self.connection:
+            self.close()
+
        for i in range(count):
            ret = ODBC_API.SQLFetchScroll(self.stmt_h, SQL_FETCH_NEXT, 0)
            if ret != SQL_SUCCESS:
@@ -1867,6 +1927,9 @@ class Cursor:

    def nextset(self):
+        if not self.connection:
+            self.close()
+
        ret = ODBC_API.SQLMoreResults(self.stmt_h)
        if ret not in (SQL_SUCCESS, SQL_NO_DATA):
            check_success(self, ret)
@@ -1882,6 +1945,9 @@ class Cursor:

    def _free_stmt(self, free_type = None):
+        if not self.connection:
+            self.close()
+
        if not self.connection.connected:
            raise ProgrammingError('HY000','Attempt to use a closed connection.')
@@ -1903,6 +1969,9 @@ class Cursor:

    def getTypeInfo(self, sqlType = None):
+        if not self.connection:
+            self.close()
+
        if sqlType is None:
            type = SQL_ALL_TYPES
        else:
@@ -1917,6 +1986,9 @@ class Cursor:

    def tables(self, table=None, catalog=None, schema=None, tableType=None):
        """Return a list with all tables"""
+        if not self.connection:
+            self.close()
+
        l_catalog = l_schema = l_table = l_tableType = 0

        if unicode in [type(x) for x in (table, catalog, schema,tableType)]:
@@ -1961,7 +2033,10 @@ class Cursor:

    def columns(self, table=None, catalog=None, schema=None, column=None):
        """Return a list with all columns"""
+        if not self.connection:
+            self.close()
+
        l_catalog = l_schema = l_table = l_column = 0

        if unicode in [type(x) for x in (table, catalog, schema,column)]:
@@ -2004,6 +2079,9 @@ class Cursor:

    def primaryKeys(self, table=None, catalog=None, schema=None):
+        if not self.connection:
+            self.close()
+
        l_catalog = l_schema = l_table = 0

        if unicode in [type(x) for x in (table, catalog, schema)]:
@@ -2044,6 +2122,9 @@ class Cursor:

    def foreignKeys(self, table=None, catalog=None, schema=None, foreignTable=None, foreignCatalog=None, foreignSchema=None):
+        if not self.connection:
+            self.close()
+
        l_catalog = l_schema = l_table = l_foreignTable = l_foreignCatalog = l_foreignSchema = 0

        if unicode in [type(x) for x in (table, catalog, schema,foreignTable,foreignCatalog,foreignSchema)]:
@@ -2092,6 +2173,9 @@ class Cursor:

    def procedurecolumns(self, procedure=None, catalog=None, schema=None, column=None):
+        if not self.connection:
+            self.close()
+
        l_catalog = l_schema = l_procedure = l_column = 0
        if unicode in [type(x) for x in (procedure, catalog, schema,column)]:
            string_p = lambda x:wchar_pointer(UCS_buf(x))
@@ -2132,6 +2216,9 @@ class Cursor:

    def procedures(self, procedure=None, catalog=None, schema=None):
+        if not self.connection:
+            self.close()
+
        l_catalog = l_schema = l_procedure = 0

        if unicode in [type(x) for x in (procedure, catalog, schema)]:
@@ -2170,6 +2257,9 @@ class Cursor:

    def statistics(self, table, catalog=None, schema=None, unique=False, quick=True):
+        if not self.connection:
+            self.close()
+
        l_table = l_catalog = l_schema = 0

        if unicode in [type(x) for x in (table, catalog, schema)]:
@@ -2217,15 +2307,23 @@ class Cursor:

    def commit(self):
+        if not self.connection:
+            self.close()
        self.connection.commit()

    def rollback(self):
+        if not self.connection:
+            self.close()
        self.connection.rollback()

    def setoutputsize(self, size, column = None):
+        if not self.connection:
+            self.close()
        self._outputsize[column] = size

    def setinputsizes(self, sizes):
+        if not self.connection:
+            self.close()
        self._inputsizers = [size for size in sizes]
@@ -2234,35 +2332,31 @@ class Cursor:
        # ret = ODBC_API.SQLCloseCursor(self.stmt_h)
        # check_success(self, ret)
        #
-        ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_CLOSE)
-        check_success(self, ret)
+        if self.connection.connected:
+            ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_CLOSE)
+            check_success(self, ret)

-        ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_UNBIND)
-        check_success(self, ret)
+            ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_UNBIND)
+            check_success(self, ret)

-        ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_RESET_PARAMS)
-        check_success(self, ret)
+            ret = ODBC_API.SQLFreeStmt(self.stmt_h, SQL_RESET_PARAMS)
+            check_success(self, ret)

-        ret = ODBC_API.SQLFreeHandle(SQL_HANDLE_STMT, self.stmt_h)
-        check_success(self, ret)
+            ret = ODBC_API.SQLFreeHandle(SQL_HANDLE_STMT, self.stmt_h)
+            check_success(self, ret)

        self.closed = True

    def __del__(self):
        if not self.closed:
-            #if DEBUG:print 'auto closing cursor: ',
-            try:
-                self.close()
-            except:
-                #if DEBUG:print 'failed'
-                pass
-            else:
-                #if DEBUG:print 'succeed'
-                pass
+            self.close()

    def __exit__(self, type, value, traceback):
+        if not self.connection:
+            self.close()
+
        if value:
            self.rollback()
        else:
@@ -2291,7 +2385,7 @@ class Connection:
        self.autocommit = autocommit
        self.readonly = False
        self.timeout = 0
-        self._cursors = []
+        # self._cursors = []
        for key, value in list(kargs.items()):
            connectString = connectString + key + '=' + value + ';'
        self.connectString = connectString
@@ -2425,7 +2519,7 @@ class Connection:
        if not self.connected:
            raise ProgrammingError('HY000','Attempt to use a closed connection.')
        cur = Cursor(self, row_type_callable=row_type_callable)
-        self._cursors.append(cur)
+        # self._cursors.append(cur)
        return cur

    def update_db_special_info(self):
@@ -2436,6 +2530,7 @@ class Connection:
            SQL_SS_TIME2,
        ):
            cur = Cursor(self)
+
            try:
                info_tuple = cur.getTypeInfo(sql_type)
                if info_tuple is not None:
@@ -2534,10 +2629,10 @@ class Connection:
    def close(self):
        if not self.connected:
            raise ProgrammingError('HY000','Attempt to close a closed connection.')
-        for cur in self._cursors:
-            if not cur is None:
-                if not cur.closed:
-                    cur.close()
+        # for cur in self._cursors:
+        #     if not cur is None:
+        #         if not cur.closed:
+        #             cur.close()

        if self.connected:
            #if DEBUG:print 'disconnect'
@@ -2680,4 +2775,18 @@ def dataSources():
            ctrl_err(SQL_HANDLE_ENV, shared_env_h, ret)
        else:
            dsn_list[dsn.value] = desc.value
    return dsn_list

+
+def monkey_patch_for_gevent():
+    import functools, gevent
+    apply_e = gevent.get_hub().threadpool.apply_e
+    def monkey_patch(func):
+        @functools.wraps(func)
+        def wrap(*args, **kwargs):
+            #if DEBUG:print('%s called with %s %s' % (func, args, kwargs))
+            return apply_e(Exception, func, args, kwargs)
+        return wrap
+    for attr in dir(ODBC_API):
+        if attr.startswith('SQL') and hasattr(getattr(ODBC_API, attr), 'argtypes'):
+            setattr(ODBC_API, attr, monkey_patch(getattr(ODBC_API, attr)))
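The new `monkey_patch_for_gevent` helper wraps every ctypes-level `SQL*` function so it runs on gevent's native threadpool, keeping blocking ODBC calls from stalling the event loop. A hedged usage sketch (assumes pypyodbc and gevent are installed; the DSN name is invented):

```python
# Hypothetical usage of the new helper; 'mydsn' is an invented DSN.
import gevent.monkey
gevent.monkey.patch_all()

import pypyodbc
pypyodbc.monkey_patch_for_gevent()  # route SQL* calls through the threadpool

def worker():
    # the blocking connect/execute calls no longer freeze other greenlets
    conn = pypyodbc.connect('DSN=mydsn')
    cur = conn.cursor()
    cur.execute('SELECT 1')
    print(cur.fetchone())
```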
@@ -6,7 +6,10 @@ import redis
from redis.exceptions import ConnectionError
from gluon import current
from gluon.cache import CacheAbstract
-import cPickle as pickle
+try:
+    import cPickle as pickle
+except:
+    import pickle
import time
import re
import logging
@@ -165,7 +168,7 @@ class RedisClient(object):
        expireat = int(time.time() + time_expire) + 120
        bucket_key = "%s:%s" % (cache_set_key, expireat / 60)
        value = f()
-        value_ = pickle.dumps(value)
+        value_ = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        if time_expire == 0:
            time_expire = 1
        self.r_server.setex(key, value_, time_expire)
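Nearly every file in this compare applies the same two pickle changes: a guarded import that prefers the C implementation on Python 2 but falls back cleanly, and an explicit `pickle.HIGHEST_PROTOCOL` instead of the slow ASCII protocol 0 default. A self-contained illustration of both:

```python
# The import/protocol pattern this compare applies across gluon:
# prefer the C implementation on Python 2, fall back to pure Python.
try:
    import cPickle as pickle
except ImportError:
    import pickle

data = {'user': 'alice', 'roles': ['admin']}

legacy = pickle.dumps(data)                           # protocol 0: ASCII, slow
compact = pickle.dumps(data, pickle.HIGHEST_PROTOCOL) # compact binary encoding
assert pickle.loads(compact) == data
```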
@@ -3,12 +3,9 @@ Developed by niphlod@gmail.com
"""

import redis
from redis.exceptions import ConnectionError
from gluon import current
from gluon.storage import Storage
-import cPickle as pickle
import time
import re
import logging
-import thread
@@ -31,12 +31,13 @@ An interactive, stateful AJAX shell that runs Python code on the server.

import logging
import new
import os
-import cPickle
+try:
+    import cPickle as pickle
+except:
+    import pickle
import sys
import traceback
import types
import wsgiref.handlers
import StringIO
import threading

locker = threading.RLock()
@@ -100,7 +101,7 @@ class History:
    name: the name of the global to remove
    value: any picklable value
    """
-    blob = cPickle.dumps(value)
+    blob = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)

    if name in self.global_names:
        index = self.global_names.index(name)
@@ -159,7 +160,7 @@ def represent(obj):
    code below to determine whether the object changes over time.
    """
    try:
-        return cPickle.dumps(obj)
+        return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
    except:
        return repr(obj)
@@ -258,7 +259,7 @@ def run(history, statement, env={}):
        if not name.startswith('__'):
            try:
                history.set_global(name, val)
-            except (TypeError, cPickle.PicklingError), ex:
+            except (TypeError, pickle.PicklingError), ex:
                UNPICKLABLE_TYPES.append(type(val))
                history.add_unpicklable(statement, new_globals.keys())
@@ -121,7 +121,6 @@ OrderedDict = _import_OrderedDict()

def _import_c_make_encoder():
    try:
-        raise ImportError # because assumes simplejson in path
        from simplejson._speedups import make_encoder
        return make_encoder
    except ImportError:
@@ -411,7 +410,7 @@ def _toggle_speedups(enabled):
    if enabled:
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
@@ -437,4 +436,3 @@ def _toggle_speedups(enabled):
        encoding='utf-8',
        default=None,
    )

@@ -7,7 +7,6 @@ import struct
from scanner import make_scanner
def _import_c_scanstring():
    try:
-        raise ImportError # because assumes simplejson in path
        from simplejson._speedups import scanstring
        return scanstring
    except ImportError:
@@ -420,4 +419,3 @@ class JSONDecoder(object):
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end

@@ -5,7 +5,6 @@ from decimal import Decimal

def _import_speedups():
    try:
-        raise ImportError # because assumes simplejson in path
        from simplejson import _speedups
        return _speedups.encode_basestring_ascii, _speedups.make_encoder
    except ImportError:
@@ -500,4 +499,3 @@ def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
            del markers[markerid]

    return _iterencode

@@ -117,4 +117,3 @@ class OrderedDict(dict, DictMixin):

    def __ne__(self, other):
        return not self == other

@@ -3,7 +3,6 @@
import re
def _import_c_make_scanner():
    try:
-        raise ImportError # because assumes simplejson in path
        from simplejson._speedups import make_scanner
        return make_scanner
    except ImportError:
@@ -76,4 +75,3 @@ def py_make_scanner(context):
    return scan_once

make_scanner = c_make_scanner or py_make_scanner
gluon/dal.py

@@ -720,7 +720,7 @@ class BaseAdapter(ConnectionPool):

    __metaclass__ = AdapterMeta

    native_json = False
    driver_auto_json = []
    driver = None
    driver_name = None
    drivers = () # list of drivers from which to pick
@@ -1766,13 +1766,20 @@ class BaseAdapter(ConnectionPool):
            query = self.common_filter(query, tablenames_for_common_filters)
        sql_w = ' WHERE ' + self.expand(query) if query else ''

+        JOIN = ' CROSS JOIN '
+
        if inner_join and not left:
-            sql_t = ', '.join([self.table_alias(t)
+            # Wrap table references with parenthesis (approach 1)
+            # sql_t = ', '.join([self.table_alias(t)
+            #                    for t in iexcluded + itables_to_merge.keys()])
+            # sql_t = '(%s)' % sql_t
+            # or approach 2: Use 'JOIN' instead comma:
+            sql_t = JOIN.join([self.table_alias(t)
                               for t in iexcluded + itables_to_merge.keys()])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
        elif not inner_join and left:
-            sql_t = ', '.join([self.table_alias(t)
+            sql_t = JOIN.join([self.table_alias(t)
                               for t in excluded + tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command,
@@ -1785,7 +1792,7 @@ class BaseAdapter(ConnectionPool):
            tables_in_joinon = set(joinont + ijoinont)
            tables_not_in_joinon = \
                all_tables_in_query.difference(tables_in_joinon)
-            sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
+            sql_t = JOIN.join([self.table_alias(t) for t in tables_not_in_joinon])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
            if joint:
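The switch from comma-separated table lists to explicit `CROSS JOIN` matters because a comma binds more loosely than `JOIN ... ON`, so mixing the two can make earlier tables invisible to the `ON` clause on many engines. An illustrative sketch (invented table names):

```python
# Why the DAL now joins un-joined tables with ' CROSS JOIN ' instead of ','.
#
#   SELECT * FROM a, b JOIN c ON c.x = a.x
#     -- rejected by MySQL >= 5.0 and standard SQL: 'a' is not visible
#     -- inside the ON clause, because ',' binds more loosely than JOIN.
#
#   SELECT * FROM a CROSS JOIN b JOIN c ON c.x = a.x   -- well-defined
tables = ['a', 'b']
JOIN = ' CROSS JOIN '
sql_t = JOIN.join(tables)           # "a CROSS JOIN b"
sql_t += ' JOIN c ON c.x = a.x'     # explicit joins can now be appended safely
print('SELECT * FROM ' + sql_t)
```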
@@ -2037,7 +2044,8 @@ class BaseAdapter(ConnectionPool):
            else:
                obj = str(obj)
        elif fieldtype == 'json':
-            if not self.native_json:
+            if not 'dumps' in self.driver_auto_json:
                # always pass a JSON string
                if have_serializers:
                    obj = serializers.json(obj)
                elif simplejson:
@@ -2175,7 +2183,7 @@ class BaseAdapter(ConnectionPool):
        return float(value)

    def parse_json(self, value, field_type):
-        if not self.native_json:
+        if not 'loads' in self.driver_auto_json:
            if not isinstance(value, basestring):
                raise RuntimeError('json data not a string')
            if isinstance(value, unicode):
@@ -2843,7 +2851,7 @@ class PostgreSQLAdapter(BaseAdapter):
        self.srid = srid
        self.find_or_make_work_folder()
+        self._last_insert = None  # for INSERT ... RETURNING ID

        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
@@ -2910,22 +2918,27 @@ class PostgreSQLAdapter(BaseAdapter):
        else:
            self.execute("select lastval()")
            return int(self.cursor.fetchone()[0])

    def try_json(self):
        # check JSON data type support
        # (to be added to after_connection)
        if self.driver_name == "pg8000":
            supports_json = self.connection.server_version >= "9.2.0"
-        elif (self.driver_name == "psycopg2") and \
-            (self.driver.__version__ >= "2.0.12"):
+        elif (self.driver_name == "psycopg2" and
+              self.driver.__version__ >= "2.0.12"):
            supports_json = self.connection.server_version >= 90200
        elif self.driver_name == "zxJDBC":
            supports_json = self.connection.dbversion >= "9.2.0"
-        else: supports_json = None
+        else:
+            supports_json = None
        if supports_json:
            self.types["json"] = "JSON"
            self.native_json = True
-        else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")
+            if (self.driver_name == "psycopg2" and
+                    self.driver.__version__ >= '2.5.0'):
+                self.driver_auto_json = ['loads']
+        else:
+            LOGGER.debug("Your database version does not support the JSON"
+                         " data type (using TEXT instead)")

    def LIKE(self, first, second):
        args = (self.expand(first), self.expand(second, 'string'))
@@ -3607,7 +3620,7 @@ class MSSQL4Adapter(MSSQLAdapter):

    Requires MSSQL >= 2012, uses `OFFSET ... ROWS ... FETCH NEXT ... ROWS ONLY`
    """

    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
@@ -4600,7 +4613,7 @@ class DatabaseStoredFile:
            sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            db.executesql(sql)
            DatabaseStoredFile.web2py_filesystems.add(db._uri)

    def __init__(self, db, filename, mode):
        if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
            raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
@@ -5052,6 +5065,17 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
    def parse_id(self, value, field_type):
        return value

+    def represent(self, obj, fieldtype):
+        if fieldtype == "json":
+            if have_serializers:
+                return serializers.json(obj)
+            elif simplejson:
+                return simplejson.dumps(obj)
+            else:
+                raise Exception("Could not dump json object (missing json library)")
+        else:
+            return NoSQLAdapter.represent(self, obj, fieldtype)
+
    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        myfields = {}
        for field in table:
@@ -5702,8 +5726,8 @@ def cleanup(text):

class MongoDBAdapter(NoSQLAdapter):
    native_json = True
    drivers = ('pymongo', )
    driver_auto_json = ['loads','dumps']

    uploads_in_blob = False

@@ -5725,7 +5749,7 @@ class MongoDBAdapter(NoSQLAdapter):
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    error_messages = {"javascript_needed": "This must yet be replaced" +
@@ -7360,7 +7384,7 @@ def sqlhtml_validators(field):
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
-        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json)))
+        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type == 'integer':
@@ -7746,7 +7770,7 @@ def smart_query(fields, text):
    elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
    elif field.type in ('text', 'string', 'json'):
        if op == 'contains': new_query = field.contains(value)
-        elif op == 'like': new_query = field.like(value)
+        elif op == 'like': new_query = field.ilike(value)
        elif op == 'startswith': new_query = field.startswith(value)
        elif op == 'endswith': new_query = field.endswith(value)
        else: raise RuntimeError("Invalid operation")
@@ -9786,6 +9810,9 @@ class Expression(object):
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

+    def ilike(self, value):
+        return self.like(value, case_sensitive=False)
+
    def regexp(self, value):
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)
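The new `ilike` helper is shorthand for a case-insensitive `like` (the body above passes the pattern through to `like(value, case_sensitive=False)`; the commit as merged accidentally dropped the argument, fixed here). Intended usage, with an invented model and data:

```python
# Sketch of the new case-insensitive helpers; table/field names are invented.
db.define_table('thing', Field('name'))
db.thing.insert(name='Apple')

rows = db(db.thing.name.like('app%', case_sensitive=False)).select()
# ilike is shorthand for the same query:
rows = db(db.thing.name.ilike('app%')).select()
print(rows.first().name)  # 'Apple'
```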
@@ -9833,14 +9860,12 @@ class Expression(object):

    def contains(self, value, all=False, case_sensitive=False):
        """
-        The case_sensitive parameters is only useful for PostgreSQL
-        For other RDMBs it is ignored and contains is always case insensitive
        For MongoDB and GAE contains is always case sensitive
        """
        db = self.db
        if isinstance(value, (list, tuple)):
-            subqueries = [self.contains(str(v).strip(), case_sensitive=case_sensitive)
-                          for v in value if str(v).strip()]
+            subqueries = [self.contains(str(v), case_sensitive=case_sensitive)
+                          for v in value if str(v)]
            if not subqueries:
                return self.contains('')
            else:
@@ -28,12 +28,16 @@ from gluon.settings import global_settings
from gluon import recfile
import hashlib
import portalocker
-import cPickle
+try:
+    import cPickle as pickle
+except:
+    import pickle
+from pickle import Pickler, MARK, DICT, EMPTY_DICT
from types import DictionaryType
import cStringIO
import datetime
import re
import copy_reg
import Cookie
import os
import sys
@@ -829,7 +833,7 @@ class Session(Storage):
                    portalocker.lock(response.session_file,
                                     portalocker.LOCK_EX)
                    response.session_locked = True
-                    self.update(cPickle.load(response.session_file))
+                    self.update(pickle.load(response.session_file))
                    response.session_file.seek(0)
                    oc = response.session_filename.split('/')[-1].split('-')[0]
                    if check_client and response.session_client != oc:
@@ -894,7 +898,7 @@ class Session(Storage):
                if row:
                    # rows[0].update_record(locked=True)
                    # Unpickle the data
-                    session_data = cPickle.loads(row.session_data)
+                    session_data = pickle.loads(row.session_data)
                    self.update(session_data)
                    response.session_new = False
                else:
@@ -906,7 +910,7 @@ class Session(Storage):
            else:
                response.session_id = None
                response.session_new = True
        # if there is no session id yet, we'll need to create a
        # new session
        else:
            response.session_new = True
@@ -924,7 +928,7 @@ class Session(Storage):
            response.cookies[response.session_id_name]['expires'] = \
                cookie_expires.strftime(FMT)

-        session_pickled = cPickle.dumps(self)
+        session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
        response.session_hash = hashlib.md5(session_pickled).hexdigest()

        if self.flash:
@@ -1083,7 +1087,7 @@ class Session(Storage):
        return True

    def _unchanged(self, response):
-        session_pickled = cPickle.dumps(self)
+        session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
        response.session_pickled = session_pickled
        session_hash = hashlib.md5(session_pickled).hexdigest()
        return response.session_hash == session_hash
@@ -1110,7 +1114,7 @@ class Session(Storage):
        else:
            unique_key = response.session_db_unique_key

-        session_pickled = response.session_pickled or cPickle.dumps(self)
+        session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)

        dd = dict(locked=False,
                  client_ip=response.session_client,
@@ -1151,7 +1155,7 @@ class Session(Storage):
            portalocker.lock(response.session_file, portalocker.LOCK_EX)
            response.session_locked = True
            if response.session_file:
-                session_pickled = response.session_pickled or cPickle.dumps(self)
+                session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
                response.session_file.write(session_pickled)
                response.session_file.truncate()
        finally:
@@ -1176,3 +1180,8 @@ class Session(Storage):
                del response.session_file
            except:
                pass

+
+def pickle_session(s):
+    return Session, (dict(s),)
+
+copy_reg.pickle(Session, pickle_session)
@@ -21,7 +21,10 @@ import sanitizer
import itertools
import decoder
import copy_reg
-import cPickle
+try:
+    import cPickle as pickle
+except:
+    import pickle
import marshal

from HTMLParser import HTMLParser
@@ -31,6 +34,7 @@ from gluon.storage import Storage
from gluon.utils import web2py_uuid, simple_hash, compare
from gluon.highlight import highlight

regex_crlf = re.compile('\r|\n')

join = ''.join
@@ -43,6 +47,7 @@ entitydefs.setdefault('apos', u"'".encode('utf-8'))

__all__ = [
    'A',
+    'ASSIGNJS',
    'B',
    'BEAUTIFY',
    'BODY',
@@ -1240,13 +1245,13 @@ class CAT(DIV):

def TAG_unpickler(data):
-    return cPickle.loads(data)
+    return pickle.loads(data)

def TAG_pickler(data):
    d = DIV()
    d.__dict__ = data.__dict__
-    marshal_dump = cPickle.dumps(d)
+    marshal_dump = pickle.dumps(d, pickle.HIGHEST_PROTOCOL)
    return (TAG_unpickler, (marshal_dump,))

@@ -2825,6 +2830,14 @@ class MARKMIN(XmlComponent):
    def __str__(self):
        return self.xml()

+
+def ASSIGNJS(**kargs):
+    from gluon.serializers import json
+    s = ""
+    for key, value in kargs.items():
+        s += 'var %s = %s;\n' % (key, json(value))
+    return XML(s)
+

if __name__ == '__main__':
    import doctest
    doctest.testmod()
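The new `ASSIGNJS` helper turns keyword arguments into JavaScript variable assignments, JSON-encoding each value, so a template can hand server-side data to client scripts. A hedged usage sketch (argument names invented; output order follows Python 2 dict iteration, so it may vary):

```python
# What the new ASSIGNJS helper emits, roughly:
from gluon.html import ASSIGNJS

script = ASSIGNJS(user_id=42, name='Alice')
print(script)
# var user_id = 42;
# var name = "Alice";
```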
@@ -21,7 +21,10 @@ import datetime
import platform
import portalocker
import fileutils
-import cPickle
+try:
+    import cPickle as pickle
+except:
+    import pickle
from gluon.settings import global_settings

logger = logging.getLogger("web2py.cron")
@@ -139,7 +142,7 @@ class Token(object):
        ret = None
        portalocker.lock(self.master, portalocker.LOCK_EX)
        try:
            try:
-                (start, stop) = cPickle.load(self.master)
+                (start, stop) = pickle.load(self.master)
            except:
                (start, stop) = (0, 1)
            if startup or self.now - start > locktime:
@@ -149,7 +152,7 @@ class Token(object):
                    logger.warning('WEB2PY CRON: Stale cron.master detected')
                logger.debug('WEB2PY CRON: Acquiring lock')
                self.master.seek(0)
-                cPickle.dump((self.now, 0), self.master)
+                pickle.dump((self.now, 0), self.master)
                self.master.flush()
        finally:
            portalocker.unlock(self.master)
@@ -166,10 +169,10 @@ class Token(object):
        portalocker.lock(self.master, portalocker.LOCK_EX)
        logger.debug('WEB2PY CRON: Releasing cron lock')
        self.master.seek(0)
-        (start, stop) = cPickle.load(self.master)
+        (start, stop) = pickle.load(self.master)
        if start == self.now: # if this is my lock
            self.master.seek(0)
-            cPickle.dump((self.now, time.time()), self.master)
+            pickle.dump((self.now, time.time()), self.master)
        portalocker.unlock(self.master)
        self.master.close()
@@ -11,7 +11,10 @@ Restricted environment to execute application's code
"""

import sys
-import cPickle
+try:
+    import cPickle as pickle
+except:
+    import pickle
import traceback
import types
import os
@@ -55,7 +58,7 @@ class TicketStorage(Storage):
        try:
            table = self._get_table(self.db, self.tablename, request.application)
            table.insert(ticket_id=ticket_id,
-                         ticket_data=cPickle.dumps(ticket_data),
+                         ticket_data=pickle.dumps(ticket_data, pickle.HIGHEST_PROTOCOL),
                         created_datetime=request.now)
            self.db.commit()
            message = 'In FILE: %(layer)s\n\n%(traceback)s\n'
@@ -68,7 +71,7 @@ class TicketStorage(Storage):
    def _store_on_disk(self, request, ticket_id, ticket_data):
        ef = self._error_file(request, ticket_id, 'wb')
        try:
-            cPickle.dump(ticket_data, ef)
+            pickle.dump(ticket_data, ef)
        finally:
            ef.close()
@@ -103,13 +106,13 @@ class TicketStorage(Storage):
            except IOError:
                return {}
            try:
-                return cPickle.load(ef)
+                return pickle.load(ef)
            finally:
                ef.close()
        else:
            table = self._get_table(self.db, self.tablename, app)
            rows = self.db(table.ticket_id == ticket_id).select()
-            return cPickle.loads(rows[0].ticket_data) if rows else {}
+            return pickle.loads(rows[0].ticket_data) if rows else {}

class RestrictedError(Exception):
@@ -163,15 +163,18 @@ def ics(events, title=None, link=None, timeshift=0, calname=True,
def rss(feed):
    if not 'entries' in feed and 'items' in feed:
        feed['entries'] = feed['items']
+    def safestr(obj, key, default=''):
+        return str(obj[key]).encode('utf-8', 'replace') if key in obj else default

    now = datetime.datetime.now()
-    rss = rss2.RSS2(title=str(feed.get('title', '(notitle)').encode('utf-8', 'replace')),
-                    link=str(feed.get('link', None).encode('utf-8', 'replace')),
-                    description=str(feed.get('description', '').encode('utf-8', 'replace')),
+    rss = rss2.RSS2(title=safestr(feed,'title'),
+                    link=safestr(feed,'link'),
+                    description=safestr(feed,'description'),
                    lastBuildDate=feed.get('created_on', now),
                    items=[rss2.RSSItem(
-                        title=str(entry.get('title', '(notitle)').encode('utf-8', 'replace')),
-                        link=str(entry.get('link', None).encode('utf-8', 'replace')),
-                        description=str(entry.get('description', '').encode('utf-8', 'replace')),
+                        title=safestr(entry,'title','(notitle)'),
+                        link=safestr(entry,'link'),
+                        description=safestr(entry,'description'),
                        pubDate=entry.get('created_on', now)
                    ) for entry in feed.get('entries', [])])
    return rss.to_xml(encoding='utf-8')
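The old inline chains raised `AttributeError` whenever a key was absent, because `feed.get('link', None).encode(...)` calls `.encode` on `None`. The new `safestr` helper degrades to a default instead; a standalone sketch of its behavior:

```python
# Behavior the new safestr helper provides (copied shape, standalone data):
def safestr(obj, key, default=''):
    return str(obj[key]).encode('utf-8', 'replace') if key in obj else default

feed = {'title': u'My feed'}    # note: no 'link' key
print(safestr(feed, 'title'))   # 'My feed'
print(safestr(feed, 'link'))    # '' instead of AttributeError on None
```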
@@ -652,7 +652,7 @@ class AutocompleteWidget(object):
            if settings and settings.global_settings.web2py_runtime_gae:
                rows = self.db(field.__ge__(self.request.vars[self.keyword]) & field.__lt__(self.request.vars[self.keyword] + u'\ufffd')).select(orderby=self.orderby, limitby=self.limitby, *(self.fields+self.help_fields))
            else:
-                rows = self.db(field.like(self.request.vars[self.keyword] + '%')).select(orderby=self.orderby, limitby=self.limitby, distinct=self.distinct, *(self.fields+self.help_fields))
+                rows = self.db(field.like(self.request.vars[self.keyword] + '%', case_sensitive=False)).select(orderby=self.orderby, limitby=self.limitby, distinct=self.distinct, *(self.fields+self.help_fields))
            if rows:
                if self.is_reference:
                    id_field = self.fields[1]
@@ -12,7 +12,10 @@ Provides:
- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']`
"""

-import cPickle
+try:
+    import cPickle as pickle
+except:
+    import pickle
import copy_reg
import gluon.portalocker as portalocker

@@ -159,7 +162,7 @@ def load_storage(filename):
    fp = None
    try:
        fp = portalocker.LockedFile(filename, 'rb')
-        storage = cPickle.load(fp)
+        storage = pickle.load(fp)
    finally:
        if fp:
            fp.close()
@@ -170,7 +173,7 @@ def save_storage(storage, filename):
    fp = None
    try:
        fp = portalocker.LockedFile(filename, 'wb')
-        cPickle.dump(dict(storage), fp)
+        pickle.dump(dict(storage), fp)
    finally:
        if fp:
            fp.close()
@@ -129,6 +129,7 @@ class TestFields(unittest.TestCase):
        isinstance(f.formatter(datetime.datetime.now()), str)

    def testRun(self):
+        """Test all field types and their return values"""
        db = DAL(DEFAULT_URI, check_reserved=['all'])
        for ft in ['string', 'text', 'password', 'upload', 'blob']:
            db.define_table('tt', Field('aa', ft, default=''))
@@ -148,8 +149,22 @@ class TestFields(unittest.TestCase):
        self.assertEqual(db().select(db.tt.aa)[0].aa, True)
        db.tt.drop()
        db.define_table('tt', Field('aa', 'json', default={}))
-        self.assertEqual(db.tt.insert(aa={}), 1)
-        self.assertEqual(db().select(db.tt.aa)[0].aa, {})
+        # test different python objects for correct serialization in json
+        objs = [
+            {'a': 1, 'b': 2},
+            [1, 2, 3],
+            'abc',
+            True,
+            False,
+            None,
+            11,
+            14.3,
+            long(11),
+        ]
+        for obj in objs:
+            rtn_id = db.tt.insert(aa=obj)
+            rtn = db(db.tt.id == rtn_id).select().first().aa
+            self.assertEqual(obj, rtn)
        db.tt.drop()
        db.define_table('tt', Field('aa', 'date',
                        default=datetime.date.today()))
@@ -37,6 +37,7 @@ def fix_sys_path():
fix_sys_path()

from storage import Storage
+import pickle

class TestStorage(unittest.TestCase):
@@ -96,6 +97,13 @@ class TestStorage(unittest.TestCase):
        self.assertEquals(s.a, None)
        self.assertEquals(s['a'], None)
        self.assertTrue('a' in s)

+    def test_pickling(self):
+        """ Test storage pickling """
+        s = Storage(a=1)
+        sd = pickle.dumps(s, pickle.HIGHEST_PROTOCOL)
+        news = pickle.loads(sd)
+        self.assertEqual(news.a, 1)
+
if __name__ == '__main__':
    unittest.main()
@@ -11,7 +11,10 @@ Auth, Mail, PluginManager and various utilities
"""

import base64
-import cPickle
+try:
+    import cPickle as pickle
+except:
+    import pickle
import datetime
import thread
import logging
@@ -3008,7 +3011,7 @@ class Auth(object):

        if self.settings.prevent_password_reset_attacks:
            key = request.vars.key
-            if not key and len(request.args)>0:
+            if not key and len(request.args)>1:
                key = request.args[-1]
            if key:
                session._reset_password_key = key
@@ -3188,11 +3191,14 @@ class Auth(object):
        if log is DEFAULT:
            log = self.messages['change_password_log']
        passfield = self.settings.password_field
-        is_crypt = copy.copy([t for t in table_user[passfield].requires
-                              if isinstance(t,CRYPT)][0])
-        is_crypt.min_length = 0
+        requires = table_user[passfield].requires
+        if not isinstance(requires,(list, tuple)):
+            requires = [requires]
+        requires = filter(lambda t:isinstance(t,CRYPT), requires)
+        if requires:
+            requires[0].min_length = 0
        form = SQLFORM.factory(
-            Field('old_password', 'password', requires=[is_crypt],
+            Field('old_password', 'password', requires=requires,
                  label=self.messages.old_password),
            Field('new_password', 'password',
                  label=self.messages.new_password,
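The `change_password` fix replaces an assumption that a `CRYPT` validator is always present (the old `[...][0]` raised `IndexError` when it was not) with a normalize-and-filter step. A standalone sketch of the pattern, with `CRYPT` standing in for any validator class and the field reference invented:

```python
# Normalize-and-filter pattern from the change_password fix:
def find_crypt(requires):
    if not isinstance(requires, (list, tuple)):
        requires = [requires]          # a single validator becomes a list
    return [r for r in requires if isinstance(r, CRYPT)]

# old: [t for t in requires if isinstance(t, CRYPT)][0]  -> IndexError if absent
# new: an empty result simply skips the min_length tweak
crypts = find_crypt(db.auth_user.password.requires)
if crypts:
    crypts[0].min_length = 0
```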
@@ -3327,7 +3333,7 @@ class Auth(object):
            user = table_user(user_id)
            if not user:
                raise HTTP(401, "Not Authorized")
-            auth.impersonator = cPickle.dumps(session)
+            auth.impersonator = pickle.dumps(session, pickle.HIGHEST_PROTOCOL)
            auth.user.update(
                table_user._filter_fields(user, True))
            self.user = auth.user
@@ -3338,7 +3344,7 @@ class Auth(object):
        elif user_id in (0, '0'):
            if self.is_impersonating():
                session.clear()
-                session.update(cPickle.loads(auth.impersonator))
+                session.update(pickle.loads(auth.impersonator))
                self.user = session.auth.user
            self.update_groups()
            self.run_login_onaccept()
@@ -23,7 +23,6 @@ import logging
import socket
import base64
import zlib
-import types

_struct_2_long_long = struct.Struct('=QQ')

@@ -160,7 +159,7 @@ def pad(s, n=32, padchar=' '):
def secure_dumps(data, encryption_key, hash_key=None, compression_level=None):
    if not hash_key:
        hash_key = sha1(encryption_key).hexdigest()
-    dump = pickle.dumps(data)
+    dump = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)
    if compression_level:
        dump = zlib.compress(dump, compression_level)
    key = pad(encryption_key[:32])
@@ -372,14 +372,18 @@ class IS_JSON(Validator):
            if self.native_json:
                simplejson.loads(value) # raises error in case of malformed json
                return (value, None) # the serialized value is not passed
-            return (simplejson.loads(value), None)
+            else:
+                return (simplejson.loads(value), None)
        except JSONErrors:
            return (value, translate(self.error_message))

    def formatter(self,value):
        if value is None:
            return None
-        return simplejson.dumps(value)
+        if self.native_json:
+            return value
+        else:
+            return simplejson.dumps(value)

class IS_IN_SET(Validator):
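With `native_json` the validator now only checks well-formedness and passes the raw string through (for backends that store JSON natively), while the default mode still round-trips through simplejson. A hedged usage sketch of the two modes:

```python
# Sketch of the two IS_JSON modes after this change:
from gluon.validators import IS_JSON

v = IS_JSON()                      # default: parse to Python objects
print(v('{"a": 1}'))               # ({'a': 1}, None)
print(v.formatter({'a': 1}))       # '{"a": 1}'

v = IS_JSON(native_json=True)      # native: validate, keep the raw string
print(v('{"a": 1}'))               # ('{"a": 1}', None)
print(v.formatter('{"a": 1}'))     # '{"a": 1}' (returned unchanged)
```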