Cleanup all the lint (#143)

* Clean up all violations reported by pylint in preparation for turning on landscape.io

* fix all prospector errors.

* don't run pylint on pypy/py3 because we're getting all sorts of import errors under Py3 (and it's sorta slow on pypy)

* back to editable? Somehow _cache_ring isn't always being built.

* skip bootstrap.py on landscape.

* pylint fixes for umysql
Jason Madden 2016-12-10 11:27:11 -06:00 committed by GitHub
parent 37a1ddc658
commit 4bf54e0954
68 changed files with 528 additions and 480 deletions

View File

@ -1,7 +1,7 @@
doc-warnings: no # experimental, raises an exception
test-warnings: no
strictness: veryhigh
max-line-length: 100
max-line-length: 130
# We don't use any of the auto-detected things, and
# auto-detection slows down startup
autodetect: false
@ -10,13 +10,14 @@ requirements:
python-targets:
- 2
- 3
# - 3 # landscape.io seems to fail if we run both py2 and py3?
ignore-paths:
- doc/
- build
- dist
- .eggs
- setup.py
- bootstrap.py
#ignore-patterns:
pyroma:
@ -42,6 +43,9 @@ pyflakes:
pep8:
disable:
# N803: argument should be lowercase. We have 'Binary' and
# camelCase names.
- N803
# N805: first arg should be self; fails on metaclasses and
# classmethods; pylint does a better job
- N805
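
A minimal illustration (hypothetical names) of the patterns these two suppressions cover -- DB-API type constructors passed as arguments, and interface-style declarations whose first argument is not self:

def store_state(cursor, Binary, data):   # N803: argument 'Binary' is not lowercase,
    cursor.execute("INSERT ...", (Binary(data),))  # but it names a DB-API constructor

class IExample(object):        # interface-style declaration:
    def method(arg1):          # N805: first argument is not 'self' -- intentional here;
        "Interface methods omit self."  # pylint handles this case more precisely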

View File

@ -10,8 +10,7 @@
# comments at the end of the line does the same thing (though Py3 supports
# mixing)
# invalid-name, ; We get lots of these, especially in scripts. should fix many of them
# invalid-name, ; Things like loadBlob get flagged
# protected-access, ; We have many cases of this; legit ones need to be examined and commented, then this removed
# no-self-use, ; common in superclasses with extension points
# too-few-public-methods, ; Exception and marker classes get tagged with this
@ -29,10 +28,10 @@
# useless-suppression: the only way to avoid repeating it for specific statements everywhere that we
# do Py2/Py3 stuff is to put it here. Sadly this means that we might get better but not realize it.
disable=wrong-import-position,
invalid-name,
wrong-import-order,
missing-docstring,
ungrouped-imports,
invalid-name,
protected-access,
no-self-use,
too-few-public-methods,
@ -43,8 +42,9 @@ disable=wrong-import-position,
cyclic-import,
too-many-arguments,
redefined-builtin,
useless-suppression,
# undefined-all-variable
useless-suppression,
duplicate-code,
# undefined-all-variable
[FORMAT]
@ -72,15 +72,14 @@ generated-members=exc_clear
# List of class names for which member attributes should not be checked
# (useful for classes with attributes dynamically set). This can work
# with qualified names.
# greenlet, Greenlet, parent, dead: all attempts to fix issues in greenlet.py
# only seen on the service, e.g., self.parent.loop: class parent has no loop
ignored-classes=SSLContext, SSLSocket, greenlet, Greenlet, parent, dead
ignored-classes=SectionValue
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=gevent._corecffi
#ignored-modules=gevent._corecffi
[DESIGN]
max-attributes=12
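
A short sketch of why useless-suppression has to stay disabled globally: an inline disable that is required when pylint runs under one Python major version is reported as useless under the other, so straddling code cannot satisfy both.

import sys

if sys.version_info[0] == 2:
    text_type = unicode  # pylint:disable=undefined-variable
else:
    text_type = str
# Under Py3, pylint needs the disable ('unicode' is undefined); under Py2 the
# same line trips useless-suppression because 'unicode' is a builtin there.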

View File

@ -11,7 +11,7 @@ addons:
python:
- pypy-5.4.1
- 2.7
- 3.4
- 3.5
env:
matrix:
- ENV=mysql
@ -21,7 +21,7 @@ env:
- ENV=umysqldb
matrix:
exclude:
- python: 3.4
- python: 3.5
env: ENV=umysqldb
- python: pypy-5.4.1
env: ENV=pymysql
@ -31,6 +31,7 @@ matrix:
script:
# coverage slows PyPy down from 2minutes to 12+.
# But don't run the pymysql/pypy tests twice.
- if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then pylint --rcfile=.pylintrc relstorage -f parseable -r n; fi
- if [[ $TRAVIS_PYTHON_VERSION == pypy* ]]; then python -m relstorage.tests.alltests -v; fi
- if [[ $TRAVIS_PYTHON_VERSION != pypy* ]]; then coverage run -m relstorage.tests.alltests -v; fi
after_success:
@ -43,16 +44,13 @@ before_install:
install:
- pip install -U pip setuptools
- pip install -U tox coveralls
- pip install -U tox coveralls pylint
- if [[ $TRAVIS_PYTHON_VERSION == pypy* ]]; then pip install -U python-memcached; fi
- if [[ $TRAVIS_PYTHON_VERSION != pypy* ]]; then pip install -U pylibmc cffi; fi
- pip install -U -e ".[test]"
- .travis/setup-$ENV.sh
# cache: pip seems not to work if `install` is replaced (https://github.com/travis-ci/travis-ci/issues/3239)
cache:
directories:
- $HOME/.cache/pip
- $HOME/.venv
cache: pip
before_cache:
- rm -f $HOME/.cache/pip/log/debug.log

View File

@ -6,7 +6,8 @@ Compatibility shims.
from __future__ import print_function, absolute_import, division
# pylint:disable=unused-import
# pylint:disable=unused-import,invalid-name,no-member,undefined-variable
# pylint:disable=no-name-in-module,redefined-variable-type
import sys
import platform
@ -19,8 +20,10 @@ PYPY = platform.python_implementation() == 'PyPy'
if PY3:
def list_keys(d):
return list(d.keys())
def list_items(d):
return list(d.items())
def list_values(d):
return list(d.values())
iteritems = dict.items
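
The hunk above shows only the PY3 branch; a hedged sketch of the Py2 counterpart the diff elides (on Python 2 the dict methods already return lists):

else:  # presumed Python 2 branch, elided from this hunk
    list_keys = dict.keys
    list_items = dict.items
    list_values = dict.values
    iteritems = dict.iteritems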

View File

@ -25,7 +25,7 @@ class RowBatcher(object):
"""
row_limit = 100
size_limit = 1<<20
size_limit = 1 << 20
def __init__(self, cursor, row_limit=None):
self.cursor = cursor
@ -101,6 +101,6 @@ class RowBatcher(object):
for row in rows.values():
parts.append(s)
params.extend(row)
parts = ',\n'.join(parts)
stmt = "%s INTO %s VALUES\n%s" % (command, header, parts)
stmt = "%s INTO %s VALUES\n%s" % (command, header, ',\n'.join(parts))
self.cursor.execute(stmt, tuple(params))
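
The inlined join keeps the name 'parts' bound to a single type; the old code rebound it from list to str, which pylint reports as redefined-variable-type. A minimal before/after sketch:

parts = ["(%s, %s)", "(%s, %s)"]  # list of row templates
parts = ',\n'.join(parts)         # rebinding list -> str: flagged by pylint

rows = ["(%s, %s)", "(%s, %s)"]
stmt = "INSERT INTO t VALUES\n%s" % ',\n'.join(rows)  # one type per name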

View File

@ -56,7 +56,7 @@ class AbstractConnectionManager(object):
"""Set the on_store_opened hook"""
self.on_store_opened = f
def open(self):
def open(self, **kwargs):
"""Open a database connection and return (conn, cursor)."""
raise NotImplementedError()
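
Adding **kwargs to the abstract signature lets callers pass driver-specific keywords (the MySQL, Oracle, and PostgreSQL open() overrides below each accept their own) through a uniform interface. An illustrative standalone sketch, names hypothetical:

class BaseManager(object):
    def open(self, **kwargs):
        """Open a database connection and return (conn, cursor)."""
        raise NotImplementedError()

class ExampleManager(BaseManager):
    # Narrows **kwargs to the keywords this driver understands.
    def open(self, isolation=None, replica_selector=None, **kwargs):
        return None, None  # a real driver would connect here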

View File

@ -22,6 +22,7 @@ class DatabaseIterator(object):
def __init__(self, database_type, runner):
self.runner = runner
self.database_type = database_type
def iter_objects(self, cursor, tid):
"""Iterate over object states in a transaction.
@ -110,7 +111,7 @@ class HistoryPreservingDatabaseIterator(DatabaseIterator):
stmt += " AND tid <= %(max_tid)s"
stmt += " ORDER BY tid"
self.runner.run_script_stmt(cursor, stmt,
{'min_tid': start, 'max_tid': stop})
{'min_tid': start, 'max_tid': stop})
return self._transaction_iterator(cursor)
@ -150,7 +151,10 @@ class HistoryFreeDatabaseIterator(DatabaseIterator):
Skips packed transactions.
Yields (tid, username, description, extension) for each transaction.
This always returns an empty iterable.
"""
# pylint:disable=unused-argument
return []
def iter_transactions_range(self, cursor, start=None, stop=None):
@ -171,7 +175,7 @@ class HistoryFreeDatabaseIterator(DatabaseIterator):
stmt += " AND tid <= %(max_tid)s"
stmt += " ORDER BY tid"
self.runner.run_script_stmt(cursor, stmt,
{'min_tid': start, 'max_tid': stop})
{'min_tid': start, 'max_tid': stop})
return ((tid, '', '', '', True) for (tid,) in cursor)
def iter_object_history(self, cursor, oid):

View File

@ -17,7 +17,7 @@ from ZODB.POSException import StorageError
from zope.interface import Attribute
from zope.interface import Interface
#pylint: disable=inherit-non-class,no-method-argument
#pylint: disable=inherit-non-class,no-method-argument,no-self-argument
class IRelStorageAdapter(Interface):
"""A database adapter for RelStorage"""
@ -487,7 +487,7 @@ class ITransactionControl(Interface):
"""Returns the most recent tid."""
def add_transaction(cursor, tid, username, description, extension,
packed=False):
packed=False):
"""Add a transaction."""
def commit_phase1(conn, cursor, tid):

View File

@ -80,6 +80,7 @@ def select_driver(options=None):
@implementer(IRelStorageAdapter)
class MySQLAdapter(object):
"""MySQL adapter for RelStorage."""
# pylint:disable=too-many-instance-attributes
def __init__(self, options=None, **params):
if options is None:
@ -95,28 +96,28 @@ class MySQLAdapter(object):
driver,
params=params,
options=options,
)
)
self.runner = ScriptRunner()
self.locker = MySQLLocker(
options=options,
lock_exceptions=driver.lock_exceptions,
)
)
self.schema = MySQLSchemaInstaller(
connmanager=self.connmanager,
runner=self.runner,
keep_history=self.keep_history,
)
)
self.mover = MySQLObjectMover(
database_type='mysql',
options=options,
Binary=driver.Binary,
)
)
self.connmanager.set_on_store_opened(self.mover.on_store_opened)
self.oidallocator = MySQLOIDAllocator()
self.txncontrol = MySQLTransactionControl(
keep_history=self.keep_history,
Binary=driver.Binary,
)
)
if self.keep_history:
poll_query = "SELECT MAX(tid) FROM transaction"
@ -128,7 +129,7 @@ class MySQLAdapter(object):
runner=self.runner,
revert_when_stale=options.revert_when_stale,
)
# pylint:disable=redefined-variable-type
if self.keep_history:
self.packundo = MySQLHistoryPreservingPackUndo(
database_type='mysql',
@ -136,11 +137,11 @@ class MySQLAdapter(object):
runner=self.runner,
locker=self.locker,
options=options,
)
)
self.dbiter = HistoryPreservingDatabaseIterator(
database_type='mysql',
runner=self.runner,
)
)
else:
self.packundo = MySQLHistoryFreePackUndo(
database_type='mysql',
@ -148,15 +149,15 @@ class MySQLAdapter(object):
runner=self.runner,
locker=self.locker,
options=options,
)
)
self.dbiter = HistoryFreeDatabaseIterator(
database_type='mysql',
runner=self.runner,
)
)
self.stats = MySQLStats(
connmanager=self.connmanager,
)
)
def new_instance(self):
return MySQLAdapter(options=self.options, **self._params)

View File

@ -49,7 +49,7 @@ class MySQLdbConnectionManager(AbstractConnectionManager):
return params
def open(self, transaction_mode="ISOLATION LEVEL READ COMMITTED",
replica_selector=None):
replica_selector=None, **kwargs):
"""Open a database connection and return (conn, cursor)."""
if replica_selector is None:
replica_selector = self.replica_selector

View File

@ -15,8 +15,8 @@
"""
MySQL IDBDriver implementations.
"""
from __future__ import print_function, absolute_import
# pylint:disable=redefined-variable-type
import os
import sys
@ -31,6 +31,8 @@ from ..interfaces import IDBDriver, IDBDriverOptions
from .._abstract_drivers import _standard_exceptions
from relstorage._compat import intern
logger = __import__('logging').getLogger(__name__)
database_type = 'mysql'
@ -82,7 +84,7 @@ else: # pragma: no cover
pymysql.err.Error,
IOError,
pymysql.err.DatabaseError
)
)
disconnected_exceptions += (
IOError, # This one can escape mapping;
@ -105,6 +107,7 @@ else: # pragma: no cover
if hasattr(pymysql.converters, 'escape_string'):
orig_escape_string = pymysql.converters.escape_string
def escape_string(value, mapping=None):
if isinstance(value, bytearray) and not value:
return value
@ -167,9 +170,11 @@ else:
from pymysql.err import InternalError, InterfaceError, ProgrammingError
class UConnection(umysqldb.connections.Connection):
# pylint:disable=abstract-method
_umysql_conn = None
def __debug_lock(self, sql, ex=False): # pragma: no cover
if not 'GET_LOCK' in sql:
if 'GET_LOCK' not in sql:
return
try:
@ -270,7 +275,7 @@ else:
assert not self._umysql_conn.is_connected()
self._umysql_conn.close()
del self._umysql_conn
self._umysql_conn = umysql.Connection()
self._umysql_conn = umysql.Connection() # pylint:disable=no-member
self._connect() # Potentially this could raise again?
def connect(self, *_args, **_kwargs): # pragma: no cover
@ -279,7 +284,7 @@ else:
return self._connect()
@implementer(IDBDriver)
class umysqldbDriver(PyMySQLDriver):
class umysqldbDriver(PyMySQLDriver): # noqa
__name__ = 'umysqldb'
connect = UConnection
# umysql has a tendency to crash when given a bytearray (which
@ -291,8 +296,8 @@ else:
if (not preferred_driver_name
or (preferred_driver_name == 'PyMySQL'
and not hasattr(sys, 'pypy_version_info'))):
or (preferred_driver_name == 'PyMySQL'
and not hasattr(sys, 'pypy_version_info'))):
preferred_driver_name = driver.__name__
del driver
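
The hunk truncates the escape_string wrapper; a hedged sketch of how such a monkeypatch is presumably completed -- short-circuit the empty-bytearray case that crashes the driver, delegate everything else, then install the wrapper:

import pymysql.converters

orig_escape_string = pymysql.converters.escape_string

def escape_string(value, mapping=None):
    if isinstance(value, bytearray) and not value:
        return value  # empty bytearrays crash the stock escaper
    return orig_escape_string(value, mapping)

pymysql.converters.escape_string = escape_string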

View File

@ -83,13 +83,15 @@ class MySQLObjectMover(AbstractObjectMover):
cursor.execute(stmt, (tid,))
@metricmethod_sampled
def update_current(self, cursor, tid):
def update_current(self, cursor, tid): # pylint:disable=method-hidden
"""Update the current object pointers.
tid is the integer tid of the transaction being committed.
"""
if not self.keep_history:
# nothing needs to be updated
# Can elide this check in the future.
self.update_current = lambda cursor, tid: None
return
cursor.execute("""
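
The assignment above shadows the bound method with a per-instance no-op, so the keep_history test runs at most once per instance; pylint calls this method-hidden, hence the disable. A standalone sketch of the pattern:

class Mover(object):
    keep_history = False

    def update_current(self, cursor, tid):
        if not self.keep_history:
            # Instance attribute takes precedence over the class method.
            self.update_current = lambda cursor, tid: None
            return
        cursor.execute("UPDATE current_object SET tid = %s", (tid,))

m = Mover()
m.update_current(None, 1)  # takes the slow path once
m.update_current(None, 2)  # dispatches straight to the instance lambda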

View File

@ -233,16 +233,16 @@ class MySQLSchemaInstaller(AbstractSchemaInstaller):
self.runner.run_script(cursor, stmt)
# Temp tables are created on a session-by-session basis
def _create_temp_store(self, cursor):
def _create_temp_store(self, _cursor):
return
def _create_temp_blob_chunk(self, cursor):
def _create_temp_blob_chunk(self, _cursor):
return
def _create_temp_pack_visit(self, cursor):
def _create_temp_pack_visit(self, _cursor):
return
def _create_temp_undo(self, cursor):
def _create_temp_undo(self, _cursor):
return
def _init_after_create(self, cursor):

View File

@ -23,7 +23,7 @@ from ..txncontrol import AbstractTransactionControl
@implementer(ITransactionControl)
class MySQLTransactionControl(AbstractTransactionControl):
def __init__(self, keep_history, Binary):
def __init__(self, keep_history, Binary): # noqa
self.keep_history = keep_history
self.Binary = Binary

View File

@ -20,12 +20,6 @@ from __future__ import absolute_import
import six
import abc
from perfmetrics import metricmethod
from relstorage.adapters.interfaces import IOIDAllocator
from zope.interface import implementer
from relstorage._compat import mysql_connection
@six.add_metaclass(abc.ABCMeta)
class AbstractOIDAllocator(object):
# All of these allocators allocate 16 OIDs at a time. In the sequence

View File

@ -47,7 +47,7 @@ def select_driver(options=None):
@implementer(IRelStorageAdapter)
class OracleAdapter(object):
"""Oracle adapter for RelStorage."""
# pylint:disable=too-many-instance-attributes
def __init__(self, user, password, dsn, commit_lock_id=0,
twophase=False, options=None):
"""Create an Oracle adapter.
@ -59,6 +59,7 @@ class OracleAdapter(object):
commit process. This is disabled by default. Even when this option
is disabled, the ZODB two-phase commit is still in effect.
"""
# pylint:disable=unused-argument
self._user = user
self._password = password
self._dsn = dsn
@ -78,18 +79,18 @@ class OracleAdapter(object):
dsn=dsn,
twophase=twophase,
options=options,
)
)
self.runner = CXOracleScriptRunner(driver)
self.locker = OracleLocker(
options=self.options,
lock_exceptions=driver.lock_exceptions,
inputsize_NUMBER=driver.NUMBER,
)
)
self.schema = OracleSchemaInstaller(
connmanager=self.connmanager,
runner=self.runner,
keep_history=self.keep_history,
)
)
inputsizes = {
'blobdata': driver.BLOB,
'rawdata': driver.BINARY,
@ -105,17 +106,17 @@ class OracleAdapter(object):
runner=self.runner,
Binary=driver.Binary,
batcher_factory=lambda cursor, row_limit: OracleRowBatcher(cursor, inputsizes, row_limit),
)
)
self.mover.inputsizes = inputsizes
self.connmanager.set_on_store_opened(self.mover.on_store_opened)
self.oidallocator = OracleOIDAllocator(
connmanager=self.connmanager,
)
)
self.txncontrol = OracleTransactionControl(
keep_history=self.keep_history,
Binary=driver.Binary,
twophase=twophase,
)
)
if self.keep_history:
poll_query = "SELECT MAX(tid) FROM transaction"
@ -128,6 +129,7 @@ class OracleAdapter(object):
revert_when_stale=options.revert_when_stale,
)
# pylint:disable=redefined-variable-type
if self.keep_history:
self.packundo = OracleHistoryPreservingPackUndo(
database_type='oracle',
@ -135,11 +137,11 @@ class OracleAdapter(object):
runner=self.runner,
locker=self.locker,
options=options,
)
)
self.dbiter = HistoryPreservingDatabaseIterator(
database_type='oracle',
runner=self.runner,
)
)
else:
self.packundo = OracleHistoryFreePackUndo(
database_type='oracle',
@ -147,15 +149,15 @@ class OracleAdapter(object):
runner=self.runner,
locker=self.locker,
options=options,
)
)
self.dbiter = HistoryFreeDatabaseIterator(
database_type='oracle',
runner=self.runner,
)
)
self.stats = OracleStats(
connmanager=self.connmanager,
)
)
def new_instance(self):
# This adapter and its components are stateless, so it's
@ -166,7 +168,7 @@ class OracleAdapter(object):
dsn=self._dsn,
twophase=self._twophase,
options=self.options,
)
)
def __str__(self):
parts = [self.__class__.__name__]

View File

@ -40,7 +40,7 @@ class OracleRowBatcher(RowBatcher):
def replace_var(match):
name = match.group(1)
new_name = '%s_%d' % (name, rownum)
new_name = '%s_%d' % (name, rownum) # pylint:disable=undefined-loop-variable
if name in self.inputsizes:
stmt_inputsizes[new_name] = self.inputsizes[name]
params[new_name] = row[name]
@ -69,8 +69,8 @@ class OracleRowBatcher(RowBatcher):
mod_row = oracle_rowvar_re.sub(replace_var, row_schema)
parts.append("INTO %s VALUES (%s)" % (header, mod_row))
parts = '\n'.join(parts)
stmt = "INSERT ALL\n%s\nSELECT * FROM DUAL" % parts
stmt = "INSERT ALL\n%s\nSELECT * FROM DUAL" % '\n'.join(parts)
if stmt_inputsizes:
self.cursor.setinputsizes(**stmt_inputsizes)
self.cursor.execute(stmt, params)

View File

@ -68,7 +68,7 @@ class CXOracleConnectionManager(AbstractConnectionManager):
@metricmethod
def open(self, transaction_mode="ISOLATION LEVEL READ COMMITTED",
twophase=False, replica_selector=None):
twophase=False, replica_selector=None, **kwargs):
"""Open a database connection and return (conn, cursor)."""
if replica_selector is None:
replica_selector = self.replica_selector

View File

@ -40,7 +40,7 @@ except ImportError:
else: # pragma: no cover
@implementer(IDBDriver)
class cx_OracleDriver(object):
class cx_OracleDriver(object): # noqa
__name__ = 'cx_Oracle'
disconnected_exceptions, close_exceptions, lock_exceptions = _standard_exceptions(cx_Oracle)
disconnected_exceptions += (cx_Oracle.DatabaseError,)

View File

@ -37,7 +37,8 @@ def _to_oracle_ordered(query_tuple):
@implementer(IObjectMover)
class OracleObjectMover(AbstractObjectMover):
inputsizes = ()
# This is assigned to by the adapter.
inputsizes = None
_move_from_temp_hp_insert_query = format_to_named(AbstractObjectMover._move_from_temp_hp_insert_query)
_move_from_temp_hf_insert_query = format_to_named(AbstractObjectMover._move_from_temp_hf_insert_query)
@ -239,7 +240,7 @@ class OracleObjectMover(AbstractObjectMover):
state = :blobdata
WHERE zoid = :oid
"""
cursor.setinputsizes(blobdata=self.inputsizes['blobdata'])
cursor.setinputsizes(blobdata=self.inputsizes['blobdata']) # pylint:disable=unsubscriptable-object
cursor.execute(stmt, oid=oid, prev_tid=prev_tid,
md5sum=md5sum, blobdata=self.Binary(data))
@ -264,7 +265,7 @@ class OracleObjectMover(AbstractObjectMover):
bytecount = 0
# Current versions of cx_Oracle only support offsets up
# to sys.maxint or 4GB, whichever comes first.
maxsize = min(sys.maxsize, 1<<32)
maxsize = min(sys.maxsize, 1 << 32)
try:
cursor.execute(stmt, (oid, tid))
while True:
@ -279,9 +280,11 @@ class OracleObjectMover(AbstractObjectMover):
f = open(filename, 'wb')
# round off the chunk-size to be a multiple of the oracle
# blob chunk size to maximize performance
read_chunk_size = int(max(round(
1.0 * self.blob_chunk_size / blob.getchunksize()), 1) *
blob.getchunksize())
read_chunk_size = int(
max(
round(1.0 * self.blob_chunk_size / blob.getchunksize()),
1)
* blob.getchunksize())
offset = 1 # Oracle still uses 1-based indexing.
reader = iter(lambda: blob.read(offset, read_chunk_size), b'')
for read_chunk in reader:
@ -307,7 +310,7 @@ class OracleObjectMover(AbstractObjectMover):
# Current versions of cx_Oracle only support offsets up
# to sys.maxint or 4GB, whichever comes first. We divide up our
# upload into chunks within this limit.
oracle_blob_chunk_maxsize = min(sys.maxsize, 1<<32)
oracle_blob_chunk_maxsize = min(sys.maxsize, 1 << 32)
@metricmethod_sampled
def upload_blob(self, cursor, oid, tid, filename):
@ -315,6 +318,7 @@ class OracleObjectMover(AbstractObjectMover):
If serial is None, upload to the temporary table.
"""
# pylint:disable=too-many-locals
if tid is not None:
if self.keep_history:
delete_stmt = """

View File

@ -146,9 +146,9 @@ class OracleSchemaInstaller(AbstractSchemaInstaller):
def prepare(self):
"""Create the database schema if it does not already exist."""
def callback(conn, cursor):
def callback(_conn, cursor):
tables = self.list_tables(cursor)
if not 'object_state' in tables:
if 'object_state' not in tables:
self.create(cursor)
else:
self.check_compatibility(cursor, tables)

View File

@ -33,6 +33,7 @@ def format_to_named(stmt):
return _stmt_cache[stmt]
except KeyError:
matches = []
def replace(_match):
matches.append(None)
return ':%d' % len(matches)
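
The tail of format_to_named is elided by the hunk; a hedged reconstruction of the whole helper -- each '%s' placeholder becomes an Oracle-style positional bind and the translation is memoized (the regex and cache wiring here are illustrative):

import re

_stmt_cache = {}

def format_to_named(stmt):
    try:
        return _stmt_cache[stmt]
    except KeyError:
        matches = []
        def replace(_match):
            matches.append(None)
            return ':%d' % len(matches)
        new_stmt = re.sub('%s', replace, stmt)
        _stmt_cache[stmt] = new_stmt
        return new_stmt

# format_to_named("INSERT INTO t VALUES (%s, %s)")
# -> "INSERT INTO t VALUES (:1, :2)"
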
@ -74,7 +75,7 @@ class OracleScriptRunner(ScriptRunner):
params[k] = v
else:
stmt = generic_stmt % self.script_vars
params = ()
params = () # pylint:disable=redefined-variable-type
try:
cursor.execute(stmt, params)
@ -116,6 +117,7 @@ class CXOracleScriptRunner(OracleScriptRunner):
error indicating truncation. The run_lob_stmt() method works
around this.
"""
# pylint:disable=unused-argument
if defaultType == self.driver.BLOB:
# Default size for BLOB is 4, we want the whole blob inline.
# Typical chunk size is 8132, we choose a multiple - 32528

View File

@ -23,26 +23,26 @@ class OracleStats(AbstractStats):
"""Returns the number of objects in the database"""
# The tests expect an exact number, but the code below generates
# an estimate, so this is disabled for now.
if True:
return 0
else:
conn, cursor = self.connmanager.open(
self.connmanager.isolation_read_only)
try:
stmt = """
SELECT NUM_ROWS
FROM USER_TABLES
WHERE TABLE_NAME = 'CURRENT_OBJECT'
"""
cursor.execute(stmt)
res = cursor.fetchone()[0]
if res is None:
res = 0
else:
res = int(res)
return res
finally:
self.connmanager.close(conn, cursor)
return 0
def _estimate_object_count(self):
conn, cursor = self.connmanager.open(
self.connmanager.isolation_read_only)
try:
stmt = """
SELECT NUM_ROWS
FROM USER_TABLES
WHERE TABLE_NAME = 'CURRENT_OBJECT'
"""
cursor.execute(stmt)
res = cursor.fetchone()[0]
if res is None:
res = 0
else:
res = int(res)
return res
finally:
self.connmanager.close(conn, cursor)
def get_db_size(self):
"""Returns the approximate size of the database in bytes"""

View File

@ -14,6 +14,8 @@
"""Pack/Undo implementations.
"""
# pylint:disable=too-many-lines,unused-argument
from ZODB.POSException import UndoError
from ZODB.utils import u64
from perfmetrics import metricmethod
@ -34,6 +36,8 @@ class PackUndo(object):
verify_sane_database = False
_script_choose_pack_transaction = None
def __init__(self, database_type, connmanager, runner, locker, options):
self.database_type = database_type
self.connmanager = connmanager
@ -354,7 +358,7 @@ class HistoryPreservingPackUndo(PackUndo):
SELECT zoid, prev_tid FROM temp_undo
"""
self.runner.run_script(cursor, stmt,
{'undo_tid': undo_tid, 'self_tid': self_tid})
{'undo_tid': undo_tid, 'self_tid': self_tid})
res = list(cursor)
stmt = self._script_reset_temp_undo
@ -420,7 +424,7 @@ class HistoryPreservingPackUndo(PackUndo):
state = db_binary_to_bytes(state)
if hasattr(state, 'read'):
# Oracle
state = state.read()
state = state.read() # pylint:disable=no-member
if state:
assert isinstance(state, bytes), type(state) # PY3: used to be str(state)
from_count += 1
@ -429,8 +433,8 @@ class HistoryPreservingPackUndo(PackUndo):
except:
log.error(
"pre_pack: can't unpickle "
"object %d in transaction %d; state length = %d" % (
from_oid, tid, len(state)))
"object %d in transaction %d; state length = %d",
from_oid, tid, len(state))
raise
for to_oid in to_oids:
add_rows.append((from_oid, tid, to_oid))
@ -456,7 +460,7 @@ class HistoryPreservingPackUndo(PackUndo):
to_count = len(add_rows)
log.debug("pre_pack: transaction %d: has %d reference(s) "
"from %d object(s)", tid, to_count, from_count)
"from %d object(s)", tid, to_count, from_count)
return to_count
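
The log.error change earlier in this file is pylint's logging-not-lazy fix: pass the arguments instead of %-formatting eagerly, so interpolation only happens if the record is actually emitted. A minimal illustration:

import logging
log = logging.getLogger(__name__)

oid, tid, length = 1, 2, 3
# Eager: builds the string even when ERROR is filtered out (flagged).
log.error("can't unpickle object %d in tx %d; length = %d" % (oid, tid, length))
# Lazy: the logging framework formats only when needed.
log.error("can't unpickle object %d in tx %d; length = %d", oid, tid, length)
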
@metricmethod
@ -519,7 +523,7 @@ class HistoryPreservingPackUndo(PackUndo):
AND tid <= %(pack_tid)s
"""
self.runner.run_script_stmt(
cursor, stmt, {'pack_tid':pack_tid})
cursor, stmt, {'pack_tid': pack_tid})
to_remove += cursor.rowcount
log.info("pre_pack: enumerating transactions to pack")
@ -533,7 +537,7 @@ class HistoryPreservingPackUndo(PackUndo):
cursor.execute(stmt)
log.info("pre_pack: will remove %d object state(s)",
to_remove)
to_remove)
except:
log.exception("pre_pack: failed")
@ -645,10 +649,11 @@ class HistoryPreservingPackUndo(PackUndo):
@metricmethod
def pack(self, pack_tid, sleep=None, packed_func=None):
"""Pack. Requires the information provided by pre_pack."""
# pylint:disable=too-many-locals
# Read committed mode is sufficient.
conn, cursor = self.connmanager.open()
try:
try: # pylint:disable=too-many-nested-blocks
try:
stmt = """
SELECT transaction.tid,
@ -697,9 +702,9 @@ class HistoryPreservingPackUndo(PackUndo):
statecounter += len(packed_list)
if counter >= lastreport + reportstep:
log.info("pack: packed %d (%.1f%%) transaction(s), "
"affecting %d states",
counter, counter/float(total)*100,
statecounter)
"affecting %d states",
counter, counter / float(total) * 100,
statecounter)
lastreport = counter / reportstep * reportstep
del packed_list[:]
self.locker.release_commit_lock(cursor)
@ -726,7 +731,7 @@ class HistoryPreservingPackUndo(PackUndo):
def _pack_transaction(self, cursor, pack_tid, tid, packed,
has_removable, packed_list):
has_removable, packed_list):
"""Pack one transaction. Requires populated pack tables."""
log.debug("pack: transaction %d: packing", tid)
removed_objects = 0
@ -748,7 +753,7 @@ class HistoryPreservingPackUndo(PackUndo):
AND tid <= %(pack_tid)s
"""
self.runner.run_script_stmt(cursor, stmt,
{'pack_tid': pack_tid, 'tid': tid})
{'pack_tid': pack_tid, 'tid': tid})
stmt = """
SELECT pack_state.zoid
@ -932,7 +937,7 @@ class HistoryFreePackUndo(PackUndo):
state = db_binary_to_bytes(state)
if hasattr(state, 'read'):
# Oracle
state = state.read()
state = state.read() # pylint:disable=no-member
add_objects.append((from_oid, tid))
if state:
assert isinstance(state, bytes), type(state)
@ -1046,9 +1051,10 @@ class HistoryFreePackUndo(PackUndo):
Requires the information provided by pre_pack.
"""
# pylint:disable=too-many-locals
# Read committed mode is sufficient.
conn, cursor = self.connmanager.open()
try:
try: # pylint:disable=too-many-nested-blocks
try:
stmt = """
SELECT zoid, keep_tid
@ -1090,7 +1096,7 @@ class HistoryFreePackUndo(PackUndo):
counter = total - len(to_remove)
if counter >= lastreport + reportstep:
log.info("pack: removed %d (%.1f%%) state(s)",
counter, counter/float(total)*100)
counter, counter / float(total) * 100)
lastreport = counter / reportstep * reportstep
self.locker.release_commit_lock(cursor)
self._pause_pack_until_lock(cursor, sleep)

View File

@ -46,16 +46,14 @@ class Poller(object):
that the changes are too complex to list. new_polled_tid can be
0 if there is no data in the database.
"""
# pylint:disable=unused-argument
# find out the tid of the most recent transaction.
cursor.execute(self.poll_query)
rows = list(cursor)
if not rows:
if not rows or not rows[0][0]:
# No data.
return None, 0
new_polled_tid = rows[0][0]
if not new_polled_tid:
# No data.
return None, 0
if prev_polled_tid is None:
# This is the first time the connection has polled.
@ -65,54 +63,7 @@ class Poller(object):
# No transactions have been committed since prev_polled_tid.
return (), new_polled_tid
elif new_polled_tid > prev_polled_tid:
# New transaction(s) have been added.
if self.keep_history:
# If the previously polled transaction no longer exists,
# the cache is too old and needs to be cleared.
# XXX Do we actually need to detect this condition? I think
# if we delete this block of code, all the unreachable
# objects will be garbage collected anyway. So, as a test,
# there is no equivalent of this block of code for
# history-free storage. If something goes wrong, then we'll
# know there's some other edge condition we have to account
# for.
stmt = "SELECT 1 FROM transaction WHERE tid = %(tid)s"
cursor.execute(
intern(stmt % self.runner.script_vars),
{'tid': prev_polled_tid})
rows = cursor.fetchall()
if not rows:
# Transaction not found; perhaps it has been packed.
# The connection cache should be cleared.
return None, new_polled_tid
# Get the list of changed OIDs and return it.
if self.keep_history:
stmt = """
SELECT zoid, tid
FROM current_object
WHERE tid > %(tid)s
"""
else:
stmt = """
SELECT zoid, tid
FROM object_state
WHERE tid > %(tid)s
"""
params = {'tid': prev_polled_tid}
if ignore_tid is not None:
stmt += " AND tid != %(self_tid)s"
params['self_tid'] = ignore_tid
stmt = intern(stmt % self.runner.script_vars)
cursor.execute(stmt, params)
changes = cursor.fetchall()
return changes, new_polled_tid
else:
if new_polled_tid <= prev_polled_tid:
# The database connection is stale. This can happen after
# reading an asynchronous slave that is not fully up to date.
# (It may also suggest that transaction IDs are not being created
@ -127,14 +78,61 @@ class Poller(object):
# We have to invalidate the whole cPickleCache, otherwise
# the cache would be inconsistent with the reverted state.
return None, new_polled_tid
else:
# This client never wants to revert to stale data, so
# raise ReadConflictError to trigger a retry.
# We're probably just waiting for async replication
# to catch up, so retrying could do the trick.
raise ReadConflictError(
"The database connection is stale: new_polled_tid=%d, "
"prev_polled_tid=%d." % (new_polled_tid, prev_polled_tid))
# This client never wants to revert to stale data, so
# raise ReadConflictError to trigger a retry.
# We're probably just waiting for async replication
# to catch up, so retrying could do the trick.
raise ReadConflictError(
"The database connection is stale: new_polled_tid=%d, "
"prev_polled_tid=%d." % (new_polled_tid, prev_polled_tid))
# New transaction(s) have been added.
if self.keep_history:
# If the previously polled transaction no longer exists,
# the cache is too old and needs to be cleared.
# XXX Do we actually need to detect this condition? I think
# if we delete this block of code, all the unreachable
# objects will be garbage collected anyway. So, as a test,
# there is no equivalent of this block of code for
# history-free storage. If something goes wrong, then we'll
# know there's some other edge condition we have to account
# for.
stmt = "SELECT 1 FROM transaction WHERE tid = %(tid)s"
cursor.execute(
intern(stmt % self.runner.script_vars),
{'tid': prev_polled_tid})
rows = cursor.fetchall()
if not rows:
# Transaction not found; perhaps it has been packed.
# The connection cache should be cleared.
return None, new_polled_tid
# Get the list of changed OIDs and return it.
if self.keep_history:
stmt = """
SELECT zoid, tid
FROM current_object
WHERE tid > %(tid)s
"""
else:
stmt = """
SELECT zoid, tid
FROM object_state
WHERE tid > %(tid)s
"""
params = {'tid': prev_polled_tid}
if ignore_tid is not None:
stmt += " AND tid != %(self_tid)s"
params['self_tid'] = ignore_tid
stmt = intern(stmt % self.runner.script_vars)
cursor.execute(stmt, params)
changes = cursor.fetchall()
return changes, new_polled_tid
def list_changes(self, cursor, after_tid, last_tid):
"""Return the (oid, tid) values changed in a range of transactions.

View File

@ -47,6 +47,7 @@ def select_driver(options=None):
class PostgreSQLAdapter(object):
"""PostgreSQL adapter for RelStorage."""
# pylint:disable=too-many-instance-attributes
def __init__(self, dsn='', options=None):
# options is a relstorage.options.Options or None
self._dsn = dsn
@ -63,32 +64,32 @@ class PostgreSQLAdapter(object):
driver,
dsn=dsn,
options=options,
)
)
self.runner = ScriptRunner()
self.locker = PostgreSQLLocker(
options=options,
lock_exceptions=driver.lock_exceptions,
version_detector=self.version_detector,
)
)
self.schema = PostgreSQLSchemaInstaller(
connmanager=self.connmanager,
runner=self.runner,
locker=self.locker,
keep_history=self.keep_history,
)
)
self.mover = PostgreSQLObjectMover(
database_type='postgresql',
options=options,
runner=self.runner,
version_detector=self.version_detector,
Binary=driver.Binary,
)
)
self.connmanager.set_on_store_opened(self.mover.on_store_opened)
self.oidallocator = PostgreSQLOIDAllocator()
self.txncontrol = PostgreSQLTransactionControl(
keep_history=self.keep_history,
driver=driver,
)
)
self.poller = Poller(
poll_query="EXECUTE get_latest_tid",
@ -96,7 +97,7 @@ class PostgreSQLAdapter(object):
runner=self.runner,
revert_when_stale=options.revert_when_stale,
)
# pylint:disable=redefined-variable-type
if self.keep_history:
self.packundo = HistoryPreservingPackUndo(
database_type='postgresql',
@ -104,11 +105,11 @@ class PostgreSQLAdapter(object):
runner=self.runner,
locker=self.locker,
options=options,
)
)
self.dbiter = HistoryPreservingDatabaseIterator(
database_type='postgresql',
runner=self.runner,
)
)
else:
self.packundo = HistoryFreePackUndo(
database_type='postgresql',
@ -116,15 +117,15 @@ class PostgreSQLAdapter(object):
runner=self.runner,
locker=self.locker,
options=options,
)
)
self.dbiter = HistoryFreeDatabaseIterator(
database_type='postgresql',
runner=self.runner,
)
)
self.stats = PostgreSQLStats(
connmanager=self.connmanager,
)
)
def new_instance(self):
inst = type(self)(dsn=self._dsn, options=self.options)

View File

@ -48,7 +48,7 @@ class Psycopg2ConnectionManager(AbstractConnectionManager):
return dsn
@metricmethod
def open(self, isolation=None, replica_selector=None):
def open(self, isolation=None, replica_selector=None, **kwargs):
"""Open a database connection and return (conn, cursor)."""
if isolation is None:
isolation = self.isolation_read_committed

View File

@ -17,7 +17,7 @@ PostgreSQL IDBDriver implementations.
"""
from __future__ import print_function, absolute_import
# pylint:disable=redefined-variable-type
import sys
import os
@ -39,6 +39,7 @@ def _create_connection(mod):
class Psycopg2Connection(mod.extensions.connection):
# The replica attribute holds the name of the replica this
# connection is bound to.
# pylint:disable=slots-on-old-class
__slots__ = ('replica',)
return Psycopg2Connection
@ -163,6 +164,7 @@ else:
class _ReadBlob(object):
closed = False
fetch_size = 1024 * 1024 * 9
def __init__(self, conn, oid):
self._cursor = conn.cursor()
self.oid = oid
@ -243,7 +245,7 @@ else:
key = 'database'
kwds[key] = value
conn = self._connect(**kwds)
assert conn.__class__ is _Connection.__base__
assert conn.__class__ is _Connection.__base__ # pylint:disable=no-member
conn.__class__ = _Connection
return _ConnWrapper(conn) if self._wrap else conn

View File

@ -19,6 +19,7 @@ from relstorage.adapters.interfaces import IObjectMover
from zope.interface import implementer
import os
import functools
from relstorage._compat import xrange
@ -110,7 +111,7 @@ class PostgreSQLObjectMover(AbstractObjectMover):
if f is None:
f = open(filename, 'ab') # Append, chunk 0 was an export
reader = iter(lambda: blob.read(read_chunk_size), b'')
reader = iter(functools.partial(blob.read, read_chunk_size), b'')
for read_chunk in reader:
f.write(read_chunk)
bytecount += len(read_chunk)
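
functools.partial replaces the lambda in the two-argument iter() form, which keeps calling blob.read(read_chunk_size) until the b'' sentinel comes back. A self-contained illustration:

import functools
import io

blob = io.BytesIO(b'x' * 10)
read_chunk_size = 4
for chunk in iter(functools.partial(blob.read, read_chunk_size), b''):
    print(len(chunk))  # 4, 4, 2 -- stops at the b'' sentinel
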
@ -127,7 +128,7 @@ class PostgreSQLObjectMover(AbstractObjectMover):
# PostgreSQL < 9.3 only supports up to 2GB of data per BLOB.
# Even above that, we can only use larger blobs on 64-bit builds.
postgresql_blob_chunk_maxsize = 1<<31
postgresql_blob_chunk_maxsize = 1 << 31
@metricmethod_sampled
def upload_blob(self, cursor, oid, tid, filename):
@ -135,6 +136,7 @@ class PostgreSQLObjectMover(AbstractObjectMover):
If serial is None, upload to the temporary table.
"""
# pylint:disable=too-many-branches,too-many-locals
if tid is not None:
if self.keep_history:
delete_stmt = """

View File

@ -97,7 +97,7 @@ class PostgreSQLSchemaInstaller(AbstractSchemaInstaller):
"""Create the database schema if it does not already exist."""
def callback(_conn, cursor):
tables = self.list_tables(cursor)
if not 'object_state' in tables:
if 'object_state' not in tables:
self.create(cursor)
else:
self.check_compatibility(cursor, tables)
@ -111,7 +111,7 @@ class PostgreSQLSchemaInstaller(AbstractSchemaInstaller):
"installing the stored procedures.")
triggers = self.list_triggers(cursor)
if not 'blob_chunk_delete' in triggers:
if 'blob_chunk_delete' not in triggers:
self.install_triggers(cursor)
self.connmanager.open_and_call(callback)

View File

@ -28,7 +28,7 @@ class PostgreSQLStats(AbstractStats):
def get_db_size(self):
"""Returns the approximate size of the database in bytes"""
def callback(conn, cursor):
def callback(_conn, cursor):
cursor.execute("SELECT pg_database_size(current_database())")
return cursor.fetchone()[0]
return self.connmanager.open_and_call(callback)

View File

@ -64,7 +64,7 @@ class PostgreSQLTransactionControl(AbstractTransactionControl):
%s, %s,
%s)
"""
Binary = self._Binary
binary = self._Binary
cursor.execute(stmt, (tid, packed,
Binary(username), Binary(description),
Binary(extension)))
binary(username), binary(description),
binary(extension)))

View File

@ -22,6 +22,9 @@ import time
@implementer(IReplicaSelector)
class ReplicaSelector(object):
# The time at which we checked the config
_config_checked = 0
def __init__(self, fn, replica_timeout):
self.replica_conf = fn
self.replica_timeout = replica_timeout

View File

@ -21,8 +21,6 @@ import logging
from ZODB.POSException import StorageError
import re
log = logging.getLogger("relstorage")
@ -247,16 +245,16 @@ class AbstractSchemaInstaller(object):
"""Create the database schema if it does not already exist."""
# XXX: We can generalize this to handle triggers, procs, etc,
# to make subclasses have easier time.
def callback(conn, cursor):
def callback(_conn, cursor):
tables = self.list_tables(cursor)
if not 'object_state' in tables:
if 'object_state' not in tables:
self.create(cursor)
else:
self.check_compatibility(cursor, tables)
self.update_schema(cursor, tables)
self.connmanager.open_and_call(callback)
def check_compatibility(self, cursor, tables):
def check_compatibility(self, cursor, tables): # pylint:disable=unused-argument
if self.keep_history:
if 'transaction' not in tables and 'current_object' not in tables:
raise StorageError(
@ -271,7 +269,7 @@ class AbstractSchemaInstaller(object):
"can not connect to a history-preserving database. "
"If you need to convert, use the zodbconvert utility."
)
if not 'blob_chunk' in tables:
if 'blob_chunk' not in tables:
raise StorageError(
"Schema mismatch; please create the blob_chunk tables."
"See migration instructions for RelStorage 1.5."
@ -320,7 +318,7 @@ class AbstractSchemaInstaller(object):
def drop_all(self):
"""Drop all tables and sequences."""
def callback(conn, cursor):
def callback(_conn, cursor):
existent = set(self.list_tables(cursor))
todo = list(self.all_tables)
todo.reverse()

View File

@ -1 +1 @@
# Tests package.

View File

@ -28,7 +28,7 @@ class RowBatcherTests(unittest.TestCase):
self.assertEqual(batcher.rows_added, 1)
self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.deletes,
{('mytable', ('id',)): set([("2",)])})
{('mytable', ('id',)): set([("2",)])})
def test_delete_multiple_column(self):
cursor = MockCursor()
@ -38,7 +38,7 @@ class RowBatcherTests(unittest.TestCase):
self.assertEqual(batcher.rows_added, 1)
self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.deletes,
{('mytable', ('id', 'tid')): set([("2", "10")])})
{('mytable', ('id', 'tid')): set([("2", "10")])})
def test_delete_auto_flush(self):
cursor = MockCursor()
@ -47,7 +47,7 @@ class RowBatcherTests(unittest.TestCase):
batcher.delete_from("mytable", id=2)
batcher.delete_from("mytable", id=1)
self.assertEqual(cursor.executed,
[('DELETE FROM mytable WHERE id IN (1,2)', None)])
[('DELETE FROM mytable WHERE id IN (1,2)', None)])
self.assertEqual(batcher.rows_added, 0)
self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.deletes, {})
@ -130,12 +130,14 @@ class RowBatcherTests(unittest.TestCase):
rowkey=2,
size=5,
)
self.assertEqual(cursor.executed, [(
'INSERT INTO mytable (id, name) VALUES\n'
'(%s, id || %s),\n'
'(%s, id || %s)',
(1, 'a', 2, 'B'))
])
self.assertEqual(
cursor.executed,
[(
'INSERT INTO mytable (id, name) VALUES\n'
'(%s, id || %s),\n'
'(%s, id || %s)',
(1, 'a', 2, 'B'))
])
self.assertEqual(batcher.rows_added, 0)
self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.inserts, {})
@ -200,13 +202,15 @@ class OracleRowBatcherTests(unittest.TestCase):
)
self.assertEqual(cursor.executed, [])
batcher.flush()
self.assertEqual(cursor.executed, [(
'INSERT ALL\n'
'INTO mytable (id, name) VALUES (:id_0, :id_0 || :name_0)\n'
'INTO mytable (id, name) VALUES (:id_1, :id_1 || :name_1)\n'
'SELECT * FROM DUAL',
{'id_0': 1, 'id_1': 2, 'name_1': 'b', 'name_0': 'a'})
])
self.assertEqual(
cursor.executed,
[(
'INSERT ALL\n'
'INTO mytable (id, name) VALUES (:id_0, :id_0 || :name_0)\n'
'INTO mytable (id, name) VALUES (:id_1, :id_1 || :name_1)\n'
'SELECT * FROM DUAL',
{'id_0': 1, 'id_1': 2, 'name_1': 'b', 'name_0': 'a'})
])
def test_insert_one_raw_row(self):
class MockRawType(object):
@ -223,7 +227,7 @@ class OracleRowBatcherTests(unittest.TestCase):
batcher.flush()
self.assertEqual(cursor.executed, [
('INSERT INTO mytable (id, data) VALUES (:id, :rawdata)',
{'id': 1, 'rawdata': 'xyz'})
{'id': 1, 'rawdata': 'xyz'})
])
self.assertEqual(cursor.inputsizes, {'rawdata': MockRawType})
@ -247,13 +251,15 @@ class OracleRowBatcherTests(unittest.TestCase):
size=3,
)
batcher.flush()
self.assertEqual(cursor.executed, [(
'INSERT ALL\n'
'INTO mytable (id, data) VALUES (:id_0, :rawdata_0)\n'
'INTO mytable (id, data) VALUES (:id_1, :rawdata_1)\n'
'SELECT * FROM DUAL',
{'id_0': 1, 'id_1': 2, 'rawdata_0': 'xyz', 'rawdata_1': 'abc'})
])
self.assertEqual(
cursor.executed,
[(
'INSERT ALL\n'
'INTO mytable (id, data) VALUES (:id_0, :rawdata_0)\n'
'INTO mytable (id, data) VALUES (:id_1, :rawdata_1)\n'
'SELECT * FROM DUAL',
{'id_0': 1, 'id_1': 2, 'rawdata_0': 'xyz', 'rawdata_1': 'abc'})
])
self.assertEqual(cursor.inputsizes, {
'rawdata_0': MockRawType,
'rawdata_1': MockRawType,

View File

@ -46,7 +46,7 @@ class AbstractConnectionManagerTests(unittest.TestCase):
self.assertTrue(conn.rolled_back)
conn.replica = 'other'
self.assertRaises(ReplicaClosedException,
cm.restart_load, conn, MockCursor())
cm.restart_load, conn, MockCursor())
conn = MockConnection()
conn.replica = 'localhost'
@ -54,16 +54,16 @@ class AbstractConnectionManagerTests(unittest.TestCase):
self.assertTrue(conn.rolled_back)
conn.replica = 'other'
self.assertRaises(ReplicaClosedException,
cm.restart_store, conn, MockCursor())
cm.restart_store, conn, MockCursor())
def test_with_ro_replica_conf(self):
import os
import relstorage.tests
tests_dir = relstorage.tests.__file__
replica_conf = os.path.join(os.path.dirname(tests_dir),
'replicas.conf')
'replicas.conf')
ro_replica_conf = os.path.join(os.path.dirname(tests_dir),
'ro_replicas.conf')
'ro_replicas.conf')
options = MockOptions(replica_conf, ro_replica_conf)
from relstorage.adapters.connmanager \
@ -77,7 +77,7 @@ class AbstractConnectionManagerTests(unittest.TestCase):
self.assertTrue(conn.rolled_back)
conn.replica = 'other'
self.assertRaises(ReplicaClosedException,
cm.restart_load, conn, MockCursor())
cm.restart_load, conn, MockCursor())
class MockOptions(object):
@ -87,6 +87,10 @@ class MockOptions(object):
self.replica_timeout = 600.0
class MockConnection(object):
rolled_back = False
closed = False
replica = None
def rollback(self):
self.rolled_back = True
@ -94,6 +98,8 @@ class MockConnection(object):
self.closed = True
class MockCursor(object):
closed = False
def close(self):
self.closed = True

View File

@ -33,7 +33,7 @@ class ReplicaSelectorTests(unittest.TestCase):
from relstorage.adapters.replica import ReplicaSelector
rs = ReplicaSelector(self.fn, 600.0)
self.assertEqual(rs._replicas,
['example.com:1234', 'localhost:4321', 'localhost:9999'])
['example.com:1234', 'localhost:4321', 'localhost:9999'])
def test__read_config_empty(self):
from relstorage.adapters.replica import ReplicaSelector

View File

@ -17,7 +17,7 @@ from tempfile import SpooledTemporaryFile
class AutoTemporaryFile(SpooledTemporaryFile):
# Exists for BWC and to preserve the default threshold
def __init__(self, threshold=10*1024*1024, **kw):
def __init__(self, threshold=10 * 1024 * 1024, **kw):
# STF uses >, the old ATF used >= for the max_size check
SpooledTemporaryFile.__init__(self, max_size=threshold - 1, **kw)
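
SpooledTemporaryFile rolls to disk when the size strictly exceeds max_size, while the old AutoTemporaryFile rolled at >=, so max_size=threshold - 1 preserves the old boundary. A worked illustration (_rolled is a CPython internal, shown only to make the rollover point visible):

from tempfile import SpooledTemporaryFile

threshold = 8
f = SpooledTemporaryFile(max_size=threshold - 1)
f.write(b'x' * 7)   # 7 <= max_size (7): still in memory
print(f._rolled)    # False
f.write(b'x')       # 8 > 7: rolls over, i.e. exactly where size >= threshold
print(f._rolled)    # True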

View File

@ -323,6 +323,7 @@ class BlobCacheChecker(object):
self.check(True)
_check_blob_size_thread = None
def check(self, check_loaded=False):
"""If appropriate, run blob cache cleanup in another thread."""
if self._blob_cache_size is None:
@ -341,7 +342,7 @@ class BlobCacheChecker(object):
check_blob_size_thread = threading.Thread(
target=_check_blob_cache_size,
args=(self.blob_dir, target),
)
)
check_blob_size_thread.setDaemon(True)
check_blob_size_thread.start()
self._check_blob_size_thread = check_blob_size_thread

View File

@ -12,24 +12,23 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from __future__ import absolute_import, print_function, division
"""
Segmented LRU implementations.
"""
from __future__ import absolute_import, print_function, division
import functools
import itertools
try:
izip = itertools.izip
except AttributeError:
# Python 3
izip = zip
izip = zip # pylint:disable=redefined-variable-type
from relstorage.cache import _cache_ring
ffi = _cache_ring.ffi
_FFI_RING = _cache_ring.lib
ffi = _cache_ring.ffi # pylint:disable=no-member
_FFI_RING = _cache_ring.lib # pylint:disable=no-member
_ring_move_to_head = _FFI_RING.rsc_ring_move_to_head
_ring_del = _FFI_RING.rsc_ring_del
@ -174,7 +173,7 @@ class CacheRingNode(object):
def __init__(self, key, value, node=None):
self.key = key
self.value = value
self._cffi_owning_node = None
# Passing the string is faster than passing a cdecl because we
# have the string directly in bytecode without a lookup
if node is None:
@ -257,7 +256,7 @@ class CacheRing(object):
PARENT_CONST = 0
def __init__(self, limit): #, _ring_type=ffi.typeof("RSRing")):
def __init__(self, limit):
self.limit = limit
node = self.ring_home = ffi.new("RSRing")
node.r_next = node

View File

@ -16,6 +16,8 @@ from __future__ import absolute_import, print_function, division
from zope.interface import Interface
from zope.interface import Attribute
#pylint: disable=inherit-non-class,no-method-argument,no-self-argument
class IPersistentCache(Interface):
"""
A cache that can be persisted to a file (or more generally, a stream)

View File

@ -160,7 +160,7 @@ class LocalClient(object):
assert isinstance(key, str), (type(key), key)
assert isinstance(value, bytes)
cvalue = compress(value) if compress else value
cvalue = compress(value) if compress else value # pylint:disable=not-callable
if len(cvalue) >= self._value_limit:
# This value is too big, so don't cache it.

View File

@ -54,6 +54,7 @@ class StorageCache(object):
most global. The first is a LocalClient, which stores the cache
in the Python process, but shares the cache between threads.
"""
# pylint:disable=too-many-instance-attributes,too-many-public-methods
# send_limit: approximate limit on the bytes to buffer before
# sending to the cache.
@ -377,6 +378,7 @@ class StorageCache(object):
Fall back to loading from the database.
"""
# pylint:disable=too-many-statements,too-many-branches,too-many-locals
if not self.checkpoints:
# No poll has occurred yet. For safety, don't use the cache.
self._trace(0x20, oid_int)
@ -534,7 +536,7 @@ class StorageCache(object):
items = [
(startpos, endpos, oid_int)
for (oid_int, (startpos, endpos)) in iteritems(self.queue_contents)
]
]
items.sort()
# Trace these. This is the equivalent of ZEOs
# ClientStorage._update_cache.
@ -601,6 +603,7 @@ class StorageCache(object):
prev_tid_int can be None, in which case the changes
parameter will be ignored. new_tid_int can not be None.
"""
# pylint:disable=too-many-statements,too-many-branches,too-many-locals
new_checkpoints = None
for client in self.clients_global_first:
s = client.get(self.checkpoints_key)
@ -658,8 +661,7 @@ class StorageCache(object):
and changes is not None
and prev_tid_int
and prev_tid_int <= self.current_tid
and new_tid_int >= self.current_tid
):
and new_tid_int >= self.current_tid):
# All the conditions for keeping the checkpoints were met,
# so just update self.delta_after0 and self.current_tid.
m = self.delta_after0

View File

@ -15,6 +15,8 @@ from __future__ import print_function, absolute_import, division
from relstorage.options import Options
# pylint:disable=unused-argument,redefined-variable-type
class MockOptions(Options):
cache_module_name = ''
cache_servers = ''
@ -24,10 +26,11 @@ class MockOptions(Options):
import timeit
import statistics
import statistics # pylint:disable=import-error
try:
import sys
import cProfile, pstats
import cProfile
import pstats
if '--profile' not in sys.argv:
raise ImportError
except ImportError:
@ -66,8 +69,8 @@ def run_and_report_funcs(named_funcs, **kwargs):
for name, func in named_funcs:
times[name] = run_func(func, **kwargs)
for name, time in sorted(times.items()):
print(name, "average", statistics.mean(time), "stddev", statistics.stdev(time))
for name, _time in sorted(times.items()):
print(name, "average", statistics.mean(_time), "stddev", statistics.stdev(_time))
def local_benchmark():
@ -186,7 +189,7 @@ def local_benchmark():
client.reset_stats()
hot_keys = key_groups[0]
i = 0
for k, v in ALL_DATA:
for _k, v in ALL_DATA:
i += 1
client._bucket0[str(i)] = v
@ -224,6 +227,7 @@ class StorageTraceSimulator(object):
def _read_binary_records(self, filename, num_clients=8, write_pct=.30,
mean_size=10000, stddev_size=512):
# pylint:disable=too-many-locals
import struct
keys = []
i = 0
@ -252,11 +256,7 @@ class StorageTraceSimulator(object):
return records
def _read_text_records(self, filename):
try:
from sys import intern as _intern
except ImportError:
# Py2
_intern = intern
from relstorage._compat import intern as _intern
records = []
with self._open_file(filename) as f:
@ -316,13 +316,14 @@ class StorageTraceSimulator(object):
return stats
def _simulate_storage(self, records, cache_local_mb, f):
# pylint:disable=too-many-locals
from relstorage.cache.storage_cache import StorageCache
from relstorage.cache.tests.test_cache import MockAdapter
from ZODB.utils import p64
TRANSACTION_SIZE = 10
options = MockOptions()
options = MockOptions()
options.cache_local_mb = cache_local_mb
options.cache_local_compression = 'none'
#options.cache_delta_size_limit = 30000
@ -483,13 +484,12 @@ class StorageTraceSimulator(object):
def save_load_benchmark():
# pylint:disable=too-many-locals
from relstorage.cache.mapping import SizedLRUMapping as LocalClientBucket
from relstorage.cache import persistence as _Loader
import os
import itertools
import sys
sys.setrecursionlimit(500000)
bucket = LocalClientBucket(500*1024*1024)
print("Testing", type(bucket._dict))
@ -532,8 +532,8 @@ def save_load_benchmark():
b2 = LocalClientBucket(bucket.limit)
_Loader.load_local_cache(cache_options, cache_pfx, b2)
run_and_report_funcs( (('write', write),
('read ', load)))
run_and_report_funcs((('write', write),
('read ', load)))
for fname in fnames:
os.remove(fname)

View File

@ -12,6 +12,7 @@
#
##############################################################################
from __future__ import print_function, absolute_import, division
# pylint:disable=too-many-lines,abstract-method,too-many-public-methods,attribute-defined-outside-init
import unittest
from relstorage.tests.util import skipOnCI
@ -52,7 +53,7 @@ def _check_load_and_store_multiple_files_hit_limit(self, mapping, wrapping_stora
if i > 0:
del mapping[str(i - 1)]
mapping[str(i)] = b'abc'
mapping[str(i)] # Increment so it gets saved
_ = mapping[str(i)] # Increment so it gets saved
persistence.save_local_cache(options, 'test', dump_object)
self.assertEqual(persistence.count_cache_files(options, 'test'),
@ -61,7 +62,7 @@ def _check_load_and_store_multiple_files_hit_limit(self, mapping, wrapping_stora
# make sure it's not in the dict so that even if we find the most recent
# cache file first, we still have something to load. If we don't we can sometimes
# find that file and fail to store anything and prematurely break out of the loop
del mapping[str(i)]
del mapping[str(i)] # pylint:disable=undefined-loop-variable
files_loaded = persistence.load_local_cache(options, 'test', dump_object)
self.assertEqual(files_loaded, 2)
@ -178,7 +179,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_delta_after0_hit(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60
@ -229,7 +229,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_checkpoint0_hit(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60
@ -240,7 +239,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_checkpoint0_miss(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60
@ -252,7 +250,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_delta_after1_hit(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60
@ -265,7 +262,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_delta_after1_miss(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60
@ -278,7 +274,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_checkpoint1_hit(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60
@ -290,7 +285,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_checkpoint1_miss(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60
@ -317,7 +311,6 @@ class StorageCacheTests(unittest.TestCase):
def test_send_queue_small(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
c = self._makeOne()
c.tpc_begin()
c.store_temp(2, b'abc')
@ -332,7 +325,6 @@ class StorageCacheTests(unittest.TestCase):
def test_send_queue_large(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
c = self._makeOne()
c.send_limit = 100
c.tpc_begin()
@ -347,7 +339,6 @@ class StorageCacheTests(unittest.TestCase):
def test_send_queue_none(self):
from relstorage.tests.fakecache import data
from ZODB.utils import p64
c = self._makeOne()
c.tpc_begin()
tid = p64(55)
@ -355,7 +346,6 @@ class StorageCacheTests(unittest.TestCase):
self.assertEqual(data, {})
def test_after_tpc_finish(self):
from ZODB.utils import p64
c = self._makeOne()
c.tpc_begin()
c.after_tpc_finish(p64(55))
@ -722,6 +712,7 @@ class SizedLRUMappingTests(unittest.TestCase):
return bio
def test_load_and_store(self, options=None):
# pylint:disable=too-many-statements
from io import BytesIO
if options is None:
options = MockOptions()
@ -748,7 +739,7 @@ class SizedLRUMappingTests(unittest.TestCase):
client1.reset_stats()
client1['def'] = b'123'
client1['def']
_ = client1['def']
self.assertEqual(2, len(client1))
client1_max_size = client1.size
self._save(bio, client1, options)
@ -924,6 +915,7 @@ class LocalClientTests(unittest.TestCase):
self.assertEqual(c.get_multi(['k2', 'k3']), {})
def test_bucket_sizes_without_compression(self):
# pylint:disable=too-many-statements
# LocalClient is a simple w-TinyLRU cache. Confirm it keeps the right keys.
c = self._makeOne(cache_local_compression='none')
# This limit will result in
@ -1053,6 +1045,7 @@ class LocalClientTests(unittest.TestCase):
def test_bucket_sizes_with_compression(self):
# pylint:disable=too-many-statements
c = self._makeOne(cache_local_compression='zlib')
c.limit = 23 * 2 + 1
c.flush_all()
@ -1218,7 +1211,7 @@ class LocalClientTests(unittest.TestCase):
self.assertEqual(0, len(cache_files))
# Now lets break saving
def badwrite(*args):
def badwrite(*_args):
raise OSError("Nope")
c2._bucket0.write_to_stream = badwrite
@ -1445,13 +1438,13 @@ class MockAdapter(object):
class MockObjectMover(object):
def __init__(self):
self.data = {} # {oid_int: (state, tid_int)}
def load_current(self, cursor, oid_int):
def load_current(self, _cursor, oid_int):
return self.data.get(oid_int, (None, None))
class MockPoller(object):
def __init__(self):
self.changes = [] # [(oid, tid)]
def list_changes(self, cursor, after_tid, last_tid):
def list_changes(self, _cursor, after_tid, last_tid):
return ((oid, tid) for (oid, tid) in self.changes
if tid > after_tid and tid <= last_tid)
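A hypothetical usage sketch (not part of this commit) of the mock above, showing the half-open (after_tid, last_tid] window that list_changes filters on; the cursor argument is unused by the mock, hence the _cursor rename:

    poller = MockPoller()
    poller.changes = [(1, 50), (2, 55), (3, 61)]
    # 50 is excluded (not > after_tid), 61 is excluded (> last_tid)
    assert list(poller.list_changes(None, 50, 60)) == [(2, 55)]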

View File

@ -61,7 +61,7 @@ class ZEOTracer(object):
_trace_file_write(
_pack(
_int(now), encoded, _len(oid), tid, end_tid) + oid,
)
)
except: # pragma: no cover
log.exception("Problem writing trace info for %r at tid %r and end tid %r",
oid, tid, end_tid)
@ -78,7 +78,7 @@ class ZEOTracer(object):
with self._lock:
now = time.time()
for startpos, endpos, oid_int in items:
self._trace(0x52, oid_int, tid_int, dlen=endpos-startpos, now=now)
self._trace(0x52, oid_int, tid_int, dlen=endpos - startpos, now=now)
def close(self):
self._trace_file.close()

View File

@ -14,12 +14,15 @@
"""ZConfig directive implementations for binding RelStorage to Zope"""
from __future__ import absolute_import
from ZODB.config import BaseConfig
from relstorage.options import Options
from relstorage.storage import RelStorage
class BaseConfig(object):
def __init__(self, config):
self.config = config
self.name = config.getSectionName()
class RelStorageFactory(BaseConfig):
"""Open a storage configured via ZConfig"""
@ -38,8 +41,7 @@ class PostgreSQLAdapterFactory(BaseConfig):
from .adapters.postgresql import PostgreSQLAdapter
return PostgreSQLAdapter(
dsn=self.config.dsn,
options=options,
)
options=options)
class OracleAdapterFactory(BaseConfig):
@ -50,8 +52,7 @@ class OracleAdapterFactory(BaseConfig):
user=config.user,
password=config.password,
dsn=config.dsn,
options=options,
)
options=options)
class MySQLAdapterFactory(BaseConfig):

View File

@ -18,7 +18,7 @@ to zope.conf and set the 'cache-servers' parameter as well.
"""
import pylibmc
from _pylibmc import MemcachedError # pylibmc >= 0.9
from pylibmc import Error as MemcachedError
import logging
from functools import wraps
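Because the public home of the exception class has moved across pylibmc releases, a guarded import is the usual hedge when several versions must be supported; a sketch only (the commit itself simply imports the public name available in pylibmc >= 0.9):

    try:
        from pylibmc import Error as MemcachedError
    except ImportError:
        # fall back to the private module some releases exposed it from
        from _pylibmc import MemcachedError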

View File

@ -17,6 +17,8 @@ Stores pickles in the database.
"""
from __future__ import absolute_import, print_function
# pylint:disable=too-many-lines
from ZODB import ConflictResolution
from ZODB.BaseStorage import DataRecord
@ -39,7 +41,7 @@ from ZODB.utils import u64
from perfmetrics import Metric
from perfmetrics import metricmethod
from persistent.TimeStamp import TimeStamp
from persistent.TimeStamp import TimeStamp # pylint:disable=import-error
from relstorage.blobhelper import BlobHelper
from relstorage.cache import StorageCache
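The inline import-error disable above is the pattern for C-extension modules that pylint may fail to import during analysis. The alternative is a blanket rc-file option (for example extension-pkg-whitelist); the inline form keeps the suppression next to the single import it excuses. A minimal sketch:

    from persistent.TimeStamp import TimeStamp  # pylint:disable=import-error

    # TimeStamp accepts (year, month, day[, hour, minute, second])
    print(TimeStamp(2016, 12, 10).timeTime())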
@ -107,6 +109,8 @@ class RelStorage(UndoLogCompatible,
ConflictResolution.ConflictResolvingStorage):
"""Storage to a relational database, based on invalidation polling"""
# pylint:disable=too-many-public-methods,too-many-instance-attributes
_transaction = None # Transaction that is being committed
_tstatus = ' ' # Transaction status, used for copying data
_is_read_only = False
@ -187,6 +191,7 @@ class RelStorage(UndoLogCompatible,
# objects don't need to.
_use_locks=True,
**kwoptions):
# pylint:disable=too-many-branches
self._adapter = adapter
if options is None:
@ -550,6 +555,7 @@ class RelStorage(UndoLogCompatible,
@Metric(method=True, rate=0.1)
def load(self, oid, version=''):
# pylint:disable=unused-argument
if self._stale_error is not None:
raise self._stale_error
@ -634,7 +640,8 @@ class RelStorage(UndoLogCompatible,
if state is None:
# This can happen if something attempts to load
# an object whose creation has been undone, see load()
# This change fixes the test in TransactionalUndoStorage.checkUndoCreationBranch1
# This change fixes the test in
# TransactionalUndoStorage.checkUndoCreationBranch1
# self._log_keyerror doesn't work here, only in certain states.
raise POSKeyError(oid)
end_int = self._adapter.mover.get_object_tid_after(
@ -687,6 +694,7 @@ class RelStorage(UndoLogCompatible,
# Like store(), but used for importing transactions. See the
# comments in FileStorage.restore(). The prev_txn optimization
# is not used.
# pylint:disable=unused-argument
if self._stale_error is not None:
raise self._stale_error
@ -884,6 +892,7 @@ class RelStorage(UndoLogCompatible,
Returns a sequence of OIDs that were resolved to be received by
Connection._handle_serial().
"""
# pylint:disable=too-many-locals
assert self._tid is not None
cursor = self._store_cursor
adapter = self._adapter
@ -927,8 +936,9 @@ class RelStorage(UndoLogCompatible,
txn_has_blobs = self.blobhelper.txn_has_blobs
else:
txn_has_blobs = False
oid_ints = adapter.mover.move_from_temp(cursor, tid_int, txn_has_blobs)
# This returns the OID ints stored, but we don't use them here
adapter.mover.move_from_temp(cursor, tid_int, txn_has_blobs)
return resolved
@ -1087,7 +1097,7 @@ class RelStorage(UndoLogCompatible,
if self._preallocated_oids:
oid_int = self._preallocated_oids.pop()
else:
def f(conn, cursor):
def f(_conn, cursor):
return list(self._adapter.oidallocator.new_oids(cursor))
preallocated = self._with_store(f)
preallocated.sort(reverse=True)
@ -1103,6 +1113,7 @@ class RelStorage(UndoLogCompatible,
return False
def modifiedInVersion(self, oid):
# pylint:disable=unused-argument
return ''
def supportsUndo(self):
@ -1113,6 +1124,7 @@ class RelStorage(UndoLogCompatible,
@metricmethod
def undoLog(self, first=0, last=-20, filter=None):
# pylint:disable=too-many-locals
if self._stale_error is not None:
raise self._stale_error
if last < 0:
@ -1135,7 +1147,7 @@ class RelStorage(UndoLogCompatible,
# This is largely cleaned up with transaction 2.0/ZODB 5, where the storage
# interface is defined in terms of bytes only.
d = {
'id': base64_encodebytes(tid)[:-1],
'id': base64_encodebytes(tid)[:-1], # pylint:disable=deprecated-method
'time': TimeStamp(tid).timeTime(),
'user_name': user or b'',
'description': desc or b'',
@ -1156,6 +1168,7 @@ class RelStorage(UndoLogCompatible,
@metricmethod
def history(self, oid, version=None, size=1, filter=None):
# pylint:disable=unused-argument,too-many-locals
if self._stale_error is not None:
raise self._stale_error
with self._lock:
@ -1205,7 +1218,7 @@ class RelStorage(UndoLogCompatible,
if transaction is not self._transaction:
raise StorageTransactionError(self, transaction)
undo_tid = base64_decodebytes(transaction_id + b'\n')
undo_tid = base64_decodebytes(transaction_id + b'\n') # pylint:disable=deprecated-method
assert len(undo_tid) == 8
undo_tid_int = u64(undo_tid)
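A plausible shape for a base64_encodebytes/base64_decodebytes compatibility shim (an assumption, not necessarily the helpers this codebase actually uses): Python 3 renamed encodestring/decodestring to encodebytes/decodebytes and pylint flags the old names with deprecated-method, while Python 2 only has the old names:

    import base64

    try:
        base64_encodebytes = base64.encodebytes    # Python 3
        base64_decodebytes = base64.decodebytes
    except AttributeError:                         # Python 2
        base64_encodebytes = base64.encodestring
        base64_decodebytes = base64.decodestring

    assert base64_decodebytes(base64_encodebytes(b'\x00' * 8)) == b'\x00' * 8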
@ -1246,6 +1259,7 @@ class RelStorage(UndoLogCompatible,
@metricmethod
def pack(self, t, referencesf, prepack_only=False, skip_prepack=False,
sleep=None):
# pylint:disable=too-many-branches
if self._is_read_only:
raise ReadOnlyError()
@ -1468,6 +1482,7 @@ class RelStorage(UndoLogCompatible,
self.blobhelper.restoreBlob(cursor, oid, serial, blobfilename)
def copyTransactionsFrom(self, other):
# pylint:disable=too-many-locals
# adapted from ZODB.blob.BlobStorageMixin
begin_time = time.time()
txnum = 0

View File

@ -15,6 +15,7 @@
# This is copied from ZODB.tests.RecoveryStorage and expanded to fit
# history-free storages.
# pylint:disable=no-member,too-many-locals
from ZODB.blob import is_blob_record
from transaction import Transaction
@ -117,7 +118,7 @@ class IteratorDeepCompare(object):
in src. Also note that the dest does not retain transaction
metadata.
"""
missing = object()
src_objects = {} # {oid: (tid, data, blob or None)}
for txn in src.iterator():
for rec in txn:
@ -195,10 +196,10 @@ class BasicRecoveryStorage(IteratorDeepCompare):
txn.commit()
# Now pack the destination.
snooze()
self._dst.pack(time.time(), referencesf)
self._dst.pack(time.time(), referencesf)
# And check to see that the root object exists, but not the other
# objects.
data, serial = self._dst.load(root._p_oid, '')
_data, _serial = self._dst.load(root._p_oid, '')
raises(KeyError, self._dst.load, obj1._p_oid, '')
raises(KeyError, self._dst.load, obj2._p_oid, '')
@ -232,9 +233,9 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
db = DB(self._storage)
c = db.open()
r = c.root()
obj = r["obj1"] = MinPO(1)
r["obj1"] = MinPO(1)
transaction.commit()
obj = r["obj2"] = MinPO(1)
r["obj2"] = MinPO(1)
transaction.commit()
self._dst.copyTransactionsFrom(self._storage)
@ -248,6 +249,7 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
# Get the last transaction and its record iterator. Record iterators
# can't be accessed out-of-order, so we need to do this in a slightly
# complicated way:
final = None
for final in it:
records = list(final)
@ -259,6 +261,7 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
self._dst.tpc_finish(final)
def checkRestoreWithMultipleObjectsInUndoRedo(self):
# pylint:disable=too-many-statements
from ZODB.FileStorage import FileStorage
# Undo creates backpointers in (at least) FileStorage. ZODB 3.2.1
@ -314,7 +317,7 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
tid = info[0]['id']
t = Transaction()
self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t)
_oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
@ -338,7 +341,7 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
tid = info[0]['id']
t = Transaction()
self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t)
_oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)

View File

@ -32,7 +32,7 @@ def bigmark():
for i in xrange(1, oid_count):
if random() < 0.2:
refs = []
for j in range(randint(0, 20)):
for _ in range(randint(0, 20)):
refs.append((i * k, randint(0, oid_count) * k))
marker.add_refs(refs)
refcount += len(refs)

View File

@ -79,7 +79,7 @@ directory.
>>> def onfail():
... return cache_size('blobs')
>>> from relstorage.tests.util import wait_until
>>> from ZEO.tests.forker import wait_until
>>> wait_until("size is reduced", check, 99, onfail)
If we read all of the blobs, data will be downloaded again, as

View File

@ -49,11 +49,11 @@ def new_time():
the packing time actually is before the commit time.
"""
now = new_time = time.time()
while new_time <= now:
new_time = time.time()
now = anew_time = time.time()
while anew_time <= now:
anew_time = time.time()
time.sleep(1)
return new_time
return anew_time
with open(__file__, 'rb') as _f:
@ -85,21 +85,21 @@ def random_file(size, fd):
b.rotate(1)
datagen = fdata()
bytes = 0
md5sum = md5()
hasher = md5()
while bytes < size:
data = next(datagen)
md5sum.update(data)
hasher.update(data)
fd.write(data)
bytes += len(data)
return md5sum.hexdigest()
return hasher.hexdigest()
def md5sum(fd):
md5sum = md5()
blocksize = md5sum.block_size << 8
hasher = md5()
blocksize = hasher.block_size << 8
for data in iter(lambda: fd.read(blocksize), b''):
md5sum.update(data)
return md5sum.hexdigest()
hasher.update(data)
return hasher.hexdigest()
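Both renames in this hunk fix the same smell: a local variable shadowing the name of its own enclosing function (new_time inside new_time(), md5sum inside md5sum()). A quick self-check against a known md5 test vector, using the md5sum defined above:

    import io
    # md5(b'abc') is a published test vector
    assert md5sum(io.BytesIO(b'abc')) == '900150983cd24fb0d6963f7d28e17f72'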
def sizeof_fmt(num):
@ -113,7 +113,7 @@ class BlobTestBase(ZODB.tests.StorageTestBase.StorageTestBase):
def setUp(self):
ZODB.tests.StorageTestBase.StorageTestBase.setUp(self)
self._storage = self.create_storage()
self._storage = self.create_storage() # pylint:disable=no-member
class BlobUndoTests(BlobTestBase):
@ -249,7 +249,7 @@ class RecoveryBlobStorage(BlobTestBase,
def setUp(self):
BlobTestBase.setUp(self)
self._dst = self.create_storage('dest')
self._dst = self.create_storage('dest') # pylint:disable=no-member
def tearDown(self):
self._dst.close()
@ -502,35 +502,35 @@ def do_not_depend_on_cwd():
>>> bs.close()
"""
if False:
# ZODB 3.8 fails this test because it creates a single
# 'savepoints' directory.
def savepoint_isolation():
"""Make sure savepoint data is distinct accross transactions
# if False:
# # ZODB 3.8 fails this test because it creates a single
# # 'savepoints' directory.
# def savepoint_isolation():
# """Make sure savepoint data is distinct accross transactions
>>> bs = create_storage()
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root().b = ZODB.blob.Blob()
>>> conn.root().b.open('w').write('initial')
>>> transaction.commit()
>>> conn.root().b.open('w').write('1')
>>> _ = transaction.savepoint()
>>> tm = transaction.TransactionManager()
>>> conn2 = db.open(transaction_manager=tm)
>>> conn2.root().b.open('w').write('2')
>>> _ = tm.savepoint()
>>> conn.root().b.open().read()
'1'
>>> conn2.root().b.open().read()
'2'
>>> transaction.abort()
>>> tm.commit()
>>> conn.sync()
>>> conn.root().b.open().read()
'2'
>>> db.close()
"""
# >>> bs = create_storage()
# >>> db = DB(bs)
# >>> conn = db.open()
# >>> conn.root().b = ZODB.blob.Blob()
# >>> conn.root().b.open('w').write('initial')
# >>> transaction.commit()
# >>> conn.root().b.open('w').write('1')
# >>> _ = transaction.savepoint()
# >>> tm = transaction.TransactionManager()
# >>> conn2 = db.open(transaction_manager=tm)
# >>> conn2.root().b.open('w').write('2')
# >>> _ = tm.savepoint()
# >>> conn.root().b.open().read()
# '1'
# >>> conn2.root().b.open().read()
# '2'
# >>> transaction.abort()
# >>> tm.commit()
# >>> conn.sync()
# >>> conn.root().b.open().read()
# '2'
# >>> db.close()
# """
def savepoint_cleanup():
"""Make sure savepoint data gets cleaned up.
@ -651,6 +651,7 @@ def storage_reusable_suite(prefix, factory,
Pass a factory taking a name and a blob directory name.
"""
# pylint:disable=unused-argument
def setup(test):
setUp(test)
def create_storage(name='data', blob_dir=None, **kw):

View File

@ -17,7 +17,7 @@ from relstorage.tests.RecoveryStorage import BasicRecoveryStorage
from relstorage.tests.RecoveryStorage import UndoableRecoveryStorage
from relstorage.tests.reltestbase import GenericRelStorageTests
from relstorage.tests.reltestbase import RelStorageTestBase
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage
from ZODB.serialize import referencesf
from ZODB.tests.ConflictResolution import PCounter
@ -27,13 +27,12 @@ from ZODB.tests.PackableStorage import Root
from ZODB.tests.PackableStorage import ZERO
from ZODB.tests.StorageTestBase import zodb_pickle
from ZODB.tests.StorageTestBase import zodb_unpickle
from relstorage._compat import loads
import time
class HistoryFreeRelStorageTests(
GenericRelStorageTests,
):
class HistoryFreeRelStorageTests(GenericRelStorageTests):
# pylint:disable=too-many-ancestors,abstract-method,too-many-locals,too-many-statements
keep_history = False
@ -41,6 +40,7 @@ class HistoryFreeRelStorageTests(
# collects garbage but does not retain old versions.
def checkPackAllRevisions(self):
from relstorage._compat import loads
self._initroot()
eq = self.assertEqual
raises = self.assertRaises
@ -235,7 +235,7 @@ class HistoryFreeRelStorageTests(
s1.poll_invalidations()
# commit a change
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
_revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
# commit a conflicting change using s1
main_storage = self._storage
@ -243,12 +243,12 @@ class HistoryFreeRelStorageTests(
try:
# we can resolve this conflict because s1 has an open
# transaction that can read the old state of the object.
revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
_revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
s1.release()
finally:
self._storage = main_storage
data, serialno = self._storage.load(oid, '')
data, _serialno = self._storage.load(oid, '')
inst = zodb_unpickle(data)
self.assertEqual(inst._value, 5)
@ -282,11 +282,9 @@ class HistoryFreeRelStorageTests(
db.close()
class HistoryFreeToFileStorage(
RelStorageTestBase,
BasicRecoveryStorage,
):
class HistoryFreeToFileStorage(RelStorageTestBase,
BasicRecoveryStorage):
# pylint:disable=abstract-method,too-many-ancestors
keep_history = False
def setUp(self):
@ -303,11 +301,9 @@ class HistoryFreeToFileStorage(
return FileStorage('Dest.fs')
class HistoryFreeFromFileStorage(
RelStorageTestBase,
UndoableRecoveryStorage,
):
class HistoryFreeFromFileStorage(RelStorageTestBase,
UndoableRecoveryStorage):
# pylint:disable=abstract-method,too-many-ancestors
keep_history = False
def setUp(self):

View File

@ -17,7 +17,7 @@ from persistent.mapping import PersistentMapping
from relstorage.tests.RecoveryStorage import UndoableRecoveryStorage
from relstorage.tests.reltestbase import GenericRelStorageTests
from relstorage.tests.reltestbase import RelStorageTestBase
from relstorage._compat import TRANSACTION_DATA_IS_TEXT
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage
from ZODB.serialize import referencesf
@ -34,16 +34,14 @@ import transaction
import unittest
class HistoryPreservingRelStorageTests(
GenericRelStorageTests,
TransactionalUndoStorage.TransactionalUndoStorage,
IteratorStorage.IteratorStorage,
IteratorStorage.ExtendedIteratorStorage,
RevisionStorage.RevisionStorage,
PackableStorage.PackableUndoStorage,
HistoryStorage.HistoryStorage,
):
class HistoryPreservingRelStorageTests(GenericRelStorageTests,
TransactionalUndoStorage.TransactionalUndoStorage,
IteratorStorage.IteratorStorage,
IteratorStorage.ExtendedIteratorStorage,
RevisionStorage.RevisionStorage,
PackableStorage.PackableUndoStorage,
HistoryStorage.HistoryStorage):
# pylint:disable=too-many-ancestors,abstract-method,too-many-locals
keep_history = True
def checkUndoMultipleConflictResolution(self, *_args, **_kwargs):
@ -144,7 +142,7 @@ class HistoryPreservingRelStorageTests(
if isinstance(ugly_string, bytes):
# Always text. Use latin 1 because it can decode any arbitrary
# bytes.
ugly_string = ugly_string.decode('latin-1')
ugly_string = ugly_string.decode('latin-1') # pylint:disable=redefined-variable-type
# The storage layer is defined to take bytes (implicitly in
# older ZODB releases, explicitly in ZODB 5.something), but historically
@ -211,7 +209,7 @@ class HistoryPreservingRelStorageTests(
def checkPackGCReusePrePackData(self):
self._storage = self.make_storage(pack_prepack_only=True)
oid = self.checkPackGC(expect_object_deleted=False,close=False)
oid = self.checkPackGC(expect_object_deleted=False, close=False)
# We now have pre-pack analysis data
self._storage._options.pack_prepack_only = False
self._storage.pack(0, referencesf, skip_prepack=True)
@ -250,7 +248,9 @@ class HistoryPreservingRelStorageTests(
db.close()
def checkHistoricalConnection(self):
import datetime, persistent, ZODB.POSException
import datetime
import persistent
import ZODB.POSException
db = DB(self._storage)
conn = db.open()
root = conn.root()
@ -300,11 +300,9 @@ class HistoryPreservingRelStorageTests(
self.assertFalse(ZODB.interfaces.IExternalGC.providedBy(self._storage))
self.assertRaises(AttributeError, self._storage.deleteObject)
class HistoryPreservingToFileStorage(
RelStorageTestBase,
UndoableRecoveryStorage,
):
class HistoryPreservingToFileStorage(RelStorageTestBase,
UndoableRecoveryStorage):
# pylint:disable=too-many-ancestors,abstract-method,too-many-locals
keep_history = True
def setUp(self):
@ -321,11 +319,9 @@ class HistoryPreservingToFileStorage(
return FileStorage('Dest.fs')
class HistoryPreservingFromFileStorage(
RelStorageTestBase,
UndoableRecoveryStorage,
):
class HistoryPreservingFromFileStorage(RelStorageTestBase,
UndoableRecoveryStorage):
# pylint:disable=too-many-ancestors,abstract-method,too-many-locals
keep_history = True
def setUp(self):

View File

@ -14,33 +14,27 @@ logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
use = 'oracle'
keep_history=True
keep_history = True
# pylint:disable=redefined-variable-type
if use == 'mysql':
from relstorage.adapters.mysql import MySQLAdapter
a = MySQLAdapter(
db='packtest',
user='relstoragetest',
passwd='relstoragetest',
options=Options(keep_history=keep_history),
)
a = MySQLAdapter(db='packtest',
user='relstoragetest',
passwd='relstoragetest',
options=Options(keep_history=keep_history),)
elif use == 'postgresql':
from relstorage.adapters.postgresql import PostgreSQLAdapter
a = PostgreSQLAdapter(dsn=
"dbname='packtest' "
'user=relstoragetest '
'password=relstoragetest',
options=Options(keep_history=keep_history),
)
a = PostgreSQLAdapter(dsn="dbname='packtest' "
'user=relstoragetest '
'password=relstoragetest',
options=Options(keep_history=keep_history),)
elif use == 'oracle':
from relstorage.adapters.oracle import OracleAdapter
dsn = os.environ.get('ORACLE_TEST_DSN', 'XE')
a = OracleAdapter(
user='packtest',
password='relstoragetest',
dsn=dsn,
options=Options(keep_history=keep_history),
)
a = OracleAdapter(user='packtest',
password='relstoragetest',
dsn=dsn,
options=Options(keep_history=keep_history),)
else:
raise AssertionError("which database?")
@ -51,26 +45,26 @@ c = d.open()
print('size:')
print(d.getSize())
if 1:
print('initializing...')
container = PersistentMapping()
c.root()['container'] = container
container_size = 10000
for i in range(container_size):
container[i] = PersistentMapping()
print('initializing...')
container = PersistentMapping()
c.root()['container'] = container
container_size = 10000
for i in range(container_size):
container[i] = PersistentMapping()
transaction.commit()
print('generating transactions...')
for trans in range(100):
print(trans)
sources = (random.randint(0, container_size - 1) for j in range(100))
for source in sources:
obj = container[source]
obj[trans] = container[random.randint(0, container_size - 1)]
transaction.commit()
print('generating transactions...')
for trans in range(100):
print(trans)
sources = (random.randint(0, container_size - 1) for j in range(100))
for source in sources:
obj = container[source]
obj[trans] = container[random.randint(0, container_size - 1)]
transaction.commit()
print('size:')
print(d.getSize())
print('size:')
print(d.getSize())
print('packing...')
d.pack()

View File

@ -12,7 +12,7 @@
#
##############################################################################
"""A foundation for RelStorage tests"""
# pylint:disable=too-many-ancestors,abstract-method,too-many-public-methods
from ZODB.DB import DB
from ZODB.POSException import ReadConflictError
from ZODB.serialize import referencesf
@ -50,8 +50,9 @@ class StorageCreatingMixin(object):
return storage
def make_storage(self, zap=True, **kw):
if ('cache_servers' not in kw and 'cache_module_name' not in kw
and kw.get('share_local_cache', True)):
if ('cache_servers' not in kw
and 'cache_module_name' not in kw
and kw.get('share_local_cache', True)):
if util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
kw['cache_servers'] = util.CACHE_SERVERS
kw['cache_module_name'] = util.CACHE_MODULE_NAME
@ -288,7 +289,7 @@ class GenericRelStorageTests(
data = b'a 16 byte string' * (1024 * 1024)
oid = self._storage.new_oid()
self._dostoreNP(oid, data=data)
got, serialno = self._storage.load(oid, '')
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
@ -296,19 +297,18 @@ class GenericRelStorageTests(
# Store 99 objects each with 1900 bytes. This is intended
# to exercise possible buffer overfilling that the batching
# code might cause.
import transaction
data = b'0123456789012345678' * 100
t = transaction.Transaction()
self._storage.tpc_begin(t)
oids = []
for i in range(99):
for _ in range(99):
oid = self._storage.new_oid()
self._storage.store(oid, b'\0'*8, data, '', t)
oids.append(oid)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
for oid in oids:
got, serialno = self._storage.load(oid, '')
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
@ -610,7 +610,7 @@ class GenericRelStorageTests(
# extra1 should have been garbage collected
self.assertRaises(KeyError,
self._storage.load, extra1._p_oid, '')
self._storage.load, extra1._p_oid, '')
# extra2 and extra3 should both still exist
self._storage.load(extra2._p_oid, '')
self._storage.load(extra3._p_oid, '')
@ -832,6 +832,7 @@ class AbstractRSZodbConvertTests(StorageCreatingMixin,
keep_history = True
filestorage_name = 'source'
relstorage_name = 'destination'
filestorage_file = None
def _relstorage_contents(self):
raise NotImplementedError()
@ -905,5 +906,5 @@ class DoubleCommitter(Persistent):
"""A crazy persistent class that changes self in __getstate__"""
def __getstate__(self):
if not hasattr(self, 'new_attribute'):
self.new_attribute = 1
self.new_attribute = 1 # pylint:disable=attribute-defined-outside-init
return Persistent.__getstate__(self)

View File

@ -1,11 +1,12 @@
"""Tests of relstorage.blobhelper"""
# pylint:disable=too-many-public-methods,unused-argument
from relstorage.tests.util import support_blob_cache
import os
import unittest
import tempfile
from ZODB.blob import remove_committed_dir
from relstorage._compat import PY3
from relstorage._compat import dumps
test_oid = b'\0' * 7 + b'\x01'
test_tid = b'\0' * 7 + b'\x02'
@ -14,11 +15,11 @@ test_tid = b'\0' * 7 + b'\x02'
class BlobHelperTest(unittest.TestCase):
def setUp(self):
import tempfile
self.uploaded = None
self.blob_dir = tempfile.mkdtemp()
def tearDown(self):
from ZODB.blob import remove_committed_dir
remove_committed_dir(self.blob_dir)
def _class(self):
@ -157,7 +158,7 @@ class BlobHelperTest(unittest.TestCase):
obj = self._make_default(shared=False)
with obj.openCommittedBlobFile(None, test_oid, test_tid) as f:
if not PY3:
self.assertEqual(f.__class__, file)
self.assertEqual(f.__class__, file) # pylint:disable=undefined-variable
self.assertEqual(f.read(), b'blob here')
def test_openCommittedBlobFile_as_blobfile(self):
@ -190,7 +191,7 @@ class BlobHelperTest(unittest.TestCase):
with obj.openCommittedBlobFile(None, test_oid, test_tid) as f:
self.assertEqual(loadBlob_calls, [1])
if not PY3:
self.assertEqual(f.__class__, file)
self.assertEqual(f.__class__, file) # pylint:disable=undefined-variable
self.assertEqual(f.read(), b'blob here')
def test_openCommittedBlobFile_retry_as_blobfile(self):
@ -273,11 +274,11 @@ class BlobHelperTest(unittest.TestCase):
obj = self._make_default(shared=False)
self.assertFalse(obj.txn_has_blobs)
obj.storeBlob(None, store_func, test_oid, test_tid, 'blob pickle',
fn, '', dummy_txn)
fn, '', dummy_txn)
self.assertFalse(os.path.exists(fn))
self.assertTrue(obj.txn_has_blobs)
self.assertEqual(called,
[(test_oid, test_tid, 'blob pickle', '', dummy_txn)])
[(test_oid, test_tid, 'blob pickle', '', dummy_txn)])
self.assertEqual(self.uploaded[:2], (1, None))
target_fn = self.uploaded[2]
self.assertEqual(read_file(target_fn), 'here a blob')

View File

@ -119,15 +119,13 @@ class AbstractZODBConvertBase(unittest.TestCase):
self._check_value_of_key_in_dest(None)
def test_incremental(self):
x = 10
self._write_value_for_key_in_src(x)
self._write_value_for_key_in_src(10)
main(['', self.cfgfile])
self._check_value_of_key_in_dest(x)
self._check_value_of_key_in_dest(10)
x = "hi"
self._write_value_for_key_in_src(x)
self._write_value_for_key_in_src("hi")
main(['', '--incremental', self.cfgfile])
self._check_value_of_key_in_dest(x)
self._check_value_of_key_in_dest("hi")
def test_incremental_empty_src_dest(self):
# Should work and not raise a POSKeyError

View File

@ -73,7 +73,6 @@ class ZODBPackScriptTests(unittest.TestCase):
def test_pack_with_1_day(self):
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage
from ZODB.POSException import POSKeyError
import time
import transaction
from relstorage.zodbpack import main

View File

@ -27,6 +27,7 @@ import logging
import os
import unittest
# pylint:disable=no-member,too-many-ancestors
base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest')
@ -203,7 +204,6 @@ def test_suite():
suite.addTest(unittest.makeSuite(HPMySQLDestZODBConvertTests))
suite.addTest(unittest.makeSuite(HPMySQLSrcZODBConvertTests))
import ZODB.blob
from relstorage.tests.blob.testblob import storage_reusable_suite
from relstorage.tests.util import shared_blob_dir_choices
for shared_blob_dir in shared_blob_dir_choices:

View File

@ -25,6 +25,8 @@ import os
import sys
import unittest
# pylint:disable=no-member,too-many-ancestors
base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest')
@ -49,6 +51,7 @@ class UseOracleAdapter(object):
class ZConfigTests(object):
def checkConfigureViaZConfig(self):
# pylint:disable=too-many-locals
import tempfile
dsn = os.environ.get('ORACLE_TEST_DSN', 'XE')
fd, replica_conf = tempfile.mkstemp()
@ -144,6 +147,7 @@ db_names = {
}
def test_suite():
# pylint:disable=too-many-locals
import relstorage.adapters.oracle as _adapter
try:
_adapter.select_driver()
@ -163,7 +167,7 @@ def test_suite():
]:
suite.addTest(unittest.makeSuite(klass, "check"))
import ZODB.blob
from .util import RUNNING_ON_CI
if RUNNING_ON_CI or os.environ.get("RS_ORCL_SMALL_BLOB"):
# cx_Oracle blob support can only address up to sys.maxint on

View File

@ -26,6 +26,8 @@ import logging
import os
import unittest
# pylint:disable=no-member,too-many-ancestors
base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest')
@ -121,18 +123,18 @@ class HPPostgreSQLSrcZODBConvertTests(UsePostgreSQLAdapter, _PgSQLCfgMixin, Abst
pass
class HPPostgreSQLTests(UsePostgreSQLAdapter, HistoryPreservingRelStorageTests,
ZConfigTests):
ZConfigTests):
pass
class HPPostgreSQLToFile(UsePostgreSQLAdapter, HistoryPreservingToFileStorage):
pass
class HPPostgreSQLFromFile(UsePostgreSQLAdapter,
HistoryPreservingFromFileStorage):
HistoryPreservingFromFileStorage):
pass
class HFPostgreSQLTests(UsePostgreSQLAdapter, HistoryFreeRelStorageTests,
ZConfigTests):
ZConfigTests):
pass
class HFPostgreSQLToFile(UsePostgreSQLAdapter, HistoryFreeToFileStorage):
@ -149,6 +151,7 @@ db_names = {
}
def test_suite():
# pylint:disable=too-many-locals
import relstorage.adapters.postgresql as _adapter
try:
_adapter.select_driver()
@ -171,7 +174,6 @@ def test_suite():
suite.addTest(unittest.makeSuite(HPPostgreSQLDestZODBConvertTests))
suite.addTest(unittest.makeSuite(HPPostgreSQLSrcZODBConvertTests))
import ZODB.blob
from .util import RUNNING_ON_CI
if RUNNING_ON_CI or os.environ.get("RS_PG_SMALL_BLOB"):
# Avoid creating 2GB blobs to be friendly to neighbors

View File

@ -1,9 +1,6 @@
import os
import time
import unittest
from relstorage._compat import string_types
from ZEO.tests.forker import wait_until
import unittest
# ZODB >= 3.9. The blob directory can be a private cache.
@ -17,7 +14,7 @@ RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
if RUNNING_ON_CI:
skipOnCI = unittest.skip
else:
def skipOnCI(reason):
def skipOnCI(reason): # pylint:disable=unused-argument
def dec(f):
return f
return dec
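A usage sketch for the conditional decorator above (not part of this commit): off CI it is a transparent pass-through, on CI it becomes unittest.skip with the given reason:

    @skipOnCI("too slow for CI")
    def check_expensive():
        return 42

    # off CI the function runs normally; on CI calling it raises SkipTest
    assert check_expensive() == 42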
@ -28,6 +25,7 @@ CACHE_MODULE_NAME = None
if RUNNING_ON_TRAVIS:
# We expect to have access to a local memcache server
# on travis. Use it if we can import drivers.
# pylint:disable=unused-import
try:
import pylibmc
CACHE_SERVERS = ["localhost:11211"]

View File

@ -111,6 +111,7 @@ class TreeMarker(object):
new OIDs marked and `next_pass` is the collection of OIDs to
follow in the next pass.
"""
# pylint:disable=too-many-locals
# next_pass: {oid_hi: IISet32X}
next_pass = collections.defaultdict(IISet32X)
found = 0

View File

@ -19,7 +19,7 @@ from __future__ import print_function
import logging
import argparse
from persistent.TimeStamp import TimeStamp
from persistent.TimeStamp import TimeStamp # pylint:disable=import-error
from io import StringIO
import sys
import ZConfig
@ -69,6 +69,7 @@ class _DefaultStartStorageIteration(object):
return getattr(self._source, name)
def main(argv=None):
# pylint:disable=too-many-branches
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(description=__doc__)
@ -123,7 +124,7 @@ def main(argv=None):
# This *should* be a byte string.
last_tid = u64(last_tid)
next_tid = p64(last_tid+1)
next_tid = p64(last_tid + 1)
# Compensate for the RelStorage bug(?) and get a reusable iterator
# that starts where we want it to. There's no harm in wrapping it for
# other sources like FileStorage too.

View File

@ -32,6 +32,7 @@ class SuffixMultiplier(object):
self._default = default
# all keys must be the same size
self._keysz = None
def check(a, b):
if len(a) != len(b):
raise ValueError("suffix length mismatch")
@ -45,10 +46,11 @@ class SuffixMultiplier(object):
return int(v[:-self._keysz]) * m
return int(v) * self._default
convert_bytesize = SuffixMultiplier({'kb': 1024,
'mb': 1024*1024,
'gb': 1024*1024*1024,
})
convert_bytesize = SuffixMultiplier({
'kb': 1024,
'mb': 1024 * 1024,
'gb': 1024 * 1024 * 1024,
})
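A usage sketch (not part of this commit) for convert_bytesize as defined above; bare numbers fall back to the default multiplier, assumed here to be 1, the usual default for this helper:

    assert convert_bytesize('8kb') == 8 * 1024
    assert convert_bytesize('1mb') == 1024 * 1024
    assert convert_bytesize('512') == 512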
def convert_int(value):
@ -109,7 +111,7 @@ class PostgreSQLAdapterHelper(Resolver):
kw, unused = self.interpret_kwargs(kw)
dsn_args.extend(kw.items())
dsn = ' '.join("%s='%s'"%arg for arg in dsn_args)
dsn = ' '.join("%s='%s'" % arg for arg in dsn_args)
def factory(options):
from relstorage.adapters.postgresql import PostgreSQLAdapter
@ -189,9 +191,7 @@ class RelStorageURIResolver(Resolver):
def factory():
adapter = adapter_factory(options)
storage = RelStorage(adapter=adapter, options=options)
if demostorage:
storage = DemoStorage(base=storage)
return storage
return storage if not demostorage else DemoStorage(base=storage)
return factory, unused
postgresql_resolver = RelStorageURIResolver(PostgreSQLAdapterHelper())
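A hedged sketch of how such a resolver is consumed, matching the (factory, unused) pair returned above; the URI scheme and credentials are illustrative only, not a documented interface:

    factory, unused = postgresql_resolver(
        "postgres://relstoragetest:relstoragetest@localhost/packtest")
    storage = factory()  # a RelStorage over a PostgreSQLAdapter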