Cleanup all the lint (#143)

* Clean up all violations reported by pylint in preparation for turning on landscape.io

* Fix all prospector errors.

* Don't run pylint on pypy/py3 because we're getting all sorts of import errors under Py3 (and it's sorta slow on pypy).

* Back to editable? Somehow _cache_ring isn't always being built.

* Skip bootstrap.py on landscape.

* pylint fixes for umysql
Jason Madden 2016-12-10 11:27:11 -06:00 committed by GitHub
parent 37a1ddc658
commit 4bf54e0954
68 changed files with 528 additions and 480 deletions

View File

@@ -1,7 +1,7 @@
 doc-warnings: no # experimental, raises an exception
 test-warnings: no
 strictness: veryhigh
-max-line-length: 100
+max-line-length: 130
 # We don't use any of the auto-detected things, and
 # auto-detection slows down startup
 autodetect: false
@@ -10,13 +10,14 @@ requirements:
 python-targets:
   - 2
-  - 3
+  # - 3 # landscape.io seems to fail if we run both py2 and py3?
 ignore-paths:
   - doc/
   - build
   - dist
   - .eggs
   - setup.py
+  - bootstrap.py
 #ignore-patterns:
 pyroma:
@@ -42,6 +43,9 @@ pyflakes:
 pep8:
   disable:
+    # N803: argument should be lowercase. We have 'Binary' and
+    # camelCase names.
+    - N803
     # N805: first arg should be self; fails on metaclasses and
     # classmethods; pylint does a better job
     - N805

View File

@@ -10,8 +10,7 @@
 # comments at the end of the line does the same thing (though Py3 supports
 # mixing)
-# invalid-name, ; Things like loadBlob get flagged
+# invalid-name, ; We get lots of these, especially in scripts. should fix many of them
 # protected-access, ; We have many cases of this; legit ones need to be examinid and commented, then this removed
 # no-self-use, ; common in superclasses with extension points
 # too-few-public-methods, ; Exception and marker classes get tagged with this
@@ -29,10 +28,10 @@
 # useless-suppression: the only way to avoid repeating it for specific statements everywhere that we
 # do Py2/Py3 stuff is to put it here. Sadly this means that we might get better but not realize it.
 disable=wrong-import-position,
-    invalid-name,
     wrong-import-order,
     missing-docstring,
     ungrouped-imports,
+    invalid-name,
    protected-access,
    no-self-use,
    too-few-public-methods,
@@ -43,8 +42,9 @@ disable=wrong-import-position,
    cyclic-import,
    too-many-arguments,
    redefined-builtin,
    useless-suppression,
+   duplicate-code,
 # undefined-all-variable

 [FORMAT]
@@ -72,15 +72,14 @@ generated-members=exc_clear
 # List of classes names for which member attributes should not be checked
 # (useful for classes with attributes dynamically set). This supports can work
 # with qualified names.
-# greenlet, Greenlet, parent, dead: all attempts to fix issues in greenlet.py
-# only seen on the service, e.g., self.parent.loop: class parent has no loop
-ignored-classes=SSLContext, SSLSocket, greenlet, Greenlet, parent, dead
+ignored-classes=SectionValue

 # List of module names for which member attributes should not be checked
 # (useful for modules/projects where namespaces are manipulated during runtime
 # and thus existing member attributes cannot be deduced by static analysis. It
 # supports qualified module names, as well as Unix pattern matching.
-ignored-modules=gevent._corecffi
+#ignored-modules=gevent._corecffi

 [DESIGN]
 max-attributes=12

View File

@@ -11,7 +11,7 @@ addons:
 python:
   - pypy-5.4.1
   - 2.7
-  - 3.4
+  - 3.5
 env:
   matrix:
     - ENV=mysql
@@ -21,7 +21,7 @@ env:
     - ENV=umysqldb
 matrix:
   exclude:
-    - python: 3.4
+    - python: 3.5
       env: ENV=umysqldb
     - python: pypy-5.4.1
       env: ENV=pymysql
@@ -31,6 +31,7 @@ matrix:
 script:
   # coverage slows PyPy down from 2minutes to 12+.
   # But don't run the pymysql/pypy tests twice.
+  - if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then pylint --rcfile=.pylintrc relstorage -f parseable -r n; fi
   - if [[ $TRAVIS_PYTHON_VERSION == pypy* ]]; then python -m relstorage.tests.alltests -v; fi
   - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]]; then coverage run -m relstorage.tests.alltests -v; fi
 after_success:
@@ -43,16 +44,13 @@ before_install:
 install:
   - pip install -U pip setuptools
-  - pip install -U tox coveralls
+  - pip install -U tox coveralls pylint
   - if [[ $TRAVIS_PYTHON_VERSION == pypy* ]]; then pip install -U python-memcached; fi
   - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]]; then pip install -U pylibmc cffi; fi
   - pip install -U -e ".[test]"
   - .travis/setup-$ENV.sh
-cache:
-  directories:
-    - $HOME/.cache/pip
-    - $HOME/.venv
+# cache: pip seems not to work if `install` is replaced (https://github.com/travis-ci/travis-ci/issues/3239)
+cache: pip
 before_cache:
   - rm -f $HOME/.cache/pip/log/debug.log

View File

@@ -6,7 +6,8 @@ Compatibility shims.

 from __future__ import print_function, absolute_import, division

-# pylint:disable=unused-import
+# pylint:disable=unused-import,invalid-name,no-member,undefined-variable
+# pylint:disable=no-name-in-module,redefined-variable-type

 import sys
 import platform
@@ -19,8 +20,10 @@ PYPY = platform.python_implementation() == 'PyPy'
 if PY3:
     def list_keys(d):
         return list(d.keys())
+
     def list_items(d):
         return list(d.items())
+
     def list_values(d):
         return list(d.values())
     iteritems = dict.items

View File

@@ -25,7 +25,7 @@ class RowBatcher(object):
     """
     row_limit = 100
-    size_limit = 1<<20
+    size_limit = 1 << 20

     def __init__(self, cursor, row_limit=None):
         self.cursor = cursor
@@ -101,6 +101,6 @@ class RowBatcher(object):
         for row in rows.values():
             parts.append(s)
             params.extend(row)
-        parts = ',\n'.join(parts)
-        stmt = "%s INTO %s VALUES\n%s" % (command, header, parts)
+
+        stmt = "%s INTO %s VALUES\n%s" % (command, header, ',\n'.join(parts))
         self.cursor.execute(stmt, tuple(params))
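For context, the pattern this hunk applies, as a standalone sketch (the function and names here are hypothetical, not the real RowBatcher API): joining the parts at the point of use keeps `parts` bound to a list for its whole lifetime, which is what pylint's redefined-variable-type check wants.

```python
# Hypothetical sketch of the join-at-use pattern; not the real RowBatcher.
def build_insert(command, header, row_templates):
    parts = []
    for template in row_templates:
        parts.append(template)
    # Joining inline avoids rebinding `parts` from list to str.
    return "%s INTO %s VALUES\n%s" % (command, header, ',\n'.join(parts))

print(build_insert("INSERT", "mytable (zoid, tid)", ["(%s, %s)"] * 2))
```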

View File

@@ -56,7 +56,7 @@ class AbstractConnectionManager(object):
         """Set the on_store_opened hook"""
         self.on_store_opened = f

-    def open(self):
+    def open(self, **kwargs):
         """Open a database connection and return (conn, cursor)."""
         raise NotImplementedError()
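A minimal sketch of the signature change above, assuming the shapes seen later in this commit: the abstract `open()` grows `**kwargs` so each database's subclass can add its own keyword arguments (transaction_mode, twophase, replica_selector, ...) while callers of the base interface stay uniform. Bodies here are stubs.

```python
class AbstractConnectionManager(object):
    def open(self, **kwargs):
        """Open a database connection and return (conn, cursor)."""
        raise NotImplementedError()

class MySQLdbConnectionManager(AbstractConnectionManager):
    def open(self, transaction_mode="ISOLATION LEVEL READ COMMITTED",
             replica_selector=None, **kwargs):
        # A real implementation would connect here; this stub only
        # shows the subclass extending the base signature.
        return None, None
```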

View File

@@ -22,6 +22,7 @@ class DatabaseIterator(object):
     def __init__(self, database_type, runner):
         self.runner = runner
+        self.database_type = database_type

     def iter_objects(self, cursor, tid):
         """Iterate over object states in a transaction.
@@ -110,7 +111,7 @@ class HistoryPreservingDatabaseIterator(DatabaseIterator):
         stmt += " AND tid <= %(max_tid)s"
         stmt += " ORDER BY tid"
         self.runner.run_script_stmt(cursor, stmt,
                                     {'min_tid': start, 'max_tid': stop})
         return self._transaction_iterator(cursor)
@@ -150,7 +151,10 @@ class HistoryFreeDatabaseIterator(DatabaseIterator):
         Skips packed transactions.
         Yields (tid, username, description, extension) for each transaction.
+
+        This always returns an empty iterable.
         """
+        # pylint:disable=unused-argument
         return []

     def iter_transactions_range(self, cursor, start=None, stop=None):
@@ -171,7 +175,7 @@ class HistoryFreeDatabaseIterator(DatabaseIterator):
         stmt += " AND tid <= %(max_tid)s"
         stmt += " ORDER BY tid"
         self.runner.run_script_stmt(cursor, stmt,
                                     {'min_tid': start, 'max_tid': stop})
         return ((tid, '', '', '', True) for (tid,) in cursor)

     def iter_object_history(self, cursor, oid):

View File

@@ -17,7 +17,7 @@ from ZODB.POSException import StorageError
 from zope.interface import Attribute
 from zope.interface import Interface

-#pylint: disable=inherit-non-class,no-method-argument
+#pylint: disable=inherit-non-class,no-method-argument,no-self-argument

 class IRelStorageAdapter(Interface):
     """A database adapter for RelStorage"""
@@ -487,7 +487,7 @@ class ITransactionControl(Interface):
         """Returns the most recent tid."""

     def add_transaction(cursor, tid, username, description, extension,
                         packed=False):
         """Add a transaction."""

     def commit_phase1(conn, cursor, tid):
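A minimal sketch, assuming zope.interface is available, of why the expanded disable line is needed: interface methods are declared without self/cls, so pylint flags them unless told otherwise. The interface name here is hypothetical.

```python
from zope.interface import Interface

#pylint: disable=inherit-non-class,no-method-argument,no-self-argument

class IExampleControl(Interface):
    """A toy interface illustrating the declaration style."""

    def get_tid(cursor):
        """Return the most recent tid."""
```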

View File

@@ -80,6 +80,7 @@ def select_driver(options=None):
 @implementer(IRelStorageAdapter)
 class MySQLAdapter(object):
     """MySQL adapter for RelStorage."""
+    # pylint:disable=too-many-instance-attributes

     def __init__(self, options=None, **params):
         if options is None:
@@ -95,28 +96,28 @@ class MySQLAdapter(object):
             driver,
             params=params,
             options=options,
         )
         self.runner = ScriptRunner()
         self.locker = MySQLLocker(
             options=options,
             lock_exceptions=driver.lock_exceptions,
         )
         self.schema = MySQLSchemaInstaller(
             connmanager=self.connmanager,
             runner=self.runner,
             keep_history=self.keep_history,
         )
         self.mover = MySQLObjectMover(
             database_type='mysql',
             options=options,
             Binary=driver.Binary,
         )
         self.connmanager.set_on_store_opened(self.mover.on_store_opened)
         self.oidallocator = MySQLOIDAllocator()
         self.txncontrol = MySQLTransactionControl(
             keep_history=self.keep_history,
             Binary=driver.Binary,
         )

         if self.keep_history:
             poll_query = "SELECT MAX(tid) FROM transaction"
@@ -128,7 +129,7 @@ class MySQLAdapter(object):
             runner=self.runner,
             revert_when_stale=options.revert_when_stale,
         )
-
+        # pylint:disable=redefined-variable-type
         if self.keep_history:
             self.packundo = MySQLHistoryPreservingPackUndo(
                 database_type='mysql',
@@ -136,11 +137,11 @@ class MySQLAdapter(object):
                 runner=self.runner,
                 locker=self.locker,
                 options=options,
             )
             self.dbiter = HistoryPreservingDatabaseIterator(
                 database_type='mysql',
                 runner=self.runner,
             )
         else:
             self.packundo = MySQLHistoryFreePackUndo(
                 database_type='mysql',
@@ -148,15 +149,15 @@ class MySQLAdapter(object):
                 runner=self.runner,
                 locker=self.locker,
                 options=options,
             )
             self.dbiter = HistoryFreeDatabaseIterator(
                 database_type='mysql',
                 runner=self.runner,
             )
         self.stats = MySQLStats(
             connmanager=self.connmanager,
         )

     def new_instance(self):
         return MySQLAdapter(options=self.options, **self._params)
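Sketch of the `new_instance()` idiom visible above, with hypothetical names: because the adapter's components are stateless, a clone can be rebuilt from the saved constructor arguments alone.

```python
class Adapter(object):
    def __init__(self, options=None, **params):
        self.options = options
        self._params = params

    def new_instance(self):
        # Rebuild a fresh, independent instance from saved arguments.
        return type(self)(options=self.options, **self._params)

a = Adapter(options=None, user='zodb')
b = a.new_instance()  # independent instance, same configuration
assert b._params == a._params
```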

View File

@@ -49,7 +49,7 @@ class MySQLdbConnectionManager(AbstractConnectionManager):
         return params

     def open(self, transaction_mode="ISOLATION LEVEL READ COMMITTED",
-             replica_selector=None):
+             replica_selector=None, **kwargs):
         """Open a database connection and return (conn, cursor)."""
         if replica_selector is None:
             replica_selector = self.replica_selector

View File

@@ -15,8 +15,8 @@
 """
 MySQL IDBDriver implementations.
 """
 from __future__ import print_function, absolute_import
-
+# pylint:disable=redefined-variable-type

 import os
 import sys
@@ -31,6 +31,8 @@ from ..interfaces import IDBDriver, IDBDriverOptions
 from .._abstract_drivers import _standard_exceptions

+from relstorage._compat import intern
+
 logger = __import__('logging').getLogger(__name__)

 database_type = 'mysql'
@@ -82,7 +84,7 @@ else: # pragma: no cover
         pymysql.err.Error,
         IOError,
         pymysql.err.DatabaseError
     )
     disconnected_exceptions += (
         IOError, # This one can escape mapping;
@@ -105,6 +107,7 @@ else: # pragma: no cover
     if hasattr(pymysql.converters, 'escape_string'):
         orig_escape_string = pymysql.converters.escape_string
+
         def escape_string(value, mapping=None):
             if isinstance(value, bytearray) and not value:
                 return value
@@ -167,9 +170,11 @@ else:
     from pymysql.err import InternalError, InterfaceError, ProgrammingError

     class UConnection(umysqldb.connections.Connection):
+        # pylint:disable=abstract-method
+        _umysql_conn = None

         def __debug_lock(self, sql, ex=False): # pragma: no cover
-            if not 'GET_LOCK' in sql:
+            if 'GET_LOCK' not in sql:
                 return

             try:
@@ -270,7 +275,7 @@ else:
             assert not self._umysql_conn.is_connected()
             self._umysql_conn.close()
             del self._umysql_conn
-            self._umysql_conn = umysql.Connection()
+            self._umysql_conn = umysql.Connection() # pylint:disable=no-member
             self._connect() # Potentially this could raise again?

         def connect(self, *_args, **_kwargs): # pragma: no cover
@@ -279,7 +284,7 @@ else:
             return self._connect()

     @implementer(IDBDriver)
-    class umysqldbDriver(PyMySQLDriver):
+    class umysqldbDriver(PyMySQLDriver): # noqa
         __name__ = 'umysqldb'
         connect = UConnection
         # umysql has a tendency to crash when given a bytearray (which
@@ -291,8 +296,8 @@ else:
     if (not preferred_driver_name
             or (preferred_driver_name == 'PyMySQL'
                 and not hasattr(sys, 'pypy_version_info'))):
         preferred_driver_name = driver.__name__
 del driver

View File

@@ -83,13 +83,15 @@ class MySQLObjectMover(AbstractObjectMover):
         cursor.execute(stmt, (tid,))

     @metricmethod_sampled
-    def update_current(self, cursor, tid):
+    def update_current(self, cursor, tid): # pylint:disable=method-hidden
         """Update the current object pointers.

         tid is the integer tid of the transaction being committed.
         """
         if not self.keep_history:
             # nothing needs to be updated
+            # Can elide this check in the future.
+            self.update_current = lambda cursor, tid: None
             return

         cursor.execute("""

View File

@@ -233,16 +233,16 @@ class MySQLSchemaInstaller(AbstractSchemaInstaller):
         self.runner.run_script(cursor, stmt)

     # Temp tables are created in a session-by-session basis
-    def _create_temp_store(self, cursor):
+    def _create_temp_store(self, _cursor):
         return

-    def _create_temp_blob_chunk(self, cursor):
+    def _create_temp_blob_chunk(self, _cursor):
         return

-    def _create_temp_pack_visit(self, cursor):
+    def _create_temp_pack_visit(self, _cursor):
         return

-    def _create_temp_undo(self, cursor):
+    def _create_temp_undo(self, _cursor):
         return

     def _init_after_create(self, cursor):

View File

@@ -23,7 +23,7 @@ from ..txncontrol import AbstractTransactionControl
 @implementer(ITransactionControl)
 class MySQLTransactionControl(AbstractTransactionControl):

-    def __init__(self, keep_history, Binary):
+    def __init__(self, keep_history, Binary): # noqa
         self.keep_history = keep_history
         self.Binary = Binary

View File

@@ -20,12 +20,6 @@ from __future__ import absolute_import
 import six
 import abc
-
-from perfmetrics import metricmethod
-from relstorage.adapters.interfaces import IOIDAllocator
-from zope.interface import implementer
-from relstorage._compat import mysql_connection
-

 @six.add_metaclass(abc.ABCMeta)
 class AbstractOIDAllocator(object):
     # All of these allocators allocate 16 OIDs at a time. In the sequence

View File

@@ -47,7 +47,7 @@ def select_driver(options=None):
 @implementer(IRelStorageAdapter)
 class OracleAdapter(object):
     """Oracle adapter for RelStorage."""
-
+    # pylint:disable=too-many-instance-attributes
     def __init__(self, user, password, dsn, commit_lock_id=0,
                  twophase=False, options=None):
         """Create an Oracle adapter.
@@ -59,6 +59,7 @@ class OracleAdapter(object):
         commit process. This is disabled by default. Even when this option
         is disabled, the ZODB two-phase commit is still in effect.
         """
+        # pylint:disable=unused-argument
         self._user = user
         self._password = password
         self._dsn = dsn
@@ -78,18 +79,18 @@ class OracleAdapter(object):
             dsn=dsn,
             twophase=twophase,
             options=options,
         )
         self.runner = CXOracleScriptRunner(driver)
         self.locker = OracleLocker(
             options=self.options,
             lock_exceptions=driver.lock_exceptions,
             inputsize_NUMBER=driver.NUMBER,
         )
         self.schema = OracleSchemaInstaller(
             connmanager=self.connmanager,
             runner=self.runner,
             keep_history=self.keep_history,
         )
         inputsizes = {
             'blobdata': driver.BLOB,
             'rawdata': driver.BINARY,
@@ -105,17 +106,17 @@ class OracleAdapter(object):
             runner=self.runner,
             Binary=driver.Binary,
             batcher_factory=lambda cursor, row_limit: OracleRowBatcher(cursor, inputsizes, row_limit),
         )
         self.mover.inputsizes = inputsizes
         self.connmanager.set_on_store_opened(self.mover.on_store_opened)
         self.oidallocator = OracleOIDAllocator(
             connmanager=self.connmanager,
         )
         self.txncontrol = OracleTransactionControl(
             keep_history=self.keep_history,
             Binary=driver.Binary,
             twophase=twophase,
         )

         if self.keep_history:
             poll_query = "SELECT MAX(tid) FROM transaction"
@@ -128,6 +129,7 @@ class OracleAdapter(object):
             revert_when_stale=options.revert_when_stale,
         )
+        # pylint:disable=redefined-variable-type
         if self.keep_history:
             self.packundo = OracleHistoryPreservingPackUndo(
                 database_type='oracle',
@@ -135,11 +137,11 @@ class OracleAdapter(object):
                 runner=self.runner,
                 locker=self.locker,
                 options=options,
             )
             self.dbiter = HistoryPreservingDatabaseIterator(
                 database_type='oracle',
                 runner=self.runner,
             )
         else:
             self.packundo = OracleHistoryFreePackUndo(
                 database_type='oracle',
@@ -147,15 +149,15 @@ class OracleAdapter(object):
                 runner=self.runner,
                 locker=self.locker,
                 options=options,
             )
             self.dbiter = HistoryFreeDatabaseIterator(
                 database_type='oracle',
                 runner=self.runner,
             )
         self.stats = OracleStats(
             connmanager=self.connmanager,
         )

     def new_instance(self):
         # This adapter and its components are stateless, so it's
@@ -166,7 +168,7 @@ class OracleAdapter(object):
             dsn=self._dsn,
             twophase=self._twophase,
             options=self.options,
         )

     def __str__(self):
         parts = [self.__class__.__name__]

View File

@@ -40,7 +40,7 @@ class OracleRowBatcher(RowBatcher):
         def replace_var(match):
             name = match.group(1)
-            new_name = '%s_%d' % (name, rownum)
+            new_name = '%s_%d' % (name, rownum) # pylint:disable=undefined-loop-variable
             if name in self.inputsizes:
                 stmt_inputsizes[new_name] = self.inputsizes[name]
             params[new_name] = row[name]
@@ -69,8 +69,8 @@ class OracleRowBatcher(RowBatcher):
             mod_row = oracle_rowvar_re.sub(replace_var, row_schema)
             parts.append("INTO %s VALUES (%s)" % (header, mod_row))

-        parts = '\n'.join(parts)
-        stmt = "INSERT ALL\n%s\nSELECT * FROM DUAL" % parts
+
+        stmt = "INSERT ALL\n%s\nSELECT * FROM DUAL" % '\n'.join(parts)
         if stmt_inputsizes:
             self.cursor.setinputsizes(**stmt_inputsizes)
         self.cursor.execute(stmt, params)

View File

@@ -68,7 +68,7 @@ class CXOracleConnectionManager(AbstractConnectionManager):

     @metricmethod
     def open(self, transaction_mode="ISOLATION LEVEL READ COMMITTED",
-             twophase=False, replica_selector=None):
+             twophase=False, replica_selector=None, **kwargs):
         """Open a database connection and return (conn, cursor)."""
         if replica_selector is None:
             replica_selector = self.replica_selector

View File

@@ -40,7 +40,7 @@ except ImportError:
 else: # pragma: no cover

     @implementer(IDBDriver)
-    class cx_OracleDriver(object):
+    class cx_OracleDriver(object): # noqa
         __name__ = 'cx_Oracle'
         disconnected_exceptions, close_exceptions, lock_exceptions = _standard_exceptions(cx_Oracle)
         disconnected_exceptions += (cx_Oracle.DatabaseError,)

View File

@@ -37,7 +37,8 @@ def _to_oracle_ordered(query_tuple):
 @implementer(IObjectMover)
 class OracleObjectMover(AbstractObjectMover):

-    inputsizes = ()
+    # This is assigned to by the adapter.
+    inputsizes = None

     _move_from_temp_hp_insert_query = format_to_named(AbstractObjectMover._move_from_temp_hp_insert_query)
     _move_from_temp_hf_insert_query = format_to_named(AbstractObjectMover._move_from_temp_hf_insert_query)
@@ -239,7 +240,7 @@ class OracleObjectMover(AbstractObjectMover):
                 state = :blobdata
             WHERE zoid = :oid
         """
-        cursor.setinputsizes(blobdata=self.inputsizes['blobdata'])
+        cursor.setinputsizes(blobdata=self.inputsizes['blobdata']) # pylint:disable=unsubscriptable-object
         cursor.execute(stmt, oid=oid, prev_tid=prev_tid,
                        md5sum=md5sum, blobdata=self.Binary(data))

@@ -264,7 +265,7 @@ class OracleObjectMover(AbstractObjectMover):
         bytecount = 0
         # Current versions of cx_Oracle only support offsets up
         # to sys.maxint or 4GB, whichever comes first.
-        maxsize = min(sys.maxsize, 1<<32)
+        maxsize = min(sys.maxsize, 1 << 32)
         try:
             cursor.execute(stmt, (oid, tid))
             while True:
@@ -279,9 +280,11 @@ class OracleObjectMover(AbstractObjectMover):
                     f = open(filename, 'wb')
                 # round off the chunk-size to be a multiple of the oracle
                 # blob chunk size to maximize performance
-                read_chunk_size = int(max(round(
-                    1.0 * self.blob_chunk_size / blob.getchunksize()), 1) *
-                    blob.getchunksize())
+                read_chunk_size = int(
+                    max(
+                        round(1.0 * self.blob_chunk_size / blob.getchunksize()),
+                        1)
+                    * blob.getchunksize())
                 offset = 1 # Oracle still uses 1-based indexing.
                 reader = iter(lambda: blob.read(offset, read_chunk_size), b'')
                 for read_chunk in reader:
@@ -307,7 +310,7 @@ class OracleObjectMover(AbstractObjectMover):
     # Current versions of cx_Oracle only support offsets up
     # to sys.maxint or 4GB, whichever comes first. We divide up our
     # upload into chunks within this limit.
-    oracle_blob_chunk_maxsize = min(sys.maxsize, 1<<32)
+    oracle_blob_chunk_maxsize = min(sys.maxsize, 1 << 32)

     @metricmethod_sampled
     def upload_blob(self, cursor, oid, tid, filename):
@@ -315,6 +318,7 @@ class OracleObjectMover(AbstractObjectMover):

         If serial is None, upload to the temporary table.
         """
+        # pylint:disable=too-many-locals
         if tid is not None:
             if self.keep_history:
                 delete_stmt = """

View File

@@ -146,9 +146,9 @@ class OracleSchemaInstaller(AbstractSchemaInstaller):
     def prepare(self):
         """Create the database schema if it does not already exist."""
-        def callback(conn, cursor):
+        def callback(_conn, cursor):
             tables = self.list_tables(cursor)
-            if not 'object_state' in tables:
+            if 'object_state' not in tables:
                 self.create(cursor)
             else:
                 self.check_compatibility(cursor, tables)

View File

@@ -33,6 +33,7 @@ def format_to_named(stmt):
         return _stmt_cache[stmt]
     except KeyError:
         matches = []
+
         def replace(_match):
             matches.append(None)
             return ':%d' % len(matches)
@@ -74,7 +75,7 @@ class OracleScriptRunner(ScriptRunner):
                     params[k] = v
             else:
                 stmt = generic_stmt % self.script_vars
-                params = ()
+                params = () # pylint:disable=redefined-variable-type

             try:
                 cursor.execute(stmt, params)
@@ -116,6 +117,7 @@ class CXOracleScriptRunner(OracleScriptRunner):
         error indicating truncation. The run_lob_stmt() method works
         around this.
         """
+        # pylint:disable=unused-argument
         if defaultType == self.driver.BLOB:
             # Default size for BLOB is 4, we want the whole blob inline.
             # Typical chunk size is 8132, we choose a multiple - 32528
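A simplified sketch of what `format_to_named` does, pieced together from the hunk above (not the module's exact code): rewrite positional `%s` placeholders into Oracle-style `:1, :2` names, caching the result per statement so the regex work runs once.

```python
import re

_stmt_cache = {}

def format_to_named(stmt):
    try:
        return _stmt_cache[stmt]
    except KeyError:
        matches = []

        def replace(_match):
            # Number each placeholder in encounter order.
            matches.append(None)
            return ':%d' % len(matches)

        named = re.sub('%s', replace, stmt)
        _stmt_cache[stmt] = named
        return named

print(format_to_named("SELECT zoid FROM t WHERE a = %s AND b = %s"))
# -> SELECT zoid FROM t WHERE a = :1 AND b = :2
```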

View File

@@ -23,26 +23,26 @@ class OracleStats(AbstractStats):
         """Returns the number of objects in the database"""
         # The tests expect an exact number, but the code below generates
         # an estimate, so this is disabled for now.
-        if True:
-            return 0
-        else:
+        return 0
+
+    def _estimate_object_count(self):
         conn, cursor = self.connmanager.open(
             self.connmanager.isolation_read_only)
         try:
             stmt = """
             SELECT NUM_ROWS
             FROM USER_TABLES
             WHERE TABLE_NAME = 'CURRENT_OBJECT'
             """
             cursor.execute(stmt)
             res = cursor.fetchone()[0]
             if res is None:
                 res = 0
             else:
                 res = int(res)
             return res
         finally:
             self.connmanager.close(conn, cursor)

     def get_db_size(self):
         """Returns the approximate size of the database in bytes"""

View File

@@ -14,6 +14,8 @@
 """Pack/Undo implementations.
 """
+# pylint:disable=too-many-lines,unused-argument
+
 from ZODB.POSException import UndoError
 from ZODB.utils import u64
 from perfmetrics import metricmethod
@@ -34,6 +36,8 @@ class PackUndo(object):
     verify_sane_database = False

+    _script_choose_pack_transaction = None
+
     def __init__(self, database_type, connmanager, runner, locker, options):
         self.database_type = database_type
         self.connmanager = connmanager
@@ -354,7 +358,7 @@ class HistoryPreservingPackUndo(PackUndo):
             SELECT zoid, prev_tid FROM temp_undo
             """
         self.runner.run_script(cursor, stmt,
                                {'undo_tid': undo_tid, 'self_tid': self_tid})
         res = list(cursor)

         stmt = self._script_reset_temp_undo
@@ -420,7 +424,7 @@ class HistoryPreservingPackUndo(PackUndo):
                     state = db_binary_to_bytes(state)
                     if hasattr(state, 'read'):
                         # Oracle
-                        state = state.read()
+                        state = state.read() # pylint:disable=no-member
                     if state:
                         assert isinstance(state, bytes), type(state) # PY3: used to be str(state)
                         from_count += 1
@@ -429,8 +433,8 @@ class HistoryPreservingPackUndo(PackUndo):
                         except:
                             log.error(
                                 "pre_pack: can't unpickle "
-                                "object %d in transaction %d; state length = %d" % (
-                                from_oid, tid, len(state)))
+                                "object %d in transaction %d; state length = %d",
+                                from_oid, tid, len(state))
                             raise
                         for to_oid in to_oids:
                             add_rows.append((from_oid, tid, to_oid))
@@ -456,7 +460,7 @@ class HistoryPreservingPackUndo(PackUndo):
         to_count = len(add_rows)
         log.debug("pre_pack: transaction %d: has %d reference(s) "
                   "from %d object(s)", tid, to_count, from_count)
         return to_count

     @metricmethod
@@ -519,7 +523,7 @@ class HistoryPreservingPackUndo(PackUndo):
                     AND tid <= %(pack_tid)s
                 """
                 self.runner.run_script_stmt(
-                    cursor, stmt, {'pack_tid':pack_tid})
+                    cursor, stmt, {'pack_tid': pack_tid})
                 to_remove += cursor.rowcount

             log.info("pre_pack: enumerating transactions to pack")
@@ -533,7 +537,7 @@ class HistoryPreservingPackUndo(PackUndo):
             cursor.execute(stmt)

             log.info("pre_pack: will remove %d object state(s)",
                      to_remove)

         except:
             log.exception("pre_pack: failed")
@@ -645,10 +649,11 @@ class HistoryPreservingPackUndo(PackUndo):
     @metricmethod
     def pack(self, pack_tid, sleep=None, packed_func=None):
         """Pack. Requires the information provided by pre_pack."""
+        # pylint:disable=too-many-locals
         # Read committed mode is sufficient.
         conn, cursor = self.connmanager.open()
-        try:
+        try: # pylint:disable=too-many-nested-blocks
             try:
                 stmt = """
                 SELECT transaction.tid,
@@ -697,9 +702,9 @@ class HistoryPreservingPackUndo(PackUndo):
                         statecounter += len(packed_list)
                         if counter >= lastreport + reportstep:
                             log.info("pack: packed %d (%.1f%%) transaction(s), "
                                      "affecting %d states",
-                                     counter, counter/float(total)*100,
+                                     counter, counter / float(total) * 100,
                                      statecounter)
                             lastreport = counter / reportstep * reportstep
                         del packed_list[:]
                         self.locker.release_commit_lock(cursor)
@@ -726,7 +731,7 @@ class HistoryPreservingPackUndo(PackUndo):
     def _pack_transaction(self, cursor, pack_tid, tid, packed,
                           has_removable, packed_list):
         """Pack one transaction. Requires populated pack tables."""
         log.debug("pack: transaction %d: packing", tid)
         removed_objects = 0
@@ -748,7 +753,7 @@ class HistoryPreservingPackUndo(PackUndo):
                 AND tid <= %(pack_tid)s
             """
             self.runner.run_script_stmt(cursor, stmt,
                                         {'pack_tid': pack_tid, 'tid': tid})

             stmt = """
             SELECT pack_state.zoid
@@ -932,7 +937,7 @@ class HistoryFreePackUndo(PackUndo):
                     state = db_binary_to_bytes(state)
                     if hasattr(state, 'read'):
                         # Oracle
-                        state = state.read()
+                        state = state.read() # pylint:disable=no-member
                     add_objects.append((from_oid, tid))
                     if state:
                         assert isinstance(state, bytes), type(state)
@@ -1046,9 +1051,10 @@ class HistoryFreePackUndo(PackUndo):

         Requires the information provided by pre_pack.
         """
+        # pylint:disable=too-many-locals
         # Read committed mode is sufficient.
         conn, cursor = self.connmanager.open()
-        try:
+        try: # pylint:disable=too-many-nested-blocks
             try:
                 stmt = """
                 SELECT zoid, keep_tid
@@ -1090,7 +1096,7 @@ class HistoryFreePackUndo(PackUndo):
                     counter = total - len(to_remove)
                     if counter >= lastreport + reportstep:
                         log.info("pack: removed %d (%.1f%%) state(s)",
-                                 counter, counter/float(total)*100)
+                                 counter, counter / float(total) * 100)
                         lastreport = counter / reportstep * reportstep
                 self.locker.release_commit_lock(cursor)
                 self._pause_pack_until_lock(cursor, sleep)

View File

@@ -46,16 +46,14 @@ class Poller(object):
         that the changes are too complex to list. new_polled_tid can be
         0 if there is no data in the database.
         """
+        # pylint:disable=unused-argument
         # find out the tid of the most recent transaction.
         cursor.execute(self.poll_query)
         rows = list(cursor)
-        if not rows:
+        if not rows or not rows[0][0]:
             # No data.
             return None, 0
         new_polled_tid = rows[0][0]
-        if not new_polled_tid:
-            # No data.
-            return None, 0

         if prev_polled_tid is None:
             # This is the first time the connection has polled.
@@ -65,54 +63,7 @@ class Poller(object):
             # No transactions have been committed since prev_polled_tid.
             return (), new_polled_tid

-        elif new_polled_tid > prev_polled_tid:
-            # New transaction(s) have been added.
-            if self.keep_history:
-                # If the previously polled transaction no longer exists,
-                # the cache is too old and needs to be cleared.
-                # XXX Do we actually need to detect this condition? I think
-                # if we delete this block of code, all the unreachable
-                # objects will be garbage collected anyway. So, as a test,
-                # there is no equivalent of this block of code for
-                # history-free storage. If something goes wrong, then we'll
-                # know there's some other edge condition we have to account
-                # for.
-                stmt = "SELECT 1 FROM transaction WHERE tid = %(tid)s"
-                cursor.execute(
-                    intern(stmt % self.runner.script_vars),
-                    {'tid': prev_polled_tid})
-                rows = cursor.fetchall()
-                if not rows:
-                    # Transaction not found; perhaps it has been packed.
-                    # The connection cache should be cleared.
-                    return None, new_polled_tid
-
-            # Get the list of changed OIDs and return it.
-            if self.keep_history:
-                stmt = """
-                SELECT zoid, tid
-                FROM current_object
-                WHERE tid > %(tid)s
-                """
-            else:
-                stmt = """
-                SELECT zoid, tid
-                FROM object_state
-                WHERE tid > %(tid)s
-                """
-            params = {'tid': prev_polled_tid}
-            if ignore_tid is not None:
-                stmt += " AND tid != %(self_tid)s"
-                params['self_tid'] = ignore_tid
-            stmt = intern(stmt % self.runner.script_vars)
-            cursor.execute(stmt, params)
-            changes = cursor.fetchall()
-            return changes, new_polled_tid
-        else:
+        if new_polled_tid <= prev_polled_tid:
             # The database connection is stale. This can happen after
             # reading an asynchronous slave that is not fully up to date.
             # (It may also suggest that transaction IDs are not being created
@@ -127,14 +78,61 @@ class Poller(object):
                 # We have to invalidate the whole cPickleCache, otherwise
                 # the cache would be inconsistent with the reverted state.
                 return None, new_polled_tid
-        else:
+
             # This client never wants to revert to stale data, so
             # raise ReadConflictError to trigger a retry.
             # We're probably just waiting for async replication
             # to catch up, so retrying could do the trick.
             raise ReadConflictError(
                 "The database connection is stale: new_polled_tid=%d, "
                 "prev_polled_tid=%d." % (new_polled_tid, prev_polled_tid))
+
+        # New transaction(s) have been added.
+        if self.keep_history:
+            # If the previously polled transaction no longer exists,
+            # the cache is too old and needs to be cleared.
+            # XXX Do we actually need to detect this condition? I think
+            # if we delete this block of code, all the unreachable
+            # objects will be garbage collected anyway. So, as a test,
+            # there is no equivalent of this block of code for
+            # history-free storage. If something goes wrong, then we'll
+            # know there's some other edge condition we have to account
+            # for.
+            stmt = "SELECT 1 FROM transaction WHERE tid = %(tid)s"
+            cursor.execute(
+                intern(stmt % self.runner.script_vars),
+                {'tid': prev_polled_tid})
+            rows = cursor.fetchall()
+            if not rows:
+                # Transaction not found; perhaps it has been packed.
+                # The connection cache should be cleared.
+                return None, new_polled_tid
+
+        # Get the list of changed OIDs and return it.
+        if self.keep_history:
+            stmt = """
+            SELECT zoid, tid
+            FROM current_object
+            WHERE tid > %(tid)s
+            """
+        else:
+            stmt = """
+            SELECT zoid, tid
+            FROM object_state
+            WHERE tid > %(tid)s
+            """
+        params = {'tid': prev_polled_tid}
+        if ignore_tid is not None:
+            stmt += " AND tid != %(self_tid)s"
+            params['self_tid'] = ignore_tid
+        stmt = intern(stmt % self.runner.script_vars)
+        cursor.execute(stmt, params)
+        changes = cursor.fetchall()
+        return changes, new_polled_tid

     def list_changes(self, cursor, after_tid, last_tid):
         """Return the (oid, tid) values changed in a range of transactions.

View File

@@ -47,6 +47,7 @@ def select_driver(options=None):
 class PostgreSQLAdapter(object):
     """PostgreSQL adapter for RelStorage."""
+    # pylint:disable=too-many-instance-attributes

     def __init__(self, dsn='', options=None):
         # options is a relstorage.options.Options or None
         self._dsn = dsn
@@ -63,32 +64,32 @@ class PostgreSQLAdapter(object):
             driver,
             dsn=dsn,
             options=options,
         )
         self.runner = ScriptRunner()
         self.locker = PostgreSQLLocker(
             options=options,
             lock_exceptions=driver.lock_exceptions,
             version_detector=self.version_detector,
         )
         self.schema = PostgreSQLSchemaInstaller(
             connmanager=self.connmanager,
             runner=self.runner,
             locker=self.locker,
             keep_history=self.keep_history,
         )
         self.mover = PostgreSQLObjectMover(
             database_type='postgresql',
             options=options,
             runner=self.runner,
             version_detector=self.version_detector,
             Binary=driver.Binary,
         )
         self.connmanager.set_on_store_opened(self.mover.on_store_opened)
         self.oidallocator = PostgreSQLOIDAllocator()
         self.txncontrol = PostgreSQLTransactionControl(
             keep_history=self.keep_history,
             driver=driver,
         )

         self.poller = Poller(
             poll_query="EXECUTE get_latest_tid",
@@ -96,7 +97,7 @@ class PostgreSQLAdapter(object):
             runner=self.runner,
             revert_when_stale=options.revert_when_stale,
         )
-
+        # pylint:disable=redefined-variable-type
         if self.keep_history:
             self.packundo = HistoryPreservingPackUndo(
                 database_type='postgresql',
@@ -104,11 +105,11 @@ class PostgreSQLAdapter(object):
                 runner=self.runner,
                 locker=self.locker,
                 options=options,
             )
             self.dbiter = HistoryPreservingDatabaseIterator(
                 database_type='postgresql',
                 runner=self.runner,
             )
         else:
             self.packundo = HistoryFreePackUndo(
                 database_type='postgresql',
@@ -116,15 +117,15 @@ class PostgreSQLAdapter(object):
                 runner=self.runner,
                 locker=self.locker,
                 options=options,
             )
             self.dbiter = HistoryFreeDatabaseIterator(
                 database_type='postgresql',
                 runner=self.runner,
             )
         self.stats = PostgreSQLStats(
             connmanager=self.connmanager,
         )

     def new_instance(self):
         inst = type(self)(dsn=self._dsn, options=self.options)

View File

@@ -48,7 +48,7 @@ class Psycopg2ConnectionManager(AbstractConnectionManager):
         return dsn

     @metricmethod
-    def open(self, isolation=None, replica_selector=None):
+    def open(self, isolation=None, replica_selector=None, **kwargs):
         """Open a database connection and return (conn, cursor)."""
         if isolation is None:
             isolation = self.isolation_read_committed

View File

@@ -17,7 +17,7 @@ PostgreSQL IDBDriver implementations.
 """
 from __future__ import print_function, absolute_import
-
+# pylint:disable=redefined-variable-type

 import sys
 import os
@@ -39,6 +39,7 @@ def _create_connection(mod):
     class Psycopg2Connection(mod.extensions.connection):
         # The replica attribute holds the name of the replica this
         # connection is bound to.
+        # pylint:disable=slots-on-old-class
         __slots__ = ('replica',)

     return Psycopg2Connection
@@ -163,6 +164,7 @@ else:
     class _ReadBlob(object):
         closed = False
         fetch_size = 1024 * 1024 * 9
+
         def __init__(self, conn, oid):
             self._cursor = conn.cursor()
             self.oid = oid
@@ -243,7 +245,7 @@ else:
                 key = 'database'
             kwds[key] = value
             conn = self._connect(**kwds)
-            assert conn.__class__ is _Connection.__base__
+            assert conn.__class__ is _Connection.__base__ # pylint:disable=no-member
             conn.__class__ = _Connection
             return _ConnWrapper(conn) if self._wrap else conn

View File

@@ -19,6 +19,7 @@ from relstorage.adapters.interfaces import IObjectMover
 from zope.interface import implementer

 import os
+import functools

 from relstorage._compat import xrange
@@ -110,7 +111,7 @@ class PostgreSQLObjectMover(AbstractObjectMover):
                     if f is None:
                         f = open(filename, 'ab') # Append, chunk 0 was an export
-                    reader = iter(lambda: blob.read(read_chunk_size), b'')
+                    reader = iter(functools.partial(blob.read, read_chunk_size), b'')
                     for read_chunk in reader:
                         f.write(read_chunk)
                         bytecount += len(read_chunk)
@@ -127,7 +128,7 @@ class PostgreSQLObjectMover(AbstractObjectMover):

     # PostgreSQL < 9.3 only supports up to 2GB of data per BLOB.
     # Even above that, we can only use larger blobs on 64-bit builds.
-    postgresql_blob_chunk_maxsize = 1<<31
+    postgresql_blob_chunk_maxsize = 1 << 31

     @metricmethod_sampled
     def upload_blob(self, cursor, oid, tid, filename):
@@ -135,6 +136,7 @@ class PostgreSQLObjectMover(AbstractObjectMover):

         If serial is None, upload to the temporary table.
         """
+        # pylint:disable=too-many-branches,too-many-locals
         if tid is not None:
             if self.keep_history:
                 delete_stmt = """
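The two-argument `iter()` seen in the reader change above: call the callable repeatedly until it returns the sentinel. `functools.partial` replaces the lambda without changing behavior. A self-contained illustration:

```python
import functools
import io

src = io.BytesIO(b'abcdefghij')
# Read 4-byte chunks until read() returns the empty-bytes sentinel.
for chunk in iter(functools.partial(src.read, 4), b''):
    print(chunk)  # b'abcd', then b'efgh', then b'ij'
```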

View File

@@ -97,7 +97,7 @@ class PostgreSQLSchemaInstaller(AbstractSchemaInstaller):
         """Create the database schema if it does not already exist."""
         def callback(_conn, cursor):
             tables = self.list_tables(cursor)
-            if not 'object_state' in tables:
+            if 'object_state' not in tables:
                 self.create(cursor)
             else:
                 self.check_compatibility(cursor, tables)
@@ -111,7 +111,7 @@ class PostgreSQLSchemaInstaller(AbstractSchemaInstaller):
                     "installing the stored procedures.")

             triggers = self.list_triggers(cursor)
-            if not 'blob_chunk_delete' in triggers:
+            if 'blob_chunk_delete' not in triggers:
                 self.install_triggers(cursor)

         self.connmanager.open_and_call(callback)

View File

@@ -28,7 +28,7 @@ class PostgreSQLStats(AbstractStats):

     def get_db_size(self):
         """Returns the approximate size of the database in bytes"""
-        def callback(conn, cursor):
+        def callback(_conn, cursor):
             cursor.execute("SELECT pg_database_size(current_database())")
             return cursor.fetchone()[0]
         return self.connmanager.open_and_call(callback)

View File

@@ -64,7 +64,7 @@ class PostgreSQLTransactionControl(AbstractTransactionControl):
             %s, %s,
             %s)
         """
-        Binary = self._Binary
+        binary = self._Binary
         cursor.execute(stmt, (tid, packed,
-                              Binary(username), Binary(description),
-                              Binary(extension)))
+                              binary(username), binary(description),
+                              binary(extension)))

View File

@@ -22,6 +22,9 @@ import time

 @implementer(IReplicaSelector)
 class ReplicaSelector(object):
+    # The time at which we checked the config
+    _config_checked = 0
+
     def __init__(self, fn, replica_timeout):
         self.replica_conf = fn
         self.replica_timeout = replica_timeout
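The pattern from the hunk above, sketched with a hypothetical helper: declaring the attribute at class level gives it a default that exists before `__init__` ever assigns it, which also lets static analysis see it.

```python
import time

class ReplicaSelector(object):
    # The time at which we checked the config
    _config_checked = 0

    def _mark_checked(self):  # hypothetical helper for illustration
        self._config_checked = time.time()
```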

View File

@ -21,8 +21,6 @@ import logging
from ZODB.POSException import StorageError from ZODB.POSException import StorageError
import re
log = logging.getLogger("relstorage") log = logging.getLogger("relstorage")
@ -247,16 +245,16 @@ class AbstractSchemaInstaller(object):
"""Create the database schema if it does not already exist.""" """Create the database schema if it does not already exist."""
# XXX: We can generalize this to handle triggers, procs, etc, # XXX: We can generalize this to handle triggers, procs, etc,
# to make subclasses have an easier time. # to make subclasses have an easier time.
def callback(conn, cursor): def callback(_conn, cursor):
tables = self.list_tables(cursor) tables = self.list_tables(cursor)
if not 'object_state' in tables: if 'object_state' not in tables:
self.create(cursor) self.create(cursor)
else: else:
self.check_compatibility(cursor, tables) self.check_compatibility(cursor, tables)
self.update_schema(cursor, tables) self.update_schema(cursor, tables)
self.connmanager.open_and_call(callback) self.connmanager.open_and_call(callback)
def check_compatibility(self, cursor, tables): def check_compatibility(self, cursor, tables): # pylint:disable=unused-argument
if self.keep_history: if self.keep_history:
if 'transaction' not in tables and 'current_object' not in tables: if 'transaction' not in tables and 'current_object' not in tables:
raise StorageError( raise StorageError(
@ -271,7 +269,7 @@ class AbstractSchemaInstaller(object):
"can not connect to a history-preserving database. " "can not connect to a history-preserving database. "
"If you need to convert, use the zodbconvert utility." "If you need to convert, use the zodbconvert utility."
) )
if not 'blob_chunk' in tables: if 'blob_chunk' not in tables:
raise StorageError( raise StorageError(
"Schema mismatch; please create the blob_chunk tables." "Schema mismatch; please create the blob_chunk tables."
"See migration instructions for RelStorage 1.5." "See migration instructions for RelStorage 1.5."
@ -320,7 +318,7 @@ class AbstractSchemaInstaller(object):
def drop_all(self): def drop_all(self):
"""Drop all tables and sequences.""" """Drop all tables and sequences."""
def callback(conn, cursor): def callback(_conn, cursor):
existent = set(self.list_tables(cursor)) existent = set(self.list_tables(cursor))
todo = list(self.all_tables) todo = list(self.all_tables)
todo.reverse() todo.reverse()
View File
@ -1 +1 @@
# Tests package.
View File
@ -28,7 +28,7 @@ class RowBatcherTests(unittest.TestCase):
self.assertEqual(batcher.rows_added, 1) self.assertEqual(batcher.rows_added, 1)
self.assertEqual(batcher.size_added, 0) self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.deletes, self.assertEqual(batcher.deletes,
{('mytable', ('id',)): set([("2",)])}) {('mytable', ('id',)): set([("2",)])})
def test_delete_multiple_column(self): def test_delete_multiple_column(self):
cursor = MockCursor() cursor = MockCursor()
@ -38,7 +38,7 @@ class RowBatcherTests(unittest.TestCase):
self.assertEqual(batcher.rows_added, 1) self.assertEqual(batcher.rows_added, 1)
self.assertEqual(batcher.size_added, 0) self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.deletes, self.assertEqual(batcher.deletes,
{('mytable', ('id', 'tid')): set([("2", "10")])}) {('mytable', ('id', 'tid')): set([("2", "10")])})
def test_delete_auto_flush(self): def test_delete_auto_flush(self):
cursor = MockCursor() cursor = MockCursor()
@ -47,7 +47,7 @@ class RowBatcherTests(unittest.TestCase):
batcher.delete_from("mytable", id=2) batcher.delete_from("mytable", id=2)
batcher.delete_from("mytable", id=1) batcher.delete_from("mytable", id=1)
self.assertEqual(cursor.executed, self.assertEqual(cursor.executed,
[('DELETE FROM mytable WHERE id IN (1,2)', None)]) [('DELETE FROM mytable WHERE id IN (1,2)', None)])
self.assertEqual(batcher.rows_added, 0) self.assertEqual(batcher.rows_added, 0)
self.assertEqual(batcher.size_added, 0) self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.deletes, {}) self.assertEqual(batcher.deletes, {})
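These tests pin the flush wire format; the accumulate-then-flush behavior they exercise looks roughly like this toy model (not the real RowBatcher API):

    class TinyDeleteBatcher(object):
        # toy model: collect row ids, flush one DELETE per row_limit rows
        def __init__(self, cursor, row_limit):
            self.cursor = cursor
            self.row_limit = row_limit
            self.pending = set()

        def delete_from(self, table, rowid):
            self.pending.add(str(rowid))
            if len(self.pending) >= self.row_limit:
                self.flush(table)

        def flush(self, table):
            ids = ','.join(sorted(self.pending))
            self.cursor.execute('DELETE FROM %s WHERE id IN (%s)'
                                % (table, ids))
            self.pending.clear()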
@ -130,12 +130,14 @@ class RowBatcherTests(unittest.TestCase):
rowkey=2, rowkey=2,
size=5, size=5,
) )
self.assertEqual(cursor.executed, [( self.assertEqual(
'INSERT INTO mytable (id, name) VALUES\n' cursor.executed,
'(%s, id || %s),\n' [(
'(%s, id || %s)', 'INSERT INTO mytable (id, name) VALUES\n'
(1, 'a', 2, 'B')) '(%s, id || %s),\n'
]) '(%s, id || %s)',
(1, 'a', 2, 'B'))
])
self.assertEqual(batcher.rows_added, 0) self.assertEqual(batcher.rows_added, 0)
self.assertEqual(batcher.size_added, 0) self.assertEqual(batcher.size_added, 0)
self.assertEqual(batcher.inserts, {}) self.assertEqual(batcher.inserts, {})
@ -200,13 +202,15 @@ class OracleRowBatcherTests(unittest.TestCase):
) )
self.assertEqual(cursor.executed, []) self.assertEqual(cursor.executed, [])
batcher.flush() batcher.flush()
self.assertEqual(cursor.executed, [( self.assertEqual(
'INSERT ALL\n' cursor.executed,
'INTO mytable (id, name) VALUES (:id_0, :id_0 || :name_0)\n' [(
'INTO mytable (id, name) VALUES (:id_1, :id_1 || :name_1)\n' 'INSERT ALL\n'
'SELECT * FROM DUAL', 'INTO mytable (id, name) VALUES (:id_0, :id_0 || :name_0)\n'
{'id_0': 1, 'id_1': 2, 'name_1': 'b', 'name_0': 'a'}) 'INTO mytable (id, name) VALUES (:id_1, :id_1 || :name_1)\n'
]) 'SELECT * FROM DUAL',
{'id_0': 1, 'id_1': 2, 'name_1': 'b', 'name_0': 'a'})
])
def test_insert_one_raw_row(self): def test_insert_one_raw_row(self):
class MockRawType(object): class MockRawType(object):
@ -223,7 +227,7 @@ class OracleRowBatcherTests(unittest.TestCase):
batcher.flush() batcher.flush()
self.assertEqual(cursor.executed, [ self.assertEqual(cursor.executed, [
('INSERT INTO mytable (id, data) VALUES (:id, :rawdata)', ('INSERT INTO mytable (id, data) VALUES (:id, :rawdata)',
{'id': 1, 'rawdata': 'xyz'}) {'id': 1, 'rawdata': 'xyz'})
]) ])
self.assertEqual(cursor.inputsizes, {'rawdata': MockRawType}) self.assertEqual(cursor.inputsizes, {'rawdata': MockRawType})
@ -247,13 +251,15 @@ class OracleRowBatcherTests(unittest.TestCase):
size=3, size=3,
) )
batcher.flush() batcher.flush()
self.assertEqual(cursor.executed, [( self.assertEqual(
'INSERT ALL\n' cursor.executed,
'INTO mytable (id, data) VALUES (:id_0, :rawdata_0)\n' [(
'INTO mytable (id, data) VALUES (:id_1, :rawdata_1)\n' 'INSERT ALL\n'
'SELECT * FROM DUAL', 'INTO mytable (id, data) VALUES (:id_0, :rawdata_0)\n'
{'id_0': 1, 'id_1': 2, 'rawdata_0': 'xyz', 'rawdata_1': 'abc'}) 'INTO mytable (id, data) VALUES (:id_1, :rawdata_1)\n'
]) 'SELECT * FROM DUAL',
{'id_0': 1, 'id_1': 2, 'rawdata_0': 'xyz', 'rawdata_1': 'abc'})
])
self.assertEqual(cursor.inputsizes, { self.assertEqual(cursor.inputsizes, {
'rawdata_0': MockRawType, 'rawdata_0': MockRawType,
'rawdata_1': MockRawType, 'rawdata_1': MockRawType,
View File
@ -46,7 +46,7 @@ class AbstractConnectionManagerTests(unittest.TestCase):
self.assertTrue(conn.rolled_back) self.assertTrue(conn.rolled_back)
conn.replica = 'other' conn.replica = 'other'
self.assertRaises(ReplicaClosedException, self.assertRaises(ReplicaClosedException,
cm.restart_load, conn, MockCursor()) cm.restart_load, conn, MockCursor())
conn = MockConnection() conn = MockConnection()
conn.replica = 'localhost' conn.replica = 'localhost'
@ -54,16 +54,16 @@ class AbstractConnectionManagerTests(unittest.TestCase):
self.assertTrue(conn.rolled_back) self.assertTrue(conn.rolled_back)
conn.replica = 'other' conn.replica = 'other'
self.assertRaises(ReplicaClosedException, self.assertRaises(ReplicaClosedException,
cm.restart_store, conn, MockCursor()) cm.restart_store, conn, MockCursor())
def test_with_ro_replica_conf(self): def test_with_ro_replica_conf(self):
import os import os
import relstorage.tests import relstorage.tests
tests_dir = relstorage.tests.__file__ tests_dir = relstorage.tests.__file__
replica_conf = os.path.join(os.path.dirname(tests_dir), replica_conf = os.path.join(os.path.dirname(tests_dir),
'replicas.conf') 'replicas.conf')
ro_replica_conf = os.path.join(os.path.dirname(tests_dir), ro_replica_conf = os.path.join(os.path.dirname(tests_dir),
'ro_replicas.conf') 'ro_replicas.conf')
options = MockOptions(replica_conf, ro_replica_conf) options = MockOptions(replica_conf, ro_replica_conf)
from relstorage.adapters.connmanager \ from relstorage.adapters.connmanager \
@ -77,7 +77,7 @@ class AbstractConnectionManagerTests(unittest.TestCase):
self.assertTrue(conn.rolled_back) self.assertTrue(conn.rolled_back)
conn.replica = 'other' conn.replica = 'other'
self.assertRaises(ReplicaClosedException, self.assertRaises(ReplicaClosedException,
cm.restart_load, conn, MockCursor()) cm.restart_load, conn, MockCursor())
class MockOptions(object): class MockOptions(object):
@ -87,6 +87,10 @@ class MockOptions(object):
self.replica_timeout = 600.0 self.replica_timeout = 600.0
class MockConnection(object): class MockConnection(object):
rolled_back = False
closed = False
replica = None
def rollback(self): def rollback(self):
self.rolled_back = True self.rolled_back = True
@ -94,6 +98,8 @@ class MockConnection(object):
self.closed = True self.closed = True
class MockCursor(object): class MockCursor(object):
closed = False
def close(self): def close(self):
self.closed = True self.closed = True
View File
@ -33,7 +33,7 @@ class ReplicaSelectorTests(unittest.TestCase):
from relstorage.adapters.replica import ReplicaSelector from relstorage.adapters.replica import ReplicaSelector
rs = ReplicaSelector(self.fn, 600.0) rs = ReplicaSelector(self.fn, 600.0)
self.assertEqual(rs._replicas, self.assertEqual(rs._replicas,
['example.com:1234', 'localhost:4321', 'localhost:9999']) ['example.com:1234', 'localhost:4321', 'localhost:9999'])
def test__read_config_empty(self): def test__read_config_empty(self):
from relstorage.adapters.replica import ReplicaSelector from relstorage.adapters.replica import ReplicaSelector
View File
@ -17,7 +17,7 @@ from tempfile import SpooledTemporaryFile
class AutoTemporaryFile(SpooledTemporaryFile): class AutoTemporaryFile(SpooledTemporaryFile):
# Exists for BWC and to preserve the default threshold # Exists for BWC and to preserve the default threshold
def __init__(self, threshold=10*1024*1024, **kw): def __init__(self, threshold=10 * 1024 * 1024, **kw):
# STF uses >, the old ATF used >= for the max_size check # STF uses >, the old ATF used >= for the max_size check
SpooledTemporaryFile.__init__(self, max_size=threshold - 1, **kw) SpooledTemporaryFile.__init__(self, max_size=threshold - 1, **kw)
View File
@ -323,6 +323,7 @@ class BlobCacheChecker(object):
self.check(True) self.check(True)
_check_blob_size_thread = None _check_blob_size_thread = None
def check(self, check_loaded=False): def check(self, check_loaded=False):
"""If appropriate, run blob cache cleanup in another thread.""" """If appropriate, run blob cache cleanup in another thread."""
if self._blob_cache_size is None: if self._blob_cache_size is None:
@ -341,7 +342,7 @@ class BlobCacheChecker(object):
check_blob_size_thread = threading.Thread( check_blob_size_thread = threading.Thread(
target=_check_blob_cache_size, target=_check_blob_cache_size,
args=(self.blob_dir, target), args=(self.blob_dir, target),
) )
check_blob_size_thread.setDaemon(True) check_blob_size_thread.setDaemon(True)
check_blob_size_thread.start() check_blob_size_thread.start()
self._check_blob_size_thread = check_blob_size_thread self._check_blob_size_thread = check_blob_size_thread
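Marking the cleanup thread as a daemon keeps a pending cache sweep from blocking interpreter shutdown. The pattern, with a hypothetical worker function standing in for _check_blob_cache_size:

    import threading

    def _sweep(blob_dir, target_bytes):
        pass  # hypothetical stand-in for the real cache-size check

    t = threading.Thread(target=_sweep, args=('/tmp/blobs', 10 * 1024 * 1024))
    t.setDaemon(True)  # matches the code above; daemon=True is the modern spelling
    t.start()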
View File
@ -12,24 +12,23 @@
# FOR A PARTICULAR PURPOSE. # FOR A PARTICULAR PURPOSE.
# #
############################################################################## ##############################################################################
from __future__ import absolute_import, print_function, division
""" """
Segmented LRU implementations. Segmented LRU implementations.
""" """
from __future__ import absolute_import, print_function, division
import functools import functools
import itertools import itertools
try: try:
izip = itertools.izip izip = itertools.izip
except AttributeError: except AttributeError:
# Python 3 # Python 3
izip = zip izip = zip # pylint:disable=redefined-variable-type
from relstorage.cache import _cache_ring from relstorage.cache import _cache_ring
ffi = _cache_ring.ffi ffi = _cache_ring.ffi # pylint:disable=no-member
_FFI_RING = _cache_ring.lib _FFI_RING = _cache_ring.lib # pylint:disable=no-member
_ring_move_to_head = _FFI_RING.rsc_ring_move_to_head _ring_move_to_head = _FFI_RING.rsc_ring_move_to_head
_ring_del = _FFI_RING.rsc_ring_del _ring_del = _FFI_RING.rsc_ring_del
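The izip shim above is plain feature detection: the attribute lookup raises AttributeError under Python 3, where the built-in zip is already lazy. It behaves the same on both lines:

    import itertools

    try:
        izip = itertools.izip   # Python 2: lazy pairing lives in itertools
    except AttributeError:
        izip = zip              # Python 3: zip is already an iterator

    assert list(izip('ab', [1, 2])) == [('a', 1), ('b', 2)]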
@ -174,7 +173,7 @@ class CacheRingNode(object):
def __init__(self, key, value, node=None): def __init__(self, key, value, node=None):
self.key = key self.key = key
self.value = value self.value = value
self._cffi_owning_node = None
# Passing the string is faster than passing a cdecl because we # Passing the string is faster than passing a cdecl because we
# have the string directly in bytecode without a lookup # have the string directly in bytecode without a lookup
if node is None: if node is None:
@ -257,7 +256,7 @@ class CacheRing(object):
PARENT_CONST = 0 PARENT_CONST = 0
def __init__(self, limit): #, _ring_type=ffi.typeof("RSRing")): def __init__(self, limit):
self.limit = limit self.limit = limit
node = self.ring_home = ffi.new("RSRing") node = self.ring_home = ffi.new("RSRing")
node.r_next = node node.r_next = node
View File
@ -16,6 +16,8 @@ from __future__ import absolute_import, print_function, division
from zope.interface import Interface from zope.interface import Interface
from zope.interface import Attribute from zope.interface import Attribute
#pylint: disable=inherit-non-class,no-method-argument,no-self-argument
class IPersistentCache(Interface): class IPersistentCache(Interface):
""" """
A cache that can be persisted to a file (or more generally, a stream) A cache that can be persisted to a file (or more generally, a stream)
View File
@ -160,7 +160,7 @@ class LocalClient(object):
assert isinstance(key, str), (type(key), key) assert isinstance(key, str), (type(key), key)
assert isinstance(value, bytes) assert isinstance(value, bytes)
cvalue = compress(value) if compress else value cvalue = compress(value) if compress else value # pylint:disable=not-callable
if len(cvalue) >= self._value_limit: if len(cvalue) >= self._value_limit:
# This value is too big, so don't cache it. # This value is too big, so don't cache it.
View File
@ -54,6 +54,7 @@ class StorageCache(object):
most global. The first is a LocalClient, which stores the cache most global. The first is a LocalClient, which stores the cache
in the Python process, but shares the cache between threads. in the Python process, but shares the cache between threads.
""" """
# pylint:disable=too-many-instance-attributes,too-many-public-methods
# send_limit: approximate limit on the bytes to buffer before # send_limit: approximate limit on the bytes to buffer before
# sending to the cache. # sending to the cache.
@ -377,6 +378,7 @@ class StorageCache(object):
Fall back to loading from the database. Fall back to loading from the database.
""" """
# pylint:disable=too-many-statements,too-many-branches,too-many-locals
if not self.checkpoints: if not self.checkpoints:
# No poll has occurred yet. For safety, don't use the cache. # No poll has occurred yet. For safety, don't use the cache.
self._trace(0x20, oid_int) self._trace(0x20, oid_int)
@ -534,7 +536,7 @@ class StorageCache(object):
items = [ items = [
(startpos, endpos, oid_int) (startpos, endpos, oid_int)
for (oid_int, (startpos, endpos)) in iteritems(self.queue_contents) for (oid_int, (startpos, endpos)) in iteritems(self.queue_contents)
] ]
items.sort() items.sort()
# Trace these. This is the equivalent of ZEO's # Trace these. This is the equivalent of ZEO's
# ClientStorage._update_cache. # ClientStorage._update_cache.
@ -601,6 +603,7 @@ class StorageCache(object):
prev_tid_int can be None, in which case the changes prev_tid_int can be None, in which case the changes
parameter will be ignored. new_tid_int can not be None. parameter will be ignored. new_tid_int can not be None.
""" """
# pylint:disable=too-many-statements,too-many-branches,too-many-locals
new_checkpoints = None new_checkpoints = None
for client in self.clients_global_first: for client in self.clients_global_first:
s = client.get(self.checkpoints_key) s = client.get(self.checkpoints_key)
@ -658,8 +661,7 @@ class StorageCache(object):
and changes is not None and changes is not None
and prev_tid_int and prev_tid_int
and prev_tid_int <= self.current_tid and prev_tid_int <= self.current_tid
and new_tid_int >= self.current_tid and new_tid_int >= self.current_tid):
):
# All the conditions for keeping the checkpoints were met, # All the conditions for keeping the checkpoints were met,
# so just update self.delta_after0 and self.current_tid. # so just update self.delta_after0 and self.current_tid.
m = self.delta_after0 m = self.delta_after0
View File
@ -15,6 +15,8 @@ from __future__ import print_function, absolute_import, division
from relstorage.options import Options from relstorage.options import Options
# pylint:disable=unused-argument,redefined-variable-type
class MockOptions(Options): class MockOptions(Options):
cache_module_name = '' cache_module_name = ''
cache_servers = '' cache_servers = ''
@ -24,10 +26,11 @@ class MockOptions(Options):
import timeit import timeit
import statistics import statistics # pylint:disable=import-error
try: try:
import sys import sys
import cProfile, pstats import cProfile
import pstats
if '--profile' not in sys.argv: if '--profile' not in sys.argv:
raise ImportError raise ImportError
except ImportError: except ImportError:
@ -66,8 +69,8 @@ def run_and_report_funcs(named_funcs, **kwargs):
for name, func in named_funcs: for name, func in named_funcs:
times[name] = run_func(func, **kwargs) times[name] = run_func(func, **kwargs)
for name, time in sorted(times.items()): for name, _time in sorted(times.items()):
print(name, "average", statistics.mean(time), "stddev", statistics.stdev(time)) print(name, "average", statistics.mean(_time), "stddev", statistics.stdev(_time))
def local_benchmark(): def local_benchmark():
@ -186,7 +189,7 @@ def local_benchmark():
client.reset_stats() client.reset_stats()
hot_keys = key_groups[0] hot_keys = key_groups[0]
i = 0 i = 0
for k, v in ALL_DATA: for _k, v in ALL_DATA:
i += 1 i += 1
client._bucket0[str(i)] = v client._bucket0[str(i)] = v
@ -224,6 +227,7 @@ class StorageTraceSimulator(object):
def _read_binary_records(self, filename, num_clients=8, write_pct=.30, def _read_binary_records(self, filename, num_clients=8, write_pct=.30,
mean_size=10000, stddev_size=512): mean_size=10000, stddev_size=512):
# pylint:disable=too-many-locals
import struct import struct
keys = [] keys = []
i = 0 i = 0
@ -252,11 +256,7 @@ class StorageTraceSimulator(object):
return records return records
def _read_text_records(self, filename): def _read_text_records(self, filename):
try: from relstorage._compat import intern as _intern
from sys import intern as _intern
except ImportError:
# Py2
_intern = intern
records = [] records = []
with self._open_file(filename) as f: with self._open_file(filename) as f:
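Hoisting the intern try/except into relstorage._compat centralizes one more Py2/Py3 fork. The shim presumably reads something like this sketch (the real module may differ):

    try:
        from sys import intern  # Python 3
    except ImportError:
        # Python 2: intern is a builtin; pylint:disable=undefined-variable
        intern = intern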
@ -316,13 +316,14 @@ class StorageTraceSimulator(object):
return stats return stats
def _simulate_storage(self, records, cache_local_mb, f): def _simulate_storage(self, records, cache_local_mb, f):
# pylint:disable=too-many-locals
from relstorage.cache.storage_cache import StorageCache from relstorage.cache.storage_cache import StorageCache
from relstorage.cache.tests.test_cache import MockAdapter from relstorage.cache.tests.test_cache import MockAdapter
from ZODB.utils import p64 from ZODB.utils import p64
TRANSACTION_SIZE = 10 TRANSACTION_SIZE = 10
options = MockOptions() options = MockOptions()
options.cache_local_mb = cache_local_mb options.cache_local_mb = cache_local_mb
options.cache_local_compression = 'none' options.cache_local_compression = 'none'
#options.cache_delta_size_limit = 30000 #options.cache_delta_size_limit = 30000
@ -483,13 +484,12 @@ class StorageTraceSimulator(object):
def save_load_benchmark(): def save_load_benchmark():
# pylint:disable=too-many-locals
from relstorage.cache.mapping import SizedLRUMapping as LocalClientBucket from relstorage.cache.mapping import SizedLRUMapping as LocalClientBucket
from relstorage.cache import persistence as _Loader from relstorage.cache import persistence as _Loader
import os
import itertools import itertools
import sys
sys.setrecursionlimit(500000) sys.setrecursionlimit(500000)
bucket = LocalClientBucket(500*1024*1024) bucket = LocalClientBucket(500*1024*1024)
print("Testing", type(bucket._dict)) print("Testing", type(bucket._dict))
@ -532,8 +532,8 @@ def save_load_benchmark():
b2 = LocalClientBucket(bucket.limit) b2 = LocalClientBucket(bucket.limit)
_Loader.load_local_cache(cache_options, cache_pfx, b2) _Loader.load_local_cache(cache_options, cache_pfx, b2)
run_and_report_funcs( (('write', write), run_and_report_funcs((('write', write),
('read ', load))) ('read ', load)))
for fname in fnames: for fname in fnames:
os.remove(fname) os.remove(fname)
View File
@ -12,6 +12,7 @@
# #
############################################################################## ##############################################################################
from __future__ import print_function, absolute_import, division from __future__ import print_function, absolute_import, division
# pylint:disable=too-many-lines,abstract-method,too-many-public-methods,attribute-defined-outside-init
import unittest import unittest
from relstorage.tests.util import skipOnCI from relstorage.tests.util import skipOnCI
@ -52,7 +53,7 @@ def _check_load_and_store_multiple_files_hit_limit(self, mapping, wrapping_stora
if i > 0: if i > 0:
del mapping[str(i - 1)] del mapping[str(i - 1)]
mapping[str(i)] = b'abc' mapping[str(i)] = b'abc'
mapping[str(i)] # Increment so it gets saved _ = mapping[str(i)] # Increment so it gets saved
persistence.save_local_cache(options, 'test', dump_object) persistence.save_local_cache(options, 'test', dump_object)
self.assertEqual(persistence.count_cache_files(options, 'test'), self.assertEqual(persistence.count_cache_files(options, 'test'),
@ -61,7 +62,7 @@ def _check_load_and_store_multiple_files_hit_limit(self, mapping, wrapping_stora
# make sure it's not in the dict so that even if we find the most recent # make sure it's not in the dict so that even if we find the most recent
# cache file first, we still have something to load. If we don't we can sometimes # cache file first, we still have something to load. If we don't we can sometimes
# find that file and fail to store anything and prematurely break out of the loop # find that file and fail to store anything and prematurely break out of the loop
del mapping[str(i)] del mapping[str(i)] # pylint:disable=undefined-loop-variable
files_loaded = persistence.load_local_cache(options, 'test', dump_object) files_loaded = persistence.load_local_cache(options, 'test', dump_object)
self.assertEqual(files_loaded, 2) self.assertEqual(files_loaded, 2)
@ -178,7 +179,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_delta_after0_hit(self): def test_load_using_delta_after0_hit(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter() adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix') c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60 c.current_tid = 60
@ -229,7 +229,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_checkpoint0_hit(self): def test_load_using_checkpoint0_hit(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter() adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix') c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60 c.current_tid = 60
@ -240,7 +239,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_checkpoint0_miss(self): def test_load_using_checkpoint0_miss(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter() adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix') c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60 c.current_tid = 60
@ -252,7 +250,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_delta_after1_hit(self): def test_load_using_delta_after1_hit(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter() adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix') c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60 c.current_tid = 60
@ -265,7 +262,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_delta_after1_miss(self): def test_load_using_delta_after1_miss(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter() adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix') c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60 c.current_tid = 60
@ -278,7 +274,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_checkpoint1_hit(self): def test_load_using_checkpoint1_hit(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter() adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix') c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60 c.current_tid = 60
@ -290,7 +285,6 @@ class StorageCacheTests(unittest.TestCase):
def test_load_using_checkpoint1_miss(self): def test_load_using_checkpoint1_miss(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
adapter = MockAdapter() adapter = MockAdapter()
c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix') c = self.getClass()(adapter, MockOptionsWithFakeCache(), 'myprefix')
c.current_tid = 60 c.current_tid = 60
@ -317,7 +311,6 @@ class StorageCacheTests(unittest.TestCase):
def test_send_queue_small(self): def test_send_queue_small(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
c = self._makeOne() c = self._makeOne()
c.tpc_begin() c.tpc_begin()
c.store_temp(2, b'abc') c.store_temp(2, b'abc')
@ -332,7 +325,6 @@ class StorageCacheTests(unittest.TestCase):
def test_send_queue_large(self): def test_send_queue_large(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
c = self._makeOne() c = self._makeOne()
c.send_limit = 100 c.send_limit = 100
c.tpc_begin() c.tpc_begin()
@ -347,7 +339,6 @@ class StorageCacheTests(unittest.TestCase):
def test_send_queue_none(self): def test_send_queue_none(self):
from relstorage.tests.fakecache import data from relstorage.tests.fakecache import data
from ZODB.utils import p64
c = self._makeOne() c = self._makeOne()
c.tpc_begin() c.tpc_begin()
tid = p64(55) tid = p64(55)
@ -355,7 +346,6 @@ class StorageCacheTests(unittest.TestCase):
self.assertEqual(data, {}) self.assertEqual(data, {})
def test_after_tpc_finish(self): def test_after_tpc_finish(self):
from ZODB.utils import p64
c = self._makeOne() c = self._makeOne()
c.tpc_begin() c.tpc_begin()
c.after_tpc_finish(p64(55)) c.after_tpc_finish(p64(55))
@ -722,6 +712,7 @@ class SizedLRUMappingTests(unittest.TestCase):
return bio return bio
def test_load_and_store(self, options=None): def test_load_and_store(self, options=None):
# pylint:disable=too-many-statements
from io import BytesIO from io import BytesIO
if options is None: if options is None:
options = MockOptions() options = MockOptions()
@ -748,7 +739,7 @@ class SizedLRUMappingTests(unittest.TestCase):
client1.reset_stats() client1.reset_stats()
client1['def'] = b'123' client1['def'] = b'123'
client1['def'] _ = client1['def']
self.assertEqual(2, len(client1)) self.assertEqual(2, len(client1))
client1_max_size = client1.size client1_max_size = client1.size
self._save(bio, client1, options) self._save(bio, client1, options)
@ -924,6 +915,7 @@ class LocalClientTests(unittest.TestCase):
self.assertEqual(c.get_multi(['k2', 'k3']), {}) self.assertEqual(c.get_multi(['k2', 'k3']), {})
def test_bucket_sizes_without_compression(self): def test_bucket_sizes_without_compression(self):
# pylint:disable=too-many-statements
# LocalClient is a simple w-TinyLRU cache. Confirm it keeps the right keys. # LocalClient is a simple w-TinyLRU cache. Confirm it keeps the right keys.
c = self._makeOne(cache_local_compression='none') c = self._makeOne(cache_local_compression='none')
# This limit will result in # This limit will result in
@ -1053,6 +1045,7 @@ class LocalClientTests(unittest.TestCase):
def test_bucket_sizes_with_compression(self): def test_bucket_sizes_with_compression(self):
# pylint:disable=too-many-statements
c = self._makeOne(cache_local_compression='zlib') c = self._makeOne(cache_local_compression='zlib')
c.limit = 23 * 2 + 1 c.limit = 23 * 2 + 1
c.flush_all() c.flush_all()
@ -1218,7 +1211,7 @@ class LocalClientTests(unittest.TestCase):
self.assertEqual(0, len(cache_files)) self.assertEqual(0, len(cache_files))
# Now let's break saving # Now let's break saving
def badwrite(*args): def badwrite(*_args):
raise OSError("Nope") raise OSError("Nope")
c2._bucket0.write_to_stream = badwrite c2._bucket0.write_to_stream = badwrite
@ -1445,13 +1438,13 @@ class MockAdapter(object):
class MockObjectMover(object): class MockObjectMover(object):
def __init__(self): def __init__(self):
self.data = {} # {oid_int: (state, tid_int)} self.data = {} # {oid_int: (state, tid_int)}
def load_current(self, cursor, oid_int): def load_current(self, _cursor, oid_int):
return self.data.get(oid_int, (None, None)) return self.data.get(oid_int, (None, None))
class MockPoller(object): class MockPoller(object):
def __init__(self): def __init__(self):
self.changes = [] # [(oid, tid)] self.changes = [] # [(oid, tid)]
def list_changes(self, cursor, after_tid, last_tid): def list_changes(self, _cursor, after_tid, last_tid):
return ((oid, tid) for (oid, tid) in self.changes return ((oid, tid) for (oid, tid) in self.changes
if tid > after_tid and tid <= last_tid) if tid > after_tid and tid <= last_tid)
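The mock's generator yields the half-open interval (after_tid, last_tid]; a quick check, assuming the class above:

    poller = MockPoller()
    poller.changes = [(1, 5), (2, 10), (3, 15)]
    # 5 is excluded (tid > after_tid), 10 is included (tid <= last_tid)
    assert list(poller.list_changes(None, 5, 10)) == [(2, 10)]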
View File
@ -61,7 +61,7 @@ class ZEOTracer(object):
_trace_file_write( _trace_file_write(
_pack( _pack(
_int(now), encoded, _len(oid), tid, end_tid) + oid, _int(now), encoded, _len(oid), tid, end_tid) + oid,
) )
except: # pragma: no cover except: # pragma: no cover
log.exception("Problem writing trace info for %r at tid %r and end tid %r", log.exception("Problem writing trace info for %r at tid %r and end tid %r",
oid, tid, end_tid) oid, tid, end_tid)
@ -78,7 +78,7 @@ class ZEOTracer(object):
with self._lock: with self._lock:
now = time.time() now = time.time()
for startpos, endpos, oid_int in items: for startpos, endpos, oid_int in items:
self._trace(0x52, oid_int, tid_int, dlen=endpos-startpos, now=now) self._trace(0x52, oid_int, tid_int, dlen=endpos - startpos, now=now)
def close(self): def close(self):
View File

@ -14,12 +14,15 @@
"""ZConfig directive implementations for binding RelStorage to Zope""" """ZConfig directive implementations for binding RelStorage to Zope"""
from __future__ import absolute_import from __future__ import absolute_import
from ZODB.config import BaseConfig
from relstorage.options import Options from relstorage.options import Options
from relstorage.storage import RelStorage from relstorage.storage import RelStorage
class BaseConfig(object):
def __init__(self, config):
self.config = config
self.name = config.getSectionName()
class RelStorageFactory(BaseConfig): class RelStorageFactory(BaseConfig):
"""Open a storage configured via ZConfig""" """Open a storage configured via ZConfig"""
@ -38,8 +41,7 @@ class PostgreSQLAdapterFactory(BaseConfig):
from .adapters.postgresql import PostgreSQLAdapter from .adapters.postgresql import PostgreSQLAdapter
return PostgreSQLAdapter( return PostgreSQLAdapter(
dsn=self.config.dsn, dsn=self.config.dsn,
options=options, options=options)
)
class OracleAdapterFactory(BaseConfig): class OracleAdapterFactory(BaseConfig):
@ -50,8 +52,7 @@ class OracleAdapterFactory(BaseConfig):
user=config.user, user=config.user,
password=config.password, password=config.password,
dsn=config.dsn, dsn=config.dsn,
options=options, options=options)
)
class MySQLAdapterFactory(BaseConfig): class MySQLAdapterFactory(BaseConfig):
View File
@ -18,7 +18,7 @@ to zope.conf and set the 'cache-servers' parameter as well.
""" """
import pylibmc import pylibmc
from _pylibmc import MemcachedError # pylibmc >= 0.9 from pylibmc import Error as MemcachedError
import logging import logging
from functools import wraps from functools import wraps
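pylibmc exports Error as its base exception class, so the public import replaces the private _pylibmc one. Typical guarded use (a sketch, not this module's actual wrapper):

    from pylibmc import Error as MemcachedError

    def safe_get(client, key):
        # degrade a memcached failure to a cache miss
        try:
            return client.get(key)
        except MemcachedError:
            return None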
View File
@ -17,6 +17,8 @@ Stores pickles in the database.
""" """
from __future__ import absolute_import, print_function from __future__ import absolute_import, print_function
# pylint:disable=too-many-lines
from ZODB import ConflictResolution from ZODB import ConflictResolution
from ZODB.BaseStorage import DataRecord from ZODB.BaseStorage import DataRecord
@ -39,7 +41,7 @@ from ZODB.utils import u64
from perfmetrics import Metric from perfmetrics import Metric
from perfmetrics import metricmethod from perfmetrics import metricmethod
from persistent.TimeStamp import TimeStamp from persistent.TimeStamp import TimeStamp # pylint:disable=import-error
from relstorage.blobhelper import BlobHelper from relstorage.blobhelper import BlobHelper
from relstorage.cache import StorageCache from relstorage.cache import StorageCache
@ -107,6 +109,8 @@ class RelStorage(UndoLogCompatible,
ConflictResolution.ConflictResolvingStorage): ConflictResolution.ConflictResolvingStorage):
"""Storage to a relational database, based on invalidation polling""" """Storage to a relational database, based on invalidation polling"""
# pylint:disable=too-many-public-methods,too-many-instance-attributes
_transaction = None # Transaction that is being committed _transaction = None # Transaction that is being committed
_tstatus = ' ' # Transaction status, used for copying data _tstatus = ' ' # Transaction status, used for copying data
_is_read_only = False _is_read_only = False
@ -187,6 +191,7 @@ class RelStorage(UndoLogCompatible,
# objects don't need to. # objects don't need to.
_use_locks=True, _use_locks=True,
**kwoptions): **kwoptions):
# pylint:disable=too-many-branches
self._adapter = adapter self._adapter = adapter
if options is None: if options is None:
@ -550,6 +555,7 @@ class RelStorage(UndoLogCompatible,
@Metric(method=True, rate=0.1) @Metric(method=True, rate=0.1)
def load(self, oid, version=''): def load(self, oid, version=''):
# pylint:disable=unused-argument
if self._stale_error is not None: if self._stale_error is not None:
raise self._stale_error raise self._stale_error
@ -634,7 +640,8 @@ class RelStorage(UndoLogCompatible,
if state is None: if state is None:
# This can happen if something attempts to load # This can happen if something attempts to load
# an object whose creation has been undone, see load() # an object whose creation has been undone, see load()
# This change fixes the test in TransactionalUndoStorage.checkUndoCreationBranch1 # This change fixes the test in
# TransactionalUndoStorage.checkUndoCreationBranch1
# self._log_keyerror doesn't work here, only in certain states. # self._log_keyerror doesn't work here, only in certain states.
raise POSKeyError(oid) raise POSKeyError(oid)
end_int = self._adapter.mover.get_object_tid_after( end_int = self._adapter.mover.get_object_tid_after(
@ -687,6 +694,7 @@ class RelStorage(UndoLogCompatible,
# Like store(), but used for importing transactions. See the # Like store(), but used for importing transactions. See the
# comments in FileStorage.restore(). The prev_txn optimization # comments in FileStorage.restore(). The prev_txn optimization
# is not used. # is not used.
# pylint:disable=unused-argument
if self._stale_error is not None: if self._stale_error is not None:
raise self._stale_error raise self._stale_error
@ -884,6 +892,7 @@ class RelStorage(UndoLogCompatible,
Returns a sequence of OIDs that were resolved to be received by Returns a sequence of OIDs that were resolved to be received by
Connection._handle_serial(). Connection._handle_serial().
""" """
# pylint:disable=too-many-locals
assert self._tid is not None assert self._tid is not None
cursor = self._store_cursor cursor = self._store_cursor
adapter = self._adapter adapter = self._adapter
@ -927,8 +936,9 @@ class RelStorage(UndoLogCompatible,
txn_has_blobs = self.blobhelper.txn_has_blobs txn_has_blobs = self.blobhelper.txn_has_blobs
else: else:
txn_has_blobs = False txn_has_blobs = False
oid_ints = adapter.mover.move_from_temp(cursor, tid_int, txn_has_blobs)
# This returns the OID ints stored, but we don't use them here
adapter.mover.move_from_temp(cursor, tid_int, txn_has_blobs)
return resolved return resolved
@ -1087,7 +1097,7 @@ class RelStorage(UndoLogCompatible,
if self._preallocated_oids: if self._preallocated_oids:
oid_int = self._preallocated_oids.pop() oid_int = self._preallocated_oids.pop()
else: else:
def f(conn, cursor): def f(_conn, cursor):
return list(self._adapter.oidallocator.new_oids(cursor)) return list(self._adapter.oidallocator.new_oids(cursor))
preallocated = self._with_store(f) preallocated = self._with_store(f)
preallocated.sort(reverse=True) preallocated.sort(reverse=True)
@ -1103,6 +1113,7 @@ class RelStorage(UndoLogCompatible,
return False return False
def modifiedInVersion(self, oid): def modifiedInVersion(self, oid):
# pylint:disable=unused-argument
return '' return ''
def supportsUndo(self): def supportsUndo(self):
@ -1113,6 +1124,7 @@ class RelStorage(UndoLogCompatible,
@metricmethod @metricmethod
def undoLog(self, first=0, last=-20, filter=None): def undoLog(self, first=0, last=-20, filter=None):
# pylint:disable=too-many-locals
if self._stale_error is not None: if self._stale_error is not None:
raise self._stale_error raise self._stale_error
if last < 0: if last < 0:
@ -1135,7 +1147,7 @@ class RelStorage(UndoLogCompatible,
# This is largely cleaned up with transaction 2.0/ZODB 5, where the storage # This is largely cleaned up with transaction 2.0/ZODB 5, where the storage
# interface is defined in terms of bytes only. # interface is defined in terms of bytes only.
d = { d = {
'id': base64_encodebytes(tid)[:-1], 'id': base64_encodebytes(tid)[:-1], # pylint:disable=deprecated-method
'time': TimeStamp(tid).timeTime(), 'time': TimeStamp(tid).timeTime(),
'user_name': user or b'', 'user_name': user or b'',
'description': desc or b'', 'description': desc or b'',
@ -1156,6 +1168,7 @@ class RelStorage(UndoLogCompatible,
@metricmethod @metricmethod
def history(self, oid, version=None, size=1, filter=None): def history(self, oid, version=None, size=1, filter=None):
# pylint:disable=unused-argument,too-many-locals
if self._stale_error is not None: if self._stale_error is not None:
raise self._stale_error raise self._stale_error
with self._lock: with self._lock:
@ -1205,7 +1218,7 @@ class RelStorage(UndoLogCompatible,
if transaction is not self._transaction: if transaction is not self._transaction:
raise StorageTransactionError(self, transaction) raise StorageTransactionError(self, transaction)
undo_tid = base64_decodebytes(transaction_id + b'\n') undo_tid = base64_decodebytes(transaction_id + b'\n') # pylint:disable=deprecated-method
assert len(undo_tid) == 8 assert len(undo_tid) == 8
undo_tid_int = u64(undo_tid) undo_tid_int = u64(undo_tid)
@ -1246,6 +1259,7 @@ class RelStorage(UndoLogCompatible,
@metricmethod @metricmethod
def pack(self, t, referencesf, prepack_only=False, skip_prepack=False, def pack(self, t, referencesf, prepack_only=False, skip_prepack=False,
sleep=None): sleep=None):
# pylint:disable=too-many-branches
if self._is_read_only: if self._is_read_only:
raise ReadOnlyError() raise ReadOnlyError()
@ -1468,6 +1482,7 @@ class RelStorage(UndoLogCompatible,
self.blobhelper.restoreBlob(cursor, oid, serial, blobfilename) self.blobhelper.restoreBlob(cursor, oid, serial, blobfilename)
def copyTransactionsFrom(self, other): def copyTransactionsFrom(self, other):
# pylint:disable=too-many-locals
# adapted from ZODB.blob.BlobStorageMixin # adapted from ZODB.blob.BlobStorageMixin
begin_time = time.time() begin_time = time.time()
txnum = 0 txnum = 0
View File
@ -15,6 +15,7 @@
# This is copied from ZODB.tests.RecoveryStorage and expanded to fit # This is copied from ZODB.tests.RecoveryStorage and expanded to fit
# history-free storages. # history-free storages.
# pylint:disable=no-member,too-many-locals
from ZODB.blob import is_blob_record from ZODB.blob import is_blob_record
from transaction import Transaction from transaction import Transaction
@ -117,7 +118,7 @@ class IteratorDeepCompare(object):
in src. Also note that the dest does not retain transaction in src. Also note that the dest does not retain transaction
metadata. metadata.
""" """
missing = object()
src_objects = {} # {oid: (tid, data, blob or None)} src_objects = {} # {oid: (tid, data, blob or None)}
for txn in src.iterator(): for txn in src.iterator():
for rec in txn: for rec in txn:
@ -195,10 +196,10 @@ class BasicRecoveryStorage(IteratorDeepCompare):
txn.commit() txn.commit()
# Now pack the destination. # Now pack the destination.
snooze() snooze()
self._dst.pack(time.time(), referencesf) self._dst.pack(time.time(), referencesf)
# And check to see that the root object exists, but not the other # And check to see that the root object exists, but not the other
# objects. # objects.
data, serial = self._dst.load(root._p_oid, '') _data, _serial = self._dst.load(root._p_oid, '')
raises(KeyError, self._dst.load, obj1._p_oid, '') raises(KeyError, self._dst.load, obj1._p_oid, '')
raises(KeyError, self._dst.load, obj2._p_oid, '') raises(KeyError, self._dst.load, obj2._p_oid, '')
@ -232,9 +233,9 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
db = DB(self._storage) db = DB(self._storage)
c = db.open() c = db.open()
r = c.root() r = c.root()
obj = r["obj1"] = MinPO(1) r["obj1"] = MinPO(1)
transaction.commit() transaction.commit()
obj = r["obj2"] = MinPO(1) r["obj2"] = MinPO(1)
transaction.commit() transaction.commit()
self._dst.copyTransactionsFrom(self._storage) self._dst.copyTransactionsFrom(self._storage)
@ -248,6 +249,7 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
# Get the last transaction and its record iterator. Record iterators # Get the last transaction and its record iterator. Record iterators
# can't be accessed out-of-order, so we need to do this in a bit # can't be accessed out-of-order, so we need to do this in a bit
# complicated way: # complicated way:
final = None
for final in it: for final in it:
records = list(final) records = list(final)
@ -259,6 +261,7 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
self._dst.tpc_finish(final) self._dst.tpc_finish(final)
def checkRestoreWithMultipleObjectsInUndoRedo(self): def checkRestoreWithMultipleObjectsInUndoRedo(self):
# pylint:disable=too-many-statements
from ZODB.FileStorage import FileStorage from ZODB.FileStorage import FileStorage
# Undo creates backpointers in (at least) FileStorage. ZODB 3.2.1 # Undo creates backpointers in (at least) FileStorage. ZODB 3.2.1
@ -314,7 +317,7 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
tid = info[0]['id'] tid = info[0]['id']
t = Transaction() t = Transaction()
self._storage.tpc_begin(t) self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t) _oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t) self._storage.tpc_vote(t)
self._storage.tpc_finish(t) self._storage.tpc_finish(t)
@ -338,7 +341,7 @@ class UndoableRecoveryStorage(BasicRecoveryStorage):
tid = info[0]['id'] tid = info[0]['id']
t = Transaction() t = Transaction()
self._storage.tpc_begin(t) self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t) _oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t) self._storage.tpc_vote(t)
self._storage.tpc_finish(t) self._storage.tpc_finish(t)
View File
@ -32,7 +32,7 @@ def bigmark():
for i in xrange(1, oid_count): for i in xrange(1, oid_count):
if random() < 0.2: if random() < 0.2:
refs = [] refs = []
for j in range(randint(0, 20)): for _ in range(randint(0, 20)):
refs.append((i * k, randint(0, oid_count) * k)) refs.append((i * k, randint(0, oid_count) * k))
marker.add_refs(refs) marker.add_refs(refs)
refcount += len(refs) refcount += len(refs)
View File
@ -79,7 +79,7 @@ directory.
>>> def onfail(): >>> def onfail():
... return cache_size('blobs') ... return cache_size('blobs')
>>> from relstorage.tests.util import wait_until >>> from ZEO.tests.forker import wait_until
>>> wait_until("size is reduced", check, 99, onfail) >>> wait_until("size is reduced", check, 99, onfail)
If we read all of the blobs, data will be downloaded again, as If we read all of the blobs, data will be downloaded again, as
View File
@ -49,11 +49,11 @@ def new_time():
the packing time actually is before the commit time. the packing time actually is before the commit time.
""" """
now = new_time = time.time() now = anew_time = time.time()
while new_time <= now: while anew_time <= now:
new_time = time.time() anew_time = time.time()
time.sleep(1) time.sleep(1)
return new_time return anew_time
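The rename avoids a local that shadows the enclosing function's own name, which confuses readers and trips pylint alike. The same fix in miniature (a hypothetical helper, not the test code):

    import time

    def fresh_time():
        # use a distinct local name instead of rebinding 'fresh_time'
        now = later = time.time()
        while later <= now:
            later = time.time()
        time.sleep(1)
        return later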
with open(__file__, 'rb') as _f: with open(__file__, 'rb') as _f:
@ -85,21 +85,21 @@ def random_file(size, fd):
b.rotate(1) b.rotate(1)
datagen = fdata() datagen = fdata()
bytes = 0 bytes = 0
md5sum = md5() hasher = md5()
while bytes < size: while bytes < size:
data = next(datagen) data = next(datagen)
md5sum.update(data) hasher.update(data)
fd.write(data) fd.write(data)
bytes += len(data) bytes += len(data)
return md5sum.hexdigest() return hasher.hexdigest()
def md5sum(fd): def md5sum(fd):
md5sum = md5() hasher = md5()
blocksize = md5sum.block_size << 8 blocksize = hasher.block_size << 8
for data in iter(lambda: fd.read(blocksize), b''): for data in iter(lambda: fd.read(blocksize), b''):
md5sum.update(data) hasher.update(data)
return md5sum.hexdigest() return hasher.hexdigest()
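Same pattern here: `hasher` stops the local from shadowing the md5sum function itself. Usage of the cleaned-up helper, assuming the definitions above:

    import io

    fd = io.BytesIO(b'hello world')
    assert md5sum(fd) == '5eb63bbbe01eeed093cb22bb8f5acdc3'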
def sizeof_fmt(num): def sizeof_fmt(num):
@ -113,7 +113,7 @@ class BlobTestBase(ZODB.tests.StorageTestBase.StorageTestBase):
def setUp(self): def setUp(self):
ZODB.tests.StorageTestBase.StorageTestBase.setUp(self) ZODB.tests.StorageTestBase.StorageTestBase.setUp(self)
self._storage = self.create_storage() self._storage = self.create_storage() # pylint:disable=no-member
class BlobUndoTests(BlobTestBase): class BlobUndoTests(BlobTestBase):
@ -249,7 +249,7 @@ class RecoveryBlobStorage(BlobTestBase,
def setUp(self): def setUp(self):
BlobTestBase.setUp(self) BlobTestBase.setUp(self)
self._dst = self.create_storage('dest') self._dst = self.create_storage('dest') # pylint:disable=no-member
def tearDown(self): def tearDown(self):
self._dst.close() self._dst.close()
@ -502,35 +502,35 @@ def do_not_depend_on_cwd():
>>> bs.close() >>> bs.close()
""" """
if False: # if False:
# ZODB 3.8 fails this test because it creates a single # # ZODB 3.8 fails this test because it creates a single
# 'savepoints' directory. # # 'savepoints' directory.
def savepoint_isolation(): # def savepoint_isolation():
"""Make sure savepoint data is distinct accross transactions # """Make sure savepoint data is distinct accross transactions
>>> bs = create_storage() # >>> bs = create_storage()
>>> db = DB(bs) # >>> db = DB(bs)
>>> conn = db.open() # >>> conn = db.open()
>>> conn.root().b = ZODB.blob.Blob() # >>> conn.root().b = ZODB.blob.Blob()
>>> conn.root().b.open('w').write('initial') # >>> conn.root().b.open('w').write('initial')
>>> transaction.commit() # >>> transaction.commit()
>>> conn.root().b.open('w').write('1') # >>> conn.root().b.open('w').write('1')
>>> _ = transaction.savepoint() # >>> _ = transaction.savepoint()
>>> tm = transaction.TransactionManager() # >>> tm = transaction.TransactionManager()
>>> conn2 = db.open(transaction_manager=tm) # >>> conn2 = db.open(transaction_manager=tm)
>>> conn2.root().b.open('w').write('2') # >>> conn2.root().b.open('w').write('2')
>>> _ = tm.savepoint() # >>> _ = tm.savepoint()
>>> conn.root().b.open().read() # >>> conn.root().b.open().read()
'1' # '1'
>>> conn2.root().b.open().read() # >>> conn2.root().b.open().read()
'2' # '2'
>>> transaction.abort() # >>> transaction.abort()
>>> tm.commit() # >>> tm.commit()
>>> conn.sync() # >>> conn.sync()
>>> conn.root().b.open().read() # >>> conn.root().b.open().read()
'2' # '2'
>>> db.close() # >>> db.close()
""" # """
def savepoint_cleanup(): def savepoint_cleanup():
"""Make sure savepoint data gets cleaned up. """Make sure savepoint data gets cleaned up.
@ -651,6 +651,7 @@ def storage_reusable_suite(prefix, factory,
Pass a factory taking a name and a blob directory name. Pass a factory taking a name and a blob directory name.
""" """
# pylint:disable=unused-argument
def setup(test): def setup(test):
setUp(test) setUp(test)
def create_storage(name='data', blob_dir=None, **kw): def create_storage(name='data', blob_dir=None, **kw):
View File
@ -17,7 +17,7 @@ from relstorage.tests.RecoveryStorage import BasicRecoveryStorage
from relstorage.tests.RecoveryStorage import UndoableRecoveryStorage from relstorage.tests.RecoveryStorage import UndoableRecoveryStorage
from relstorage.tests.reltestbase import GenericRelStorageTests from relstorage.tests.reltestbase import GenericRelStorageTests
from relstorage.tests.reltestbase import RelStorageTestBase from relstorage.tests.reltestbase import RelStorageTestBase
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage from ZODB.FileStorage import FileStorage
from ZODB.serialize import referencesf from ZODB.serialize import referencesf
from ZODB.tests.ConflictResolution import PCounter from ZODB.tests.ConflictResolution import PCounter
@ -27,13 +27,12 @@ from ZODB.tests.PackableStorage import Root
from ZODB.tests.PackableStorage import ZERO from ZODB.tests.PackableStorage import ZERO
from ZODB.tests.StorageTestBase import zodb_pickle from ZODB.tests.StorageTestBase import zodb_pickle
from ZODB.tests.StorageTestBase import zodb_unpickle from ZODB.tests.StorageTestBase import zodb_unpickle
from relstorage._compat import loads
import time import time
class HistoryFreeRelStorageTests( class HistoryFreeRelStorageTests(GenericRelStorageTests):
GenericRelStorageTests, # pylint:disable=too-many-ancestors,abstract-method,too-many-locals,too-many-statements
):
keep_history = False keep_history = False
@ -41,6 +40,7 @@ class HistoryFreeRelStorageTests(
# collects garbage but does not retain old versions. # collects garbage but does not retain old versions.
def checkPackAllRevisions(self): def checkPackAllRevisions(self):
from relstorage._compat import loads
self._initroot() self._initroot()
eq = self.assertEqual eq = self.assertEqual
raises = self.assertRaises raises = self.assertRaises
@ -235,7 +235,7 @@ class HistoryFreeRelStorageTests(
s1.poll_invalidations() s1.poll_invalidations()
# commit a change # commit a change
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj)) _revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
# commit a conflicting change using s1 # commit a conflicting change using s1
main_storage = self._storage main_storage = self._storage
@ -243,12 +243,12 @@ class HistoryFreeRelStorageTests(
try: try:
# we can resolve this conflict because s1 has an open # we can resolve this conflict because s1 has an open
# transaction that can read the old state of the object. # transaction that can read the old state of the object.
revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj)) _revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
s1.release() s1.release()
finally: finally:
self._storage = main_storage self._storage = main_storage
data, serialno = self._storage.load(oid, '') data, _serialno = self._storage.load(oid, '')
inst = zodb_unpickle(data) inst = zodb_unpickle(data)
self.assertEqual(inst._value, 5) self.assertEqual(inst._value, 5)
@ -282,11 +282,9 @@ class HistoryFreeRelStorageTests(
db.close() db.close()
class HistoryFreeToFileStorage( class HistoryFreeToFileStorage(RelStorageTestBase,
RelStorageTestBase, BasicRecoveryStorage):
BasicRecoveryStorage, # pylint:disable=abstract-method,too-many-ancestors
):
keep_history = False keep_history = False
def setUp(self): def setUp(self):
@ -303,11 +301,9 @@ class HistoryFreeToFileStorage(
return FileStorage('Dest.fs') return FileStorage('Dest.fs')
class HistoryFreeFromFileStorage( class HistoryFreeFromFileStorage(RelStorageTestBase,
RelStorageTestBase, UndoableRecoveryStorage):
UndoableRecoveryStorage, # pylint:disable=abstract-method,too-many-ancestors
):
keep_history = False keep_history = False
def setUp(self): def setUp(self):
View File
@ -17,7 +17,7 @@ from persistent.mapping import PersistentMapping
from relstorage.tests.RecoveryStorage import UndoableRecoveryStorage from relstorage.tests.RecoveryStorage import UndoableRecoveryStorage
from relstorage.tests.reltestbase import GenericRelStorageTests from relstorage.tests.reltestbase import GenericRelStorageTests
from relstorage.tests.reltestbase import RelStorageTestBase from relstorage.tests.reltestbase import RelStorageTestBase
from relstorage._compat import TRANSACTION_DATA_IS_TEXT
from ZODB.DB import DB from ZODB.DB import DB
from ZODB.FileStorage import FileStorage from ZODB.FileStorage import FileStorage
from ZODB.serialize import referencesf from ZODB.serialize import referencesf
@ -34,16 +34,14 @@ import transaction
import unittest import unittest
class HistoryPreservingRelStorageTests( class HistoryPreservingRelStorageTests(GenericRelStorageTests,
GenericRelStorageTests, TransactionalUndoStorage.TransactionalUndoStorage,
TransactionalUndoStorage.TransactionalUndoStorage, IteratorStorage.IteratorStorage,
IteratorStorage.IteratorStorage, IteratorStorage.ExtendedIteratorStorage,
IteratorStorage.ExtendedIteratorStorage, RevisionStorage.RevisionStorage,
RevisionStorage.RevisionStorage, PackableStorage.PackableUndoStorage,
PackableStorage.PackableUndoStorage, HistoryStorage.HistoryStorage):
HistoryStorage.HistoryStorage, # pylint:disable=too-many-ancestors,abstract-method,too-many-locals
):
keep_history = True keep_history = True
def checkUndoMultipleConflictResolution(self, *_args, **_kwargs): def checkUndoMultipleConflictResolution(self, *_args, **_kwargs):
@ -144,7 +142,7 @@ class HistoryPreservingRelStorageTests(
if isinstance(ugly_string, bytes): if isinstance(ugly_string, bytes):
# Always text. Use latin 1 because it can decode any arbitrary # Always text. Use latin 1 because it can decode any arbitrary
# bytes. # bytes.
ugly_string = ugly_string.decode('latin-1') ugly_string = ugly_string.decode('latin-1') # pylint:disable=redefined-variable-type
# The storage layer is defined to take bytes (implicitly in # The storage layer is defined to take bytes (implicitly in
# older ZODB releases, explicitly in ZODB 5.something), but historically # older ZODB releases, explicitly in ZODB 5.something), but historically
@ -211,7 +209,7 @@ class HistoryPreservingRelStorageTests(
def checkPackGCReusePrePackData(self): def checkPackGCReusePrePackData(self):
self._storage = self.make_storage(pack_prepack_only=True) self._storage = self.make_storage(pack_prepack_only=True)
oid = self.checkPackGC(expect_object_deleted=False,close=False) oid = self.checkPackGC(expect_object_deleted=False, close=False)
# We now have pre-pack analysis data # We now have pre-pack analysis data
self._storage._options.pack_prepack_only = False self._storage._options.pack_prepack_only = False
self._storage.pack(0, referencesf, skip_prepack=True) self._storage.pack(0, referencesf, skip_prepack=True)
@ -250,7 +248,9 @@ class HistoryPreservingRelStorageTests(
db.close() db.close()
def checkHistoricalConnection(self): def checkHistoricalConnection(self):
import datetime, persistent, ZODB.POSException import datetime
import persistent
import ZODB.POSException
db = DB(self._storage) db = DB(self._storage)
conn = db.open() conn = db.open()
root = conn.root() root = conn.root()
@ -300,11 +300,9 @@ class HistoryPreservingRelStorageTests(
self.assertFalse(ZODB.interfaces.IExternalGC.providedBy(self._storage)) self.assertFalse(ZODB.interfaces.IExternalGC.providedBy(self._storage))
self.assertRaises(AttributeError, self._storage.deleteObject) self.assertRaises(AttributeError, self._storage.deleteObject)
class HistoryPreservingToFileStorage( class HistoryPreservingToFileStorage(RelStorageTestBase,
RelStorageTestBase, UndoableRecoveryStorage):
UndoableRecoveryStorage, # pylint:disable=too-many-ancestors,abstract-method,too-many-locals
):
keep_history = True keep_history = True
def setUp(self): def setUp(self):
@ -321,11 +319,9 @@ class HistoryPreservingToFileStorage(
return FileStorage('Dest.fs') return FileStorage('Dest.fs')
class HistoryPreservingFromFileStorage( class HistoryPreservingFromFileStorage(RelStorageTestBase,
RelStorageTestBase, UndoableRecoveryStorage):
UndoableRecoveryStorage, # pylint:disable=too-many-ancestors,abstract-method,too-many-locals
):
keep_history = True keep_history = True
def setUp(self): def setUp(self):


@ -14,33 +14,27 @@ logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG) logging.getLogger().setLevel(logging.DEBUG)
use = 'oracle' use = 'oracle'
keep_history=True keep_history = True
# pylint:disable=redefined-variable-type
if use == 'mysql': if use == 'mysql':
from relstorage.adapters.mysql import MySQLAdapter from relstorage.adapters.mysql import MySQLAdapter
a = MySQLAdapter( a = MySQLAdapter(db='packtest',
db='packtest', user='relstoragetest',
user='relstoragetest', passwd='relstoragetest',
passwd='relstoragetest', options=Options(keep_history=keep_history),)
options=Options(keep_history=keep_history),
)
elif use == 'postgresql': elif use == 'postgresql':
from relstorage.adapters.postgresql import PostgreSQLAdapter from relstorage.adapters.postgresql import PostgreSQLAdapter
a = PostgreSQLAdapter(dsn= a = PostgreSQLAdapter(dsn="dbname='packtest' "
"dbname='packtest' " 'user=relstoragetest '
'user=relstoragetest ' 'password=relstoragetest',
'password=relstoragetest', options=Options(keep_history=keep_history),)
options=Options(keep_history=keep_history),
)
elif use == 'oracle': elif use == 'oracle':
from relstorage.adapters.oracle import OracleAdapter from relstorage.adapters.oracle import OracleAdapter
dsn = os.environ.get('ORACLE_TEST_DSN', 'XE') dsn = os.environ.get('ORACLE_TEST_DSN', 'XE')
a = OracleAdapter( a = OracleAdapter(user='packtest',
user='packtest', password='relstoragetest',
password='relstoragetest', dsn=dsn,
dsn=dsn, options=Options(keep_history=keep_history),)
options=Options(keep_history=keep_history),
)
else: else:
raise AssertionError("which database?") raise AssertionError("which database?")
@ -51,26 +45,26 @@ c = d.open()
print('size:') print('size:')
print(d.getSize()) print(d.getSize())
if 1:
print('initializing...') print('initializing...')
container = PersistentMapping() container = PersistentMapping()
c.root()['container'] = container c.root()['container'] = container
container_size = 10000 container_size = 10000
for i in range(container_size): for i in range(container_size):
container[i] = PersistentMapping() container[i] = PersistentMapping()
transaction.commit()
print('generating transactions...')
for trans in range(100):
print(trans)
sources = (random.randint(0, container_size - 1) for j in range(100))
for source in sources:
obj = container[source]
obj[trans] = container[random.randint(0, container_size - 1)]
transaction.commit() transaction.commit()
print('generating transactions...') print('size:')
for trans in range(100): print(d.getSize())
print(trans)
sources = (random.randint(0, container_size - 1) for j in range(100))
for source in sources:
obj = container[source]
obj[trans] = container[random.randint(0, container_size - 1)]
transaction.commit()
print('size:')
print(d.getSize())
print('packing...') print('packing...')
d.pack() d.pack()
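(Usage note: d.pack() with no arguments packs away all old revisions. Assuming ZODB's DB.pack(t=None, days=0) signature, a cutoff keeps a window of history instead; reusing the script's d:)

    import time
    d.pack(t=time.time() - 86400)  # pack again, this time keeping one day of history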


@ -12,7 +12,7 @@
# #
############################################################################## ##############################################################################
"""A foundation for RelStorage tests""" """A foundation for RelStorage tests"""
# pylint:disable=too-many-ancestors,abstract-method,too-many-public-methods
from ZODB.DB import DB from ZODB.DB import DB
from ZODB.POSException import ReadConflictError from ZODB.POSException import ReadConflictError
from ZODB.serialize import referencesf from ZODB.serialize import referencesf
@ -50,8 +50,9 @@ class StorageCreatingMixin(object):
return storage return storage
def make_storage(self, zap=True, **kw): def make_storage(self, zap=True, **kw):
if ('cache_servers' not in kw and 'cache_module_name' not in kw if ('cache_servers' not in kw
and kw.get('share_local_cache', True)): and 'cache_module_name' not in kw
and kw.get('share_local_cache', True)):
if util.CACHE_SERVERS and util.CACHE_MODULE_NAME: if util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
kw['cache_servers'] = util.CACHE_SERVERS kw['cache_servers'] = util.CACHE_SERVERS
kw['cache_module_name'] = util.CACHE_MODULE_NAME kw['cache_module_name'] = util.CACHE_MODULE_NAME
@ -288,7 +289,7 @@ class GenericRelStorageTests(
data = b'a 16 byte string' * (1024 * 1024) data = b'a 16 byte string' * (1024 * 1024)
oid = self._storage.new_oid() oid = self._storage.new_oid()
self._dostoreNP(oid, data=data) self._dostoreNP(oid, data=data)
got, serialno = self._storage.load(oid, '') got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data)) self.assertEqual(len(got), len(data))
self.assertEqual(got, data) self.assertEqual(got, data)
@ -296,19 +297,18 @@ class GenericRelStorageTests(
# Store 99 objects each with 1900 bytes. This is intended # Store 99 objects each with 1900 bytes. This is intended
# to exercise possible buffer overfilling that the batching # to exercise possible buffer overfilling that the batching
# code might cause. # code might cause.
import transaction
data = b'0123456789012345678' * 100 data = b'0123456789012345678' * 100
t = transaction.Transaction() t = transaction.Transaction()
self._storage.tpc_begin(t) self._storage.tpc_begin(t)
oids = [] oids = []
for i in range(99): for _ in range(99):
oid = self._storage.new_oid() oid = self._storage.new_oid()
self._storage.store(oid, b'\0'*8, data, '', t) self._storage.store(oid, b'\0'*8, data, '', t)
oids.append(oid) oids.append(oid)
self._storage.tpc_vote(t) self._storage.tpc_vote(t)
self._storage.tpc_finish(t) self._storage.tpc_finish(t)
for oid in oids: for oid in oids:
got, serialno = self._storage.load(oid, '') got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data)) self.assertEqual(len(got), len(data))
self.assertEqual(got, data) self.assertEqual(got, data)
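(The begin/store/vote/finish sequence this test drives is the generic ZODB storage two-phase-commit API, not anything RelStorage-specific. A minimal sketch against an in-memory storage; MappingStorage merely stands in for RelStorage here:)

    import transaction
    from ZODB.MappingStorage import MappingStorage

    storage = MappingStorage()
    t = transaction.Transaction()
    storage.tpc_begin(t)                                 # phase one begins
    oid = storage.new_oid()
    storage.store(oid, b'\0' * 8, b'data bytes', '', t)  # all-zero serial marks a new object
    storage.tpc_vote(t)                                  # storage promises it can commit
    storage.tpc_finish(t)                                # phase two makes it durable
    got, _tid = storage.load(oid, '')
    assert got == b'data bytes'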
@ -610,7 +610,7 @@ class GenericRelStorageTests(
# extra1 should have been garbage collected # extra1 should have been garbage collected
self.assertRaises(KeyError, self.assertRaises(KeyError,
self._storage.load, extra1._p_oid, '') self._storage.load, extra1._p_oid, '')
# extra2 and extra3 should both still exist # extra2 and extra3 should both still exist
self._storage.load(extra2._p_oid, '') self._storage.load(extra2._p_oid, '')
self._storage.load(extra3._p_oid, '') self._storage.load(extra3._p_oid, '')
@ -832,6 +832,7 @@ class AbstractRSZodbConvertTests(StorageCreatingMixin,
keep_history = True keep_history = True
filestorage_name = 'source' filestorage_name = 'source'
relstorage_name = 'destination' relstorage_name = 'destination'
filestorage_file = None
def _relstorage_contents(self): def _relstorage_contents(self):
raise NotImplementedError() raise NotImplementedError()
@ -905,5 +906,5 @@ class DoubleCommitter(Persistent):
"""A crazy persistent class that changes self in __getstate__""" """A crazy persistent class that changes self in __getstate__"""
def __getstate__(self): def __getstate__(self):
if not hasattr(self, 'new_attribute'): if not hasattr(self, 'new_attribute'):
self.new_attribute = 1 self.new_attribute = 1 # pylint:disable=attribute-defined-outside-init
return Persistent.__getstate__(self) return Persistent.__getstate__(self)


@ -1,11 +1,12 @@
"""Tests of relstorage.blobhelper""" """Tests of relstorage.blobhelper"""
# pylint:disable=too-many-public-methods,unused-argument
from relstorage.tests.util import support_blob_cache from relstorage.tests.util import support_blob_cache
import os import os
import unittest import unittest
import tempfile
from ZODB.blob import remove_committed_dir
from relstorage._compat import PY3 from relstorage._compat import PY3
from relstorage._compat import dumps
test_oid = b'\0' * 7 + b'\x01' test_oid = b'\0' * 7 + b'\x01'
test_tid = b'\0' * 7 + b'\x02' test_tid = b'\0' * 7 + b'\x02'
@ -14,11 +15,11 @@ test_tid = b'\0' * 7 + b'\x02'
class BlobHelperTest(unittest.TestCase): class BlobHelperTest(unittest.TestCase):
def setUp(self): def setUp(self):
import tempfile self.uploaded = None
self.blob_dir = tempfile.mkdtemp() self.blob_dir = tempfile.mkdtemp()
def tearDown(self): def tearDown(self):
from ZODB.blob import remove_committed_dir
remove_committed_dir(self.blob_dir) remove_committed_dir(self.blob_dir)
def _class(self): def _class(self):
@ -157,7 +158,7 @@ class BlobHelperTest(unittest.TestCase):
obj = self._make_default(shared=False) obj = self._make_default(shared=False)
with obj.openCommittedBlobFile(None, test_oid, test_tid) as f: with obj.openCommittedBlobFile(None, test_oid, test_tid) as f:
if not PY3: if not PY3:
self.assertEqual(f.__class__, file) self.assertEqual(f.__class__, file) # pylint:disable=undefined-variable
self.assertEqual(f.read(), b'blob here') self.assertEqual(f.read(), b'blob here')
def test_openCommittedBlobFile_as_blobfile(self): def test_openCommittedBlobFile_as_blobfile(self):
@ -190,7 +191,7 @@ class BlobHelperTest(unittest.TestCase):
with obj.openCommittedBlobFile(None, test_oid, test_tid) as f: with obj.openCommittedBlobFile(None, test_oid, test_tid) as f:
self.assertEqual(loadBlob_calls, [1]) self.assertEqual(loadBlob_calls, [1])
if not PY3: if not PY3:
self.assertEqual(f.__class__, file) self.assertEqual(f.__class__, file) # pylint:disable=undefined-variable
self.assertEqual(f.read(), b'blob here') self.assertEqual(f.read(), b'blob here')
def test_openCommittedBlobFile_retry_as_blobfile(self): def test_openCommittedBlobFile_retry_as_blobfile(self):
@ -273,11 +274,11 @@ class BlobHelperTest(unittest.TestCase):
obj = self._make_default(shared=False) obj = self._make_default(shared=False)
self.assertFalse(obj.txn_has_blobs) self.assertFalse(obj.txn_has_blobs)
obj.storeBlob(None, store_func, test_oid, test_tid, 'blob pickle', obj.storeBlob(None, store_func, test_oid, test_tid, 'blob pickle',
fn, '', dummy_txn) fn, '', dummy_txn)
self.assertFalse(os.path.exists(fn)) self.assertFalse(os.path.exists(fn))
self.assertTrue(obj.txn_has_blobs) self.assertTrue(obj.txn_has_blobs)
self.assertEqual(called, self.assertEqual(called,
[(test_oid, test_tid, 'blob pickle', '', dummy_txn)]) [(test_oid, test_tid, 'blob pickle', '', dummy_txn)])
self.assertEqual(self.uploaded[:2], (1, None)) self.assertEqual(self.uploaded[:2], (1, None))
target_fn = self.uploaded[2] target_fn = self.uploaded[2]
self.assertEqual(read_file(target_fn), 'here a blob') self.assertEqual(read_file(target_fn), 'here a blob')


@ -119,15 +119,13 @@ class AbstractZODBConvertBase(unittest.TestCase):
self._check_value_of_key_in_dest(None) self._check_value_of_key_in_dest(None)
def test_incremental(self): def test_incremental(self):
x = 10 self._write_value_for_key_in_src(10)
self._write_value_for_key_in_src(x)
main(['', self.cfgfile]) main(['', self.cfgfile])
self._check_value_of_key_in_dest(x) self._check_value_of_key_in_dest(10)
x = "hi" self._write_value_for_key_in_src("hi")
self._write_value_for_key_in_src(x)
main(['', '--incremental', self.cfgfile]) main(['', '--incremental', self.cfgfile])
self._check_value_of_key_in_dest(x) self._check_value_of_key_in_dest("hi")
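(In command-line terms the two main() calls above mirror running zodbconvert twice; the config filename below is illustrative, and main is the same function this test module imports:)

    main(['', 'convert.cfg'])                   # full copy from source to destination
    main(['', '--incremental', 'convert.cfg'])  # copy only transactions newer than the destination's last tid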
def test_incremental_empty_src_dest(self): def test_incremental_empty_src_dest(self):
# Should work and not raise a POSKeyError # Should work and not raise a POSKeyError


@ -73,7 +73,6 @@ class ZODBPackScriptTests(unittest.TestCase):
def test_pack_with_1_day(self): def test_pack_with_1_day(self):
from ZODB.DB import DB from ZODB.DB import DB
from ZODB.FileStorage import FileStorage from ZODB.FileStorage import FileStorage
from ZODB.POSException import POSKeyError
import time import time
import transaction import transaction
from relstorage.zodbpack import main from relstorage.zodbpack import main


@ -27,6 +27,7 @@ import logging
import os import os
import unittest import unittest
# pylint:disable=no-member,too-many-ancestors
base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest') base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest')
@ -203,7 +204,6 @@ def test_suite():
suite.addTest(unittest.makeSuite(HPMySQLDestZODBConvertTests)) suite.addTest(unittest.makeSuite(HPMySQLDestZODBConvertTests))
suite.addTest(unittest.makeSuite(HPMySQLSrcZODBConvertTests)) suite.addTest(unittest.makeSuite(HPMySQLSrcZODBConvertTests))
import ZODB.blob
from relstorage.tests.blob.testblob import storage_reusable_suite from relstorage.tests.blob.testblob import storage_reusable_suite
from relstorage.tests.util import shared_blob_dir_choices from relstorage.tests.util import shared_blob_dir_choices
for shared_blob_dir in shared_blob_dir_choices: for shared_blob_dir in shared_blob_dir_choices:


@ -25,6 +25,8 @@ import os
import sys import sys
import unittest import unittest
# pylint:disable=no-member,too-many-ancestors
base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest') base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest')
@ -49,6 +51,7 @@ class UseOracleAdapter(object):
class ZConfigTests(object): class ZConfigTests(object):
def checkConfigureViaZConfig(self): def checkConfigureViaZConfig(self):
# pylint:disable=too-many-locals
import tempfile import tempfile
dsn = os.environ.get('ORACLE_TEST_DSN', 'XE') dsn = os.environ.get('ORACLE_TEST_DSN', 'XE')
fd, replica_conf = tempfile.mkstemp() fd, replica_conf = tempfile.mkstemp()
@ -144,6 +147,7 @@ db_names = {
} }
def test_suite(): def test_suite():
# pylint:disable=too-many-locals
import relstorage.adapters.oracle as _adapter import relstorage.adapters.oracle as _adapter
try: try:
_adapter.select_driver() _adapter.select_driver()
@ -163,7 +167,7 @@ def test_suite():
]: ]:
suite.addTest(unittest.makeSuite(klass, "check")) suite.addTest(unittest.makeSuite(klass, "check"))
import ZODB.blob
from .util import RUNNING_ON_CI from .util import RUNNING_ON_CI
if RUNNING_ON_CI or os.environ.get("RS_ORCL_SMALL_BLOB"): if RUNNING_ON_CI or os.environ.get("RS_ORCL_SMALL_BLOB"):
# cx_Oracle blob support can only address up to sys.maxint on # cx_Oracle blob support can only address up to sys.maxint on


@ -26,6 +26,8 @@ import logging
import os import os
import unittest import unittest
# pylint:disable=no-member,too-many-ancestors
base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest') base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest')
@ -121,18 +123,18 @@ class HPPostgreSQLSrcZODBConvertTests(UsePostgreSQLAdapter, _PgSQLCfgMixin, Abst
pass pass
class HPPostgreSQLTests(UsePostgreSQLAdapter, HistoryPreservingRelStorageTests, class HPPostgreSQLTests(UsePostgreSQLAdapter, HistoryPreservingRelStorageTests,
ZConfigTests): ZConfigTests):
pass pass
class HPPostgreSQLToFile(UsePostgreSQLAdapter, HistoryPreservingToFileStorage): class HPPostgreSQLToFile(UsePostgreSQLAdapter, HistoryPreservingToFileStorage):
pass pass
class HPPostgreSQLFromFile(UsePostgreSQLAdapter, class HPPostgreSQLFromFile(UsePostgreSQLAdapter,
HistoryPreservingFromFileStorage): HistoryPreservingFromFileStorage):
pass pass
class HFPostgreSQLTests(UsePostgreSQLAdapter, HistoryFreeRelStorageTests, class HFPostgreSQLTests(UsePostgreSQLAdapter, HistoryFreeRelStorageTests,
ZConfigTests): ZConfigTests):
pass pass
class HFPostgreSQLToFile(UsePostgreSQLAdapter, HistoryFreeToFileStorage): class HFPostgreSQLToFile(UsePostgreSQLAdapter, HistoryFreeToFileStorage):
@ -149,6 +151,7 @@ db_names = {
} }
def test_suite(): def test_suite():
# pylint:disable=too-many-locals
import relstorage.adapters.postgresql as _adapter import relstorage.adapters.postgresql as _adapter
try: try:
_adapter.select_driver() _adapter.select_driver()
@ -171,7 +174,6 @@ def test_suite():
suite.addTest(unittest.makeSuite(HPPostgreSQLDestZODBConvertTests)) suite.addTest(unittest.makeSuite(HPPostgreSQLDestZODBConvertTests))
suite.addTest(unittest.makeSuite(HPPostgreSQLSrcZODBConvertTests)) suite.addTest(unittest.makeSuite(HPPostgreSQLSrcZODBConvertTests))
import ZODB.blob
from .util import RUNNING_ON_CI from .util import RUNNING_ON_CI
if RUNNING_ON_CI or os.environ.get("RS_PG_SMALL_BLOB"): if RUNNING_ON_CI or os.environ.get("RS_PG_SMALL_BLOB"):
# Avoid creating 2GB blobs to be friendly to neighbors # Avoid creating 2GB blobs to be friendly to neighbors


@ -1,9 +1,6 @@
import os import os
import time
import unittest
from relstorage._compat import string_types
from ZEO.tests.forker import wait_until import unittest
# ZODB >= 3.9. The blob directory can be a private cache. # ZODB >= 3.9. The blob directory can be a private cache.
@ -17,7 +14,7 @@ RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
if RUNNING_ON_CI: if RUNNING_ON_CI:
skipOnCI = unittest.skip skipOnCI = unittest.skip
else: else:
def skipOnCI(reason): def skipOnCI(reason): # pylint:disable=unused-argument
def dec(f): def dec(f):
return f return f
return dec return dec
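(Usage of this helper is identical on and off CI: on CI it is unittest.skip, locally it is a no-op decorator factory. A brief sketch with a made-up test name:)

    class ExampleTest(unittest.TestCase):

        @skipOnCI("relies on local services CI workers lack")
        def test_talks_to_local_memcache(self):
            self.assertTrue(True)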
@ -28,6 +25,7 @@ CACHE_MODULE_NAME = None
if RUNNING_ON_TRAVIS: if RUNNING_ON_TRAVIS:
# We expect to have access to a local memcache server # We expect to have access to a local memcache server
# on travis. Use it if we can import drivers. # on travis. Use it if we can import drivers.
# pylint:disable=unused-import
try: try:
import pylibmc import pylibmc
CACHE_SERVERS = ["localhost:11211"] CACHE_SERVERS = ["localhost:11211"]


@ -111,6 +111,7 @@ class TreeMarker(object):
new OIDs marked and `next_pass` is the collection of OIDs to new OIDs marked and `next_pass` is the collection of OIDs to
follow in the next pass. follow in the next pass.
""" """
# pylint:disable=too-many-locals
# next_pass: {oid_hi: IISet32X} # next_pass: {oid_hi: IISet32X}
next_pass = collections.defaultdict(IISet32X) next_pass = collections.defaultdict(IISet32X)
found = 0 found = 0


@ -19,7 +19,7 @@ from __future__ import print_function
import logging import logging
import argparse import argparse
from persistent.TimeStamp import TimeStamp from persistent.TimeStamp import TimeStamp # pylint:disable=import-error
from io import StringIO from io import StringIO
import sys import sys
import ZConfig import ZConfig
@ -69,6 +69,7 @@ class _DefaultStartStorageIteration(object):
return getattr(self._source, name) return getattr(self._source, name)
def main(argv=None): def main(argv=None):
# pylint:disable=too-many-branches
if argv is None: if argv is None:
argv = sys.argv argv = sys.argv
parser = argparse.ArgumentParser(description=__doc__) parser = argparse.ArgumentParser(description=__doc__)
@ -123,7 +124,7 @@ def main(argv=None):
# This *should* be a byte string. # This *should* be a byte string.
last_tid = u64(last_tid) last_tid = u64(last_tid)
next_tid = p64(last_tid+1) next_tid = p64(last_tid + 1)
# Compensate for the RelStorage bug(?) and get a reusable iterator # Compensate for the RelStorage bug(?) and get a reusable iterator
# that starts where we want it to. There's no harm in wrapping it for # that starts where we want it to. There's no harm in wrapping it for
# other sources like FileStorage too. # other sources like FileStorage too.
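(Why the + 1 works: u64/p64 from ZODB.utils convert between ZODB's 8-byte big-endian transaction ids and plain integers, so adding one yields the smallest tid strictly after last_tid. A quick illustration:)

    from ZODB.utils import p64, u64

    tid = b'\x00' * 7 + b'\x10'
    assert u64(tid) == 16
    assert p64(u64(tid) + 1) == b'\x00' * 7 + b'\x11'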


@ -32,6 +32,7 @@ class SuffixMultiplier(object):
self._default = default self._default = default
# all keys must be the same size # all keys must be the same size
self._keysz = None self._keysz = None
def check(a, b): def check(a, b):
if len(a) != len(b): if len(a) != len(b):
raise ValueError("suffix length mismatch") raise ValueError("suffix length mismatch")
@ -45,10 +46,11 @@ class SuffixMultiplier(object):
return int(v[:-self._keysz]) * m return int(v[:-self._keysz]) * m
return int(v) * self._default return int(v) * self._default
convert_bytesize = SuffixMultiplier({'kb': 1024, convert_bytesize = SuffixMultiplier({
'mb': 1024*1024, 'kb': 1024,
'gb': 1024*1024*1024, 'mb': 1024 * 1024,
}) 'gb': 1024 * 1024 * 1024,
})
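(A usage sketch for the reformatted mapping above, assuming, as the surrounding code suggests, a default multiplier of 1 for suffix-less values:)

    assert convert_bytesize('10kb') == 10 * 1024
    assert convert_bytesize('2mb') == 2 * 1024 * 1024
    assert convert_bytesize('512') == 512  # no recognized suffix: falls back to the default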
def convert_int(value): def convert_int(value):
@ -109,7 +111,7 @@ class PostgreSQLAdapterHelper(Resolver):
kw, unused = self.interpret_kwargs(kw) kw, unused = self.interpret_kwargs(kw)
dsn_args.extend(kw.items()) dsn_args.extend(kw.items())
dsn = ' '.join("%s='%s'"%arg for arg in dsn_args) dsn = ' '.join("%s='%s'" % arg for arg in dsn_args)
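(The %-formatting spacing change above is behavior-preserving; with illustrative values the join yields:)

    dsn_args = [('dbname', 'packtest'), ('user', 'relstoragetest')]
    dsn = ' '.join("%s='%s'" % arg for arg in dsn_args)
    assert dsn == "dbname='packtest' user='relstoragetest'"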
def factory(options): def factory(options):
from relstorage.adapters.postgresql import PostgreSQLAdapter from relstorage.adapters.postgresql import PostgreSQLAdapter
@ -189,9 +191,7 @@ class RelStorageURIResolver(Resolver):
def factory(): def factory():
adapter = adapter_factory(options) adapter = adapter_factory(options)
storage = RelStorage(adapter=adapter, options=options) storage = RelStorage(adapter=adapter, options=options)
if demostorage: return storage if not demostorage else DemoStorage(base=storage)
storage = DemoStorage(base=storage)
return storage
return factory, unused return factory, unused
postgresql_resolver = RelStorageURIResolver(PostgreSQLAdapterHelper()) postgresql_resolver = RelStorageURIResolver(PostgreSQLAdapterHelper())