Cleanup all the lint (#143)
* Clean up all violations reported by pylint in preparation for turning on landscape.io.
* Fix all prospector errors.
* Don't run pylint on pypy/py3 because we're getting all sorts of import errors under Py3 (and it's sorta slow on pypy).
* Back to editable? Somehow _cache_ring isn't always being built.
* Skip bootstrap.py on landscape.
* pylint fixes for umysql.
This commit is contained in: parent 37a1ddc658, commit 4bf54e0954
@@ -1,7 +1,7 @@
doc-warnings: no # experimental, raises an exception
test-warnings: no
strictness: veryhigh
max-line-length: 100
max-line-length: 130
# We don't use any of the auto-detected things, and
# auto-detection slows down startup
autodetect: false
@@ -10,13 +10,14 @@ requirements:

python-targets:
  - 2
  - 3
  # - 3 # landscape.io seems to fail if we run both py2 and py3?
ignore-paths:
  - doc/
  - build
  - dist
  - .eggs
  - setup.py
  - bootstrap.py
#ignore-patterns:

pyroma:
@@ -42,6 +43,9 @@ pyflakes:

pep8:
  disable:
    # N803: argument should be lowercase. We have 'Binary' and
    # camelCase names.
    - N803
    # N805: first arg should be self; fails on metaclasses and
    # classmethods; pylint does a better job
    - N805
.pylintrc (17 lines changed)
@@ -10,8 +10,7 @@
# comments at the end of the line does the same thing (though Py3 supports
# mixing)


# invalid-name, ; We get lots of these, especially in scripts. should fix many of them
# invalid-name, ; Things like loadBlob get flagged
# protected-access, ; We have many cases of this; legit ones need to be examined and commented, then this removed
# no-self-use, ; common in superclasses with extension points
# too-few-public-methods, ; Exception and marker classes get tagged with this
@@ -29,10 +28,10 @@
# useless-suppression: the only way to avoid repeating it for specific statements everywhere that we
# do Py2/Py3 stuff is to put it here. Sadly this means that we might get better but not realize it.
disable=wrong-import-position,
        invalid-name,
        wrong-import-order,
        missing-docstring,
        ungrouped-imports,
        invalid-name,
        protected-access,
        no-self-use,
        too-few-public-methods,
@@ -43,8 +42,9 @@ disable=wrong-import-position,
        cyclic-import,
        too-many-arguments,
        redefined-builtin,
        useless-suppression,
        # undefined-all-variable
        useless-suppression,
        duplicate-code,
        # undefined-all-variable


[FORMAT]
@@ -72,15 +72,14 @@ generated-members=exc_clear
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set). This supports can work
# with qualified names.
# greenlet, Greenlet, parent, dead: all attempts to fix issues in greenlet.py
# only seen on the service, e.g., self.parent.loop: class parent has no loop
ignored-classes=SSLContext, SSLSocket, greenlet, Greenlet, parent, dead

ignored-classes=SectionValue

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=gevent._corecffi
#ignored-modules=gevent._corecffi

[DESIGN]
max-attributes=12
.travis.yml (14 lines changed)
@@ -11,7 +11,7 @@ addons:
python:
  - pypy-5.4.1
  - 2.7
  - 3.4
  - 3.5
env:
  matrix:
    - ENV=mysql
@@ -21,7 +21,7 @@ env:
    - ENV=umysqldb
matrix:
  exclude:
    - python: 3.4
    - python: 3.5
      env: ENV=umysqldb
    - python: pypy-5.4.1
      env: ENV=pymysql
@@ -31,6 +31,7 @@ matrix:
script:
  # coverage slows PyPy down from 2 minutes to 12+.
  # But don't run the pymysql/pypy tests twice.
  - if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then pylint --rcfile=.pylintrc relstorage -f parseable -r n; fi
  - if [[ $TRAVIS_PYTHON_VERSION == pypy* ]]; then python -m relstorage.tests.alltests -v; fi
  - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]]; then coverage run -m relstorage.tests.alltests -v; fi
after_success:
@@ -43,16 +44,13 @@ before_install:

install:
  - pip install -U pip setuptools
  - pip install -U tox coveralls
  - pip install -U tox coveralls pylint
  - if [[ $TRAVIS_PYTHON_VERSION == pypy* ]]; then pip install -U python-memcached; fi
  - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]]; then pip install -U pylibmc cffi; fi
  - pip install -U -e ".[test]"
  - .travis/setup-$ENV.sh
# cache: pip seems not to work if `install` is replaced (https://github.com/travis-ci/travis-ci/issues/3239)
cache:
  directories:
    - $HOME/.cache/pip
    - $HOME/.venv

cache: pip

before_cache:
  - rm -f $HOME/.cache/pip/log/debug.log
@@ -6,7 +6,8 @@ Compatibility shims.

from __future__ import print_function, absolute_import, division

# pylint:disable=unused-import
# pylint:disable=unused-import,invalid-name,no-member,undefined-variable
# pylint:disable=no-name-in-module,redefined-variable-type

import sys
import platform
@@ -19,8 +20,10 @@ PYPY = platform.python_implementation() == 'PyPy'
if PY3:
    def list_keys(d):
        return list(d.keys())

    def list_items(d):
        return list(d.items())

    def list_values(d):
        return list(d.values())
    iteritems = dict.items
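The hunk above only shows the Py3 side of these shims. For context, a minimal sketch of the full two-sided pattern (the Py2 branch here is an assumption, it is not part of the diff):

    import sys

    PY3 = sys.version_info[0] >= 3

    if PY3:
        def list_keys(d):
            # Py3 dict.keys() is a lazy view; callers that mutate the
            # dict while iterating need a materialized list.
            return list(d.keys())
        iteritems = dict.items
    else:
        # Assumed Py2 branch: keys() already returns a list there.
        list_keys = dict.keys
        iteritems = dict.iteritems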
@@ -25,7 +25,7 @@ class RowBatcher(object):
    """

    row_limit = 100
    size_limit = 1<<20
    size_limit = 1 << 20

    def __init__(self, cursor, row_limit=None):
        self.cursor = cursor
@@ -101,6 +101,6 @@ class RowBatcher(object):
        for row in rows.values():
            parts.append(s)
            params.extend(row)
        parts = ',\n'.join(parts)
        stmt = "%s INTO %s VALUES\n%s" % (command, header, parts)

        stmt = "%s INTO %s VALUES\n%s" % (command, header, ',\n'.join(parts))
        self.cursor.execute(stmt, tuple(params))
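The second hunk drops the intermediate `parts = ',\n'.join(parts)` rebinding, which pylint flags as redefined-variable-type because `parts` switches from list to str. A runnable sketch of the same statement assembly, with hypothetical table and row data:

    # Hypothetical rows: oid -> parameter tuple.
    command, header = "INSERT", "object_state (zoid, state)"
    s = "(%s, %s)"
    rows = {1: (1, b'state-1'), 2: (2, b'state-2')}

    parts, params = [], []
    for row in rows.values():
        parts.append(s)
        params.extend(row)
    # Join inline instead of rebinding `parts` to a str:
    stmt = "%s INTO %s VALUES\n%s" % (command, header, ',\n'.join(parts))
    print(stmt)            # INSERT INTO object_state (zoid, state) VALUES ...
    print(tuple(params))   # (1, b'state-1', 2, b'state-2')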
@@ -56,7 +56,7 @@ class AbstractConnectionManager(object):
        """Set the on_store_opened hook"""
        self.on_store_opened = f

    def open(self):
    def open(self, **kwargs):
        """Open a database connection and return (conn, cursor)."""
        raise NotImplementedError()

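The base `open()` grows a `**kwargs` so the driver-specific overrides changed later in this commit (which take `transaction_mode`, `replica_selector`, `twophase`, or `isolation`) all stay call-compatible with the base signature. A minimal sketch of the shape, with placeholder return values only:

    class AbstractConnectionManager(object):
        def open(self, **kwargs):
            """Open a database connection and return (conn, cursor)."""
            raise NotImplementedError()

    class MySQLdbConnectionManager(AbstractConnectionManager):
        def open(self, transaction_mode="ISOLATION LEVEL READ COMMITTED",
                 replica_selector=None, **kwargs):
            # Real body elided; stand-in values only.
            return (object(), object())

    conn, cursor = MySQLdbConnectionManager().open(replica_selector=None)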
@@ -22,6 +22,7 @@ class DatabaseIterator(object):

    def __init__(self, database_type, runner):
        self.runner = runner
        self.database_type = database_type

    def iter_objects(self, cursor, tid):
        """Iterate over object states in a transaction.
@@ -110,7 +111,7 @@ class HistoryPreservingDatabaseIterator(DatabaseIterator):
        stmt += " AND tid <= %(max_tid)s"
        stmt += " ORDER BY tid"
        self.runner.run_script_stmt(cursor, stmt,
            {'min_tid': start, 'max_tid': stop})
                                    {'min_tid': start, 'max_tid': stop})
        return self._transaction_iterator(cursor)


@@ -150,7 +151,10 @@ class HistoryFreeDatabaseIterator(DatabaseIterator):

        Skips packed transactions.
        Yields (tid, username, description, extension) for each transaction.

        This always returns an empty iterable.
        """
        # pylint:disable=unused-argument
        return []

    def iter_transactions_range(self, cursor, start=None, stop=None):
@@ -171,7 +175,7 @@ class HistoryFreeDatabaseIterator(DatabaseIterator):
        stmt += " AND tid <= %(max_tid)s"
        stmt += " ORDER BY tid"
        self.runner.run_script_stmt(cursor, stmt,
            {'min_tid': start, 'max_tid': stop})
                                    {'min_tid': start, 'max_tid': stop})
        return ((tid, '', '', '', True) for (tid,) in cursor)

    def iter_object_history(self, cursor, oid):
@@ -17,7 +17,7 @@ from ZODB.POSException import StorageError
from zope.interface import Attribute
from zope.interface import Interface

#pylint: disable=inherit-non-class,no-method-argument
#pylint: disable=inherit-non-class,no-method-argument,no-self-argument

class IRelStorageAdapter(Interface):
    """A database adapter for RelStorage"""
@@ -487,7 +487,7 @@ class ITransactionControl(Interface):
        """Returns the most recent tid."""

    def add_transaction(cursor, tid, username, description, extension,
            packed=False):
                        packed=False):
        """Add a transaction."""

    def commit_phase1(conn, cursor, tid):
@@ -80,6 +80,7 @@ def select_driver(options=None):
@implementer(IRelStorageAdapter)
class MySQLAdapter(object):
    """MySQL adapter for RelStorage."""
    # pylint:disable=too-many-instance-attributes

    def __init__(self, options=None, **params):
        if options is None:
@@ -95,28 +96,28 @@ class MySQLAdapter(object):
            driver,
            params=params,
            options=options,
            )
        )
        self.runner = ScriptRunner()
        self.locker = MySQLLocker(
            options=options,
            lock_exceptions=driver.lock_exceptions,
            )
        )
        self.schema = MySQLSchemaInstaller(
            connmanager=self.connmanager,
            runner=self.runner,
            keep_history=self.keep_history,
            )
        )
        self.mover = MySQLObjectMover(
            database_type='mysql',
            options=options,
            Binary=driver.Binary,
            )
        )
        self.connmanager.set_on_store_opened(self.mover.on_store_opened)
        self.oidallocator = MySQLOIDAllocator()
        self.txncontrol = MySQLTransactionControl(
            keep_history=self.keep_history,
            Binary=driver.Binary,
            )
        )

        if self.keep_history:
            poll_query = "SELECT MAX(tid) FROM transaction"
@@ -128,7 +129,7 @@ class MySQLAdapter(object):
            runner=self.runner,
            revert_when_stale=options.revert_when_stale,
        )

        # pylint:disable=redefined-variable-type
        if self.keep_history:
            self.packundo = MySQLHistoryPreservingPackUndo(
                database_type='mysql',
@@ -136,11 +137,11 @@ class MySQLAdapter(object):
                runner=self.runner,
                locker=self.locker,
                options=options,
                )
            )
            self.dbiter = HistoryPreservingDatabaseIterator(
                database_type='mysql',
                runner=self.runner,
                )
            )
        else:
            self.packundo = MySQLHistoryFreePackUndo(
                database_type='mysql',
@@ -148,15 +149,15 @@ class MySQLAdapter(object):
                runner=self.runner,
                locker=self.locker,
                options=options,
                )
            )
            self.dbiter = HistoryFreeDatabaseIterator(
                database_type='mysql',
                runner=self.runner,
                )
            )

        self.stats = MySQLStats(
            connmanager=self.connmanager,
            )
        )

    def new_instance(self):
        return MySQLAdapter(options=self.options, **self._params)
@@ -49,7 +49,7 @@ class MySQLdbConnectionManager(AbstractConnectionManager):
        return params

    def open(self, transaction_mode="ISOLATION LEVEL READ COMMITTED",
             replica_selector=None):
             replica_selector=None, **kwargs):
        """Open a database connection and return (conn, cursor)."""
        if replica_selector is None:
            replica_selector = self.replica_selector
@@ -15,8 +15,8 @@
"""
MySQL IDBDriver implementations.
"""

from __future__ import print_function, absolute_import
# pylint:disable=redefined-variable-type

import os
import sys
@@ -31,6 +31,8 @@ from ..interfaces import IDBDriver, IDBDriverOptions

from .._abstract_drivers import _standard_exceptions

from relstorage._compat import intern

logger = __import__('logging').getLogger(__name__)

database_type = 'mysql'
@@ -82,7 +84,7 @@ else: # pragma: no cover
        pymysql.err.Error,
        IOError,
        pymysql.err.DatabaseError
        )
    )

    disconnected_exceptions += (
        IOError, # This one can escape mapping;
@@ -105,6 +107,7 @@ else: # pragma: no cover

    if hasattr(pymysql.converters, 'escape_string'):
        orig_escape_string = pymysql.converters.escape_string

        def escape_string(value, mapping=None):
            if isinstance(value, bytearray) and not value:
                return value
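The guard above works around umysql misbehaving on empty bytearrays. The rest of the wrapper and how it is installed are not shown in the hunk, so the tail below is an assumption about how such a patch is typically completed:

    import pymysql.converters

    if hasattr(pymysql.converters, 'escape_string'):
        orig_escape_string = pymysql.converters.escape_string

        def escape_string(value, mapping=None):
            if isinstance(value, bytearray) and not value:
                # An empty bytearray can crash the driver; there is
                # nothing to escape anyway, so hand it back untouched.
                return value
            return orig_escape_string(value, mapping)  # assumed delegation

        pymysql.converters.escape_string = escape_string  # assumed patch-in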
@@ -167,9 +170,11 @@ else:
    from pymysql.err import InternalError, InterfaceError, ProgrammingError

    class UConnection(umysqldb.connections.Connection):
        # pylint:disable=abstract-method
        _umysql_conn = None

        def __debug_lock(self, sql, ex=False): # pragma: no cover
            if not 'GET_LOCK' in sql:
            if 'GET_LOCK' not in sql:
                return

            try:
@@ -270,7 +275,7 @@ else:
            assert not self._umysql_conn.is_connected()
            self._umysql_conn.close()
            del self._umysql_conn
            self._umysql_conn = umysql.Connection()
            self._umysql_conn = umysql.Connection() # pylint:disable=no-member
            self._connect() # Potentially this could raise again?

        def connect(self, *_args, **_kwargs): # pragma: no cover
@@ -279,7 +284,7 @@ else:
            return self._connect()

    @implementer(IDBDriver)
    class umysqldbDriver(PyMySQLDriver):
    class umysqldbDriver(PyMySQLDriver): # noqa
        __name__ = 'umysqldb'
        connect = UConnection
        # umysql has a tendency to crash when given a bytearray (which
@@ -291,8 +296,8 @@ else:


if (not preferred_driver_name
    or (preferred_driver_name == 'PyMySQL'
        and not hasattr(sys, 'pypy_version_info'))):
        or (preferred_driver_name == 'PyMySQL'
            and not hasattr(sys, 'pypy_version_info'))):
    preferred_driver_name = driver.__name__
    del driver

@@ -83,13 +83,15 @@ class MySQLObjectMover(AbstractObjectMover):
        cursor.execute(stmt, (tid,))

    @metricmethod_sampled
    def update_current(self, cursor, tid):
    def update_current(self, cursor, tid): # pylint:disable=method-hidden
        """Update the current object pointers.

        tid is the integer tid of the transaction being committed.
        """
        if not self.keep_history:
            # nothing needs to be updated
            # Can elide this check in the future.
            self.update_current = lambda cursor, tid: None
            return

        cursor.execute("""
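When history is free, `update_current` replaces itself with a no-op on the instance, so the `keep_history` check runs at most once per mover; the instance attribute then shadows the class method, which is exactly what pylint's method-hidden warning is about. A self-contained sketch of the pattern:

    class Mover(object):
        keep_history = False

        def update_current(self, cursor, tid):  # pylint:disable=method-hidden
            if not self.keep_history:
                # Nothing to do in history-free mode; memoize that fact by
                # shadowing the method with a no-op bound to this instance.
                self.update_current = lambda cursor, tid: None
                return
            # ... history-preserving update would go here ...

    m = Mover()
    m.update_current(None, 1)   # takes the checked path once
    m.update_current(None, 2)   # dispatches straight to the no-op lambda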
@@ -233,16 +233,16 @@ class MySQLSchemaInstaller(AbstractSchemaInstaller):
        self.runner.run_script(cursor, stmt)

    # Temp tables are created on a session-by-session basis
    def _create_temp_store(self, cursor):
    def _create_temp_store(self, _cursor):
        return

    def _create_temp_blob_chunk(self, cursor):
    def _create_temp_blob_chunk(self, _cursor):
        return

    def _create_temp_pack_visit(self, cursor):
    def _create_temp_pack_visit(self, _cursor):
        return

    def _create_temp_undo(self, cursor):
    def _create_temp_undo(self, _cursor):
        return

    def _init_after_create(self, cursor):
@@ -23,7 +23,7 @@ from ..txncontrol import AbstractTransactionControl
@implementer(ITransactionControl)
class MySQLTransactionControl(AbstractTransactionControl):

    def __init__(self, keep_history, Binary):
    def __init__(self, keep_history, Binary): # noqa
        self.keep_history = keep_history
        self.Binary = Binary

@@ -20,12 +20,6 @@ from __future__ import absolute_import
import six
import abc


from perfmetrics import metricmethod
from relstorage.adapters.interfaces import IOIDAllocator
from zope.interface import implementer
from relstorage._compat import mysql_connection

@six.add_metaclass(abc.ABCMeta)
class AbstractOIDAllocator(object):
    # All of these allocators allocate 16 OIDs at a time. In the sequence

@@ -47,7 +47,7 @@ def select_driver(options=None):
@implementer(IRelStorageAdapter)
class OracleAdapter(object):
    """Oracle adapter for RelStorage."""

    # pylint:disable=too-many-instance-attributes
    def __init__(self, user, password, dsn, commit_lock_id=0,
                 twophase=False, options=None):
        """Create an Oracle adapter.
@@ -59,6 +59,7 @@ class OracleAdapter(object):
        commit process. This is disabled by default. Even when this option
        is disabled, the ZODB two-phase commit is still in effect.
        """
        # pylint:disable=unused-argument
        self._user = user
        self._password = password
        self._dsn = dsn
@@ -78,18 +79,18 @@ class OracleAdapter(object):
            dsn=dsn,
            twophase=twophase,
            options=options,
            )
        )
        self.runner = CXOracleScriptRunner(driver)
        self.locker = OracleLocker(
            options=self.options,
            lock_exceptions=driver.lock_exceptions,
            inputsize_NUMBER=driver.NUMBER,
            )
        )
        self.schema = OracleSchemaInstaller(
            connmanager=self.connmanager,
            runner=self.runner,
            keep_history=self.keep_history,
            )
        )
        inputsizes = {
            'blobdata': driver.BLOB,
            'rawdata': driver.BINARY,
@@ -105,17 +106,17 @@ class OracleAdapter(object):
            runner=self.runner,
            Binary=driver.Binary,
            batcher_factory=lambda cursor, row_limit: OracleRowBatcher(cursor, inputsizes, row_limit),
            )
        )
        self.mover.inputsizes = inputsizes
        self.connmanager.set_on_store_opened(self.mover.on_store_opened)
        self.oidallocator = OracleOIDAllocator(
            connmanager=self.connmanager,
            )
        )
        self.txncontrol = OracleTransactionControl(
            keep_history=self.keep_history,
            Binary=driver.Binary,
            twophase=twophase,
            )
        )

        if self.keep_history:
            poll_query = "SELECT MAX(tid) FROM transaction"
@@ -128,6 +129,7 @@ class OracleAdapter(object):
            revert_when_stale=options.revert_when_stale,
        )

        # pylint:disable=redefined-variable-type
        if self.keep_history:
            self.packundo = OracleHistoryPreservingPackUndo(
                database_type='oracle',
@@ -135,11 +137,11 @@ class OracleAdapter(object):
                runner=self.runner,
                locker=self.locker,
                options=options,
                )
            )
            self.dbiter = HistoryPreservingDatabaseIterator(
                database_type='oracle',
                runner=self.runner,
                )
            )
        else:
            self.packundo = OracleHistoryFreePackUndo(
                database_type='oracle',
@@ -147,15 +149,15 @@ class OracleAdapter(object):
                runner=self.runner,
                locker=self.locker,
                options=options,
                )
            )
            self.dbiter = HistoryFreeDatabaseIterator(
                database_type='oracle',
                runner=self.runner,
                )
            )

        self.stats = OracleStats(
            connmanager=self.connmanager,
            )
        )

    def new_instance(self):
        # This adapter and its components are stateless, so it's
@@ -166,7 +168,7 @@ class OracleAdapter(object):
            dsn=self._dsn,
            twophase=self._twophase,
            options=self.options,
            )
        )

    def __str__(self):
        parts = [self.__class__.__name__]
@@ -40,7 +40,7 @@ class OracleRowBatcher(RowBatcher):

        def replace_var(match):
            name = match.group(1)
            new_name = '%s_%d' % (name, rownum)
            new_name = '%s_%d' % (name, rownum) # pylint:disable=undefined-loop-variable
            if name in self.inputsizes:
                stmt_inputsizes[new_name] = self.inputsizes[name]
            params[new_name] = row[name]
@@ -69,8 +69,8 @@ class OracleRowBatcher(RowBatcher):
            mod_row = oracle_rowvar_re.sub(replace_var, row_schema)
            parts.append("INTO %s VALUES (%s)" % (header, mod_row))

        parts = '\n'.join(parts)
        stmt = "INSERT ALL\n%s\nSELECT * FROM DUAL" % parts

        stmt = "INSERT ALL\n%s\nSELECT * FROM DUAL" % '\n'.join(parts)
        if stmt_inputsizes:
            self.cursor.setinputsizes(**stmt_inputsizes)
        self.cursor.execute(stmt, params)
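Oracle has no multi-row VALUES clause, so this batcher emits the INSERT ALL idiom instead, and the same inline-join fix as in the generic RowBatcher is applied here. With two hypothetical rows, the assembled statement looks like this:

    # Hypothetical per-row fragments after placeholder renaming.
    parts = ["INTO object_state VALUES (:oid_0, :state_0)",
             "INTO object_state VALUES (:oid_1, :state_1)"]
    stmt = "INSERT ALL\n%s\nSELECT * FROM DUAL" % '\n'.join(parts)
    print(stmt)
    # INSERT ALL
    # INTO object_state VALUES (:oid_0, :state_0)
    # INTO object_state VALUES (:oid_1, :state_1)
    # SELECT * FROM DUAL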
@@ -68,7 +68,7 @@ class CXOracleConnectionManager(AbstractConnectionManager):

    @metricmethod
    def open(self, transaction_mode="ISOLATION LEVEL READ COMMITTED",
             twophase=False, replica_selector=None):
             twophase=False, replica_selector=None, **kwargs):
        """Open a database connection and return (conn, cursor)."""
        if replica_selector is None:
            replica_selector = self.replica_selector
@@ -40,7 +40,7 @@ except ImportError:
else: # pragma: no cover

    @implementer(IDBDriver)
    class cx_OracleDriver(object):
    class cx_OracleDriver(object): # noqa
        __name__ = 'cx_Oracle'
        disconnected_exceptions, close_exceptions, lock_exceptions = _standard_exceptions(cx_Oracle)
        disconnected_exceptions += (cx_Oracle.DatabaseError,)
@@ -37,7 +37,8 @@ def _to_oracle_ordered(query_tuple):
@implementer(IObjectMover)
class OracleObjectMover(AbstractObjectMover):

    inputsizes = ()
    # This is assigned to by the adapter.
    inputsizes = None

    _move_from_temp_hp_insert_query = format_to_named(AbstractObjectMover._move_from_temp_hp_insert_query)
    _move_from_temp_hf_insert_query = format_to_named(AbstractObjectMover._move_from_temp_hf_insert_query)
@@ -239,7 +240,7 @@ class OracleObjectMover(AbstractObjectMover):
            state = :blobdata
        WHERE zoid = :oid
        """
        cursor.setinputsizes(blobdata=self.inputsizes['blobdata'])
        cursor.setinputsizes(blobdata=self.inputsizes['blobdata']) # pylint:disable=unsubscriptable-object
        cursor.execute(stmt, oid=oid, prev_tid=prev_tid,
                       md5sum=md5sum, blobdata=self.Binary(data))

@@ -264,7 +265,7 @@ class OracleObjectMover(AbstractObjectMover):
        bytecount = 0
        # Current versions of cx_Oracle only support offsets up
        # to sys.maxint or 4GB, whichever comes first.
        maxsize = min(sys.maxsize, 1<<32)
        maxsize = min(sys.maxsize, 1 << 32)
        try:
            cursor.execute(stmt, (oid, tid))
            while True:
@@ -279,9 +280,11 @@ class OracleObjectMover(AbstractObjectMover):
                f = open(filename, 'wb')
                # round off the chunk-size to be a multiple of the oracle
                # blob chunk size to maximize performance
                read_chunk_size = int(max(round(
                    1.0 * self.blob_chunk_size / blob.getchunksize()), 1) *
                    blob.getchunksize())
                read_chunk_size = int(
                    max(
                        round(1.0 * self.blob_chunk_size / blob.getchunksize()),
                        1)
                    * blob.getchunksize())
                offset = 1 # Oracle still uses 1-based indexing.
                reader = iter(lambda: blob.read(offset, read_chunk_size), b'')
                for read_chunk in reader:
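The reflowed expression rounds the configured blob chunk size to a whole multiple of the LOB chunk size Oracle reports, so each read is aligned. A worked example with hypothetical sizes:

    # Hypothetical values: 1 MiB configured, 8132 bytes reported by
    # blob.getchunksize().
    blob_chunk_size = 1 << 20
    oracle_chunksize = 8132
    read_chunk_size = int(
        max(
            round(1.0 * blob_chunk_size / oracle_chunksize),
            1)
        * oracle_chunksize)
    print(read_chunk_size)  # 1049028, i.e. 129 * 8132, just over 1 MiB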
@@ -307,7 +310,7 @@ class OracleObjectMover(AbstractObjectMover):
    # Current versions of cx_Oracle only support offsets up
    # to sys.maxint or 4GB, whichever comes first. We divide up our
    # upload into chunks within this limit.
    oracle_blob_chunk_maxsize = min(sys.maxsize, 1<<32)
    oracle_blob_chunk_maxsize = min(sys.maxsize, 1 << 32)

    @metricmethod_sampled
    def upload_blob(self, cursor, oid, tid, filename):
@@ -315,6 +318,7 @@ class OracleObjectMover(AbstractObjectMover):

        If serial is None, upload to the temporary table.
        """
        # pylint:disable=too-many-locals
        if tid is not None:
            if self.keep_history:
                delete_stmt = """
@@ -146,9 +146,9 @@ class OracleSchemaInstaller(AbstractSchemaInstaller):

    def prepare(self):
        """Create the database schema if it does not already exist."""
        def callback(conn, cursor):
        def callback(_conn, cursor):
            tables = self.list_tables(cursor)
            if not 'object_state' in tables:
            if 'object_state' not in tables:
                self.create(cursor)
            else:
                self.check_compatibility(cursor, tables)
@@ -33,6 +33,7 @@ def format_to_named(stmt):
        return _stmt_cache[stmt]
    except KeyError:
        matches = []

        def replace(_match):
            matches.append(None)
            return ':%d' % len(matches)
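`format_to_named` rewrites pyformat `%s` placeholders into the positional `:1`, `:2`, ... binds cx_Oracle expects, caching the result per statement. Only the counter callback appears in the hunk; the sketch below fills in an assumed surrounding function to show the whole idea:

    import re

    _stmt_cache = {}
    _rx = re.compile('%s')  # assumed placeholder pattern

    def format_to_named(stmt):
        try:
            return _stmt_cache[stmt]
        except KeyError:
            matches = []

            def replace(_match):
                matches.append(None)
                return ':%d' % len(matches)

            new_stmt = _rx.sub(replace, stmt)  # assumed tail of the function
            _stmt_cache[stmt] = new_stmt
            return new_stmt

    print(format_to_named("UPDATE t SET a = %s WHERE b = %s"))
    # UPDATE t SET a = :1 WHERE b = :2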
@@ -74,7 +75,7 @@ class OracleScriptRunner(ScriptRunner):
                params[k] = v
        else:
            stmt = generic_stmt % self.script_vars
            params = ()
            params = () # pylint:disable=redefined-variable-type

        try:
            cursor.execute(stmt, params)
@@ -116,6 +117,7 @@ class CXOracleScriptRunner(OracleScriptRunner):
        error indicating truncation. The run_lob_stmt() method works
        around this.
        """
        # pylint:disable=unused-argument
        if defaultType == self.driver.BLOB:
            # Default size for BLOB is 4, we want the whole blob inline.
            # Typical chunk size is 8132, we choose a multiple - 32528

@@ -23,26 +23,26 @@ class OracleStats(AbstractStats):
        """Returns the number of objects in the database"""
        # The tests expect an exact number, but the code below generates
        # an estimate, so this is disabled for now.
        if True:
            return 0
        else:
            conn, cursor = self.connmanager.open(
                self.connmanager.isolation_read_only)
            try:
                stmt = """
                SELECT NUM_ROWS
                FROM USER_TABLES
                WHERE TABLE_NAME = 'CURRENT_OBJECT'
                """
                cursor.execute(stmt)
                res = cursor.fetchone()[0]
                if res is None:
                    res = 0
                else:
                    res = int(res)
                return res
            finally:
                self.connmanager.close(conn, cursor)
        return 0

    def _estimate_object_count(self):
        conn, cursor = self.connmanager.open(
            self.connmanager.isolation_read_only)
        try:
            stmt = """
            SELECT NUM_ROWS
            FROM USER_TABLES
            WHERE TABLE_NAME = 'CURRENT_OBJECT'
            """
            cursor.execute(stmt)
            res = cursor.fetchone()[0]
            if res is None:
                res = 0
            else:
                res = int(res)
            return res
        finally:
            self.connmanager.close(conn, cursor)

    def get_db_size(self):
        """Returns the approximate size of the database in bytes"""
@@ -14,6 +14,8 @@
"""Pack/Undo implementations.
"""

# pylint:disable=too-many-lines,unused-argument

from ZODB.POSException import UndoError
from ZODB.utils import u64
from perfmetrics import metricmethod
@@ -34,6 +36,8 @@ class PackUndo(object):

    verify_sane_database = False

    _script_choose_pack_transaction = None

    def __init__(self, database_type, connmanager, runner, locker, options):
        self.database_type = database_type
        self.connmanager = connmanager
@@ -354,7 +358,7 @@ class HistoryPreservingPackUndo(PackUndo):
            SELECT zoid, prev_tid FROM temp_undo
            """
        self.runner.run_script(cursor, stmt,
            {'undo_tid': undo_tid, 'self_tid': self_tid})
                               {'undo_tid': undo_tid, 'self_tid': self_tid})
        res = list(cursor)

        stmt = self._script_reset_temp_undo
@@ -420,7 +424,7 @@ class HistoryPreservingPackUndo(PackUndo):
                    state = db_binary_to_bytes(state)
                    if hasattr(state, 'read'):
                        # Oracle
                        state = state.read()
                        state = state.read() # pylint:disable=no-member
                    if state:
                        assert isinstance(state, bytes), type(state) # PY3: used to be str(state)
                        from_count += 1
@@ -429,8 +433,8 @@ class HistoryPreservingPackUndo(PackUndo):
                        except:
                            log.error(
                                "pre_pack: can't unpickle "
                                "object %d in transaction %d; state length = %d" % (
                                from_oid, tid, len(state)))
                                "object %d in transaction %d; state length = %d",
                                from_oid, tid, len(state))
                            raise
                        for to_oid in to_oids:
                            add_rows.append((from_oid, tid, to_oid))
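The log.error hunk switches from eager %-interpolation to passing the arguments through to the logging call (what pylint calls logging-not-lazy): the string is then only built if the record is actually emitted. A small sketch of the difference:

    import logging
    logging.basicConfig()
    log = logging.getLogger("pre_pack")

    from_oid, tid, state = 42, 280000, b'...'
    # Eager: the message is formatted even if ERROR were filtered out.
    log.error("can't unpickle object %d in transaction %d; state length = %d"
              % (from_oid, tid, len(state)))
    # Lazy: the logging framework interpolates only when the record is handled.
    log.error("can't unpickle object %d in transaction %d; state length = %d",
              from_oid, tid, len(state))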
@@ -456,7 +460,7 @@ class HistoryPreservingPackUndo(PackUndo):

        to_count = len(add_rows)
        log.debug("pre_pack: transaction %d: has %d reference(s) "
            "from %d object(s)", tid, to_count, from_count)
                  "from %d object(s)", tid, to_count, from_count)
        return to_count

    @metricmethod
@@ -519,7 +523,7 @@ class HistoryPreservingPackUndo(PackUndo):
                        AND tid <= %(pack_tid)s
                    """
                    self.runner.run_script_stmt(
                        cursor, stmt, {'pack_tid':pack_tid})
                        cursor, stmt, {'pack_tid': pack_tid})
                    to_remove += cursor.rowcount

                log.info("pre_pack: enumerating transactions to pack")
@@ -533,7 +537,7 @@ class HistoryPreservingPackUndo(PackUndo):
                cursor.execute(stmt)

                log.info("pre_pack: will remove %d object state(s)",
                    to_remove)
                         to_remove)

            except:
                log.exception("pre_pack: failed")
@@ -645,10 +649,11 @@ class HistoryPreservingPackUndo(PackUndo):
    @metricmethod
    def pack(self, pack_tid, sleep=None, packed_func=None):
        """Pack. Requires the information provided by pre_pack."""

        # pylint:disable=too-many-locals
        # Read committed mode is sufficient.

        conn, cursor = self.connmanager.open()
        try:
        try: # pylint:disable=too-many-nested-blocks
            try:
                stmt = """
                SELECT transaction.tid,
@@ -697,9 +702,9 @@ class HistoryPreservingPackUndo(PackUndo):
                            statecounter += len(packed_list)
                            if counter >= lastreport + reportstep:
                                log.info("pack: packed %d (%.1f%%) transaction(s), "
                                    "affecting %d states",
                                    counter, counter/float(total)*100,
                                    statecounter)
                                         "affecting %d states",
                                         counter, counter / float(total) * 100,
                                         statecounter)
                                lastreport = counter / reportstep * reportstep
                            del packed_list[:]
                            self.locker.release_commit_lock(cursor)
@@ -726,7 +731,7 @@ class HistoryPreservingPackUndo(PackUndo):


    def _pack_transaction(self, cursor, pack_tid, tid, packed,
            has_removable, packed_list):
                          has_removable, packed_list):
        """Pack one transaction. Requires populated pack tables."""
        log.debug("pack: transaction %d: packing", tid)
        removed_objects = 0
@@ -748,7 +753,7 @@ class HistoryPreservingPackUndo(PackUndo):
                AND tid <= %(pack_tid)s
            """
            self.runner.run_script_stmt(cursor, stmt,
                {'pack_tid': pack_tid, 'tid': tid})
                                        {'pack_tid': pack_tid, 'tid': tid})

            stmt = """
            SELECT pack_state.zoid
@@ -932,7 +937,7 @@ class HistoryFreePackUndo(PackUndo):
                    state = db_binary_to_bytes(state)
                    if hasattr(state, 'read'):
                        # Oracle
                        state = state.read()
                        state = state.read() # pylint:disable=no-member
                    add_objects.append((from_oid, tid))
                    if state:
                        assert isinstance(state, bytes), type(state)
@@ -1046,9 +1051,10 @@ class HistoryFreePackUndo(PackUndo):

        Requires the information provided by pre_pack.
        """
        # pylint:disable=too-many-locals
        # Read committed mode is sufficient.
        conn, cursor = self.connmanager.open()
        try:
        try: # pylint:disable=too-many-nested-blocks
            try:
                stmt = """
                SELECT zoid, keep_tid
@@ -1090,7 +1096,7 @@ class HistoryFreePackUndo(PackUndo):
                    counter = total - len(to_remove)
                    if counter >= lastreport + reportstep:
                        log.info("pack: removed %d (%.1f%%) state(s)",
                            counter, counter/float(total)*100)
                                 counter, counter / float(total) * 100)
                        lastreport = counter / reportstep * reportstep
                    self.locker.release_commit_lock(cursor)
                    self._pause_pack_until_lock(cursor, sleep)
@@ -46,16 +46,14 @@ class Poller(object):
        that the changes are too complex to list. new_polled_tid can be
        0 if there is no data in the database.
        """
        # pylint:disable=unused-argument
        # find out the tid of the most recent transaction.
        cursor.execute(self.poll_query)
        rows = list(cursor)
        if not rows:
        if not rows or not rows[0][0]:
            # No data.
            return None, 0
        new_polled_tid = rows[0][0]
        if not new_polled_tid:
            # No data.
            return None, 0

        if prev_polled_tid is None:
            # This is the first time the connection has polled.
@@ -65,54 +63,7 @@ class Poller(object):
            # No transactions have been committed since prev_polled_tid.
            return (), new_polled_tid

        elif new_polled_tid > prev_polled_tid:
            # New transaction(s) have been added.

            if self.keep_history:
                # If the previously polled transaction no longer exists,
                # the cache is too old and needs to be cleared.
                # XXX Do we actually need to detect this condition? I think
                # if we delete this block of code, all the unreachable
                # objects will be garbage collected anyway. So, as a test,
                # there is no equivalent of this block of code for
                # history-free storage. If something goes wrong, then we'll
                # know there's some other edge condition we have to account
                # for.
                stmt = "SELECT 1 FROM transaction WHERE tid = %(tid)s"
                cursor.execute(
                    intern(stmt % self.runner.script_vars),
                    {'tid': prev_polled_tid})
                rows = cursor.fetchall()
                if not rows:
                    # Transaction not found; perhaps it has been packed.
                    # The connection cache should be cleared.
                    return None, new_polled_tid

            # Get the list of changed OIDs and return it.
            if self.keep_history:
                stmt = """
                SELECT zoid, tid
                FROM current_object
                WHERE tid > %(tid)s
                """
            else:
                stmt = """
                SELECT zoid, tid
                FROM object_state
                WHERE tid > %(tid)s
                """
            params = {'tid': prev_polled_tid}
            if ignore_tid is not None:
                stmt += " AND tid != %(self_tid)s"
                params['self_tid'] = ignore_tid
            stmt = intern(stmt % self.runner.script_vars)

            cursor.execute(stmt, params)
            changes = cursor.fetchall()

            return changes, new_polled_tid

        else:
        if new_polled_tid <= prev_polled_tid:
            # The database connection is stale. This can happen after
            # reading an asynchronous slave that is not fully up to date.
            # (It may also suggest that transaction IDs are not being created
@@ -127,14 +78,61 @@ class Poller(object):
                # We have to invalidate the whole cPickleCache, otherwise
                # the cache would be inconsistent with the reverted state.
                return None, new_polled_tid
            else:
                # This client never wants to revert to stale data, so
                # raise ReadConflictError to trigger a retry.
                # We're probably just waiting for async replication
                # to catch up, so retrying could do the trick.
                raise ReadConflictError(
                    "The database connection is stale: new_polled_tid=%d, "
                    "prev_polled_tid=%d." % (new_polled_tid, prev_polled_tid))

            # This client never wants to revert to stale data, so
            # raise ReadConflictError to trigger a retry.
            # We're probably just waiting for async replication
            # to catch up, so retrying could do the trick.
            raise ReadConflictError(
                "The database connection is stale: new_polled_tid=%d, "
                "prev_polled_tid=%d." % (new_polled_tid, prev_polled_tid))


        # New transaction(s) have been added.

        if self.keep_history:
            # If the previously polled transaction no longer exists,
            # the cache is too old and needs to be cleared.
            # XXX Do we actually need to detect this condition? I think
            # if we delete this block of code, all the unreachable
            # objects will be garbage collected anyway. So, as a test,
            # there is no equivalent of this block of code for
            # history-free storage. If something goes wrong, then we'll
            # know there's some other edge condition we have to account
            # for.
            stmt = "SELECT 1 FROM transaction WHERE tid = %(tid)s"
            cursor.execute(
                intern(stmt % self.runner.script_vars),
                {'tid': prev_polled_tid})
            rows = cursor.fetchall()
            if not rows:
                # Transaction not found; perhaps it has been packed.
                # The connection cache should be cleared.
                return None, new_polled_tid

        # Get the list of changed OIDs and return it.
        if self.keep_history:
            stmt = """
            SELECT zoid, tid
            FROM current_object
            WHERE tid > %(tid)s
            """
        else:
            stmt = """
            SELECT zoid, tid
            FROM object_state
            WHERE tid > %(tid)s
            """
        params = {'tid': prev_polled_tid}
        if ignore_tid is not None:
            stmt += " AND tid != %(self_tid)s"
            params['self_tid'] = ignore_tid
        stmt = intern(stmt % self.runner.script_vars)

        cursor.execute(stmt, params)
        changes = cursor.fetchall()

        return changes, new_polled_tid

    def list_changes(self, cursor, after_tid, last_tid):
        """Return the (oid, tid) values changed in a range of transactions.
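The rewrite flattens the poll method's if/elif/else ladder: the no-data, first-poll, unchanged, and stale cases now all return or raise early, leaving the "new transactions" work un-nested at the end. A condensed, self-contained sketch of the resulting decision ladder (names and return markers simplified; this is not the real method):

    class ReadConflictError(Exception):
        # Stand-in for ZODB.POSException.ReadConflictError.
        pass

    def poll_outcome(new_tid, prev_tid, revert_when_stale):
        # None means "clear the cache"; a tuple means "these changes".
        if not new_tid:
            return None, 0                  # no data in the database
        if prev_tid is None:
            return None, new_tid            # first poll on this connection
        if new_tid == prev_tid:
            return (), new_tid              # nothing committed since last poll
        if new_tid < prev_tid:
            # Stale replica: revert (invalidate everything) or retry.
            if revert_when_stale:
                return None, new_tid
            raise ReadConflictError("The database connection is stale")
        return ('...changes...',), new_tid  # enumerate the new transactions

    print(poll_outcome(10, 10, False))      # ((), 10)
    print(poll_outcome(10, None, False))    # (None, 10)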
@@ -47,6 +47,7 @@ def select_driver(options=None):
class PostgreSQLAdapter(object):
    """PostgreSQL adapter for RelStorage."""

    # pylint:disable=too-many-instance-attributes
    def __init__(self, dsn='', options=None):
        # options is a relstorage.options.Options or None
        self._dsn = dsn
@@ -63,32 +64,32 @@ class PostgreSQLAdapter(object):
            driver,
            dsn=dsn,
            options=options,
            )
        )
        self.runner = ScriptRunner()
        self.locker = PostgreSQLLocker(
            options=options,
            lock_exceptions=driver.lock_exceptions,
            version_detector=self.version_detector,
            )
        )
        self.schema = PostgreSQLSchemaInstaller(
            connmanager=self.connmanager,
            runner=self.runner,
            locker=self.locker,
            keep_history=self.keep_history,
            )
        )
        self.mover = PostgreSQLObjectMover(
            database_type='postgresql',
            options=options,
            runner=self.runner,
            version_detector=self.version_detector,
            Binary=driver.Binary,
            )
        )
        self.connmanager.set_on_store_opened(self.mover.on_store_opened)
        self.oidallocator = PostgreSQLOIDAllocator()
        self.txncontrol = PostgreSQLTransactionControl(
            keep_history=self.keep_history,
            driver=driver,
            )
        )

        self.poller = Poller(
            poll_query="EXECUTE get_latest_tid",
@@ -96,7 +97,7 @@ class PostgreSQLAdapter(object):
            runner=self.runner,
            revert_when_stale=options.revert_when_stale,
        )

        # pylint:disable=redefined-variable-type
        if self.keep_history:
            self.packundo = HistoryPreservingPackUndo(
                database_type='postgresql',
@@ -104,11 +105,11 @@ class PostgreSQLAdapter(object):
                runner=self.runner,
                locker=self.locker,
                options=options,
                )
            )
            self.dbiter = HistoryPreservingDatabaseIterator(
                database_type='postgresql',
                runner=self.runner,
                )
            )
        else:
            self.packundo = HistoryFreePackUndo(
                database_type='postgresql',
@@ -116,15 +117,15 @@ class PostgreSQLAdapter(object):
                runner=self.runner,
                locker=self.locker,
                options=options,
                )
            )
            self.dbiter = HistoryFreeDatabaseIterator(
                database_type='postgresql',
                runner=self.runner,
                )
            )

        self.stats = PostgreSQLStats(
            connmanager=self.connmanager,
            )
        )

    def new_instance(self):
        inst = type(self)(dsn=self._dsn, options=self.options)

@@ -48,7 +48,7 @@ class Psycopg2ConnectionManager(AbstractConnectionManager):
        return dsn

    @metricmethod
    def open(self, isolation=None, replica_selector=None):
    def open(self, isolation=None, replica_selector=None, **kwargs):
        """Open a database connection and return (conn, cursor)."""
        if isolation is None:
            isolation = self.isolation_read_committed

@@ -17,7 +17,7 @@ PostgreSQL IDBDriver implementations.
"""

from __future__ import print_function, absolute_import