Wrap lines at 100. Fixes #166.

Jason Madden 2017-02-09 09:59:38 -06:00
parent 3e4009f2e6
commit b1ea762bf4
No known key found for this signature in database
GPG Key ID: 349F84431A08B99E
16 changed files with 83 additions and 42 deletions
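The wrapping in the hunks below relies on two standard Python idioms: breaking an argument list across lines inside the parentheses the call already has, and splitting a long string literal into adjacent pieces that the parser joins implicitly. A minimal sketch of both idioms, using made-up names rather than code from this repository:

    def fail(name):
        # The expression is already inside parentheses, so it may span several
        # lines, and the two adjacent string literals are concatenated at
        # compile time before the % formatting is applied.
        raise ValueError(
            "something went wrong with %s;"
            " the operation could not be completed" % name)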

View File

@@ -49,7 +49,7 @@ disable=wrong-import-position,
[FORMAT]
# duplicated from setup.cfg
max-line-length=160
max-line-length=100
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.

View File

@@ -32,7 +32,8 @@ def _select_driver(options, driver_options):
raise ImportError("Unable to use the driver '%s' for the database '%s'."
" Available drivers are: %s."
" Verify the driver name and that the right packages are installed."
% (name, driver_options.database_type, list(driver_options.driver_map.keys())))
% (name, driver_options.database_type,
list(driver_options.driver_map.keys())))
_base_disconnected_exceptions = (ReplicaClosedException,)

View File

@@ -148,7 +148,8 @@ else:
# See https://github.com/zodb/relstorage/issues/155
__name__ = "MySQL Connector/Python"
disconnected_exceptions, close_exceptions, lock_exceptions = _standard_exceptions(mysql.connector)
disconnected_exceptions, close_exceptions, lock_exceptions = _standard_exceptions(
mysql.connector)
use_replica_exceptions = (mysql.connector.OperationalError,)
Binary = staticmethod(mysql.connector.Binary)
@@ -348,8 +349,9 @@ else:
# (if the blob chunk size was configured too high), or it can
# happen for aggregate queries (the dbiter.iter_objects query is a
# particularly common cause of this). Retrying won't help.
raise TransactionTooLargeError('umysql got results bigger than 16MB.'
" Reduce the server's max_allowed_packet setting.")
raise TransactionTooLargeError(
"umysql got results bigger than 16MB."
" Reduce the server's max_allowed_packet setting.")
raise
except Exception: # pragma: no cover
self.__debug_lock(sql, True)
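The message above points at the server-side max_allowed_packet setting, which is what lets result rows grow past what umysql can handle. A quick, illustrative way to check the current value over any MySQL connection, using the Connector/Python driver already referenced in this file (the credentials here are placeholders):

    import mysql.connector

    conn = mysql.connector.connect(host="localhost", user="user", password="pw")
    cur = conn.cursor()
    # The value is reported in bytes; umysql chokes on result sets past 16MB.
    cur.execute("SHOW VARIABLES LIKE 'max_allowed_packet'")
    print(cur.fetchone())
    conn.close()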

View File

@@ -105,7 +105,8 @@ class OracleAdapter(object):
options=options,
runner=self.runner,
Binary=driver.Binary,
batcher_factory=lambda cursor, row_limit: OracleRowBatcher(cursor, inputsizes, row_limit),
batcher_factory=lambda cursor, row_limit: OracleRowBatcher(
cursor, inputsizes, row_limit),
)
self.mover.inputsizes = inputsizes
self.connmanager.add_on_store_opened(self.mover.on_store_opened)

View File

@@ -40,9 +40,12 @@ class OracleObjectMover(AbstractObjectMover):
# This is assigned to by the adapter.
inputsizes = None
_move_from_temp_hp_insert_query = format_to_named(AbstractObjectMover._move_from_temp_hp_insert_query)
_move_from_temp_hf_insert_query = format_to_named(AbstractObjectMover._move_from_temp_hf_insert_query)
_move_from_temp_copy_blob_query = format_to_named(AbstractObjectMover._move_from_temp_copy_blob_query)
_move_from_temp_hp_insert_query = format_to_named(
AbstractObjectMover._move_from_temp_hp_insert_query)
_move_from_temp_hf_insert_query = format_to_named(
AbstractObjectMover._move_from_temp_hf_insert_query)
_move_from_temp_copy_blob_query = format_to_named(
AbstractObjectMover._move_from_temp_copy_blob_query)
_load_current_queries = _to_oracle_ordered(AbstractObjectMover._load_current_queries)

View File

@@ -82,7 +82,8 @@ else: # pragma: no cover
@implementer(IDBDriver)
class Psycopg2cffiDriver(object):
__name__ = 'psycopg2cffi'
disconnected_exceptions, close_exceptions, lock_exceptions = _standard_exceptions(psycopg2cffi)
disconnected_exceptions, close_exceptions, lock_exceptions = _standard_exceptions(
psycopg2cffi)
use_replica_exceptions = (psycopg2cffi.OperationalError,)
Binary = psycopg2cffi.Binary
connect = _create_connection(psycopg2cffi)

View File

@@ -56,7 +56,8 @@ class PostgreSQLObjectMover(AbstractObjectMover):
_detect_conflict_query = 'EXECUTE detect_conflicts'
on_load_opened_statement_names = ('_prepare_load_current_query',)
on_store_opened_statement_names = on_load_opened_statement_names + ('_prepare_detect_conflict_query',)
on_store_opened_statement_names = on_load_opened_statement_names + (
'_prepare_detect_conflict_query',)
# Sadly we can't PREPARE this statement; apparently it holds a
# lock on OBJECT_STATE that interferes with taking the commit lock.

View File

@@ -338,7 +338,10 @@ class CacheRing(object):
# Treat it as a simple hit
return self.on_hit(entry)
rejected_items = _lru_update_mru(self.cffi_cache, self.ring_home, entry.cffi_ring_node, old_size, new_size)
rejected_items = _lru_update_mru(self.cffi_cache,
self.ring_home,
entry.cffi_ring_node,
old_size, new_size)
if not rejected_items.r_next:
# Nothing rejected.

View File

@@ -217,7 +217,8 @@ def __set_mod_time(new_path, persistent_cache):
if mod_time and mod_time > 0:
# Older PyPy on Linux raises an OSError/Errno22 if the mod_time is less than 0
# and is a float (https://bitbucket.org/pypy/pypy/issues/2408/cpython-difference-osutime-path-11-11)
# and is a float
# (https://bitbucket.org/pypy/pypy/issues/2408/cpython-difference-osutime-path-11-11)
logger.debug("Setting date of %r to cache time %s (current time %s)",
new_path, mod_time, time.time())
os.utime(new_path, (mod_time, mod_time))

View File

@@ -195,7 +195,10 @@ class StorageCache(object):
This is usually memcache connections if they're in use.
"""
clients = self.clients_local_first if self.clients_local_first is not _UsedAfterRelease else ()
clients = ()
if self.clients_local_first is not _UsedAfterRelease:
clients = self.clients_local_first
for client in clients:
client.disconnect_all()
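The comparison against _UsedAfterRelease above is a sentinel guard: once the cache has been released, the attribute no longer holds a live client list, so later disconnect calls quietly do nothing instead of touching closed connections. A stand-alone sketch of the pattern, with illustrative class and method names (only the sentinel idea comes from the code above):

    class _Sentinel(object):
        pass

    _UsedAfterRelease = _Sentinel()

    class _Holder(object):
        def __init__(self, clients):
            self.clients_local_first = clients

        def release(self):
            # Drop the live list and leave the sentinel behind.
            self.clients_local_first = _UsedAfterRelease

        def disconnect_all(self):
            clients = ()
            if self.clients_local_first is not _UsedAfterRelease:
                clients = self.clients_local_first
            for client in clients:
                client.disconnect_all()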

View File

@@ -261,7 +261,8 @@ class StorageTraceSimulator(object):
records = []
with self._open_file(filename) as f:
for line in f:
line = line.decode('ascii') if isinstance(line, bytes) and str is not bytes else line
if isinstance(line, bytes) and str is not bytes:
line = line.decode('ascii')
fields = [x.strip() for x in line.split(",")]
fields[0] = int(fields[0]) # asu
try:
@@ -284,10 +285,12 @@ class StorageTraceSimulator(object):
def _report_one(self, stats, f, cache_local_mb, begin_time, end_time):
stats['time'] = end_time - begin_time
print("{:15s} {:>5s} {:>7s} {:>7s} {:>5s}".format("File", "Limit", "Size", "Time", "Hits"))
print("{:15s} {:5d} {:7.2f} {:7.2f} {:.3f}".format(os.path.basename(f), cache_local_mb,
stats['bytes'] / 1024 / 1024, stats['time'],
stats['ratio']))
print("{:15s} {:>5s} {:>7s} {:>7s} {:>5s}".format(
"File", "Limit", "Size", "Time", "Hits"))
print("{:15s} {:5d} {:7.2f} {:7.2f} {:.3f}".format(
os.path.basename(f), cache_local_mb,
stats['bytes'] / 1024 / 1024, stats['time'],
stats['ratio']))
def _simulate_local(self, records, cache_local_mb, f):
from relstorage.cache.local_client import LocalClient
@@ -415,7 +418,8 @@ class StorageTraceSimulator(object):
# Poll after a certain number of operations, or if we know we would get a
# conflict.
if current_tid_int - cache.bm_current_tid >= TRANSACTION_SIZE or oid_int in cache.bm_changes:
if (current_tid_int - cache.bm_current_tid >= TRANSACTION_SIZE
or oid_int in cache.bm_changes):
cache.after_poll(None, cache.bm_current_tid, current_tid_int,
cache.bm_changes.items())
cache.bm_current_tid = current_tid_int
@@ -467,9 +471,12 @@ class StorageTraceSimulator(object):
stats = meth(records, size, f)
all_stats.append((f, size, stats))
print("{:15s} {:>5s} {:>7s} {:>7s} {:>5s}".format("File", "Limit", "Size", "Time", "Hits"))
print("{:15s} {:>5s} {:>7s} {:>7s} {:>5s}".format(
"File", "Limit", "Size", "Time", "Hits"))
for f, size, stats in all_stats:
print("{:15s} {:5d} {:7.2f} {:7.2f} {:.3f}".format(os.path.basename(f), size, stats['bytes'] / 1024 / 1024, stats['time'], stats['ratio']))
print("{:15s} {:5d} {:7.2f} {:7.2f} {:.3f}".format(
os.path.basename(f), size, stats['bytes'] / 1024 / 1024,
stats['time'], stats['ratio']))
else:
size = int(sys.argv[3])

View File

@@ -79,7 +79,8 @@ if __name__ == '__main__':
sys.argv.remove('--only-oracle')
logging.basicConfig(level=logging.CRITICAL,
format='%(asctime)s %(levelname)-5.5s [%(name)s][%(thread)d:%(process)d][%(threadName)s] %(message)s')
format='%(asctime)s %(levelname)-5.5s'
' [%(name)s][%(thread)d:%(process)d][%(threadName)s] %(message)s')
# We get constant errors about failing to lock a blob file,
# which really bloats the CI logs, so turn those off.
logging.getLogger('zc.lockfile').setLevel(logging.CRITICAL)

View File

@@ -66,13 +66,14 @@ del _f
def random_file(size, fd):
"""Create a random data of at least the given size, writing to fd.
"""
Create a random data of at least the given size, writing to fd.
See http://jessenoller.com/2008/05/30/making-re-creatable-random-data-files-really-fast-in-python/
See
http://jessenoller.com/2008/05/30/making-re-creatable-random-data-files-really-fast-in-python/
for the technique used.
Returns the md5 sum of the file contents for easy comparison.
"""
def fdata():
seed = "1092384956781341341234656953214543219"

View File

@@ -48,14 +48,20 @@ class AbstractZODBConvertBase(unittest.TestCase):
for i in self._to_close:
i.close()
self._to_close = []
# XXX: On PyPy with psycopg2cffi, running these two tests will result
# in a hang: HPPostgreSQLDestZODBConvertTests.test_clear_empty_dest HPPostgreSQLDestZODBConvertTests.test_clear_full_dest
# test_clear_full_dest will hang in the zodbconvert call to zap_all(), in the C code of the
# PG driver. Presumably some connection with some lock got left open and was preventing
# the TRUNCATE statements from taking out a lock. The same tests do not hang with psycopg2cffi on
# C Python. Manually running the gc (twice!) here fixes the issue. Note that this only started when
# we wrapped the destination storage in ZlibStorage (which copies methods into its own dict) so there's
# something weird going on with the GC. Seen in PyPy 2.5.0 and 5.3.
# XXX: On PyPy with psycopg2cffi, running these two tests will
# result in a hang:
# HPPostgreSQLDestZODBConvertTests.test_clear_empty_dest
# HPPostgreSQLDestZODBConvertTests.test_clear_full_dest
# test_clear_full_dest will hang in the zodbconvert call to
# zap_all(), in the C code of the PG driver. Presumably some
# connection with some lock got left open and was preventing
# the TRUNCATE statements from taking out a lock. The same
# tests do not hang with psycopg2cffi on C Python. Manually
# running the gc (twice!) here fixes the issue. Note that this
# only started when we wrapped the destination storage in
# ZlibStorage (which copies methods into its own dict) so
# there's something weird going on with the GC. Seen in PyPy
# 2.5.0 and 5.3.
gc.collect()
gc.collect()
@@ -229,10 +235,14 @@ class ZlibWrappedZODBConvertTests(FSZODBConvertTests):
return "%import zc.zlibstorage\n"
def _cfg_source(self):
return "\n<zlibstorage source>" + super(ZlibWrappedZODBConvertTests, self)._cfg_source() + "</zlibstorage>"
return ("\n<zlibstorage source>"
+ super(ZlibWrappedZODBConvertTests, self)._cfg_source()
+ "</zlibstorage>")
def _cfg_dest(self):
return "\n<zlibstorage destination>" + super(ZlibWrappedZODBConvertTests, self)._cfg_dest() + "</zlibstorage>"
return ("\n<zlibstorage destination>"
+ super(ZlibWrappedZODBConvertTests, self)._cfg_dest()
+ "</zlibstorage>")
def _create_src_storage(self):
return ZlibStorage(super(ZlibWrappedZODBConvertTests, self)._create_src_storage())

View File

@@ -116,10 +116,14 @@ class _PgSQLCfgMixin(object):
</postgresql>
"""
class HPPostgreSQLDestZODBConvertTests(UsePostgreSQLAdapter, _PgSQLCfgMixin, AbstractRSDestZodbConvertTests):
class HPPostgreSQLDestZODBConvertTests(UsePostgreSQLAdapter,
_PgSQLCfgMixin,
AbstractRSDestZodbConvertTests):
pass
class HPPostgreSQLSrcZODBConvertTests(UsePostgreSQLAdapter, _PgSQLCfgMixin, AbstractRSSrcZodbConvertTests):
class HPPostgreSQLSrcZODBConvertTests(UsePostgreSQLAdapter,
_PgSQLCfgMixin,
AbstractRSSrcZodbConvertTests):
pass
class HPPostgreSQLTests(UsePostgreSQLAdapter, HistoryPreservingRelStorageTests,

View File

@@ -80,7 +80,8 @@ def main(argv=None):
parser.add_argument(
"--clear", dest="clear", action="store_true",
default=False,
help="Clear the contents of the destination storage before copying. Only works if the destination is a RelStorage."
help="Clear the contents of the destination storage before copying."
" Only works if the destination is a RelStorage."
" WARNING: use this only if you are certain the destination has no useful data.")
parser.add_argument(
"--incremental", dest="incremental", action="store_true",
@@ -110,9 +111,10 @@ def main(argv=None):
log.info("Storages opened successfully.")
if options.incremental:
assert hasattr(destination, 'lastTransaction'), ("Error: no API is known for determining the last committed "
"transaction of the destination storage. Aborting "
"conversion.")
assert hasattr(destination, 'lastTransaction'), (
"Error: no API is known for determining the last committed "
"transaction of the destination storage. Aborting "
"conversion.")
if not storage_has_data(destination):
log.warning("Destination empty, start conversion from the beginning.")