This page collects typical code examples of the Python method s3ql.database.Connection.close. If you are unsure what Connection.close does, how to call it, or where it fits, the curated examples below may help. You can also explore other uses of the containing class, s3ql.database.Connection.
Seven code examples of Connection.close are shown below, sorted by popularity.
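Before the examples, the basic shape is worth stating: open a Connection on an SQLite database file, issue statements through it, and release the handle with close() when done. The following is a minimal sketch, not taken from s3ql itself; it assumes only the Connection(path), execute() and close() calls that appear in the examples below:

from s3ql.database import Connection

db = Connection('/tmp/example.db')  # path to an SQLite database file
try:
    db.execute('CREATE TABLE IF NOT EXISTS demo (id INTEGER PRIMARY KEY)')
finally:
    db.close()  # always release the handle, even if execute() raises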
Example 1: db
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import close [as alias]
def db():
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)
    create_tables(db)
    init_tables(db)
    try:
        yield db
    finally:
        db.close()
        dbfile.close()
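Example 1 is a generator that yields the open connection and cleans up in a finally block, which is exactly the shape of a pytest fixture. A hypothetical way to wire it up (the @pytest.fixture decorator and the test below are illustrations; create_tables and init_tables are the same external helpers the example itself relies on):

import tempfile
import pytest
from s3ql.database import Connection

@pytest.fixture
def db():
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)
    create_tables(db)   # helpers assumed from s3ql's test suite, as in Example 1
    init_tables(db)
    try:
        yield db        # pytest resumes the generator after the test finishes
    finally:
        db.close()
        dbfile.close()

def test_something(db):
    db.execute('PRAGMA user_version')  # any statement; the fixture closes db afterwards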
Example 2: run
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import close [as alias]
def run(self):
    log.debug('MetadataDownloadThread: start')
    while not self.quit:
        self.event.wait(self.interval)
        self.event.clear()
        if self.quit:
            break
        with self.bucket_pool() as bucket:
            # XXX: call bucket.is_get_consistent() to verify data consistency later
            seq_no = get_seq_no(bucket)
            if seq_no > self.param['seq_no']:
                log.info('Remote metadata is newer than local (%d vs %d), '
                         'downloading it', seq_no, self.param['seq_no'])
            elif seq_no < self.param['seq_no']:
                log.warn('Remote metadata is older than local (%d vs %d), '
                         'ignoring the bucket until the metadata upload thread is done',
                         seq_no, self.param['seq_no'])
                continue
            else:
                log.info('Remote seq_no equals local (%d vs %d), skipping download',
                         seq_no, self.param['seq_no'])
                continue
            log.info("Downloading & uncompressing metadata...")
            os.close(os.open(self.cachepath + '.db.tmp',
                             os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                             stat.S_IRUSR | stat.S_IWUSR))
            db_conn = Connection(self.cachepath + '.db.tmp', fast_mode=True)
            with bucket.open_read("s3ql_metadata") as fh:
                restore_metadata(fh, db_conn)
            db_conn.close()
            with llfuse.lock:
                if self.quit:
                    break
                os.rename(self.cachepath + '.db.tmp', self.cachepath + '.db')
                self.db_mtime = os.stat(self.cachepath + '.db').st_mtime
                self.param['seq_no'] = seq_no
    log.debug('MetadataDownloadThread: end')
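Example 2 shows a write-to-temp-then-rename pattern: the new metadata is restored into cachepath + '.db.tmp' through a separate Connection, that connection is closed so everything is flushed to disk, and only then is the temporary file renamed over the live database. Because os.rename() replaces the target atomically on POSIX file systems, readers never see a half-written database. A stripped-down sketch of the same idea (the function name and the restore callback are placeholders, not s3ql code):

import os
from s3ql.database import Connection

def replace_db_atomically(cachepath, restore):
    tmp = cachepath + '.db.tmp'
    db = Connection(tmp, fast_mode=True)  # fast_mode as in Example 2
    try:
        restore(db)      # e.g. lambda db: restore_metadata(fh, db)
    finally:
        db.close()       # flush before the swap
    os.rename(tmp, cachepath + '.db')  # atomic replacement on POSIX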
Example 3: runTest
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import close [as alias]
def runTest(self):
    try:
        subprocess.call(['rsync', '--version'],
                        stderr=subprocess.STDOUT,
                        stdout=open('/dev/null', 'wb'))
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            raise unittest.SkipTest('rsync not installed')
        raise
    ref_dir = tempfile.mkdtemp()
    try:
        populate_dir(ref_dir)
        # Make file system and fake high inode number
        self.mkfs()
        db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                   (2 ** 31 + 10, u'inodes'))
        db.close()
        # Copy source data
        self.mount()
        subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                               self.mnt_dir + '/'])
        self.umount()
        # Check that inode watermark is high
        db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertGreater(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                                      (u'inodes',)), 2 ** 31 + 10)
        self.assertGreater(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31 + 10)
        db.close()
        # Renumber inodes
        self.fsck()
        # Check if renumbering was done
        db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertLess(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                                   (u'inodes',)), 2 ** 31)
        self.assertLess(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31)
        db.close()
        # Compare
        self.mount()
        rsync = subprocess.Popen(['rsync', '-anciHAX', '--delete',
                                  '--exclude', '/lost+found',
                                  ref_dir + '/', self.mnt_dir + '/'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
        out = rsync.communicate()[0]
        if out:
            self.fail('Copy not equal to original, rsync says:\n' + out)
        elif rsync.returncode != 0:
            self.fail('rsync failed with ' + out)
        self.umount()
    finally:
        shutil.rmtree(ref_dir)
Example 4: test
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import close [as alias]
def test(self):
    skip_without_rsync()
    ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
    try:
        populate_dir(ref_dir)
        # Make file system and fake high inode number
        self.mkfs()
        db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                   (2 ** 31 + 10, 'inodes'))
        db.close()
        # Copy source data
        self.mount()
        subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                               self.mnt_dir + '/'])
        self.umount()
        # Check that inode watermark is high
        db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                          ('inodes',)) > 2 ** 31 + 10
        assert db.get_val('SELECT MAX(id) FROM inodes') > 2 ** 31 + 10
        db.close()
        # Renumber inodes
        self.fsck()
        # Check if renumbering was done
        db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                          ('inodes',)) < 2 ** 31
        assert db.get_val('SELECT MAX(id) FROM inodes') < 2 ** 31
        db.close()
        # Compare
        self.mount()
        try:
            out = check_output(['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found',
                                ref_dir + '/', self.mnt_dir + '/'], universal_newlines=True,
                               stderr=subprocess.STDOUT)
        except CalledProcessError as exc:
            pytest.fail('rsync failed with ' + exc.output)
        if out:
            pytest.fail('Copy not equal to original, rsync says:\n' + out)
        self.umount()
    finally:
        shutil.rmtree(ref_dir)
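Example 4 is the pytest port of Example 3; the inline rsync availability probe has moved into a skip_without_rsync() helper whose body is not shown on this page. A plausible reconstruction, modeled directly on the probe in Example 3 (the exact body is an assumption):

import errno
import subprocess
import pytest

def skip_without_rsync():
    try:
        with open('/dev/null', 'wb') as null:
            subprocess.call(['rsync', '--version'],
                            stdout=null, stderr=subprocess.STDOUT)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            pytest.skip('rsync not installed')
        raise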
Example 5: DumpTests
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import close [as alias]
class DumpTests(unittest.TestCase):

    def setUp(self):
        self.tmpfh1 = tempfile.NamedTemporaryFile()
        self.tmpfh2 = tempfile.NamedTemporaryFile()
        self.src = Connection(self.tmpfh1.name)
        self.dst = Connection(self.tmpfh2.name)
        self.fh = tempfile.TemporaryFile()

        # Disable exclusive locking for all tests
        self.src.execute('PRAGMA locking_mode = NORMAL')
        self.dst.execute('PRAGMA locking_mode = NORMAL')

        self.create_table(self.src)
        self.create_table(self.dst)

    def tearDown(self):
        self.src.close()
        self.dst.close()
        self.tmpfh1.close()
        self.tmpfh2.close()
        self.fh.close()

    def test_transactions(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        self.dst.execute('PRAGMA journal_mode = WAL')
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh, trx_rows=10)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_1(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_2(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_3(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_auto(self):
        self.fill_vals(self.src)
        self.fill_buf(self.src)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_fixed(self):
        BUFLEN = 32
        self.fill_vals(self.src)
        self.fill_buf(self.src, BUFLEN)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB, BUFLEN))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_1(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_2(self):
        #......... remainder of this code omitted .........
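The DumpTests methods above call several helpers (create_table, fill_vals, fill_buf, fill_deltas, compare_tables) whose definitions fall inside the omitted portion of the class. The versions below are hypothetical minimal reconstructions, written only from how the helpers are invoked above; the real s3ql test suite will differ in detail:

import random

class DumpTests(unittest.TestCase):
    # ... setUp/tearDown and tests as in Example 5 ...

    def create_table(self, db):
        db.execute('CREATE TABLE test (id INTEGER PRIMARY KEY, buf BLOB)')

    def fill_vals(self, db):
        # Small, large and negative integers to exercise the integer encoder
        for i in [0, 1, 2, 80, 3030, 2 ** 31, -10]:
            db.execute('INSERT INTO test (id) VALUES (?)', (i,))

    def fill_buf(self, db, len_=None):
        # Attach a blob to every row; fixed length if len_ is given
        for (id_,) in db.get_list('SELECT id FROM test'):
            n = len_ if len_ is not None else random.randint(0, 140)
            buf = bytes(bytearray(random.getrandbits(8) for _ in range(n)))
            db.execute('UPDATE test SET buf=? WHERE id=?', (buf, id_))

    def fill_deltas(self, db):
        # Ids with irregular gaps, so consecutive deltas vary
        id_ = 0
        for _ in range(100):
            id_ += random.randint(1, 500)
            db.execute('INSERT INTO test (id) VALUES (?)', (id_,))

    def compare_tables(self, db1, db2):
        # get_list is the same Connection helper used in Example 6
        rows1 = db1.get_list('SELECT id, buf FROM test ORDER BY id')
        rows2 = db2.get_list('SELECT id, buf FROM test ORDER BY id')
        self.assertEqual(rows1, rows2)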
Example 6: main
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import close [as alias]
#......... part of this code omitted .........
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')

    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            print(textwrap.fill(textwrap.dedent('''\
                Up to date metadata is not available. Probably the file system has not
                been properly unmounted and you should try to run fsck on the computer
                where the file system has been mounted most recently.
                ''')))
        else:
            print(textwrap.fill(textwrap.dedent('''\
                Up to date metadata is not available. Either the file system has not
                been unmounted cleanly or the data has not yet propagated through the backend.
                In the latter case, waiting for a while should fix the problem; in
                the former case you should try to run fsck on the computer where
                the file system has been mounted most recently.
                ''')))
        print('Enter "continue" to use the outdated data anyway:',
              '> ', sep='\n', end='')
        if options.batch:
            raise QuietError('(in batch mode, exiting)')
        if sys.stdin.readline().strip() != 'continue':
            raise QuietError()

        param['seq_no'] = seq_no
        param['needs_fsck'] = True

    if (not param['needs_fsck']
            and ((time.time() - time.timezone) - param['last_fsck'])
            < 60 * 60 * 24 * 31):  # last check was less than one month ago
        if options.force:
            log.info('File system seems clean, checking anyway.')
        else:
            log.info('File system is marked as clean. Use --force to force checking.')
            return

    # If using local metadata, check consistency
    if db:
        log.info('Checking DB integrity...')
        try:
            # get_list may raise CorruptError itself
            res = db.get_list('PRAGMA integrity_check(20)')
            if res[0][0] != u'ok':
                log.error('\n'.join(x[0] for x in res))
                raise apsw.CorruptError()
        except apsw.CorruptError:
            raise QuietError('Local metadata is corrupted. Remove or repair the following '
                             'files manually and re-run fsck:\n'
                             + cachepath + '.db (corrupted)\n'
                             + cachepath + '.param (intact)')
    else:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR))
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')

    # Increase metadata sequence no
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)

    fsck = Fsck(cachepath + '-cache', bucket, param, db)
    fsck.check()

    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")

    if os.path.exists(cachepath + '-cache'):
        os.rmdir(cachepath + '-cache')

    log.info('Saving metadata...')
    fh = tempfile.TemporaryFile()
    dump_metadata(fh, db)

    log.info("Compressing & uploading metadata...")
    cycle_metadata(bucket)
    fh.seek(0)
    param['needs_fsck'] = False
    param['last_fsck'] = time.time() - time.timezone
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as dst:
        fh.seek(0)
        shutil.copyfileobj(fh, dst)
    fh.close()
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)

    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close()
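One thing worth noting in Example 6: db.close() is only reached when the whole check-and-upload sequence succeeds, so any exception (including the QuietError raised for uncorrectable errors) leaves the connection open until the process exits. If that matters, the tail of the routine can be arranged so the handle is always released. The following is a sketch of that rearrangement, reusing the names from Example 6, not the original code:

fsck = Fsck(cachepath + '-cache', bucket, param, db)
try:
    fsck.check()
    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")
    # ... save, compress and upload metadata exactly as above ...
    db.execute('ANALYZE')
    db.execute('VACUUM')
finally:
    db.close()  # now released on error paths as well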
Example 7: get_metadata
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import close [as alias]
def get_metadata(bucket, cachepath, readonly=False):
    '''Retrieve metadata

    Checks:
    - Revision
    - Unclean mounts

    Locally cached metadata is used if up-to-date.
    '''
    seq_no = get_seq_no(bucket)

    # Check for cached metadata
    db = None
    if os.path.exists(cachepath + '.params'):
        param = pickle.load(open(cachepath + '.params', 'rb'))
        if param['seq_no'] < seq_no:
            log.info('Ignoring locally cached metadata (outdated).')
            param = bucket.lookup('s3ql_metadata')
        else:
            log.info('Using cached metadata.')
            db = Connection(cachepath + '.db')
    else:
        param = bucket.lookup('s3ql_metadata')

    # Check for unclean shutdown
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            #raise QuietError(textwrap.fill(textwrap.dedent('''\
            #    It appears that the file system is still mounted somewhere else. If this is not
            #    the case, the file system may not have been unmounted cleanly and you should try
            #    to run fsck on the computer where the file system has been mounted most recently.
            #    ''')))
            log.warn('Local seq_no is smaller than the bucket seq_no. This implies another '
                     'mount point, but the bucket is read-consistent, so the mismatch is ignored.')
        else:
            #raise QuietError(textwrap.fill(textwrap.dedent('''\
            #    It appears that the file system is still mounted somewhere else. If this is not the
            #    case, the file system may not have been unmounted cleanly or the data from the
            #    most-recent mount may not yet have propagated through the backend. In the latter
            #    case, waiting for a while should fix the problem; in the former case you should
            #    try to run fsck on the computer where the file system has been mounted most
            #    recently.
            #    ''')))
            log.warn('Local seq_no is smaller than the bucket seq_no. This implies another '
                     'mount point, and the bucket is not read-consistent, so the error cannot '
                     'be ignored.')

    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')

    # Check that the fs itself is clean
    if not readonly and param['needs_fsck']:
        raise QuietError("File system damaged or not unmounted cleanly, run fsck!")
    if (time.time() - time.timezone) - param['last_fsck'] > 60 * 60 * 24 * 31:
        log.warn('Last file system check was more than 1 month ago, '
                 'running fsck.s3ql is recommended.')

    # Download metadata
    if not db:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR))
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')

    # Increase metadata sequence no
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
    param['needs_fsck'] = False

    return (param, db)
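Finally, note that get_metadata() returns an open Connection and leaves closing it to the caller. A caller that wants the close guaranteed even when the subsequent mount fails can wrap the handle in contextlib.closing; this usage sketch is an illustration, not how s3ql's own mount code is structured:

from contextlib import closing

param, db = get_metadata(bucket, cachepath)
with closing(db):
    # ... mount the file system and serve requests ...
    pass
# db.close() has run here, even if an exception escaped the with block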