This article collects typical code examples of the Python method s3ql.database.Connection.query. If you are unsure how Connection.query is used or what it is good for, the hand-picked examples below should help. You can also read more about the class it belongs to, s3ql.database.Connection.
Three code examples of Connection.query are shown below, sorted by popularity by default. You can vote for the examples you like or find useful; your feedback helps the system recommend better Python code samples.
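Before the full examples, here is a minimal sketch of the pattern they all share: a Connection is opened on an SQLite database file, query() is iterated to read result rows as tuples, and execute() runs statements with ?-style parameters. The database path and the demo table used here are placeholders for illustration only, not part of the s3ql metadata schema.

# Minimal usage sketch (placeholder path and table, not the real s3ql schema)
from s3ql.database import Connection

db = Connection('/tmp/demo.db')                      # open (or create) the SQLite file
db.execute('CREATE TABLE IF NOT EXISTS demo (id INT, name BLOB)')
db.execute('INSERT INTO demo VALUES (?, ?)', (1, b'foo'))

# query() returns an iterator over result rows (tuples)
for (id_, name) in db.query('SELECT id, name FROM demo'):
    print(id_, name)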
Example 1: main
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import query [as alias]
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # Check for cached metadata
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)
    if not os.path.exists(cachepath + '.params'):
        raise QuietError("No local metadata found.")

    with open(cachepath + '.params', 'rb') as fh:
        param = pickle.load(fh)

    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new.')

    if os.path.exists(DBNAME):
        raise QuietError('%s exists, aborting.' % DBNAME)

    log.info('Copying database...')
    dst = tempfile.NamedTemporaryFile()
    with open(cachepath + '.db', 'rb') as src:
        shutil.copyfileobj(src, dst)
    dst.flush()
    db = Connection(dst.name)

    log.info('Scrambling...')
    md5 = lambda x: hashlib.md5(x).hexdigest()

    for (id_, name) in db.query('SELECT id, name FROM names'):
        db.execute('UPDATE names SET name=? WHERE id=?',
                   (md5(name), id_))

    for (id_, name) in db.query('SELECT inode, target FROM symlink_targets'):
        db.execute('UPDATE symlink_targets SET target=? WHERE inode=?',
                   (md5(name), id_))

    for (id_, name) in db.query('SELECT rowid, value FROM ext_attributes'):
        db.execute('UPDATE ext_attributes SET value=? WHERE rowid=?',
                   (md5(name), id_))

    log.info('Saving...')
    with open(DBNAME, 'wb+') as fh:
        dump_metadata(db, fh)
Example 2: DumpTests
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import query [as alias]
#......... part of the code omitted here .........
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_2(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_3(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_4_time(self):
        self.fill_vals(self.src)
        t1 = 0.5 * time.time()
        t2 = 2 * time.time()
        for (id_,) in self.src.query('SELECT id FROM test'):
            val = random.uniform(t1, t2)
            self.src.execute('UPDATE test SET buf=? WHERE id=?', (val, id_))

        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.TIME))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_5_multi(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.dst.execute('DELETE FROM test')
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def compare_tables(self, db1, db2):
Example 3: upgrade
# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import query [as alias]
#......... part of the code omitted here .........
                os.rename(os.path.join(path, name),
                          os.path.join(path, basename))
                os.unlink(os.path.join(path, basename + '.dat'))

        if isinstance(bucket, LegacyLocalBucket):
            bucket = LocalBucket(bucket.name, None, None)
        else:
            bucket.bucket = LocalBucket(bucket.bucket.name, None, None)

        # Download metadata
        log.info("Downloading & uncompressing metadata...")
        dbfile = tempfile.NamedTemporaryFile()
        with tempfile.TemporaryFile() as tmp:
            with bucket.open_read("s3ql_metadata") as fh:
                shutil.copyfileobj(fh, tmp)
            db = Connection(dbfile.name, fast_mode=True)
            tmp.seek(0)
            restore_legacy_metadata(tmp, db)

        # Increase metadata sequence no
        param['seq_no'] += 1
        bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
        for i in seq_nos:
            if i < param['seq_no'] - 5:
                del bucket['s3ql_seq_no_%d' % i]

        log.info("Uploading database..")
        cycle_metadata(bucket)
        param['last-modified'] = time.time() - time.timezone
        with bucket.open_write("s3ql_metadata", param) as fh:
            dump_metadata(fh, db)

    else:
        log.info("Downloading & uncompressing metadata...")
        dbfile = tempfile.NamedTemporaryFile()
        with tempfile.TemporaryFile() as tmp:
            with bucket.open_read("s3ql_metadata") as fh:
                shutil.copyfileobj(fh, tmp)
            db = Connection(dbfile.name, fast_mode=True)
            tmp.seek(0)
            restore_metadata(tmp, db)

    print(textwrap.dedent('''
        The following process may take a long time, but can be interrupted
        with Ctrl-C and resumed from this point by calling `s3qladm upgrade`
        again. Please see Changes.txt for why this is necessary.
        '''))

    if 's3ql_hash_check_status' not in bucket:
        log.info("Starting hash verification..")
        start_obj = 0
    else:
        start_obj = int(bucket['s3ql_hash_check_status'])
        log.info("Resuming hash verification with object %d..", start_obj)

    try:
        total = db.get_val('SELECT COUNT(id) FROM objects')
        i = 0
        for (obj_id, hash_) in db.query('SELECT obj_id, hash FROM blocks JOIN objects '
                                        'ON obj_id == objects.id WHERE obj_id > ? '
                                        'ORDER BY obj_id ASC', (start_obj,)):
            if i % 100 == 0:
                log.info(' ..checked %d/%d objects..', i, total)

            sha = hashlib.sha256()
            with bucket.open_read("s3ql_data_%d" % obj_id) as fh:
                while True:
                    buf = fh.read(128 * 1024)
                    if not buf:
                        break
                    sha.update(buf)

            if sha.digest() != hash_:
                log.warn('Object %d corrupted! Deleting..', obj_id)
                bucket.delete('s3ql_data_%d' % obj_id)
            i += 1

    except KeyboardInterrupt:
        log.info("Storing verification status...")
        bucket['s3ql_hash_check_status'] = '%d' % obj_id
        raise QuietError('Aborting..')

    log.info('Running fsck...')
    fsck = Fsck(tempfile.mkdtemp(), bucket, param, db)
    fsck.check()

    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")

    param['revision'] = CURRENT_FS_REV
    param['seq_no'] += 1
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'

    log.info("Uploading database..")
    cycle_metadata(bucket)
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as fh:
        dump_metadata(fh, db)