This page collects typical usage examples of the Connection class from the Python module s3ql.database. If you have been wondering what the Connection class is good for, or what working code using it looks like, the curated examples below should help.
Below are 15 code examples of the Connection class, sorted by popularity by default.
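Before the individual examples, here is a minimal sketch of the pattern they all share: open a Connection on an SQLite database file, run statements with execute(), iterate over query() results, and close the connection when done. The demo table and its columns are hypothetical; execute(), query(), and close() are the calls that appear in the examples below.

import tempfile

from s3ql.database import Connection

# Open a Connection on a temporary SQLite file (the examples below
# use the same tempfile-based setup)
dbfile = tempfile.NamedTemporaryFile()
db = Connection(dbfile.name)
try:
    # The 'demo' table is hypothetical, purely for illustration
    db.execute('CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT)')
    db.execute('INSERT INTO demo (id, name) VALUES (?, ?)', (1, 'foo'))
    # query() returns an iterator over result rows
    for (id_, name) in db.query('SELECT id, name FROM demo'):
        print(id_, name)
finally:
    db.close()
    dbfile.close()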
Example 1: db
def db():
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)
    create_tables(db)
    init_tables(db)

    try:
        yield db
    finally:
        db.close()
        dbfile.close()
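The yield/try/finally shape above is pytest's generator-fixture pattern, so in the original test module this function is presumably registered with the @pytest.fixture decorator (not shown in the snippet), roughly:

import pytest

@pytest.fixture
def db():
    # body exactly as in Example 1: open a Connection on a temporary
    # file, create and initialize the tables, yield the connection,
    # and close the connection and file in the finally block
    ...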
Example 2: setUp
def setUp(self):
    self.tmpfh1 = tempfile.NamedTemporaryFile()
    self.tmpfh2 = tempfile.NamedTemporaryFile()
    self.src = Connection(self.tmpfh1.name)
    self.dst = Connection(self.tmpfh2.name)
    self.fh = tempfile.TemporaryFile()

    # Disable exclusive locking for all tests
    self.src.execute('PRAGMA locking_mode = NORMAL')
    self.dst.execute('PRAGMA locking_mode = NORMAL')

    self.create_table(self.src)
    self.create_table(self.dst)
Example 3: main
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # Check for cached metadata
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)
    if not os.path.exists(cachepath + '.params'):
        raise QuietError("No local metadata found.")

    with open(cachepath + '.params', 'rb') as fh:
        param = pickle.load(fh)

    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new.')

    if os.path.exists(DBNAME):
        raise QuietError('%s exists, aborting.' % DBNAME)

    log.info('Copying database...')
    dst = tempfile.NamedTemporaryFile()
    with open(cachepath + '.db', 'rb') as src:
        shutil.copyfileobj(src, dst)
    dst.flush()
    db = Connection(dst.name)

    log.info('Scrambling...')
    md5 = lambda x: hashlib.md5(x).hexdigest()
    for (id_, name) in db.query('SELECT id, name FROM names'):
        db.execute('UPDATE names SET name=? WHERE id=?',
                   (md5(name), id_))
    for (id_, name) in db.query('SELECT inode, target FROM symlink_targets'):
        db.execute('UPDATE symlink_targets SET target=? WHERE inode=?',
                   (md5(name), id_))
    for (id_, name) in db.query('SELECT rowid, value FROM ext_attributes'):
        db.execute('UPDATE ext_attributes SET value=? WHERE rowid=?',
                   (md5(name), id_))

    log.info('Saving...')
    with open(DBNAME, 'wb+') as fh:
        dump_metadata(db, fh)
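Example 3 ends with dump_metadata(db, fh), and Example 6 below uses the matching restore_metadata(fh, db_conn); together they serialize a metadata database to a file object and load it back into a fresh Connection. A minimal round-trip sketch, assuming both helpers can be imported from s3ql (the exact import path varies by s3ql version and is an assumption here):

import tempfile

from s3ql.database import Connection
# Assumed import path for the two helpers seen in Examples 3 and 6
from s3ql.metadata import dump_metadata, restore_metadata

src = Connection('source.db')        # hypothetical existing metadata database
with tempfile.TemporaryFile() as fh:
    dump_metadata(src, fh)           # serialize the database into fh
    fh.seek(0)                       # rewind before reading it back
    dst = Connection('restored.db')  # hypothetical target database
    restore_metadata(fh, dst)        # rebuild the tables from fh
    dst.close()
src.close()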
Example 4: setUp
def setUp(self):
    self.backend_dir = tempfile.mkdtemp()
    plain_backend = local.Backend('local://' + self.backend_dir, None, None)
    self.backend_pool = BackendPool(lambda: BetterBackend(b'schwubl', 'lzma', plain_backend))
    self.backend = self.backend_pool.pop_conn()
    self.cachedir = tempfile.mkdtemp()
    self.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    self.dbfile = tempfile.NamedTemporaryFile(delete=False)

    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()

    self.block_cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                                  self.max_obj_size * 5)
    self.server = fs.Operations(self.block_cache, self.db, self.max_obj_size,
                                InodeCache(self.db, 0))
    self.server.init()

    # Keep track of unused filenames
    self.name_cnt = 0
Example 5: setUp
def setUp(self):
    self.backend_dir = tempfile.mkdtemp()
    self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                          None, None))
    self.cachedir = tempfile.mkdtemp()
    self.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    self.dbfile = tempfile.NamedTemporaryFile(delete=False)

    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Create an inode we can work with
    self.inode = 42
    self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                    "VALUES (?,?,?,?,?,?,?,?,?)",
                    (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                     | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                     os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))

    self.cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                            self.max_obj_size * 100)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()
Example 6: run
def run(self):
    log.debug('MetadataDownloadThread: start')

    while not self.quit:
        self.event.wait(self.interval)
        self.event.clear()

        if self.quit:
            break

        with self.bucket_pool() as bucket:
            # XXX: call bucket.is_get_consistent() to verify data consistency later
            seq_no = get_seq_no(bucket)
            if seq_no > self.param['seq_no']:
                log.info('Remote metadata is newer than local (%d vs %d), '
                         'downloading it', seq_no, self.param['seq_no'])
            elif seq_no < self.param['seq_no']:
                log.warn('Remote metadata is older than local (%d vs %d), '
                         'ignoring the bucket until the metadata upload thread is done',
                         seq_no, self.param['seq_no'])
                continue
            else:
                log.info('Remote sequence number equals local (%d vs %d), '
                         'skipping download', seq_no, self.param['seq_no'])
                continue

            log.info("Downloading & uncompressing metadata...")
            os.close(os.open(self.cachepath + '.db.tmp',
                             os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                             stat.S_IRUSR | stat.S_IWUSR))
            db_conn = Connection(self.cachepath + '.db.tmp', fast_mode=True)
            with bucket.open_read("s3ql_metadata") as fh:
                restore_metadata(fh, db_conn)
            db_conn.close()

            with llfuse.lock:
                if self.quit:
                    break
                os.rename(self.cachepath + '.db.tmp', self.cachepath + '.db')
                self.db_mtime = os.stat(self.cachepath + '.db').st_mtime
                self.param['seq_no'] = seq_no

    log.debug('MetadataDownloadThread: end')
Example 7: setUp
def setUp(self):
    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    self.dbfile = tempfile.NamedTemporaryFile(delete=False)

    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)
    self.cache = inode_cache.InodeCache(self.db, 0)
Example 8: setUp
def setUp(self):
    self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    self.backend = local.Backend('local://' + self.backend_dir, None, None)
    self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    self.max_obj_size = 1024
    self.dbfile = tempfile.NamedTemporaryFile()
    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)
    self.fsck = Fsck(self.cachedir, self.backend,
                     {'max_obj_size': self.max_obj_size}, self.db)
    self.fsck.expect_errors = True
Example 9: setUp
def setUp(self):
    self.bucket_dir = tempfile.mkdtemp()
    self.bucket = local.Bucket(self.bucket_dir, None, None)
    self.cachedir = tempfile.mkdtemp() + "/"
    self.blocksize = 1024
    self.dbfile = tempfile.NamedTemporaryFile()
    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)
    self.fsck = Fsck(self.cachedir, self.bucket,
                     {'blocksize': self.blocksize}, self.db)
    self.fsck.expect_errors = True
Example 10: setUp
def setUp(self):
    self.backend_dir = tempfile.mkdtemp()
    self.backend = local.Backend('local://' + self.backend_dir, None, None)
    self.cachedir = tempfile.mkdtemp()
    self.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    self.dbfile = tempfile.NamedTemporaryFile(delete=False)

    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)
    self.fsck = Fsck(self.cachedir, self.backend,
                     {'max_obj_size': self.max_obj_size}, self.db)
    self.fsck.expect_errors = True
Example 11: setUp
def setUp(self):
    self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                          None, None))
    self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    self.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    self.dbfile = tempfile.NamedTemporaryFile(delete=False)

    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Create an inode we can work with
    self.inode = 42
    now_ns = time_ns()
    self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                    "VALUES (?,?,?,?,?,?,?,?,?)",
                    (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                     | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                     os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

    cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                       self.max_obj_size * 100)
    self.cache = cache

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        def put(self, arg, timeout=None):
            cache._do_upload(*arg)
            return True
    cache.to_upload = DummyDistributor()

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()
Example 12: setUp
def setUp(self):
    self.bucket_dir = tempfile.mkdtemp()
    self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
    self.cachedir = tempfile.mkdtemp() + "/"
    self.blocksize = 1024
    self.dbfile = tempfile.NamedTemporaryFile()
    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Create an inode we can work with
    self.inode = 42
    self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                    "VALUES (?,?,?,?,?,?,?,?,?)",
                    (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                     | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                     os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))

    self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 100)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()
Example 13: setUp
def setUp(self):
    self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend('local://' + self.backend_dir, None, None)
    self.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6),
                                                            plain_backend))
    self.backend = self.backend_pool.pop_conn()
    self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    self.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    self.dbfile = tempfile.NamedTemporaryFile(delete=False)

    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()

    cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                       self.max_obj_size * 5)
    self.block_cache = cache
    self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                InodeCache(self.db, 0))
    self.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        def put(self, arg, timeout=None):
            cache._do_upload(*arg)
            return True
    cache.to_upload = DummyDistributor()

    # Keep track of unused filenames
    self.name_cnt = 0
Example 14: setUp
def setUp(self):
    self.bucket_dir = tempfile.mkdtemp()
    self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
    self.bucket = self.bucket_pool.pop_conn()
    self.cachedir = tempfile.mkdtemp() + "/"
    self.blocksize = 1024
    self.dbfile = tempfile.NamedTemporaryFile()
    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()

    self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
                                  self.blocksize * 5)
    self.server = fs.Operations(self.block_cache, self.db, self.blocksize)
    self.server.init()

    # Keep track of unused filenames
    self.name_cnt = 0
Example 15: cache_tests
class cache_tests(TestCase):
    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024
        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))

        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        self.cache.bucket_pool = self.bucket_pool
        self.cache.destroy()
        if os.path.exists(self.cachedir):
            shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)
        llfuse.lock.release()

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fh:
            return fh.read(len_)

    def test_get(self):
        inode = self.inode
        blockno = 11
        data = self.random_data(int(0.5 * self.blocksize))

        # Case 1: Object does not exist yet
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data)

        # Case 2: Object is in cache
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

        # Case 3: Object needs to be downloaded
        self.cache.clear()
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

    def test_expire(self):
        inode = self.inode

        # Define the 4 most recently accessed ones
        most_recent = [7, 11, 10, 8]
        for i in most_recent:
            time.sleep(0.2)
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # And some others
        for i in range(20):
            if i in most_recent:
                continue
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # Flush the 2 most recently accessed ones
        commit(self.cache, inode, most_recent[-2])
        commit(self.cache, inode, most_recent[-3])
        # ......... the rest of this code is omitted here .........