本文整理汇总了Python中gdbm.open方法的典型用法代码示例。如果您正苦于以下问题:Python gdbm.open方法的具体用法?Python gdbm.open怎么用?Python gdbm.open使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块gdbm的用法示例。
在下文中一共展示了gdbm.open方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create
# 需要导入模块: import gdbm [as 别名]
# 或者: from gdbm import open [as 别名]
def create(self, path, flags, mode):  # {{{3
    """FUSE create() handler: create *path* with *mode* and open it.

    Returns the status of the nested open() call (0 on success) or a
    negative errno value on failure (-errno.EROFS on a read-only mount).
    """
    try:
        self.__log_call('create', 'create(%r, %o, %o)', path, flags, mode)
        if self.read_only:
            return -errno.EROFS
        try:
            # If the file already exists, just open it.
            status = self.open(path, flags, nested=True)
        except OSError as e:
            # Only ENOENT means "doesn't exist yet"; re-raise anything else.
            if e.errno != errno.ENOENT:
                raise
            # Otherwise create a new file and open that.
            inode, parent_ino = self.__insert(path, mode, 0)
            status = self.open(path, flags, nested=True, inode=inode)
        self.__commit_changes()
        self.__gc_hook()
        return status
    except Exception as e:
        # NOTE(review): the outer try had no handler in the visible source
        # (a syntax error as written); restored following the pattern used
        # by open() below — confirm against the original file.
        return self.__except_to_status('create', e, errno.ENOENT)
示例2: open
# 需要导入模块: import gdbm [as 别名]
# 或者: from gdbm import open [as 别名]
def open(self, path, flags, nested=None, inode=None):  # {{{3
    """FUSE open() handler: check that *path* exists and is accessible.

    Returns 0 on success or a negative errno value; when *nested* is
    truthy, exceptions propagate to the caller instead.
    """
    try:
        self.__log_call('open', 'open(%r, %o)', path, flags)
        # Make sure the file exists?
        inode = inode or self.__path2keys(path)[1]
        # BUG FIX: os.O_RDONLY is 0, so `flags & os.O_RDONLY` can never be
        # true — the original read-permission check was dead code. Extract
        # the access mode with O_ACCMODE instead.
        accmode = flags & os.O_ACCMODE
        access_flags = 0
        if accmode in (os.O_RDONLY, os.O_RDWR):
            access_flags |= os.R_OK
        if accmode in (os.O_WRONLY, os.O_RDWR):
            access_flags |= os.W_OK
        if not self.__access(inode, access_flags):
            return -errno.EACCES
        return 0
    except Exception as e:
        if nested:
            raise
        return self.__except_to_status('open', e, errno.ENOENT)
示例3: __init_logging
# 需要导入模块: import gdbm [as 别名]
# 或者: from gdbm import open [as 别名]
def __init_logging(self, options):  # {{{3
    """Configure file logging and verbosity from parsed command-line options.

    :param options: parsed options object with ``log_file`` (path or falsy)
                    and ``verbosity`` (int count of -v flags) attributes.
    """
    # Configure logging of messages to a file. FileHandler owns and closes
    # its file object, unlike the original StreamHandler(open(...)) which
    # leaked the manually opened handle.
    if options.log_file:
        handler = logging.FileHandler(options.log_file, mode='w')
        self.logger.addHandler(handler)
    # Convert verbosity argument to logging level: 1 -> INFO, 2 -> DEBUG,
    # 3 or more -> NOTSET (log everything).
    if options.verbosity > 0:
        if options.verbosity <= 1:
            self.logger.setLevel(logging.INFO)
        elif options.verbosity <= 2:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.NOTSET)
示例4: __open_datastore
# 需要导入模块: import gdbm [as 别名]
# 或者: from gdbm import open [as 别名]
def __open_datastore(self, use_gdbm):
    """Open the key/value datastore backing the file system.

    Prefers gdbm (it supports a fast mode and a synchronous mode) and
    falls back to anydbm when gdbm is unavailable or *use_gdbm* is false.
    Note that existing key/value stores are always accessed through the
    library that created them.
    """
    base_mode = 'r' if self.read_only else 'c'
    if use_gdbm:
        try:
            import gdbm
        except ImportError:
            pass
        else:
            sync_flag = 's' if self.synchronous else 'f'
            return gdbm.open(self.datastore_file, base_mode + sync_flag)
    import anydbm
    return anydbm.open(self.datastore_file, base_mode)
示例5: __write_blocks
# 需要导入模块: import gdbm [as 别名]
# 或者: from gdbm import open [as 别名]
def __write_blocks(self, inode, buf, apparent_size):  # {{{3
    """Rebuild the block index for *inode* from the contents of *buf*.

    Splits *buf* into fixed-size blocks, deduplicates each block by its
    hash, uploads blocks not seen before via an external process, and
    updates the file's size and mtime.

    NOTE(review): this is Python 2 code (xrange, str.encode('hex')) —
    confirm the target runtime before touching it.
    buf is assumed to be a seekable, len()-able buffer object — TODO confirm.
    """
    start_time = time.time()
    # Delete existing index entries for file.
    self.conn.execute('DELETE FROM "index" WHERE inode = ?', (inode,))
    # Store any changed blocks and rebuild the file index.
    storage_size = len(buf)
    for block_nr in xrange(int(math.ceil(storage_size / float(self.block_size)))):
        buf.seek(self.block_size * block_nr, os.SEEK_SET)
        new_block = buf.read(self.block_size)
        digest = self.__hash(new_block)
        # Hashes are stored hex-encoded rather than as binary blobs.
        encoded_digest = digest.encode('hex') # sqlite3.Binary(digest)
        row = self.conn.execute('SELECT id FROM hashes WHERE hash = ?', (encoded_digest,)).fetchone()
        if row:
            # Block with this hash already stored: reuse it after verifying
            # the stored content really matches (hash-collision guard).
            hash_id = row[0]
            existing_block = self.decompress(self.__get_block_from_telegram(digest.encode('hex')))
            # Check for hash collisions.
            if new_block != existing_block:
                # Found a hash collision: dump debugging info and exit.
                dumpfile_collision = '/tmp/dedupfs-collision-%i' % time.time()
                handle = open(dumpfile_collision, 'w')
                handle.write('Content of existing block is %r.\n' % existing_block)
                handle.write('Content of new block is %r.\n' % new_block)
                handle.close()
                self.logger.critical(
                    "Found a hash collision on block number %i of inode %i!\n" + \
                    "The existing block is %i bytes and hashes to %s.\n" + \
                    "The new block is %i bytes and hashes to %s.\n" + \
                    "Saved existing and conflicting data blocks to %r.",
                    block_nr, inode, len(existing_block), digest,
                    len(new_block), digest, dumpfile_collision)
                # Hard exit: a collision would silently corrupt data.
                os._exit(1)
            self.conn.execute('INSERT INTO "index" (inode, hash_id, block_nr) VALUES (?, ?, ?)',
                (inode, hash_id, block_nr))
        else:
            # New block: hand it to the upload helper through a named pipe.
            FIFO_PIPE = str('upipe_' + digest.encode('hex'))
            try:
                os.mkfifo(FIFO_PIPE)
            except OSError as oe:
                # An existing pipe from a previous run is fine; anything
                # else is a real error.
                if oe.errno != errno.EEXIST:
                    raise
            process = Popen(["python3.6", "download_service.py", "upload", digest.encode('hex')], bufsize=-1)
            with open(FIFO_PIPE, 'wb') as pipe:
                # Unlink while the pipe is open so it disappears once both
                # ends close — presumably to avoid stale pipes; verify.
                os.unlink(FIFO_PIPE)
                pipe.write(self.compress(new_block))
                # if callable(getattr(pipe, 'flush', None)):
                #     pipe.flush()
            process.wait()
            # self.blocks[digest] = self.compress(new_block)
            self.conn.execute('INSERT INTO hashes (id, hash) VALUES (NULL, ?)', (encoded_digest,))
            self.conn.execute('INSERT INTO "index" (inode, hash_id, block_nr) VALUES (?, last_insert_rowid(), ?)',
                (inode, block_nr))
        # Check that the data was properly stored in the database?
        self.__verify_write(new_block, digest, block_nr, inode)
        # NOTE(review): this increment is dead — xrange controls block_nr
        # and reassigns it on the next iteration.
        block_nr += 1
    # Update file size and last modified time.
    self.conn.execute('UPDATE inodes SET size = ?, mtime = ? WHERE inode = ?',
        (apparent_size, self.__newctime(), inode))
    self.time_spent_writing_blocks += time.time() - start_time