本文整理汇总了Python中lmdb.Error方法的典型用法代码示例。如果您正苦于以下问题:Python lmdb.Error方法的具体用法?Python lmdb.Error怎么用?Python lmdb.Error使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类lmdb
的用法示例。
在下文中一共展示了lmdb.Error方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _open_lmbd_readonly
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def _open_lmbd_readonly(self):
    """Open the LMDB-backed zarr store at ``self.path`` in read-only mode.

    Returns the opened ``zarr.LMDBStore``. Any OS-level or LMDB-level
    failure is surfaced as ``exceptions.FileFormatError``.
    """
    # Cap the map size at the actual file size; without this LMDB maps
    # 1TB of virtual memory, which makes real memory usage hard to read.
    try:
        map_size = os.path.getsize(self.path)
    except OSError as err:
        raise exceptions.FileFormatError(str(err)) from err
    try:
        return zarr.LMDBStore(
            self.path,
            map_size=map_size,
            readonly=True,
            subdir=False,
            lock=False,
        )
    except lmdb.InvalidError as err:
        raise exceptions.FileFormatError(
            "Unknown file format:{}".format(str(err))
        ) from err
    except lmdb.Error as err:
        raise exceptions.FileFormatError(str(err)) from err
示例2: close
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def close(self) -> None:
    """Close all handles to the writer checkout and release the writer lock.

    Failure to call this method after the writer checkout has been used
    will result in a lock being placed on the repository which will not
    allow any writes until it has been manually cleared.
    """
    # Best-effort liveness check; an lmdb.Error here means the underlying
    # environment is already gone, which is acceptable while closing.
    with suppress(lmdb.Error):
        self._verify_alive()
    if isinstance(self._stack, ExitStack):
        self._stack.close()
    # _columns may never have been created, and may not expose _destruct;
    # guard both before tearing it down.
    if hasattr(self, '_columns'):
        if hasattr(self._columns, '_destruct'):
            self._columns._destruct()
    # Release the repository-wide writer lock; lmdb.Error is suppressed to
    # cover the case where the lock environment is already closed.
    with suppress(lmdb.Error):
        heads.release_writer_lock(self._branchenv, self._writer_lock)
    # Drop every instance attribute so later use of this checkout fails
    # loudly instead of touching stale handles (iterate over a copy of
    # the keys because delattr mutates __dict__).
    for attr in list(self.__dict__.keys()):
        delattr(self, attr)
    atexit.unregister(self.close)
    return
示例3: __init__
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def __init__(self, save):
    """Open the commit-experiences shelf backed by LMDB.

    When ``save`` is False and the database file does not exist yet, fall
    back to an empty dict instead of failing; any other lmdb.Error
    propagates. A read-only session also gets an in-memory overlay.
    """
    self.save = save
    try:
        self.db_experiences = shelve.Shelf(
            LMDBDict("data/commit_experiences.lmdb", readonly=not save),
            protocol=pickle.DEFAULT_PROTOCOL,
            writeback=save,
        )
    except lmdb.Error as exc:
        missing = "No such file or directory" in str(exc)
        if save or not missing:
            raise
        # read-only mode with no database on disk: start empty
        self.db_experiences = {}
    if not save:
        self.mem_experiences = {}
示例4: _wrap_iterator
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def _wrap_iterator(iterator, cursor_chain, deserializer):
    """Wrap a raw LMDB iterator so that each yielded item is decoded with
    ``_read``; any ``lmdb.Error`` (from the cursor or from decoding) ends
    iteration instead of propagating to the caller."""

    class _SafeIter:

        def __iter__(self):
            return self

        def __next__(self):
            # _read stays inside the try so an lmdb.Error raised while
            # decoding also terminates iteration, as in the original.
            try:
                return _read(next(iterator), cursor_chain, deserializer)
            except lmdb.Error:
                raise StopIteration()

    return _SafeIter()
示例5: get_word_vector
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def get_word_vector(self, word):
    """
    Get static embeddings (e.g. glove) for a given token.

    Looks the token up in the LMDB cache when available, returning a zero
    vector for out-of-vocabulary tokens; falls back to the in-memory
    store when no db is open or the embeddings are in bin format.
    """
    if self.name in ('wiki.fr', 'wiki.fr.bin'):
        # these pre-trained embeddings are not cased
        word = word.lower()
    if self.env is None or self.extension == 'bin':
        # db not available or embeddings in bin format; the embeddings
        # should then be available in memory (normally!)
        return self.get_word_vector_in_memory(word)
    try:
        with self.env.begin() as txn:
            raw = txn.get(word.encode(encoding='UTF-8'))
            if raw:
                word_vector = _deserialize_pickle(raw)
                raw = None
            else:
                # OOV token: zero vector (alternatives: random negative
                # init, or fasttext OOV ngram lookup if available)
                word_vector = np.zeros((self.static_embed_size,), dtype=np.float32)
    except lmdb.Error:
        # no idea why, but we need to close and reopen the environment to avoid
        # mdb_txn_begin: MDB_BAD_RSLOT: Invalid reuse of reader locktable slot
        # when opening new transaction !
        self.env.close()
        envFilePath = os.path.join(self.embedding_lmdb_path, self.name)
        self.env = lmdb.open(envFilePath, readonly=True, max_readers=2048, max_spare_txns=2, lock=False)
        return self.get_word_vector(word)
    return word_vector
示例6: get_ELMo_lmdb_vector
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def get_ELMo_lmdb_vector(self, token_list, max_size_sentence):
    """
    Try to get the ELMo embeddings for a sequence cached in LMDB.

    Parameters
    ----------
    token_list : list of token sequences, one per sentence, to look up.
    max_size_sentence : padded sentence length; each cached embedding is
        truncated or zero-padded to max_size_sentence-2 rows.

    Returns
    -------
    numpy float32 array of shape
    (len(token_list), max_size_sentence-2, ELMo_embed_size), or None if
    the cache is unavailable or any sentence is missing from it.
    """
    if self.env_ELMo is None:
        # db cache not available, we don't cache ELMo stuff
        return None
    try:
        ELMo_vector = np.zeros((len(token_list), max_size_sentence-2, ELMo_embed_size), dtype='float32')
        with self.env_ELMo.begin() as txn:
            # NOTE(fix): a second transaction used to be opened here on every
            # loop iteration and never aborted, leaking a reader locktable
            # slot per token list — a likely cause of the MDB_BAD_RSLOT
            # failures handled below. The context-managed txn is sufficient.
            for i in range(0, len(token_list)):
                # get a hash for the token_list
                the_hash = list_digest(token_list[i])
                vector = txn.get(the_hash.encode(encoding='UTF-8'))
                if not vector:
                    # cache miss for this sentence: the whole lookup fails
                    return None
                # adapt expected shape/padding
                local_embeddings = _deserialize_pickle(vector)
                vector = None
                if local_embeddings.shape[0] > max_size_sentence-2:
                    # squeeze the extra padding space
                    ELMo_vector[i] = local_embeddings[:max_size_sentence-2,]
                elif local_embeddings.shape[0] == max_size_sentence-2:
                    # exact fit
                    ELMo_vector[i] = local_embeddings
                else:
                    # fill the missing space with padding
                    filler = np.zeros((max_size_sentence-(local_embeddings.shape[0]+2), ELMo_embed_size), dtype='float32')
                    ELMo_vector[i] = np.concatenate((local_embeddings, filler))
    except lmdb.Error:
        # close and reopen the environment to recover from
        # mdb_txn_begin: MDB_BAD_RSLOT: Invalid reuse of reader locktable slot
        self.env_ELMo.close()
        self.env_ELMo = lmdb.open(self.embedding_ELMo_cache, readonly=True, max_readers=2048, max_spare_txns=2, lock=False)
        # bug fix: the retry previously dropped max_size_sentence, raising
        # TypeError instead of actually retrying the lookup
        return self.get_ELMo_lmdb_vector(token_list, max_size_sentence)
    return ELMo_vector
示例7: test_does_not_exist
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def test_does_not_exist(self):
    """Opening a DataSourceLMDB on a missing path must raise lmdb.Error."""
    missing_path = os.path.join(self.dir_tmp, 'test_num_entries_does_not_exist_lmdb')
    # precondition: the path really is absent
    assert_false(os.path.exists(missing_path))
    assert_raises(lmdb.Error, ds.DataSourceLMDB, missing_path)
示例8: exists
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def exists(self):
    """Raise ``lmdb.Error`` if the database directory ``self.p`` is missing."""
    if not os.path.isdir(self.p):
        # bug fix: the '%s' placeholder was never interpolated, so the
        # message printed a literal '(%s)' instead of the offending path
        raise lmdb.Error("LMDB not found (%s)" % self.p)
示例9: test_num_entries_does_not_exist
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def test_num_entries_does_not_exist(self):
    """num_entries on a non-existent LMDB path must raise lmdb.Error."""
    missing_path = os.path.join(self.dir_tmp, 'test_num_entries_does_not_exist_lmdb')
    # precondition: nothing was created at this location
    assert_false(os.path.exists(missing_path))
    assert_raises(lmdb.Error, r.num_entries, missing_path)
示例10: start
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def start(self, ctx=None):
    """Open the LMDB environment under the data path and load existing stores.

    If a context object is supplied, this engine registers itself on it
    under the name 'dataengine'. Any lmdb.Error while opening or scanning
    the database is logged and re-raised.
    """
    logger.debug("Starting data engine...")
    if ctx:
        # make the engine discoverable through the application context
        ctx.bind('dataengine', self)
    if not self.datapath:
        self.datapath = os.path.join(environ.data_dir(), 'stores')
    if not os.path.exists(self.datapath):
        os.makedirs(self.datapath)
    logger.debug("Data path: %s", self.datapath)
    try:
        self.database = lmdb.Environment(self.datapath,
                                         map_size=2000000000,
                                         max_dbs=1024)
        # enumerate the named sub-databases recorded in the main db and
        # re-wrap each one in a Store
        with self.database.begin(write=False) as txn:
            cursor = txn.cursor()
            for store_name, _ in iter(cursor):
                logger.debug("Found existing store: %s", store_name)
                sub_db = self.database.open_db(store_name, create=False)
                self.stores[store_name] = Store(store_name, sub_db, self)
    except lmdb.Error:
        logger.exception("Failed to open database.", exc_info=True)
        raise
    logger.debug("Data engine started.")
示例11: create_store
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def create_store(self, name):
    """Create (or open) a named sub-database and wrap it in a Store.

    Any lmdb.Error is logged and re-raised as DataError.
    (Python 2 codebase: unicode names are normalised to utf-8 bytes,
    since LMDB keys are byte strings.)
    """
    if isinstance(name, unicode):
        name = name.encode('utf-8')
    try:
        handle = self.database.open_db(name, dupsort=False, create=True)
        store = Store(name, handle, self)
        self.stores[name] = store
        return store
    except lmdb.Error as ex:
        logger.exception(ex)
        raise DataError(ex.message)
示例12: remove_store
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def remove_store(self, name):
    """Drop the named sub-database and forget its Store wrapper.

    Silently does nothing when the name is unknown. Any lmdb.Error is
    logged (with traceback) and re-raised as DataError.
    """
    try:
        store = self.stores.get(name)
        if store is not None:
            with self.database.begin(write=True) as txn:
                txn.drop(store._db)
            del self.stores[name]
    except lmdb.Error as ex:
        # bug fix: 'ex' was passed as a %-format argument to a message with
        # no placeholder, which makes the logging call itself fail;
        # logger.exception already records the traceback.
        logger.exception("Failed to remove store.")
        raise DataError(ex.message)
示例13: get_BERT_lmdb_vector
# 需要导入模块: import lmdb [as 别名]
# 或者: from lmdb import Error [as 别名]
def get_BERT_lmdb_vector(self, sentence):
    """
    Try to get the BERT extracted embeddings for a sequence cached in LMDB.

    Returns the deserialized cached array for the sentence (no shape
    adaptation/padding is performed), or None if the cache is unavailable
    or the sentence is not cached.
    """
    if self.env_BERT is None:
        # db cache not available, we don't cache BERT stuff
        return None
    try:
        with self.env_BERT.begin() as txn:
            # NOTE(fix): a second read transaction used to be opened here and
            # never aborted, leaking a reader locktable slot on every call —
            # a likely cause of the MDB_BAD_RSLOT failures handled below.
            # The context-managed txn is sufficient.
            # get a hash for the sentence
            the_hash = list_digest(sentence)
            vector = txn.get(the_hash.encode(encoding='UTF-8'))
            if not vector:
                return None
            BERT_vector = _deserialize_pickle(vector)
            vector = None
    except lmdb.Error:
        # close and reopen the environment to recover from
        # mdb_txn_begin: MDB_BAD_RSLOT: Invalid reuse of reader locktable slot
        self.env_BERT.close()
        self.env_BERT = lmdb.open(self.embedding_BERT_cache, readonly=True, max_readers=2048, max_spare_txns=2, lock=False)
        return self.get_BERT_lmdb_vector(sentence)
    return BERT_vector