This article collects typical code examples of the sqlitedict.SqliteDict.commit method in Python. If you have been asking yourself how exactly SqliteDict.commit works, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of the class this method belongs to, sqlitedict.SqliteDict.
Below, 13 code examples of the SqliteDict.commit method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
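Before the examples, here is a minimal sketch of the method itself (the file name example.sqlite is only a placeholder): with autocommit=False, writes are buffered in the current transaction until commit() flushes them to the sqlite file.

from sqlitedict import SqliteDict

db = SqliteDict('example.sqlite', autocommit=False)
db['key'] = 'value'   # buffered, not yet persisted
db.commit()           # flush outstanding writes to disk
db.close()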
Example 1: _persist_v0
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def _persist_v0(file_path, zg):
    print 'Creating db...'
    persisted = SqliteDict(file_path, autocommit=False)
    print 'Updating data...'
    persisted.update(zg.country_postal_codes)
    print 'Committing data...'
    persisted.commit()
Example 2: main
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def main(data_dir):
    print 'Loading data...'
    zg = Zipgun(data_dir, force_text=True)
    print 'Creating db...'
    persisted = SqliteDict(os.path.join(data_dir, DATA_FILE), autocommit=False)
    print 'Updating data...'
    persisted.update(zg.country_postal_codes)
    print 'Committing data...'
    persisted.commit()
Example 3: test_reopen_conn
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def test_reopen_conn(self):
    """Verify using a contextmanager that a connection can be reopened."""
    fname = norm_file('tests/db/sqlitedict-override-test.sqlite')
    db = SqliteDict(filename=fname)
    with db:
        db['key'] = 'value'
        db.commit()
    with db:
        db['key'] = 'value'
        db.commit()
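A note on the pattern above, as a small sketch rather than part of the test suite (the file name ctx.sqlite is hypothetical): entering the with block opens the connection, leaving it closes the connection, and the same SqliteDict object can be re-entered later, which is exactly what this test verifies.

from sqlitedict import SqliteDict

db = SqliteDict('ctx.sqlite')
with db:                  # connection opened
    db['a'] = 1
    db.commit()
# connection closed on exiting the block
with db:                  # transparently reopened
    assert db['a'] == 1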
Example 4: test_overwrite_using_flag_n
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def test_overwrite_using_flag_n(self):
    """Re-opening a database with flag='n' destroys all of its contents."""
    # given,
    fname = norm_file('tests/db/sqlitedict-override-test.sqlite')
    orig_db = SqliteDict(filename=fname, tablename='sometable')
    orig_db['key'] = 'value'
    orig_db.commit()
    orig_db.close()
    # verify,
    next_db = SqliteDict(filename=fname, tablename='sometable', flag='n')
    self.assertNotIn('key', next_db.keys())
Example 5: test_default_reuse_existing_flag_c
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def test_default_reuse_existing_flag_c(self):
    """Re-opening a database with the default flag='c' does not destroy it."""
    # given,
    fname = norm_file('tests/db/sqlitedict-override-test.sqlite')
    orig_db = SqliteDict(filename=fname)
    orig_db['key'] = 'value'
    orig_db.commit()
    orig_db.close()
    next_db = SqliteDict(filename=fname)
    self.assertIn('key', next_db.keys())
    self.assertEqual(next_db['key'], 'value')
Example 6: test_irregular_tablenames
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def test_irregular_tablenames(self):
    """Irregular table names need to be quoted."""
    db = SqliteDict(':memory:', tablename='9nine')
    db['key'] = 'value'
    db.commit()
    self.assertEqual(db['key'], 'value')
    db.close()

    db = SqliteDict(':memory:', tablename='outer space')
    db['key'] = 'value'
    db.commit()
    self.assertEqual(db['key'], 'value')
    db.close()

    with self.assertRaisesRegexp(ValueError, r'^Invalid tablename '):
        SqliteDict(':memory:', '"')
Example 7: _persist_v1
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def _persist_v1(file_path, zg):
    print 'Creating meta db...'
    zipgun_info = SqliteDict(
        file_path, tablename='zipgun_info', autocommit=False)
    zipgun_info['version'] = 1
    zipgun_info['country_codes'] = zg.country_postal_codes.keys()
    zipgun_info.commit()
    for country_code in zg.country_postal_codes:
        print 'Creating {} db...'.format(country_code)
        country_data = SqliteDict(
            file_path, tablename='zg_{}'.format(country_code),
            autocommit=False)
        country_data.update(zg.country_postal_codes[country_code])
        country_data.commit()
        time.sleep(1.0)  # Pretty bullshit
        country_data.close()
    zipgun_info.close()
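Example 7 relies on several logical dicts sharing a single sqlite file through distinct tablename values. A minimal sketch of that layout (file and table names here are illustrative):

from sqlitedict import SqliteDict

meta = SqliteDict('multi.sqlite', tablename='zipgun_info', autocommit=False)
data = SqliteDict('multi.sqlite', tablename='zg_US', autocommit=False)
meta['version'] = 1
data['90210'] = {'city': 'Beverly Hills'}
meta.commit()
data.commit()
meta.close()
data.close()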
Example 8: ModelCacheStoreSqlite
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
class ModelCacheStoreSqlite(ModelCacheStore):
    """BTree lookup implementation."""

    def __init__(self, name):
        from sqlitedict import SqliteDict
        self.datadict = SqliteDict(name)

    def sync(self):
        return self.datadict.commit()  # commit() instead of sync()
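A hypothetical usage sketch for the class above (the ModelCacheStore base class is not shown here, so its mapping interface is assumed): writes go through datadict, and sync() persists them via SqliteDict.commit.

cache = ModelCacheStoreSqlite('/tmp/model-cache.sqlite')  # path is illustrative
cache.datadict['model-1'] = {'weights': [0.1, 0.2]}       # values are pickled transparently
cache.sync()                                              # delegates to SqliteDict.commit()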
Example 9: test_overwrite_using_flag_w
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def test_overwrite_using_flag_w(self):
    """Re-opening of a database with flag='w' destroys only the target table."""
    # given,
    fname = norm_file('tests/db/sqlitedict-override-test.sqlite')
    orig_db_1 = SqliteDict(filename=fname, tablename='one')
    orig_db_1['key'] = 'value'
    orig_db_1.commit()
    orig_db_1.close()

    orig_db_2 = SqliteDict(filename=fname, tablename='two')
    orig_db_2['key'] = 'value'
    orig_db_2.commit()
    orig_db_2.close()

    # verify: when re-opening table space 'one' with flag='w', we destroy
    # its contents. However, when re-opening table space 'two' with the
    # default flag='c', its contents remain.
    next_db_1 = SqliteDict(filename=fname, tablename='one', flag='w')
    self.assertNotIn('key', next_db_1.keys())

    next_db_2 = SqliteDict(filename=fname, tablename='two')
    self.assertIn('key', next_db_2.keys())
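Examples 4, 5, 9 and 10 together cover sqlitedict's four open flags. As a quick reference sketch (flags.sqlite is a placeholder; each call returns an independent SqliteDict):

from sqlitedict import SqliteDict

SqliteDict('flags.sqlite', flag='c')  # default: create if missing, reuse existing data
SqliteDict('flags.sqlite', flag='r')  # read-only; mutating calls raise RuntimeError (Example 10)
SqliteDict('flags.sqlite', flag='w')  # drop only the target table on open (Example 9)
SqliteDict('flags.sqlite', flag='n')  # drop the whole database on open (Example 4)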
Example 10: test_readonly
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def test_readonly(self):
    fname = norm_file('tests/db/sqlitedict-override-test.sqlite')
    orig_db = SqliteDict(filename=fname)
    orig_db['key'] = 'value'
    orig_db['key_two'] = 2
    orig_db.commit()
    orig_db.close()

    readonly_db = SqliteDict(filename=fname, flag='r')
    self.assertTrue(readonly_db['key'] == 'value')
    self.assertTrue(readonly_db['key_two'] == 2)

    def attempt_write():
        readonly_db['key'] = ['new_value']

    def attempt_update():
        readonly_db.update(key='value2', key_two=2.1)

    def attempt_delete():
        del readonly_db['key']

    def attempt_clear():
        readonly_db.clear()

    def attempt_terminate():
        readonly_db.terminate()

    attempt_funcs = [attempt_write,
                     attempt_update,
                     attempt_delete,
                     attempt_clear,
                     attempt_terminate]

    for func in attempt_funcs:
        with self.assertRaises(RuntimeError):
            func()
Example 11: SimServer
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
class SimServer(object):
    """
    Top-level functionality for similarity services. A similarity server takes
    care of::

        1. creating semantic models
        2. indexing documents using these models
        3. finding the most similar documents in an index.

    An object of this class can be shared across network via Pyro, to answer remote
    client requests. It is thread safe. Using a server concurrently from multiple
    processes is safe for reading = answering similarity queries. Modifying
    (training/indexing) is realized via locking = serialized internally.
    """
    def __init__(self, basename, use_locks=False):
        """
        All data will be stored under directory `basename`. If there is a server
        there already, it will be loaded (resumed).

        The server object is stateless in RAM -- its state is defined entirely by its location.
        There is therefore no need to store the server object.
        """
        if not os.path.isdir(basename):
            raise ValueError("%r must be a writable directory" % basename)
        self.basename = basename
        self.use_locks = use_locks
        self.lock_update = threading.RLock() if use_locks else gensim.utils.nocm
        try:
            self.fresh_index = SimIndex.load(self.location('index_fresh'))
        except:
            logger.debug("starting a new fresh index")
            self.fresh_index = None
        try:
            self.opt_index = SimIndex.load(self.location('index_opt'))
        except:
            logger.debug("starting a new optimized index")
            self.opt_index = None
        try:
            self.model = SimModel.load(self.location('model'))
        except:
            self.model = None
        self.payload = SqliteDict(self.location('payload'), autocommit=True, journal_mode=JOURNAL_MODE)
        self.flush(save_index=False, save_model=False, clear_buffer=True)
        logger.info("loaded %s" % self)

    def location(self, name):
        return os.path.join(self.basename, name)

    @gensim.utils.synchronous('lock_update')
    def flush(self, save_index=False, save_model=False, clear_buffer=False):
        """Commit all changes, clear all caches."""
        if save_index:
            if self.fresh_index is not None:
                self.fresh_index.save(self.location('index_fresh'))
            if self.opt_index is not None:
                self.opt_index.save(self.location('index_opt'))
        if save_model:
            if self.model is not None:
                self.model.save(self.location('model'))
        self.payload.commit()
        if clear_buffer:
            if hasattr(self, 'fresh_docs'):
                try:
                    self.fresh_docs.terminate()  # erase all buffered documents + file on disk
                except:
                    pass
            self.fresh_docs = SqliteDict(journal_mode=JOURNAL_MODE)  # buffer defaults to a random location in temp
        self.fresh_docs.sync()

    def close(self):
        """Explicitly close open file handles, databases etc."""
        try:
            self.payload.close()
        except:
            pass
        try:
            self.model.close()
        except:
            pass
        try:
            self.fresh_index.close()
        except:
            pass
        try:
            self.opt_index.close()
        except:
            pass
        try:
            self.fresh_docs.terminate()
        except:
            pass

    def __del__(self):
        """When the server goes out of scope, make an effort to close its DBs."""
        self.close()

    @gensim.utils.synchronous('lock_update')
#......... (remainder of this example omitted) .........
Example 12: IMAPMailbox
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
#......... (beginning of this class omitted) .........
        return a

    def _seqMessageSetToSeqDict(self, messageSet):
        if not messageSet.last:
            messageSet.last = self.getMessageCount()
        seqMap = {}
        msgs = self.__msg_list_()
        for messageNum in messageSet:
            if messageNum > 0 and messageNum <= self.getMessageCount():
                seqMap[messageNum] = msgs[messageNum - 1]
        return seqMap

    def fetch(self, messages, uid):
        return [[seq, MaildirMessage(seq,
                                     file(filename, 'rb').read(),
                                     self.msg_info[filename.split('/')[-1]]['flags'],
                                     rfc822date())]
                for seq, filename in self.__fetch_(messages, uid).iteritems()]

    def __fetch_(self, messages, uid):
        if uid:
            messagesToFetch = {}
            if not messages.last:
                messages.last = self.mbox_info['uidnext']
            fn_uid = dict((fn, self.msg_info[fn]['uid']) for fn in self.msg_info.keys())
            for uid in messages:
                if uid in fn_uid.values():
                    for name, _id in fn_uid.iteritems():
                        if uid == _id:
                            if os.path.exists(os.path.join(self.path, 'new', name)):
                                messagesToFetch[uid] = os.path.join(self.path, 'new', name)
                            elif os.path.exists(os.path.join(self.path, 'cur', name)):
                                messagesToFetch[uid] = os.path.join(self.path, 'cur', name)
        else:
            messagesToFetch = self._seqMessageSetToSeqDict(messages)
        return messagesToFetch

    def store(self, messages, flags, mode, uid):
        d = {}
        for _id, path in self.__fetch_(messages, uid).iteritems():
            filename = path.split('/')[-1]
            if mode < 0:
                old_f = self.msg_info[filename]
                old_f['flags'] = list(set(old_f['flags']).difference(set(flags)))
                self.msg_info[filename] = old_f
                if misc.IMAP_FLAGS['SEEN'] in flags and path.split('/')[-2] != 'new':
                    new_path = os.path.join(self.path, 'new', filename)
                    os.rename(path, new_path)
            elif mode == 0:
                old_f = self.msg_info[filename]
                old_f['flags'] = flags
                self.msg_info[filename] = old_f
            elif mode > 0:
                old_f = self.msg_info[filename]
                old_f['flags'] = list(set(old_f['flags']).union(set(flags)))
                self.msg_info[filename] = old_f
                if misc.IMAP_FLAGS['SEEN'] in flags and path.split('/')[-2] != 'cur':
                    new_path = os.path.join(self.path, 'cur', filename)
                    os.rename(path, new_path)
            d[_id] = self.msg_info[filename]['flags']
        #self.msg_info.commit(blocking=False)  # XXX
        return d

    def expunge(self):
        uids = []
        for path in self.__msg_list_():
            fn = path.split('/')[-1]
            if fn not in self.msg_info.keys():
                continue
            uid = self.msg_info[fn]['uid']
            if misc.IMAP_FLAGS['DELETED'] in self.msg_info[fn]['flags']:
                os.remove(path)
                del self.msg_info[fn]
                uids.append(uid)
        #self.msg_info.commit(blocking=False)  # XXX
        return uids

    def addListener(self, listener):
        self.listeners.append(listener)
        return True

    def removeListener(self, listener):
        self.listeners.remove(listener)
        return True

    def requestStatus(self, names):
        return imap4.statusRequestHelper(self, names)

    def destroy(self):
        pass

    def close(self):
        print('!!! %s - %d !!!' % (self.path, len(self.listeners)))
        if len(self.listeners) == 0:
            self._stop_monitor()
            if conf.imap_expunge_on_close:
                self.expunge()
            self.msg_info.commit(blocking=False)
            self.mbox_info.commit(blocking=False)
        self.msg_info.close()
        self.mbox_info.close()
Example 13: main
# Required import: from sqlitedict import SqliteDict [as alias]
# Or: from sqlitedict.SqliteDict import commit [as alias]
def main(result_file, site_file, constant_modification_list=None, variable_modification_list=None,
         enzyme_info=None, n_processes=4, output_file=None):
    if output_file is None:
        # output_file = os.path.splitext(result_file)[0] + '.theoretical_ions'
        output_file = os.path.splitext(result_file)[0] + ".db"
    else:
        output_file += ".db"
    modification_table = RestrictedModificationTable.bootstrap(constant_modification_list, variable_modification_list)
    if constant_modification_list is None and variable_modification_list is None:
        modification_table = ModificationTable.bootstrap()
    if isinstance(site_file, basestring):
        site_list = [line.strip() for line in open(site_file, "r")]
        site_list = list(map(int, site_list))
    else:
        site_list = site_file
    compo_dict = csv.DictReader(open(result_file, "r"), delimiter=",")
    colnames = compo_dict.fieldnames
    glycan_identity = get_glycan_identities(colnames)
    enzyme_info = map(get_enzyme, enzyme_info)
    tag = datetime.datetime.strftime(datetime.datetime.now(), "%Y%m%d-%H%M%S")
    metadata = {
        "glycan_identities": glycan_identity,
        "constant_modifications": constant_modification_list,
        "variable_modifications": variable_modification_list,
        "site_list": site_list,
        "ms1_output_file": result_file,
        "enzyme": enzyme_info,
        "tag": tag,
        "enable_partial_hexnac_match": constants.PARTIAL_HEXNAC_LOSS
    }
    metadata_store = SqliteDict(output_file, tablename="metadata", flag='n')
    metadata_store.update(metadata)
    metadata_store.commit()

    theoretical_search_space_store = SqliteDict(output_file, tablename="theoretical_search_space")
    pool = multiprocessing.Pool(n_processes)
    task_fn = functools.partial(process_predicted_ms1_ion, modification_table=modification_table,
                                site_list=site_list, glycan_identity=glycan_identity)
    cntr = 0
    if n_processes > 1:
        logger.debug("Building theoretical sequences concurrently")
        for res in itertools.chain.from_iterable(pool.imap(task_fn, compo_dict, chunksize=500)):
            theoretical_search_space_store[cntr] = res
            cntr += 1
    else:
        logger.debug("Building theoretical sequences sequentially")
        for row in compo_dict:
            res = task_fn(row)
            for item in res:
                theoretical_search_space_store[cntr] = item
                cntr += 1
                if (cntr % 10000) == 0:
                    theoretical_search_space_store.commit()
                    logger.info("Committing, %d records made", cntr)
    theoretical_search_space_store.commit()
    theoretical_search_space_store.close()
    pool.close()
    pool.join()
    pool.terminate()
    logger.info("Hypothesis building complete")
    return output_file
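The periodic commit every 10,000 records in Example 13 is a common bulk-load pattern: it keeps each transaction bounded instead of committing per item or holding everything in one giant transaction. A minimal standalone sketch of it, assuming any iterable of (key, value) pairs:

from sqlitedict import SqliteDict

def bulk_load(records, path, batch_size=10000):
    store = SqliteDict(path, autocommit=False)
    for i, (key, value) in enumerate(records, start=1):
        store[key] = value
        if i % batch_size == 0:
            store.commit()  # flush a full batch
    store.commit()          # flush the final partial batch
    store.close()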