本文整理汇总了Python中sqlitedict.SqliteDict.keys方法的典型用法代码示例。如果您正苦于以下问题:Python SqliteDict.keys方法的具体用法?Python SqliteDict.keys怎么用?Python SqliteDict.keys使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sqlitedict.SqliteDict
的用法示例。
在下文中一共展示了SqliteDict.keys方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: adjust_evernote_font
# 需要导入模块: from sqlitedict import SqliteDict [as 别名]
# 或者: from sqlitedict.SqliteDict import keys [as 别名]
def adjust_evernote_font():
    """Synchronize Evernote note styling with the configured font settings.

    Walks every note in every notebook, re-styles any note whose recorded
    font size or line height differs from the current configuration, and
    stores the applied settings in a local SqliteDict keyed by note GUID.
    DB entries for notes that no longer exist in Evernote are purged.
    """
    # Context manager guarantees the DB is closed even if adjust_note()
    # or the Evernote API raises (the original leaked the handle on error).
    with SqliteDict(conf.db.db_file, autocommit=True) as note_info:
        notes_in_evernote = set()  # set: O(1) membership in the purge step
        for note in get_notes(get_notebooks()):
            guid = note.guid
            notes_in_evernote.add(guid)
            # `guid in note_info` is a single SQL lookup; `in .keys()`
            # would iterate every key in the SqliteDict.
            if guid not in note_info \
                    or note_info[guid][FONT_SIZE] != conf.font_size \
                    or note_info[guid][LINE_HEIGHT] != conf.line_height:
                adjust_note(note)
                note_info[guid] = {FONT_SIZE: conf.font_size,
                                   LINE_HEIGHT: conf.line_height}
        # Forget notes that were deleted in Evernote since the last run.
        guids_to_forget = [guid for guid in note_info.keys()
                           if guid not in notes_in_evernote]
        for guid in guids_to_forget:
            logging.debug("Delete guid from DB: {}".format(guid))
            del note_info[guid]
示例2: test_overwrite_using_flag_n
# 需要导入模块: from sqlitedict import SqliteDict [as 别名]
# 或者: from sqlitedict.SqliteDict import keys [as 别名]
def test_overwrite_using_flag_n(self):
    """Re-opening a database with flag='n' destroys all of its contents."""
    # given: a table with one committed key/value pair,
    fname = norm_file('tests/db/sqlitedict-override-test.sqlite')
    orig_db = SqliteDict(filename=fname, tablename='sometable')
    orig_db['key'] = 'value'
    orig_db.commit()
    orig_db.close()
    # verify: flag='n' ("new") always starts from an empty table, so the
    # previously committed entry must be gone.
    next_db = SqliteDict(filename=fname, tablename='sometable', flag='n')
    self.assertNotIn('key', next_db.keys())
示例3: test_default_reuse_existing_flag_c
# 需要导入模块: from sqlitedict import SqliteDict [as 别名]
# 或者: from sqlitedict.SqliteDict import keys [as 别名]
def test_default_reuse_existing_flag_c(self):
    """Re-opening of a database does not destroy it."""
    # Persist a single entry, then close the connection.
    db_path = norm_file('tests/db/sqlitedict-override-test.sqlite')
    writer = SqliteDict(filename=db_path)
    writer['key'] = 'value'
    writer.commit()
    writer.close()
    # Re-opening with the default flag ('c') must keep the existing data.
    reader = SqliteDict(filename=db_path)
    self.assertIn('key', reader.keys())
    self.assertEqual(reader['key'], 'value')
示例4: test_overwrite_using_flag_w
# 需要导入模块: from sqlitedict import SqliteDict [as 别名]
# 或者: from sqlitedict.SqliteDict import keys [as 别名]
def test_overwrite_using_flag_w(self):
    """Re-opening of a database with flag='w' destroys only the target table."""
    # given: two tables in the same file, each with one committed entry,
    fname = norm_file('tests/db/sqlitedict-override-test.sqlite')
    orig_db_1 = SqliteDict(filename=fname, tablename='one')
    orig_db_1['key'] = 'value'
    orig_db_1.commit()
    orig_db_1.close()
    orig_db_2 = SqliteDict(filename=fname, tablename='two')
    orig_db_2['key'] = 'value'
    orig_db_2.commit()
    orig_db_2.close()
    # verify: re-opening table space 'one' with flag='w' destroys its
    # contents, while table space 'two', re-opened with the default
    # flag='c', keeps its contents.
    next_db_1 = SqliteDict(filename=fname, tablename='one', flag='w')
    self.assertNotIn('key', next_db_1.keys())
    next_db_2 = SqliteDict(filename=fname, tablename='two')
    self.assertIn('key', next_db_2.keys())
示例5: SimServer
# 需要导入模块: from sqlitedict import SqliteDict [as 别名]
# 或者: from sqlitedict.SqliteDict import keys [as 别名]
#.........这里部分代码省略.........
self.model = SimModel(self.fresh_docs, method=method, params=params)
self.flush(save_model=True, clear_buffer=clear_buffer)
@gensim.utils.synchronous('lock_update')
def index(self, corpus=None, clear_buffer=True):
    """
    Permanently index all documents previously added via `buffer`, or
    directly index documents from `corpus`, if specified.

    The indexing model must already exist (see `train`) before this function
    is called.

    Raises AttributeError if no model has been trained yet, and ValueError
    if there is nothing to index (neither `corpus` nor buffered docs).
    """
    if not self.model:
        msg = 'must initialize model for %s before indexing documents' % self.basename
        logger.error(msg)
        raise AttributeError(msg)
    if corpus is not None:
        # use the supplied corpus only (erase existing buffer, if any)
        self.flush(clear_buffer=True)
        self.buffer(corpus)
    if not self.fresh_docs:
        msg = "index called but no indexing corpus specified for %s" % self
        logger.error(msg)
        raise ValueError(msg)
    if not self.fresh_index:
        logger.info("starting a new fresh index for %s" % self)
        self.fresh_index = SimIndex(self.location('index_fresh'), self.model.num_features)
    self.fresh_index.index_documents(self.fresh_docs, self.model)
    if self.opt_index is not None:
        # re-indexed documents supersede their optimized versions; drop
        # the stale entries so queries do not return duplicates
        self.opt_index.delete(self.fresh_docs.keys())
    logger.info("storing document payloads")
    for docid in self.fresh_docs:
        payload = self.fresh_docs[docid].get('payload', None)
        if payload is None:
            # HACK: exit on first doc without a payload (=assume all docs have payload, or none does)
            break
        self.payload[docid] = payload
    self.flush(save_index=True, clear_buffer=clear_buffer)
@gensim.utils.synchronous('lock_update')
def optimize(self):
"""
Precompute top similarities for all indexed documents. This speeds up
`find_similar` queries by id (but not queries by fulltext).
Internally, documents are moved from a fresh index (=no precomputed similarities)
to an optimized index (precomputed similarities). Similarity queries always
query both indexes, so this split is transparent to clients.
If you add documents later via `index`, they go to the fresh index again.
To precompute top similarities for these new documents too, simply call
`optimize` again.
"""
if self.fresh_index is None:
logger.warning("optimize called but there are no new documents")
return # nothing to do!
if self.opt_index is None:
logger.info("starting a new optimized index for %s" % self)
self.opt_index = SimIndex(self.location('index_opt'), self.model.num_features)
示例6: IMAPMailbox
# 需要导入模块: from sqlitedict import SqliteDict [as 别名]
# 或者: from sqlitedict.SqliteDict import keys [as 别名]
class IMAPMailbox(ExtendedMaildir):
implements(imap4.IMailbox, imap4.ICloseableMailbox)
AppendFactory = SerpentAppendMessageTask
def __init__(self, path):
    """Open (creating if necessary) the maildir at `path` and its metadata DBs."""
    maildir.initializeMaildir(path)
    self.listeners = []    # observers notified via newMessages() on changes
    self.path = path
    self.open_flags()      # opens self.msg_info / self.mbox_info SqliteDicts
    self.lastadded = None
    self.__check_flags_()  # seed default mailbox/message metadata
def open_flags(self):
    """Open the per-message and per-mailbox metadata databases."""
    msg_db_path = os.path.join(self.path, conf.imap_msg_info)
    mbox_db_path = os.path.join(self.path, conf.imap_mbox_info)
    self.msg_info = SqliteDict(msg_db_path)
    self.mbox_info = SqliteDict(mbox_db_path)
def _start_monitor(self):
    """Begin watching the maildir's 'new' and 'cur' subdirs for changes."""
    self.notifier = inotify.INotify()
    self.notifier.startReading()
    for subdir in ('new', 'cur'):
        watched = filepath.FilePath(os.path.join(self.path, subdir))
        self.notifier.watch(watched, callbacks=[self._new_files])
def _stop_monitor(self):
    """Stop the inotify watcher and release its connection."""
    self.notifier.stopReading()
    self.notifier.loseConnection()
def _new_files(self, wo, path, code):
    """inotify callback: notify listeners when messages arrive or vanish."""
    if code in (inotify.IN_MOVED_TO, inotify.IN_DELETE):
        for listener in self.listeners:
            listener.newMessages(self.getMessageCount(), self.getRecentCount())
def __check_flags_(self):
    """Seed missing mailbox metadata and register any unseen messages.

    Ensures mbox_info has every expected key, then walks the message
    list and creates a msg_info entry (uid + flags) for each file that
    has none yet.
    """
    # `key in dict` is a single lookup; the original's `in .keys()`
    # iterated every key of the SqliteDict for each test.
    if 'subscribed' not in self.mbox_info:
        self.mbox_info['subscribed'] = False
    if 'flags' not in self.mbox_info:
        self.mbox_info['flags'] = []
    if 'special' not in self.mbox_info:
        self.mbox_info['special'] = ''
    if 'uidvalidity' not in self.mbox_info:
        self.mbox_info['uidvalidity'] = random.randint(0, 2**32)
    if 'uidnext' not in self.mbox_info:
        self.mbox_info['uidnext'] = 1
    #self.mbox_info.commit(blocking=False) # XXX
    # Iterate the message list directly; the original materialized it
    # into a throwaway list whose loop variable shadowed itself (`l`).
    for msg_path in self.__msg_list_():
        fn = msg_path.split('/')[-1]
        if fn not in self.msg_info:
            val1 = {'uid': self.getUIDNext()}
            # messages still sitting in 'new' have not been read yet
            if msg_path.split('/')[-2] == 'new':
                val1['flags'] = []
            else:
                val1['flags'] = [misc.IMAP_FLAGS['SEEN']]
            self.msg_info[fn] = val1
    #self.msg_info.commit(blocking=False) # XXX
def subscribe(self):
    """Mark this mailbox as subscribed (IMAP SUBSCRIBE)."""
    self.mbox_info['subscribed'] = True
    #self.mbox_info.commit(blocking=False) # XXX
def unsubscribe(self):
    """Mark this mailbox as unsubscribed (IMAP UNSUBSCRIBE)."""
    self.mbox_info['subscribed'] = False
    #self.mbox_info.commit(blocking=False) # XXX
def is_subscribed(self):
    """Return True if the mailbox is currently subscribed."""
    return self.mbox_info['subscribed']
def __count_flagged_msgs_(self, flag):
    """Return the number of messages whose flag list contains `flag`.

    The original built a throwaway list of zeros only to take its len();
    summing a generator counts matches without materializing anything.
    """
    return sum(1 for fn in self.msg_info.keys()
               if flag in self.msg_info[fn]['flags'])
def getHierarchicalDelimiter(self):
    """Return the character that separates levels in mailbox names."""
    return misc.IMAP_HDELIM
def setSpecial(self, special):
    """Record the mailbox's special-use flag (reported by getMboxFlags)."""
    self.mbox_info['special'] = special
    #self.mbox_info.commit(blocking=False) # XXX
def getFlags(self):
    """Return the sorted list of message flags this mailbox supports."""
    return sorted(misc.IMAP_FLAGS.values())
def getMboxFlags(self):
    """Return the mailbox's flags, plus its 'special' flag when one is set."""
    flags = list(self.mbox_info['flags'])
    special = self.mbox_info['special']
    if special != '':
        flags.append(special)
    return flags
def addFlag(self, flag):
    """Add `flag` to the mailbox's flag set (no-op if already present)."""
    current = set(self.mbox_info['flags'])
    current.add(flag)
    self.mbox_info['flags'] = list(current)
    #self.mbox_info.commit(blocking=False) # XXX
def removeFlag(self, flag):
    """Remove `flag` from the mailbox's flag set (no-op if absent)."""
    current = set(self.mbox_info['flags'])
    current.discard(flag)
    self.mbox_info['flags'] = list(current)
    #self.mbox_info.commit(blocking=False) # XXX
def hasChildren(self):
    """Flag this mailbox as having child mailboxes.

    Swaps the HASNOCHILDREN mailbox flag for HASCHILDREN.
    """
    # NOTE(review): getFlags() returns the supported *message* flags
    # (misc.IMAP_FLAGS), yet the membership tests below use MBOX_FLAGS
    # values — this looks like it was meant to be getMboxFlags(); confirm.
    flags = self.getFlags()
    if misc.MBOX_FLAGS['HASCHILDREN'] not in flags:
        self.addFlag(misc.MBOX_FLAGS['HASCHILDREN'])
    if misc.MBOX_FLAGS['HASNOCHILDREN'] in flags:
        self.removeFlag(misc.MBOX_FLAGS['HASNOCHILDREN'])
def hasNoChildren(self):
flags = self.getFlags()
if misc.MBOX_FLAGS['HASNOCHILDREN'] not in flags:
#.........这里部分代码省略.........
示例7: print
# 需要导入模块: from sqlitedict import SqliteDict [as 别名]
# 或者: from sqlitedict.SqliteDict import keys [as 别名]
# Report drivetrain power-flow results from the OpenMDAO problem `top`,
# then dump the recorded first driver iteration from the SQLite history DB.
# The bare `print` statements were Python-2 leftovers — under Python 3 they
# are no-op name expressions; they are now explicit print() calls so the
# intended blank separator lines are actually emitted.
print()
print('bat out v: %f ' % top['battery.output_voltage'])
print('invert in v: %f' % top['inverter.input_voltage'])
print('mot in volt: %f' % top['motor.phase_voltage'])
print('invert out volt: %f' % top['inverter.output_voltage'])
print()
print('invert in pow %f' % top['inverter.input_power'])
in_pow = top['inverter.input_voltage'] * top['inverter.input_current']
print('calc inverter in pow %f' % in_pow)
# 3-phase output power: V * I * 3 * sqrt(2/3)
output_power = top['inverter.output_voltage'] * top[
    'inverter.output_current'] * 3.0 * np.sqrt(2.0 / 3.0)
print('calc output pow %f' % output_power)
print('bat des pow: %f' % top['battery.des_power'])
print()
print('Inv in cur %f' % top['inverter.input_current'])
print('mot des pow %f' % top['design_power'])
print('mot input cur %f' % top['motor.phase_current'])
print('mot input volt %f' % top['motor.phase_voltage'])
# print('ncells %f' % top['Battery'])
db = SqliteDict('drivetraindb', 'openmdao')
pprint(db.keys())
data = db['rank0:Driver/1']
pprint(data['Parameters'])
print()
print()
pprint(data['Unknowns'])
db.close()  # release the sqlite handle before deleting the file below
top.cleanup()
remove('drivetraindb')
示例8: OptimizationHistory
# 需要导入模块: from sqlitedict import SqliteDict [as 别名]
# 或者: from sqlitedict.SqliteDict import keys [as 别名]
def OptimizationHistory(self):
"""
Reads in database history file and stores contents.
Function information is stored as a dict in func_data,
variable information is stored as a dict in var_data,
and bounds information is stored as a dict in bounds.
"""
# Initialize dictionaries for design variables and unknowns.
# The data is saved redundantly in dicts for all iterations and then
# for major iterations as well.
self.func_data_all = {}
self.func_data_major = {}
self.var_data_all = {}
self.var_data_major = {}
db = {}
self.num_iter = 0
# Loop over each history file name provided by the user.
for histIndex, histFileName in enumerate(self.histList):
# If they only have one history file, we don't change the keys' names
if len(self.histList) == 1:
histIndex = ''
else: # If multiple history files, append letters to the keys,
# such that 'key' becomes 'key_A', 'key_B', etc
histIndex = '_' + chr(histIndex + ord('A'))
self.histIndex = histIndex
try: # This is the classic method of storing history files
db = shelve.open(histFileName, 'r')
OpenMDAO = False
except: # Bare except because error is not in standard Python.
# If the db has the 'iterations' tag, it's an OpenMDAO db.
db = SqliteDict(histFileName, 'iterations')
OpenMDAO = True
# If it has no 'iterations' tag, it's a pyOptSparse db.
if db.keys() == []:
OpenMDAO = False
db = SqliteDict(histFileName)
# Specific instructions for OpenMDAO databases
if OpenMDAO:
# Get the number of iterations by looking at the largest number
# in the split string names for each entry in the db
if major_python_version == 3:
for string in db.keys():
string
string = string.split('|')
else:
string = db.keys()[-1].split('|')
nkey = int(string[-1])
self.solver_name = string[0]
# Initalize a list detailing if the iterations are major or minor
self.iter_type = np.zeros(nkey)
# Get the keys of the database where derivatives were evaluated.
# These correspond to major iterations, while no derivative
# info is calculated for gradient-free linesearches.
deriv_keys = SqliteDict(histFileName, 'derivs').keys()
self.deriv_keys = [int(key.split('|')[-1]) for key in deriv_keys]
# Save information from the history file for the unknowns.
self.SaveDBData(db, self.func_data_all, self.func_data_major, OpenMDAO=OpenMDAO, data_str='Unknowns')
# Save information from the history file for the design variables.
self.SaveDBData(db, self.var_data_all, self.var_data_major, OpenMDAO=OpenMDAO, data_str='Parameters')
# Add labels to OpenMDAO variables.
# Corresponds to constraints, design variables, and objective.
try:
db = SqliteDict(histFileName, 'metadata')
self.SaveOpenMDAOData(db)
except KeyError: # Skip metadata info if not included in OpenMDAO hist file
pass
else:
# Get the number of iterations
nkey = int(db['last']) + 1
self.nkey = nkey
# Initalize a list detailing if the iterations are major or minor
self.iter_type = np.zeros(nkey)
# Check to see if there is bounds information in the db file.
# If so, add them to self.bounds to plot later.
try:
bounds_dict = dict(db['varBounds'].items() + db['conBounds'].items())
for key in bounds_dict.keys():
bounds_dict[key + histIndex] = bounds_dict.pop(key)
self.bounds.update(bounds_dict)
except KeyError:
pass
#.........这里部分代码省略.........
示例9: object_list
# 需要导入模块: from sqlitedict import SqliteDict [as 别名]
# 或者: from sqlitedict.SqliteDict import keys [as 别名]
def object_list(self):
    """Return a list of the names of all stored objects.

    The keys are materialized before the metadata DB is closed:
    SqliteDict yields keys lazily from the open connection, so handing
    the raw generator to the caller (as the original did) both leaked
    the connection and contradicted the documented list return.
    """
    with SqliteDict(self._meta.filename, 'objects') as objects_metadata:
        return list(objects_metadata.keys())