本文整理汇总了Python中cache.Cache.exists方法的典型用法代码示例。如果您正苦于以下问题:Python Cache.exists方法的具体用法?Python Cache.exists怎么用?Python Cache.exists使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cache.Cache
的用法示例。
在下文中一共展示了Cache.exists方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from cache import Cache [as 别名]
# 或者: from cache.Cache import exists [as 别名]
def __init__(self, q, cached=True):
    """Populate this list with UrbanDictionaryDefinition objects for query q.

    Definitions are fetched live from the Urban Dictionary SOAP API unless
    a pickled response for this query already exists in the local cache.

    q      -- the term to look up.
    cached -- when True, read from / write to the on-disk pickle cache.
    """
    url = "http://api.urbandictionary.com/soap"
    key = "91cf66fb7f14bbf7fb59c7cf5e22155f"
    # Live connect for uncached queries
    # or queries we do not have in cache.
    cache = Cache("urbandictionary", ".pickle")
    if not cached or not cache.exists(q):
        server = soap.SOAPProxy(url)
        definitions = server.lookup(key, q)
        data = []
        for item in definitions:
            ubd = UrbanDictionaryDefinition(
                item.word, item.url, item.definition, item.example, item.author
            )
            self.append(ubd)
            # BUGFIX: the second cached field must be the definition URL,
            # not the word again -- the cache-read branch below unpacks
            # item[1] and feeds it to UrbanDictionaryDefinition as the url.
            data.append([item.word, item.url, item.definition, item.example, item.author])
        # Cache a pickled version of the response.
        if cached:
            data = pickle.dumps(data)
            cache.write(q, data)
    # For cached queries,
    # unpack the pickled version in the cache.
    else:
        definitions = cache.read(q)
        definitions = pickle.loads(definitions)
        for item in definitions:
            ubd = UrbanDictionaryDefinition(
                item[0], item[1], item[2], item[3], item[4]
            )
            self.append(ubd)
示例2: __init__
# 需要导入模块: from cache import Cache [as 别名]
# 或者: from cache.Cache import exists [as 别名]
def __init__(self, q, cached=True):
    """Populate this list with UrbanDictionaryDefinition objects for query q.

    Same as the non-WSDL variant, but raises UrbanDictionaryError when the
    SOAP endpoint refuses the lookup (the API has been discontinued).

    q      -- the term to look up.
    cached -- when True, read from / write to the on-disk pickle cache.
    """
    url = "http://api.urbandictionary.com/soap?wsdl"
    key = "91cf66fb7f14bbf7fb59c7cf5e22155f"
    # Live connect for uncached queries
    # or queries we do not have in cache.
    cache = Cache("urbandictionary", ".pickle")
    if not cached or not cache.exists(q):
        server = soap.SOAPProxy(url)
        try:
            definitions = server.lookup(key, q)
        except soap.faultType:
            # BUGFIX: the original "except Exception, soap.faultType:" used
            # the old Py2 binding form, which caught *every* exception and
            # clobbered the soap.faultType attribute with the caught object.
            # Catch the SOAP fault type itself, with modern raise syntax.
            raise UrbanDictionaryError("the API is no longer supported")
        data = []
        for item in definitions:
            ubd = UrbanDictionaryDefinition(item.word, item.url, item.definition, item.example, item.author)
            self.append(ubd)
            # BUGFIX: cache the url as the second field (readers of the
            # cache unpack item[1] as the definition URL).
            data.append([item.word, item.url, item.definition, item.example, item.author])
        # Cache a pickled version of the response.
        if cached:
            data = pickle.dumps(data)
            cache.write(q, data)
示例3: ChainDb
# 需要导入模块: from cache import Cache [as 别名]
# 或者: from cache.Cache import exists [as 别名]
#.........这里部分代码省略.........
'scriptPubKey': binascii.hexlify(txout.scriptPubKey)}
return txouts
def gettxidx(self, txhash):
    """Look up the transaction index record for *txhash*.

    Returns a TxIdx (blkhash + spentmask parsed from the stored
    "<blkhash-hex> <spentmask-hex>" value) or None when the hash is
    not present in the 'tx:' index.
    """
    ser_txhash = ser_uint256(txhash)
    try:
        ser_value = self.db.Get('tx:'+ser_txhash)
    except KeyError:
        return None
    # str.find() replaces the deprecated string.find() module function;
    # behavior is identical.
    pos = ser_value.find(' ')
    txidx = TxIdx()
    txidx.blkhash = long(ser_value[:pos], 16)
    txidx.spentmask = long(ser_value[pos+1:], 16)
    return txidx
def gettx(self, txhash):
    """Return the full transaction with hash *txhash*, or None if unknown.

    Resolves the tx index to its containing block, then scans that
    block's transactions for the matching hash.
    """
    idx = self.gettxidx(txhash)
    if idx is None:
        return None
    containing_block = self.getblock(idx.blkhash)
    for candidate in containing_block.vtx:
        candidate.calc_sha256()
        if candidate.sha256 == txhash:
            return candidate
    # Index said the tx lives in this block, but it was not found there.
    self.log.write("ERROR: Missing TX %064x in block %064x" % (txhash, idx.blkhash))
    return None
def haveblock(self, blkhash, checkorphans):
    """Return True when *blkhash* is known: in the in-memory block cache,
    (optionally) in the orphan pool, or in the on-disk block index."""
    if self.blk_cache.exists(blkhash):
        return True
    if checkorphans and blkhash in self.orphans:
        return True
    # Fall back to the database index; a missing key raises KeyError.
    serialized = ser_uint256(blkhash)
    try:
        self.db.Get('blocks:' + serialized)
    except KeyError:
        return False
    return True
def have_prevblock(self, block):
    """Return True when *block* can be attached: either it is the genesis
    block of an empty chain, or its parent block is already stored."""
    is_genesis = self.getheight() < 0 and block.sha256 == self.netmagic.block0
    if is_genesis:
        return True
    return self.haveblock(block.hashPrevBlock, False)
def getblock(self, blkhash):
block = self.blk_cache.get(blkhash)
if block is not None:
return block
ser_hash = ser_uint256(blkhash)
try:
# Lookup the block index, seek in the file
fpos = long(self.db.Get('blocks:'+ser_hash))
self.blk_read.seek(fpos)
# read and decode "block" msg
msg = message_read(self.netmagic, self.blk_read)
if msg is None:
return None
示例4: CachedRecovery
# 需要导入模块: from cache import Cache [as 别名]
# 或者: from cache.Cache import exists [as 别名]
class CachedRecovery(object):
"""
Create a batch of transactions from master branch keys. Will cache each step and pick up where left off.
"""
def __init__(self, origin_branch, destination_branch, provider, account_gap=None, leaf_gap=None, first_account=0):  # todo - increase gaps
    """Set up per-branch caches and the recovery bookkeeping state."""
    self.origin_branch = origin_branch
    self.destination_branch = destination_branch
    self.cache = Cache(self.origin_branch.id)
    self.known_accounts = {}
    self.tx_db = TxDb(
        lookup_methods=[provider.get_tx],
        read_only_paths=[],
        writable_cache_path='./cache/tx_db',
    )
    # Gaps may arrive as strings; normalise to int, keep None as "unset".
    self.account_gap = None if account_gap is None else int(account_gap)
    self.leaf_gap = None if leaf_gap is None else int(leaf_gap)
    self.first_account = first_account
    self.account_lookahead = True
    self.total_to_recover = 0
def add_known_account(self, account_index, external_leafs=None, internal_leafs=None):
    """
    Adding known account indexes speeds up recovery.
    If leafs are specified, these will be the only ones recovered(LEAF_GAP_LIMIT not used).
    If leafs not specified, addresses will be recovered using LEAF_GAP_LIMIT.
    batch.add_known_account(0)
    batch.add_known_account(1, external_leafs=[0,1,2,3,4], internal_leafs=[0,1,2])
    batch.add_known_account(1, external_leafs={0: "receiving addr 0", 1: "receiving addr 1", ...}, internal_leafs={0: "change addr 0", ...})
    """
    # Normalise each leaf spec: a list becomes {leaf_index: None, ...},
    # a dict keeps its address values with int keys.
    leafs = {0: None, 1: None}
    if external_leafs is not None:
        leafs[0] = {int(v): None for v in external_leafs} if type(external_leafs) == list else {int(k): v for k, v in external_leafs.items()}
    if internal_leafs is not None:
        # BUGFIX: this branch previously iterated external_leafs.items(),
        # so a dict of internal (change) leafs was silently replaced by
        # the external ones.
        leafs[1] = {int(v): None for v in internal_leafs} if type(internal_leafs) == list else {int(k): v for k, v in internal_leafs.items()}
    self.known_accounts[int(account_index)] = leafs  # each branch stored as {leaf_i: None or receiving address, ...} or None
    self.account_lookahead = False
def recover_origin_accounts(self):
    """will pick up where left off due to caching (caching part not implemented)"""
    if not self.account_lookahead: # accounts already known
        for account_index, leafs in self.known_accounts.items():
            # NOTE(review): add_known_account stores *external* leafs at
            # leafs[0] and *internal* at leafs[1], yet they are passed here
            # as internal_leafs=leafs[0] / external_leafs=leafs[1].  The
            # callee then pairs internal_leafs with change=0, so the two
            # swaps appear to cancel out -- confirm before changing either
            # side in isolation.
            existed = self.recover_origin_account(account_index, internal_leafs=leafs[0], external_leafs=leafs[1])
            if not existed:
                # Mark the account as confirmed-absent.
                self.known_accounts[account_index] = False
    else: # searching for accounts
        # Scan forward from the highest known index; the countdown resets
        # every time an account is found (account-gap limit search).
        accounts_ahead_to_check = self.account_gap
        while accounts_ahead_to_check:
            account_index = max(self.known_accounts.keys()) + 1 if self.known_accounts else 0
            existed = self.recover_origin_account(account_index)
            if existed:
                accounts_ahead_to_check = self.account_gap
                self.known_accounts[account_index] = True
            else:
                accounts_ahead_to_check -= 1
                self.known_accounts[account_index] = False
def recover_origin_account(self, account_index, internal_leafs=None, external_leafs=None):
    """
    :returns bool account_found
    If Oracle is one of account sources, will return True on success from Oracle.
    Else, will return True if there is any balance on the acct.
    """
    # todo - balance caching so we don't pull it repeatedly for each acct
    # todo - not exising accounts should get cached too so we don't go through it again
    account = self.cache.load(Cache.ORIGINAL_ACCOUNT, account_index)
    if account is None:
        try:
            account = self.origin_branch.account(account_index)
            if internal_leafs is None or external_leafs is None:
                # Leafs unknown: widen the lookahead window until the
                # balance stops growing.
                account.set_lookahead(self.leaf_gap)
                previous_balance = 0
                while True:
                    address_map = account.make_address_map(do_lookahead=True)
                    balance = account.balance()
                    if balance == previous_balance:
                        for address, path in address_map.items():
                            print 'original %d/%s %s' % (account_index, path, address)
                        break
                    else:
                        account._cache['issued']['0'] += self.leaf_gap # todo - can be optimized
                        # NOTE(review): the same '0' counter is bumped twice;
                        # the second line probably meant ['issued']['1'] (the
                        # change branch) -- confirm before fixing.
                        account._cache['issued']['0'] += self.leaf_gap
                        previous_balance = balance
            else:
                # Leafs known: derive exactly the listed leaf indexes,
                # no lookahead needed.
                account.set_lookahead(0)
                for for_change, leaf_n_array in [(0, internal_leafs), (1, external_leafs)]:
                    for leaf_n in leaf_n_array:
                        address = account.address(leaf_n, change=int(for_change)) # this will get cached in the account object
                        print "original %d/%d/%d %s" % (account_index, for_change, leaf_n, address)
            self.cache.save(Cache.ORIGINAL_ACCOUNT, account_index, account)
        except OracleUnknownKeychainException:
            # Cache the negative result as False so the account is not
            # re-queried on the next run.
            print "! account %d: unkown" % account_index
            self.cache.save(Cache.ORIGINAL_ACCOUNT, account_index, False)
            return False # todo - needs more sophisticated checks, eg bad connection, CC timeouts, etc
    return True if self.origin_branch.needs_oracle else bool(account.balance())
def recover_destination_accounts(self):
"""will pick up where left off due to caching"""
for account_index in self.known_accounts:
if self.known_accounts[account_index] and not self.cache.exists(Cache.DESTINATION_ACCOUNT, account_index):
account = self.destination_branch.account(account_index)
address = account.address(0, change=False) # this will get cached in the account object
print "destination %d/%d/%d %s" % (account_index, 0, 0, address)
self.cache.save(Cache.DESTINATION_ACCOUNT, account_index, account)
#.........这里部分代码省略.........
示例5: TelemetaPreprocessImport
# 需要导入模块: from cache import Cache [as 别名]
# 或者: from cache.Cache import exists [as 别名]
class TelemetaPreprocessImport(object):
def __init__(self, media_dir, dest_dir, log_file):
    """Prepare the preprocessing importer.

    media_dir -- root directory scanned recursively for media files.
    dest_dir  -- output directory for rendered waveforms / analyzer XML
                 (created if missing).
    log_file  -- path handed to the Logger.
    """
    self.root_dir = media_dir
    self.dest_dir = dest_dir
    self.logger = Logger(log_file)
    self.counter = 0
    self.cache = Cache(self.dest_dir)
    self.scheme = GrapherScheme()
    self.width = self.scheme.width
    self.height = self.scheme.height
    self.bg_color = self.scheme.bg_color
    self.color_scheme = self.scheme.color_scheme
    # force/threads come from the scheme; the original also pre-set
    # force=0 / threads=1 and built logger/counter twice -- those were
    # dead assignments immediately overwritten, removed here.
    self.force = self.scheme.force
    self.threads = self.scheme.threads
    self.analyzers = timeside.core.processors(timeside.api.IAnalyzer)
    self.grapher = timeside.grapher.WaveformAwdio(width=self.width,
                                                  height=self.height,
                                                  bg_color=self.bg_color,
                                                  color_scheme=self.color_scheme)
    self.media_list = self.get_media_list()
    if not os.path.exists(self.dest_dir):
        os.makedirs(self.dest_dir)
def get_media_list(self):
    """Return the paths of all non-hidden files under self.root_dir.

    Walks the tree recursively; files whose name starts with '.'
    (e.g. .DS_Store) are skipped.
    """
    media_list = []
    for root, dirs, files in os.walk(self.root_dir):
        if root:
            for filename in files:
                # Renamed loop var: 'file' shadowed the builtin.  The unused
                # 'ext' local was removed, and os.path.join replaces manual
                # root + os.sep + name concatenation.
                if not filename.startswith('.'):
                    media_list.append(os.path.join(root, filename))
    return media_list
def process(self):
for media in self.media_list:
filename = media.split(os.sep)[-1]
name, ext = os.path.splitext(filename)
size = str(self.width) + '_' + str(self.height)
image = self.dest_dir + os.sep + name + '.' + self.scheme.id + '.' + size + '.png'
xml = name + '.xml'
if not self.cache.exists(image) or not self.cache.exists(xml):
mess = 'Processing ' + media
self.logger.write_info(mess)
decoder = timeside.decoder.FileDecoder(media)
pipe = decoder | self.grapher
analyzers = []
analyzers_sub = []
for analyzer in self.analyzers:
subpipe = analyzer()
analyzers_sub.append(subpipe)
pipe = pipe | subpipe
pipe.run()
mess = 'Rendering ' + image
self.logger.write_info(mess)
self.grapher.render(output=image)
mess = 'Frames / Pixel = ' + str(self.grapher.graph.samples_per_pixel)
self.logger.write_info(mess)
for analyzer in analyzers_sub:
value = analyzer.result()
if analyzer.id() == 'duration':
value = datetime.timedelta(0,value)
analyzers.append({'name':analyzer.name(),
'id':analyzer.id(),
'unit':analyzer.unit(),
'value':str(value)})
self.cache.write_analyzer_xml(analyzers, xml)
filename = name
data = name.split('.')
date = data[0]
collection_name = data[1]
other = ''
if len(data) > 2:
other = '.'.join(data[2:])
item = telemeta.models.media.MediaItem.objects.filter(code=filename)
collections = telemeta.models.media.MediaCollection.objects.filter(code=collection_name)
if not collections:
c = telemeta.models.media.MediaCollection(code=collection_name)
c.title = collection_name
c.save()
msg = 'added'
self.logger.write_info(collection_name, msg)
collection = c
else:
collection = collections[0]
#.........这里部分代码省略.........
示例6: ChainDb
# 需要导入模块: from cache import Cache [as 别名]
# 或者: from cache.Cache import exists [as 别名]
#.........这里部分代码省略.........
self.logger.debug("Vault confirmed %064x" % txin.prevout.hash)
#TODO(obulpathi): optimize
for txhash in txhashes:
tmp_tx = self.gettx(txhash)
if tmp_tx.vin[0].prevout.hash == txin.prevout.hash:
tmp_tx.calc_sha256()
txhashes.remove(tmp_tx.sha256)
return txhashes
def getpendingtransactions(self):
    """Return all pending vault transactions as a dict.

    Shape: {"<position>": {'inputs': [vault addresses from scriptSig],
                           'outputs': [{'toaddress': ..., 'amount': nValue}]}}
    keyed by the transaction's position in the pending-hash list.
    """
    pending_txs = {}
    txhashs = self.listpendingtxhashes()
    for i, txhash in enumerate(txhashs):
        pending_tx = {}
        tx = self.gettx(txhash)
        inputs = []
        for txin in tx.vin:
            inputs.append(utils.scriptSig_to_vault_address(txin.scriptSig))
        pending_tx['inputs'] = inputs
        outputs = []
        for n, txout in enumerate(tx.vout):
            output = {
                'toaddress': utils.pending_tx_output_script_to_address(
                    txout.scriptPubKey),
                'amount': txout.nValue}
            outputs.append(output)
        pending_tx['outputs'] = outputs
        # BUGFIX: this was keyed by str(n) -- the index of the *last output*
        # of the current tx -- so distinct transactions collided and
        # overwrote each other. Key by the tx's own list position instead.
        pending_txs[str(i)] = pending_tx
    return pending_txs
def haveblock(self, blkhash, checkorphans):
    """True when *blkhash* is present in the block cache, the orphan pool
    (only consulted when checkorphans is set), or the on-disk index."""
    in_memory = self.blk_cache.exists(blkhash) or (checkorphans and blkhash in self.orphans)
    if in_memory:
        return True
    # Last resort: probe the database index for the serialized hash.
    try:
        self.db.Get('blocks:' + ser_uint256(blkhash))
        return True
    except KeyError:
        return False
def have_prevblock(self, block):
    """True when *block* attaches to our chain: it is the genesis block of
    an empty chain, or its parent is already known."""
    empty_chain = self.getheight() < 0
    if empty_chain and block.sha256 == self.netmagic.block0:
        return True
    return self.haveblock(block.hashPrevBlock, False)
def getblock(self, blkhash):
block = self.blk_cache.get(blkhash)
if block is not None:
return block
ser_hash = ser_uint256(blkhash)
try:
# Lookup the block index, seek in the file
fpos = long(self.db.Get('blocks:'+ser_hash))
self.blk_read.seek(fpos)
# read and decode "block" msg
msg = message_read(self.netmagic, self.blk_read)
if msg is None:
return None