本文整理汇总了Python中cache.Cache类的典型用法代码示例。如果您正苦于以下问题:Python Cache类的具体用法?Python Cache怎么用?Python Cache使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Cache类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, q, cached=True):
    """Look up *q* on Urban Dictionary and append the definitions to self.

    q      -- the term to look up.
    cached -- when True, serve/store results via a local pickle cache.
    """
    url = "http://api.urbandictionary.com/soap"
    key = "91cf66fb7f14bbf7fb59c7cf5e22155f"
    # Live connect for uncached queries
    # or queries we do not have in cache.
    cache = Cache("urbandictionary", ".pickle")
    if not cached or not cache.exists(q):
        server = soap.SOAPProxy(url)
        definitions = server.lookup(key, q)
        data = []
        for item in definitions:
            ubd = UrbanDictionaryDefinition(
                item.word, item.url, item.definition, item.example, item.author
            )
            self.append(ubd)
            # BUG FIX: the second field must be item.url, not item.word again --
            # the cached-read branch below passes item[1] as the url argument.
            data.append([item.word, item.url, item.definition, item.example, item.author])
        # Cache a pickled version of the response.
        if cached:
            data = pickle.dumps(data)
            cache.write(q, data)
    # For cached queries,
    # unpack the pickled version in the cache.
    else:
        definitions = cache.read(q)
        # NOTE(review): pickle.loads on cache contents is only safe because the
        # cache is written locally by this same code.
        definitions = pickle.loads(definitions)
        for item in definitions:
            ubd = UrbanDictionaryDefinition(
                item[0], item[1], item[2], item[3], item[4]
            )
            self.append(ubd)
示例2: BaseHandler
class BaseHandler(tornado.web.RequestHandler):
    """Common helpers for request handlers: cache access, hashing, JSON output."""

    def __init__(self, *arg, **arg_key_word):
        super(BaseHandler, self).__init__(*arg, **arg_key_word)
        self.cache = Cache()
        # Kept for backward compatibility with any external users of self.m;
        # md5_code no longer relies on it (see below).
        self.m = hashlib.md5()
        self.dbm = DataBaseManager()

    @tornado.gen.coroutine
    def response_as_json(self, res):
        """Serialize *res* to JSON and finish the request."""
        self.set_header("Content-Type", 'application/json; charset="utf-8"')
        self.write(json.dumps(res))
        self.finish()

    def md5_code(self, string):
        """Return the MD5 hex digest of *string*.

        BUG FIX: previously this called self.m.update(string), so the shared
        digest accumulated input across calls and every call after the first
        returned a history-dependent (wrong) hash. Hash each input freshly.
        """
        return hashlib.md5(string).hexdigest()

    def set_cache(self, key, value, time_out):
        """Store *value* under *key* with the given expiry."""
        self.cache.set_cache(
            key=key,
            value=value,
            time_out=time_out
        )

    def get_cache(self, key):
        """Fetch the cached value for *key*."""
        return self.cache.get_cache(key)

    def clear_cache(self, key):
        """Remove *key* from the cache."""
        self.cache.clear_cache(key)
示例3: MetadataReader
class MetadataReader(object):
    """Collect metadata from the image files matched by a glob pattern."""

    def __init__(self, globstring, cache=None):
        """globstring -- glob pattern selecting the files to read.
        cache -- optional cache location; when given, a Cache backs read().
        """
        super(MetadataReader, self).__init__()
        self.globstring = globstring
        if cache:
            from cache import Cache
            self.cache = Cache(cache)
        else:
            self.cache = None

    def read(self):
        """Return a list of per-file metadata dicts (cached when configured)."""
        matched = glob.glob(self.globstring)
        if self.cache:
            # Delegate both refresh and retrieval to the cache.
            self.cache.update(matched, metadata_read)
            return self.cache.get_metadatas()
        collected = []
        for path in matched:
            stamp, meta, mtime = metadata_read(path)
            collected.append(dict(file=os.path.basename(path),
                                  meta=meta,
                                  timestamp=unicode(stamp)))
        return collected
示例4: __init__
def __init__(self, db=None, artist=None, album=None, min_cache=100,
             max_cache=1000, commit_after=100):
    """Create a new track factory.

    >>> a = Track(min_cache=1, max_cache=1)

    db           -- sqlite3 connection; an in-memory database is used when None.
    artist       -- Artist factory to reuse; built on the same db when None.
    album        -- Album factory to reuse; built on the same db when None.
    min_cache    -- lower bound forwarded to each Cache.
    max_cache    -- upper bound forwarded to each Cache.
    commit_after -- pending changes to accumulate before committing.
    """
    # Separate caches for lookups by key and by database id.
    self.__cache_key = Cache(min_cache, max_cache)
    self.__cache_id = Cache(min_cache, max_cache)
    if db is None:
        db = sqlite3.connect(':memory:')
    self.__db = db
    # Return rows as dicts instead of tuples.
    self.__db.row_factory = dict_factory
    # NOTE(review): sqlite3 documents 'IMMEDIATE'; the mixed-case spelling is
    # presumably accepted because it is interpolated verbatim -- confirm.
    self.__db.isolation_level = 'Immediate'
    self.__cursor = self.__db.cursor()
    # Sibling factories share the connection and cache bounds.
    if artist is None:
        artist = Artist(db=db, min_cache=min_cache, max_cache=max_cache,
                        commit_after=commit_after)
    self.__artist = artist
    if album is None:
        album = Album(db=db, artist=artist, min_cache=min_cache,
                      max_cache=max_cache, commit_after=commit_after)
    self.__album = album
    self.__tbl_name = 'track'
    self.__pending_changes = 0
    self.__commit_after = commit_after
    self.__init_db__()
示例5: get
def get(url, dest):
    """Get file from <url> and save it to <dest>.

    Tries to retrieve <url> from cache, otherwise stores it in
    cache following retrieval. Returns the cached path.
    """
    url = urllib.unquote(url)
    if url.endswith("/"):
        raise Error("illegal url - can't get a directory")
    # When <dest> is a directory, save under the url's basename inside it.
    if os.path.isdir(dest):
        dest = os.path.join(dest, os.path.basename(url))
    else:
        if dest.endswith("/"):
            raise Error("no such directory: " + dest)
        # lexists also catches broken symlinks, which exists() would miss.
        if os.path.lexists(dest):
            raise Error("won't overwrite already existing file: " + dest)
    cache = Cache()
    cached_path = cache.retrieve(url, dest)
    if cached_path:
        print "* get: retrieved file from cache"
    else:
        print "* get: retrieving file from network..."
        # curl: -L follows redirects, -f fails on HTTP errors; mkarg quotes args.
        system("curl -L -f %s -o %s" % (mkarg(url), mkarg(dest)))
        cached_path = cache.store(url, dest)
    return cached_path
示例6: __init__
def __init__(self, inidir, inifile, amount):
    """Commence download operation.

    Arguments
    inidir -- working directory
    inifile -- config file
    amount -- amount of items to download
    """
    print('Download data for display 3...')
    self._data = []
    # Create dummy GUI -- presumably some downstream image/preview handling
    # requires a Tk root to exist; confirm before removing.
    root = tki.Tk()
    settings = Settings3(inidir, inifile)
    dsdblog = InifileDataSourceDescription(sBlog, inidir, inifile)
    # Shared constructor arguments for cached blog items:
    # cache dir, two preview sizes, library and book-search configuration.
    itemarg = (dsdblog.cachedir, (settings.previewx, settings.previewy),
               (settings.smallpreviewx, settings.smallpreviewy), settings.library,
               settings.booksearchprefix, settings.booksearchsuffix)
    cache = Cache(dsdblog.cachedir, BlogspotItemWithIsbn, itemarg)
    # self._addandcheck is the harvester callback; it appears to collect
    # items into self._data -- TODO confirm against its definition.
    harvester = BlogspotHarvester(dsdblog, self._addandcheck, BlogspotItemWithIsbn)
    harvester.itemarg = itemarg
    harvester.newestId = ''
    harvester.update(amount)
    # Persist the freshly harvested items and the newest id marker.
    cache.updateContents(self._data, harvester.newestId)
    print('Done!')
示例7: main
def main(send=False):
    """Fetch new movies for every category, render one HTML page, optionally mail it.

    send -- when True, e-mail the generated page to the configured recipients.
    """
    key = get_value('key')
    html = None
    # get movie info for all categories
    for cat in CATEGORIES:
        td = Tmdb(key, cat)
        movies = td.get_movies(NUM_RES)
        ca = Cache(os.path.basename(cat))
        # BUG FIX: shelve_results() was called twice; the second call saw the
        # movies already shelved by the first and therefore reported no new
        # entries, leaving the page empty. Shelve exactly once.
        newMovies = ca.shelve_results(movies)
        movieObjects = ca.shelve_get_items(newMovies)  # only new ones
        op = Output(movieObjects)
        # Emit the page header once, before the first category section.
        if html is None:
            html = [op.generate_header()]
        catPrettified = cat.title().replace("_", " ")
        html.append(op.generate_category_title(catPrettified))
        html.append(op.generate_movie_html_div())
    # save html -- `with` guarantees the file is closed even on error
    with open(OUTFILE, "w") as f:
        f.write("\n".join(html))
    # email
    if send:
        subject = "Sharemovi.es / %s movies / week %s" % (", ".join(CATEGORIES), str(THIS_WEEK))
        sender = get_value('sender')
        recipients = load_emails('recipients')
        ma = Mail(sender)
        ma.mail_html(recipients, subject, "\n".join(html))
示例8: cache_remove
def cache_remove(cmode):
    """Remove cache entries; all of them when *cmode* is None, else by mode.

    Always returns True.
    """
    # Idiom fix: identity test with `is None` instead of `== None`.
    if cmode is None:
        Cache.cache_remove()
    else:
        Cache.cache_remove_by_cmode(cmode)
    return True
示例9: main
def main():
(opts, args) = cli()
key = get_value('key')
td = Tmdb(key, opts.category)
if opts.listing:
li = Listing(opts.category)
movies = li.get_movies()
prefix = "list_"
subject = "Week %s: %s" % (THIS_WEEK, li.title)
else:
movies = td.get_movies(opts.numres)
prefix = ""
subject = "%s movies - week %s" % (opts.category.title().replace("_", " "), THIS_WEEK)
ca = Cache(prefix + os.path.basename(opts.category))
newMovies = ca.shelve_results(movies)
if opts.listing:
movieObjects = ca.shelve_get_items(movies) # allow dups
else:
movieObjects = ca.shelve_get_items(newMovies) # only new ones
op = Output(movieObjects)
html = [op.generate_header()]
html.append(op.generate_movie_html_div())
if opts.printres:
print "\n".join(html)
if opts.mailres:
sender = get_value('sender')
recipients = load_emails('recipients')
ma = Mail(sender)
ma.mail_html(recipients, subject, "\n".join(html))
示例10: testCacheWithPrefix
def testCacheWithPrefix(self):
    """with_prefix() should namespace keys while sharing the underlying ram cache."""
    s = Storage({'application': 'admin',
                 'folder': 'applications/admin'})
    cache = Cache(s)
    prefix = cache.with_prefix(cache.ram,'prefix')
    # First call stores 1 under the prefixed key (time_out=0 presumably
    # forces a fresh computation -- confirm against gluon.cache).
    self.assertEqual(prefix('a', lambda: 1, 0), 1)
    # Cached value wins over the new lambda within the 100s lifetime.
    self.assertEqual(prefix('a', lambda: 2, 100), 1)
    # The same entry is reachable through cache.ram under the raw key 'prefixa'.
    self.assertEqual(cache.ram('prefixa', lambda: 2, 100), 1)
示例11: UTKhashmir
class UTKhashmir(khashmir.KhashmirBase):
    """Khashmir DHT node extended with rotating announce-token management."""
    _Node = UTNode

    def setup(self, host, port, data_dir, rlcount, checkpoint=True):
        khashmir.KhashmirBase.setup(self, host, port,data_dir, rlcount, checkpoint)
        # Two rolling secrets: tokens minted under the previous secret remain
        # valid for one rotation interval (see val_token).
        self.cur_token = self.last_token = sha('')
        self.tcache = Cache()
        # Start the periodic rotation and expiry loops.
        self.gen_token(loop=True)
        self.expire_cached_tokens(loop=True)

    def expire_cached_tokens(self, loop=False):
        # Drop cached tokens older than one update interval.
        self.tcache.expire(time() - TOKEN_UPDATE_INTERVAL)
        if loop:
            self.rawserver.external_add_task(self.expire_cached_tokens, TOKEN_UPDATE_INTERVAL, (True,))

    def gen_token(self, loop=False):
        # Rotate secrets: the current one becomes the previous one.
        self.last_token = self.cur_token
        self.cur_token = sha(newID())
        if loop:
            self.rawserver.external_add_task(self.gen_token, TOKEN_UPDATE_INTERVAL, (True,))

    def get_token(self, host, port):
        # Token = digest of the current secret mixed with host+port, so it can
        # be re-derived at validation time without per-peer state.
        x = self.cur_token.copy()
        x.update("%s%s" % (host, port))
        h = x.digest()
        return h

    def val_token(self, token, host, port):
        # Accept tokens derived from either the current or the previous
        # secret, giving peers a full rotation interval of validity.
        x = self.cur_token.copy()
        x.update("%s%s" % (host, port))
        a = x.digest()
        if token == a:
            return True
        x = self.last_token.copy()
        x.update("%s%s" % (host, port))
        b = x.digest()
        if token == b:
            return True
        return False

    def addContact(self, host, port, callback=None):
        # use dns on host, then call khashmir.addContact
        Thread(target=self._get_host, args=[host, port, callback]).start()

    def _get_host(self, host, port, callback):
        # this exception catch can go away once we actually fix the bug
        try:
            ip = gethostbyname(host)
        except TypeError, e:
            raise TypeError(str(e) + (": host(%s) port(%s)" % (repr(host), repr(port))))
        # Hand the resolved address back to the reactor thread.
        self.rawserver.external_add_task(self._got_host, 0, (ip, port, callback))
示例12: S3Iterable
class S3Iterable(object):
def __init__(self):
'''
Subclasses must handle setting up config including:
* bucketname
* parser
'''
self.bucketname = None
self.parser = None
self.cache = Cache()
self.iterator = iter
self.decompress = None
def subsets(self):
l = self.cache.s3listcontents(self.bucketname)
o = []
for i in l:
o.append(i.key)
return o
def iter(self, subset):
h = self.cache.directhandle(self.bucketname, subset, decompress=self.decompress)
for l in self.iterator(h):
if self.parser is None:
yield l
else:
yield self.parser(l)
def filter(self, subset, f):
h = self.cache.directhandle(self.bucketname, subset, decompress=self.decompress)
for l in self.iterator(h):
if self.parser is None:
j = l
else:
j = self.parser(l)
if f(j):
yield j
def byid(self, index):
(subset, i) = index
h = self.cache.directhandle(self.bucketname, subset, decompress=self.decompress)
c = 0
for l in self.iterator(h):
if c == i:
if self.parser is None:
return l
else:
return self.parser(l)
else:
c += 1
return None
def display(self, items):
for i in items:
print i
示例13: export_network
def export_network(data, cache=None, **kwargs):
    """Serialize a python object into a Maya 'network' node.

    data   -- the python object to export.
    cache  -- optional Cache used to deduplicate networks by python id();
              a fresh one is created when None.
    kwargs -- forwarded to core.export_dict.
    Returns the created (or previously cached) pymel network node.
    """
    if cache is None:
        from cache import Cache
        cache = Cache()
    #log.debug('CreateNetwork {0}'.format(data))
    # We'll deal with two additional attributes, '_network' and '_uid'.
    # Those two attributes allow us to find the network from the value and vice-versa.
    # Note that since the '_uid' refers to the current python context,
    # its value could be erroneous when calling import_network.
    # However the chance of collisions is extremely improbable so checking the
    # type of the python variable is sufficient.
    # Please feel free to provide a better design if any is possible.
    # todo: after refactoring, the network cache will be merged with the import cache
    data_id = id(data)
    # Reuse an already-exported network for this exact python object.
    result = cache.get_network_by_id(data_id)
    if result is not None:
        return result
    # Create network
    # Optimisation: Use existing network if already present in scene
    #if hasattr(data, '_network') and is_valid_PyNode(data._network):
    #    network = data._network
    #else:
    # Automatically name the network whenever possible.
    try:
        network_name = data.__getNetworkName__()
    except (AttributeError, TypeError):
        network_name = data.__class__.__name__
    network = pymel.createNode('network', name=network_name)
    # Monkey patch the network in a _network attribute if supported
    # (dicts are excluded since attribute assignment would not apply).
    if isinstance(data, object) and not isinstance(data, dict):
        data._network = network
    # Ensure the network has the current python id stored.
    if not network.hasAttr('_uid'):
        pymel.addAttr(network, longName='_uid', niceName='_uid', at='long')  # todo: validate attributeType
        # network._uid.set(id(_data))
    # Cache as soon as possible since we'll use recursion soon.
    cache.set_network_by_id(data_id, network)
    # Convert _pData to basic data dictionary (recursive for now).
    data_dict = core.export_dict(data, recursive=False, cache=cache, **kwargs)
    assert (isinstance(data_dict, dict))
    fnNet = network.__apimfn__()
    # Export each exportable entry as an attribute on the network node.
    for key, val in data_dict.items():
        if _can_export_attr_by_name(key):
            _add_attr(fnNet, key, val, cache=cache)
    return network
示例14: test_loadCache
def test_loadCache(self):
    """loadCache() should return a Cache whose string form is the waypoint code."""
    # Login must succeed before caches can be loaded.
    self.assertTrue( self.g.login(self.username, self.password) )
    c = self.g.loadCache("GC4808G")
    self.assertTrue( isinstance(c, Cache) )
    # Cache.__str__ is expected to yield the GC code.
    self.assertEquals( "GC4808G", Cache.__str__(c) )
    # Cache with non-ascii chars
    c = self.g.loadCache("GC4FRG5")
    self.assertTrue( isinstance(c, Cache) )
    self.assertEquals( "GC4FRG5", Cache.__str__(c) )
示例15: delete
def delete(url):
"""Delete <url> from cache"""
url = urllib.unquote(url)
if url.endswith("/"):
raise Error("illegal url - can't delete a directory")
print "* del: removing file from cache..."
cache = Cache()
result = cache.delete(url)
return result