This article collects typical usage examples of the Cache.fetch method from the Python cache module. If you are wondering exactly how to use Cache.fetch, or what it looks like in real code, the curated examples below may help. You can also read further about the containing class, cache.Cache.
Four code examples of Cache.fetch are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
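Before the examples themselves, here is a minimal sketch of the call pattern that Examples 1 and 2 rely on: construct a Cache around a mapping-like storage backend and fetch a feed URL through it. The in-memory dict and the URL below are placeholders, not part of the original snippets; the examples use shelve and shove stores instead.
from cache import Cache

# Minimal usage sketch (assumptions: dict-backed storage, placeholder URL)
storage = {}
fc = Cache(storage)
parsed = fc.fetch('http://localhost/testfeed.xml')
print(parsed.feed.title)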
Example 1: test
# Required import: from cache import Cache [as alias]
# Alternatively: from cache.Cache import fetch [as alias]
def test(self):
    storage = shelve.open(self.shelve_filename)
    locking_storage = CacheStorageLock(storage)
    try:
        fc = Cache(locking_storage)
        # First fetch the data through the cache
        parsed_data = fc.fetch(self.TEST_URL)
        self.failUnlessEqual(parsed_data.feed.title, 'CacheTest test data')
        # Now retrieve the same data directly from the shelf
        modified, shelved_data = storage[self.TEST_URL]
        # The data should be the same
        self.failUnlessEqual(parsed_data, shelved_data)
    finally:
        storage.close()
    return
Example 2: test
# Required import: from cache import Cache [as alias]
# Alternatively: from cache.Cache import fetch [as alias]
def test(self):
    # First fetch the data through the cache
    storage = shove.Shove('file://' + self.shove_dirname)
    try:
        fc = Cache(storage)
        parsed_data = fc.fetch(self.TEST_URL)
        self.failUnlessEqual(parsed_data.feed.title, 'CacheTest test data')
    finally:
        storage.close()
    # Now retrieve the same data directly from the shelf
    storage = shove.Shove('file://' + self.shove_dirname)
    try:
        modified, shelved_data = storage[self.TEST_URL]
    finally:
        storage.close()
    # The data should be the same
    self.failUnlessEqual(parsed_data, shelved_data)
    return
Example 3: loadCache
# Required import: from cache import Cache [as alias]
# Alternatively: from cache.Cache import fetch [as alias]
def loadCache(self, keyFrame, prefix, name):
    cache = Cache(prefix, name)
    return cache.fetch(keyFrame)
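For context, a call site for this helper might look like the following; the Cache(prefix, name) constructor and fetch(keyFrame) call are taken from the snippet above, while the concrete argument values are purely hypothetical placeholders.
# Hypothetical call; 42, 'frames' and 'lane_markers' are placeholder values
result = self.loadCache(keyFrame=42, prefix='frames', name='lane_markers')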
Example 4: Communicator
# Required import: from cache import Cache [as alias]
# Alternatively: from cache.Cache import fetch [as alias]
class Communicator(object):
    '''
    Communicator talks to the client and receives commands from the client
    in terms of the HTTP GET, PUT and DELETE verbs.
    Uses the HEAD verb as a control channel, signaling the nodes to perform clean-ups,
    remove dead peers from the key ring, etc.
    See the utils package for sample standalone commands.
    '''
    base_file_path = os.path.abspath('.')

    #------------------------------------------------------------------------------
    # Logging setup
    #------------------------------------------------------------------------------
    logger = LogHelper.getLogger()
    def __init__(self, config):
        '''
        Constructor
        '''
        #=======================================================================
        # By this time, the topology has configured itself based on the config
        # file or the defaults
        #=======================================================================
        self.config = config
        self.topology = Topology(config)
        self.cache = Cache(reconstruct=self.config.local_reconstruct)
        self.logger.info('set up communicator for node: ' + str(self.config.node_id) + '\n')
    def GET(self, key, origin='client'):
        if origin == 'client':
            self.logger.info("Request from client")
        else:
            if str(origin) == self.config.node_address:
                self.logger.error("Houston, we have a problem. Cycle detected. Have to fail...")
                return None
        node = self.topology.key_manager.get_node(key)
        # for node in node_gen:
        self.logger.info("Key requested: " + key)
        self.logger.info("Node responsible: " + node)
        if node == self.config.node_address:  # we are responsible for fetching this key from the cache
            return self.cache.fetch(key)
        else:  # tell our peer to handle this key
            return self.topology.instructPeer(node, 'GET', cherrypy.serving.request.path_info)  # TODO change faux get/post to auto route
        #else:
        #===============================================================================
        # Ideally there would be a for loop at the commented "for node in node_gen" line above, where
        # node_gen is a generator that loops over the set of possible peers.
        # We would need a way to update the underlying data structure over which the generator rotates,
        # which is against the laws of a generator, so some other clever trick is needed.
        # Reaching this branch would mean the node we contacted was down and was removed from the key
        # ring; since node_gen is a generator, we would spin until we hit the next node, which would
        # assume responsibility for this key.
        #===============================================================================
    def PUT(self, key, value, origin='client'):
        if origin == 'client':
            self.logger.info("Request from client")
        else:
            if str(origin) == self.config.node_address:
                self.logger.error("Houston, we have a problem. Cycle detected. Have to fail...")
                return None
        node = self.topology.key_manager.get_node(key)
        # for node in node_gen:
        if node == self.config.node_address:  # we are responsible for storing this key in the cache
            # also forward the request to mirrors
            print "mirroring"
            self.topology.mirror('PUT', cherrypy.serving.request.path_info)
            return self.cache.store(key, value)
        else:  # tell our peer to handle this key
            return self.topology.instructPeer(node, 'PUT', cherrypy.serving.request.path_info)  # TODO change faux get/post to auto route
        #===============================================================================
        # This means the node we contacted was down and was removed from the key ring;
        # since node_gen is a generator, we would spin until we hit the next node, which would
        # assume responsibility for this key.
        #===============================================================================
    def DELETE(self, key, origin='client'):
        if origin == 'client':
            self.logger.info("Request from client")
        else:
            if str(origin) == self.config.node_address:
                self.logger.error("Houston, we have a problem. Cycle detected. Have to fail...")
#......... (remaining code omitted) .........
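The GET, PUT and DELETE methods above all route requests through self.topology.key_manager.get_node(key), whose implementation is not part of this snippet. As a rough illustration of what such a key-to-node mapping often looks like, here is a minimal consistent-hash ring sketch; the class name SimpleKeyRing and the node addresses are assumptions for illustration, not the project's actual KeyManager.
import hashlib
from bisect import bisect

class SimpleKeyRing(object):
    """Illustrative consistent-hash ring; not the project's actual key manager."""

    def __init__(self, node_addresses):
        # Place each node on the ring at the position given by its hash.
        self._ring = sorted((self._hash(addr), addr) for addr in node_addresses)

    @staticmethod
    def _hash(value):
        return int(hashlib.md5(value.encode('utf-8')).hexdigest(), 16)

    def get_node(self, key):
        # Walk clockwise to the first node at or past the key's hash, wrapping around.
        hashes = [h for h, _ in self._ring]
        index = bisect(hashes, self._hash(key)) % len(self._ring)
        return self._ring[index][1]

ring = SimpleKeyRing(['10.0.0.1:8080', '10.0.0.2:8080'])
print(ring.get_node('some-cache-key'))
Whatever the real implementation, the contract the Communicator relies on is simply get_node(key) returning a node address, so that every peer agrees on which node owns a given key.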