This page collects typical usage examples of the Python method memcached.helper.data_helper.VBucketAwareMemcached.observe. If you have been wondering what VBucketAwareMemcached.observe does and how to use it, the curated examples below may help; you can also explore further usage examples of the containing class, memcached.helper.data_helper.VBucketAwareMemcached.
The following shows 2 code examples of VBucketAwareMemcached.observe, sorted by popularity by default.
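Both examples rely on the same call pattern: observe(key) returns a five-value tuple whose fourth element reports whether the key has been persisted to disk. Below is a minimal sketch of that pattern, assuming the testrunner layout in which RestConnection lives in membase.api.rest_client and a reachable "default" bucket; master_node is a placeholder for your cluster's master server object, not something defined in the original examples.

from membase.api.rest_client import RestConnection
from memcached.helper.data_helper import VBucketAwareMemcached

# master_node: placeholder for a testrunner server object for the cluster master
rest = RestConnection(master_node)
client = VBucketAwareMemcached(rest, 'default')

# set(key, exp, flags, value), then poll observe() for the persistence flag
client.set('demo_key', 0, 0, 'demo_value')
opaque, rep_time, persist_time, persisted, cas = client.observe('demo_key')
print('persisted=%s persist_time=%s cas=%s' % (persisted, persist_time, cas))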
Example 1: do_basic_ops
# Required import: from memcached.helper.data_helper import VBucketAwareMemcached [as alias]
# Or: from memcached.helper.data_helper.VBucketAwareMemcached import observe [as alias]
def do_basic_ops(self):
    # Also uses json, RestConnection, and MemcachedError from the test harness imports.
    KEY_NAME = 'key1'
    KEY_NAME2 = 'key2'
    CAS = 1234
    self.log.info('Starting basic ops')

    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)

    # MB-17231 - incr with full eviction
    rc = mcd.incr(KEY_NAME, 1)
    print 'rc for incr', rc

    # MB-17289 del with meta
    rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
    print 'set is', rc
    cas = rc[1]

    # wait for the set to persist before evicting the key
    persisted = 0
    while persisted == 0:
        opaque, rep_time, persist_time, persisted, cas = client.observe(KEY_NAME)

    try:
        rc = mcd.evict_key(KEY_NAME)
    except MemcachedError as exp:
        self.fail("Exception with evict meta - {0}".format(exp))

    CAS = 0xabcd
    # key, value, exp, flags, seqno, remote_cas
    try:
        # key, exp, flags, seqno, cas
        rc = mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
    except MemcachedError as exp:
        self.fail("Exception with del_with_meta - {0}".format(exp))
Example 2: _run_observe
# Required import: from memcached.helper.data_helper import VBucketAwareMemcached [as alias]
# Or: from memcached.helper.data_helper.VBucketAwareMemcached import observe [as alias]
def _run_observe(self):
    # Also uses RestConnection, MemcachedError, datetime/timedelta, time,
    # and the test harness helpers referenced below.
    tasks = []
    query_set = "true"
    persisted = 0
    mutated = False
    count = 0
    for bucket in self.buckets:
        self.cluster.create_view(self.master, self.default_design_doc,
                                 self.default_view, bucket, self.wait_timeout * 2)
        client = VBucketAwareMemcached(RestConnection(self.master), bucket)
        self.max_time = timedelta(microseconds=0)
        if self.mutate_by == "multi_set":
            key_val = self._create_multi_set_batch()
            client.setMulti(0, 0, key_val)
        keys = ["observe%s" % (i) for i in xrange(self.num_items)]
        for key in keys:
            mutated = False
            persisted = 0  # reset per key so each key's persistence is awaited
            while not mutated and count < 60:
                try:
                    if self.mutate_by == "set":
                        # client.memcached(key).set(key, 0, 0, "set")
                        client.set(key, 0, 0, "setvalue")
                    elif self.mutate_by == "append":
                        client.memcached(key).append(key, "append")
                    elif self.mutate_by == "prepend":
                        client.memcached(key).prepend(key, "prepend")
                    elif self.mutate_by == "incr":
                        client.memcached(key).incr(key, 1)
                    elif self.mutate_by == "decr":
                        client.memcached(key).decr(key)
                    mutated = True
                    t_start = datetime.now()
                except MemcachedError as error:
                    if error.status == 134:
                        self.log.error("Memcached error 134, wait for 5 seconds and then try again")
                        count += 1
                        time.sleep(5)
                    else:
                        raise  # do not retry silently on unexpected errors
            while persisted == 0:
                opaque, rep_time, persist_time, persisted, cas = client.observe(key)
            t_end = datetime.now()
            self.log.info("##########key:-%s################" % (key))
            self.log.info("Persisted:- %s" % (persisted))
            self.log.info("Persist_Time:- %s" % (persist_time))
            self.log.info("Time2:- %s" % (t_end - t_start))
            if self.max_time <= (t_end - t_start):
                self.max_time = (t_end - t_start)
                self.log.info("Max Time taken for observe is:- %s" % self.max_time)
            self.log.info("Cas Value:- %s" % (cas))
        query = {"stale": "false", "full_set": "true", "connection_timeout": 60000}
        self.cluster.query_view(self.master, "dev_Doc1", self.default_view.name, query,
                                self.num_items, bucket, timeout=self.wait_timeout)
        self.log.info("Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s"
                      % (self.default_view, bucket))
        # check whether observe has to run with delete, or with delete in parallel
        if len(self.observe_with) > 0:
            if self.observe_with == "delete":
                self.log.info("Deleting items 0-%s" % (self.num_items / 2))
                self._load_doc_data_all_buckets('delete', 0, self.num_items / 2)
                query_set = "true"
            elif self.observe_with == "delete_parallel":
                self.log.info("Deleting items 0-%s in parallel" % (self.num_items / 2))
                tasks = self._async_load_doc_data_all_buckets('delete', 0, self.num_items / 2)
                query_set = "false"
            for key in keys:
                opaque, rep_time, persist_time, persisted, cas = client.memcached(key).observe(key)
                self.log.info("##########key:-%s################" % (key))
                self.log.info("Persisted:- %s" % (persisted))
            if self.observe_with == "delete_parallel":
                for task in tasks:
                    task.result()
            query = {"stale": "false", "full_set": query_set, "connection_timeout": 60000}
            self.cluster.query_view(self.master, "dev_Doc1", self.default_view.name, query,
                                    self.num_items / 2, bucket, timeout=self.wait_timeout)
            self.log.info("Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s"
                          % (self.default_view, self.default_bucket_name))
"""test_observe_basic_data_load_delete will test observer basic scenario