本文整理汇总了Python中memcached.helper.data_helper.VBucketAwareMemcached类的典型用法代码示例。如果您正苦于以下问题:Python VBucketAwareMemcached类的具体用法?Python VBucketAwareMemcached怎么用?Python VBucketAwareMemcached使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了VBucketAwareMemcached类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_full_eviction_changed_to_value_eviction
def test_full_eviction_changed_to_value_eviction(self):
    """Flip buckets from full eviction to value-only eviction and verify
    the bucket is NOT functional right after the policy change.

    Flow: load items, change the eviction policy for every bucket via
    couchbase-cli, wait for ns_server nodes (including warmup), then
    attempt a set() and expect it to raise MemcachedError.
    """
    KEY_NAME = 'key1'
    gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
    gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
    remote = RemoteMachineShellConnection(self.master)
    for bucket in self.buckets:
        # Change the eviction policy through couchbase-cli on the master.
        output, _ = remote.execute_couchbase_cli(cli_command='bucket-edit',
                                                 cluster_host="localhost",
                                                 user=self.master.rest_username,
                                                 password=self.master.rest_password,
                                                 options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
        self.assertTrue(' '.join(output).find('SUCCESS') != -1, 'Eviction policy wasn\'t changed')
    # Policy change restarts the bucket; wait for nodes/warmup to settle.
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        self.servers[:self.nodes_init], self,
        wait_time=self.wait_timeout, wait_if_warmup=True)
    self.sleep(10, 'Wait some time before next load')
    #self._load_all_buckets(self.master, gen_create2, "create", 0)
    #import pdb;pdb.set_trace()
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)
    try:
        rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        # If the set() succeeded the bucket accepted writes when it
        # should not have been functional yet.
        self.fail('Bucket is incorrectly functional')
    except MemcachedError, e:
        pass  # this is the exception we are hoping for
示例2: _verify_es_values
def _verify_es_values(self, src_server, dest_server, kv_store=1, verification_count=10000):
    """Verify documents replicated to Elasticsearch match the Couchbase source.

    For every cluster bucket, fetch up to *verification_count* docs from the
    ES index and compare each document's "site_name" field against the value
    stored in Couchbase under the same key.

    NOTE(review): *kv_store* is accepted but not used by this method.
    """
    cb_rest = RestConnection(src_server)
    es_rest = RestConnection(dest_server)
    buckets = self.xd_ref._get_cluster_buckets(src_server)
    for bucket in buckets:
        mc = VBucketAwareMemcached(cb_rest, bucket)
        es_valid = es_rest.all_docs(indices=[bucket.name], size=verification_count)
        # compare values of es documents to documents in couchbase
        for row in es_valid[:verification_count]:
            key = str(row["meta"]["id"])
            try:
                _, _, doc = mc.get(key)
                val_src = str(json.loads(doc)["site_name"])
                val_dest = str(row["doc"]["site_name"])
                if val_src != val_dest:
                    self.xd_ref.fail(
                        "Document %s has unexpected value (%s) expected (%s)" % (key, val_src, val_dest)
                    )
            except MemcachedError as e:
                # Key exists in the ES index but not in Couchbase.
                self.xd_ref.fail("Error during verification. Index contains invalid key: %s" % key)
        self._log.info(
            "Verified doc values in couchbase bucket (%s) match values in elastic search" % (bucket.name)
        )
示例3: verify_single_node
def verify_single_node(self, server, kv_store=1):
    """Verification function for single-node backup.

    Args:
        server: the master server in the cluster (self.master).
        kv_store: key of the kv_store of each bucket (default 1).

    If the --single-node flag appears in the backup command line, only the
    items from a single node (the master node) are backed up. For each
    bucket we request the vBucketMap; for every key in that bucket's
    kvstore we hash the key to its vBucketId and use the vBucketMap to
    decide whether the key lives on the master node. If yes, keep it;
    otherwise delete it from the expected key set before verifying.
    """
    rest = RestConnection(server)
    for bucket in self.buckets:
        VBucketAware = VBucketAwareMemcached(rest, bucket.name)
        memcacheds, vBucketMap, vBucketMapReplica = VBucketAware.request_map(rest, bucket.name)
        valid_keys, deleted_keys = bucket.kvs[kv_store].key_set()
        for key in valid_keys:
            # Map key -> vbucket id -> "ip:port" of the active owner.
            vBucketId = VBucketAware._get_vBucket_id(key)
            which_server = vBucketMap[vBucketId]
            sub = which_server.find(":")
            which_server_ip = which_server[:sub]
            if which_server_ip != server.ip:
                # Key lives on another node: remove it from the expected
                # set so verification only covers the backed-up node.
                partition = bucket.kvs[kv_store].acquire_partition(key)
                partition.delete(key)
                bucket.kvs[kv_store].release_partition(key)
    self._verify_all_buckets(server, kv_store, self.wait_timeout * 50, self.max_verify, True, 1)
示例4: load_docs
def load_docs(self, node, num_docs, bucket='default', password='',
              exp=0, flags=0):
    """Load *num_docs* simple string documents into *bucket* on *node*.

    Keys are "key0" .. "key<num_docs-1>", each stored with value "value".

    Args:
        node: server to build the RestConnection from.
        num_docs: number of documents to store.
        bucket: target bucket name.
        password: bucket password. NOTE(review): accepted but unused;
            presumably authentication happens via the REST connection.
        exp: expiry (seconds) stored with each document.
        flags: memcached flags stored with each document.
    """
    client = VBucketAwareMemcached(RestConnection(node), bucket)
    for i in range(num_docs):
        key = "key%s" % i
        # Fix: honor the caller-supplied exp/flags, which were previously
        # hard-coded to 0, 0 and silently ignored. The defaults (0, 0)
        # keep the old behavior for existing callers.
        client.set(key, exp, flags, "value")
示例5: insert_docs
def insert_docs(self, num_of_docs, prefix, extra_values=None, wait_for_persistence=True, return_docs=False):
    """Insert geo-JSON documents into self.bucket via a vbucket-aware client.

    Args:
        num_of_docs: number of documents to create.
        prefix: key prefix; keys/doc names are "<prefix>-<i>".
        extra_values: optional dict merged into every document body.
        wait_for_persistence: when True, block until items are persisted.
        return_docs: when False the returned list holds doc names; when
            True it holds the full document values instead.

    Returns:
        List of doc names (or doc values when return_docs is True).
    """
    # Fix: the default used to be a shared mutable dict ({}). It was only
    # read (never mutated), so behavior is unchanged, but a None sentinel
    # avoids the mutable-default-argument pitfall.
    if extra_values is None:
        extra_values = {}
    rest = RestConnection(self.master)
    smart = VBucketAwareMemcached(rest, self.bucket)
    doc_names = []
    for i in range(0, num_of_docs):
        key = doc_name = "{0}-{1}".format(prefix, i)
        geom = {"type": "Point", "coordinates": [random.randrange(-180, 180), random.randrange(-90, 90)]}
        value = {"name": doc_name, "age": random.randrange(1, 1000), "geometry": geom}
        value.update(extra_values)
        if not return_docs:
            doc_names.append(doc_name)
        else:
            doc_names.append(value)
        # Loop until the value is set: statuses 132/133 are retried up to
        # 60 times (presumably transient engine errors while the bucket
        # warms up -- TODO confirm against memcached error constants).
        fail_count = 0
        while True:
            try:
                smart.set(key, 0, 0, json.dumps(value))
                break
            except MemcachedError as e:
                fail_count += 1
                if (e.status == 133 or e.status == 132) and fail_count < 60:
                    if i == 0:
                        # First document: back off longer while the bucket
                        # becomes available.
                        self.log.error("waiting 5 seconds. error {0}".format(e))
                        time.sleep(5)
                    else:
                        self.log.error(e)
                        time.sleep(1)
                else:
                    raise e
    if wait_for_persistence:
        self.wait_for_persistence()
    self.log.info("inserted {0} json documents".format(num_of_docs))
    return doc_names
示例6: get_rev_info
def get_rev_info(rest_conn, bucket, keys):
    """Return the revision info of each key in *keys*.

    Routes each key through a vbucket-aware client to the memcached node
    owning it and collects the getRev() result, in input order.
    """
    client = VBucketAwareMemcached(rest_conn, bucket)
    return [client.memcached(key).getRev(key) for key in keys]
示例7: wait_for_vbuckets_ready_state
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300, log_msg=''):
    """Poll every vbucket of *bucket* until all report a ready state or
    the timeout expires.

    Builds an "ip:port" -> [vbucket ids] map from the bucket's vBucketMap,
    then repeatedly queries each memcached node for its vbuckets' states.

    Returns:
        True if every vbucket became ready within the timeout.
    """
    log = logger.Logger.get_logger()
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    ready_vbuckets = {}
    rest = RestConnection(node)
    servers = rest.get_nodes()
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vbucket_count = len(rest.get_vbuckets(bucket))
    vbuckets = rest.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(rest, bucket)
    # Create dictionary with key:"ip:port" and value: a list of vbuckets
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            # Retrieve memcached ip and port
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = rest.get_bucket(bucket)
            client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                   bucket_info.saslPassword.encode('ascii'))
            for i in server_dict[every_ip_port]:
                try:
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    ex_msg = str(e)
                    # NOTE(review): this tests log_msg (the caller-supplied
                    # message), not ex_msg -- presumably to truncate a huge
                    # vBucketMap dump embedded in log_msg before logging.
                    # Confirm this is intentional and not a typo for ex_msg.
                    if "Not my vbucket" in log_msg:
                        log_msg = log_msg[:log_msg.find("vBucketMap") + 12] + "..."
                    if e.status == memcacheConstants.ERR_NOT_MY_VBUCKET:
                        # May receive this while waiting for vbuckets, continue and retry...S
                        continue
                    log.error("%s: %s" % (log_msg, ex_msg))
                    continue
                except exceptions.EOFError:
                    # The client was disconnected for some reason. This can
                    # happen just after the bucket REST API is returned (before
                    # the buckets are created in each of the memcached processes.)
                    # See here for some details: http://review.couchbase.org/#/c/49781/
                    # Longer term when we don't disconnect clients in this state we
                    # should probably remove this code.
                    log.error("got disconnected from the server, reconnecting")
                    client.reconnect()
                    client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                           bucket_info.saslPassword.encode('ascii'))
                    continue
                # NOTE(review): \x01 / \x02 markers in the state string are
                # treated as "ready" -- semantics inferred from usage here;
                # confirm against get_vbucket_state's wire format.
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    # A vbucket that was ready regressed; drop it and re-check.
                    log.warning("vbucket state changed from active to {0}".format(c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
示例8: test_meta_failover
def test_meta_failover(self):
    """Fail over the master node, rebalance it out and back in, then
    verify the key's CAS is healed (replica CAS == new active CAS) and
    the conflict-resolution flag in the metadata is set.
    """
    KEY_NAME = 'key2'
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, self.bucket)
    for i in range(10):
        # set a key
        value = 'value' + str(i)
        client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value': value}))
        vbucket_id = client._get_vBucket_id(KEY_NAME)
        #print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = client.memcached(KEY_NAME)
        mc_master = client.memcached_for_vbucket(vbucket_id)
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)
        # getMeta returns a tuple; index 4 is the key's CAS.
        cas_active = mc_active.getMeta(KEY_NAME)[4]
        #print 'cas_a {0} '.format(cas_active)
        # The key's CAS must equal the vbucket's max_cas after each set.
        max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'])
        self.assertTrue(cas_active == max_cas, '[ERROR]Max cas is not 0 it is {0}'.format(cas_active))
    # failover that node
    self.log.info('Failing over node with active data {0}'.format(self.master))
    self.cluster.failover(self.servers, [self.master])
    self.log.info('Remove the node with active data {0}'.format(self.master))
    rebalance = self.cluster.async_rebalance(self.servers[:], [], [self.master])
    rebalance.result()
    time.sleep(60)
    replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
    #print 'replica CAS {0}'.format(replica_CAS)
    # add the node back
    self.log.info('Add the node back, the max_cas should be healed')
    rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])
    rebalance.result()
    # verify the CAS is good
    client = VBucketAwareMemcached(rest, self.bucket)
    mc_active = client.memcached(KEY_NAME)
    active_CAS = mc_active.getMeta(KEY_NAME)[4]
    #print 'active cas {0}'.format(active_CAS)
    get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)
    #print 'replica CAS {0}'.format(replica_CAS)
    #print 'replica ext meta {0}'.format(get_meta_resp)
    self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
    # Index 5 of the getMeta response carries the conflict-resolution flag.
    self.assertTrue(get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')
示例9: load
def load(self, path, bucket, prefix='test'):
    """Load every JSON file found under *path* into *bucket*, keyed by
    the file name. Failed sets are logged (best effort), not raised.

    NOTE(review): *prefix* is accepted but not used by this method.
    """
    client = VBucketAwareMemcached(RestConnection(self.master), bucket)
    for file in os.listdir(path):
        f = open(path + '/' + file, 'r')
        rq_s = f.read()
        f.close()
        # Parse then re-serialize to validate the file contains JSON.
        rq_json = json.loads(rq_s)
        key = str(file)
        try:
            o, c, d = client.set(key, 0, 0, json.dumps(rq_json))
        except Exception, ex:
            print 'WARN======================='
            print ex
示例10: _verify_data_all_buckets
def _verify_data_all_buckets(self, gen_check):
    """Verify that every key/value produced by *gen_check* is present in
    each bucket on the recovery server with the expected value.

    Raises:
        Exception: if a key is missing (the original error is wrapped).
    """
    for bucket in self.buckets:
        self.log.info("Check bucket %s" % bucket.name)
        # Deep-copy the generator so each bucket iterates the full set.
        gen = copy.deepcopy(gen_check)
        rest = RestConnection(self.server_recovery)
        client = VBucketAwareMemcached(rest, bucket)
        while gen.has_next():
            key, value = gen.next()
            try:
                _, _, d = client.get(key)
                self.assertEquals(d, value, 'Key: %s expected. Value expected %s. Value actual %s' % (
                    key, value, d))
            except Exception, ex:
                raise Exception('Key %s not found %s' % (key, str(ex)))
示例11: _verify_es_results
def _verify_es_results(self, bucket='default'):
    """Compare every document in Elasticsearch on the destination master
    against the same document in the source Couchbase bucket.

    Fails the test on the first JSON body mismatch.
    """
    esrest_conn = EsRestConnection(self.dest_master)
    es_docs = esrest_conn.all_docs()
    self.log.info("Retrieved ES Docs")
    rest_conn = RestConnection(self.src_master)
    memcached_conn = VBucketAwareMemcached(rest_conn, bucket)
    self.log.info("Comparing CB and ES data")
    for doc in es_docs:
        es_data = doc['doc']
        # Route to the memcached node owning the doc's vbucket, then
        # compare the full JSON bodies dict-to-dict.
        mc_active = memcached_conn.memcached(str(es_data['_id']))
        cb_flags, cb_cas, cb_data = mc_active.get(str(es_data['_id']))
        self.assertDictEqual(es_data, json.loads(cb_data), "Data mismatch found - es data: {0} cb data: {1}".
                             format(str(es_data), str(cb_data)))
    self.log.info("Data verified")
示例12: getr_negative_corrupted_keys_test
def getr_negative_corrupted_keys_test(self):
    """Replica-read (getr) a corrupted/invalid key (from test params) and
    expect the configured error (self.error); any other exception is
    re-raised as a test failure.
    """
    key = self.input.param("key", '')
    gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                            start=0, end=self.num_items)
    self.perform_docs_ops(self.master, [gen], 'create')
    self.log.info("Checking replica read")
    client = VBucketAwareMemcached(RestConnection(self.master), self.default_bucket_name)
    try:
        o, c, d = client.getr(key)
    except Exception, ex:
        if self.error and str(ex).find(self.error) != -1:
            self.log.info("Expected error %s appeared as expected" % self.error)
        else:
            raise ex
示例13: _test_view_on_multiple_docs
def _test_view_on_multiple_docs(self, num_docs, params={"stale":"update_after"}, delay=10):
    """Create a view, insert *num_docs* matching documents, then poll the
    view until it returns all inserted keys or the poll window expires.

    Args:
        num_docs: number of JSON documents to insert.
        params: extra query parameters for the view request.
            NOTE(review): mutable default is shared across calls; it is
            only read here, so this is safe but fragile.
        delay: seconds to wait between view polls.
    """
    self.log.info("description : create a view on {0} documents".format(num_docs))
    master = self.servers[0]
    rest = RestConnection(master)
    bucket = "default"
    view_name = "dev_test_view_on_{1}_docs-{0}".format(str(uuid.uuid4())[:7], self.num_docs)
    # The map function emits only docs whose name contains this view's name,
    # so the view sees exactly the docs inserted below.
    map_fn = "function (doc) {if(doc.name.indexOf(\"" + view_name + "\") != -1) { emit(doc.name, doc);}}"
    rest.create_view(view_name, bucket, [View(view_name, map_fn, dev_view=False)])
    self.created_views[view_name] = bucket
    rest = RestConnection(self.servers[0])
    smart = VBucketAwareMemcached(rest, bucket)
    doc_names = []
    prefix = str(uuid.uuid4())[:7]
    total_time = 0
    self.log.info("inserting {0} json objects".format(num_docs))
    for i in range(0, num_docs):
        key = doc_name = "{0}-{1}-{2}".format(view_name, prefix, i)
        doc_names.append(doc_name)
        value = {"name": doc_name, "age": 1000}
        smart.set(key, 0, 0, json.dumps(value))
    self.log.info("inserted {0} json documents".format(len(doc_names)))
    time.sleep(10)
    results = ViewBaseTests._get_view_results(self, rest, bucket, view_name, len(doc_names), extra_params=params)
    view_time = results['view_time']
    keys = ViewBaseTests._get_keys(self, results)
    RebalanceHelper.wait_for_persistence(master, bucket, 0)
    total_time = view_time
    # Poll the view until all keys appear; timeout raised to 900 seconds
    # (originally 5 minutes) for slow/windows environments.
    start_time = time.time()
    while (len(keys) != len(doc_names)) and (time.time() - start_time < 900):
        msg = "view returned {0} items , expected to return {1} items"
        self.log.info(msg.format(len(keys), len(doc_names)))
        self.log.info("trying again in {0} seconds".format(delay))
        time.sleep(delay)
        results = ViewBaseTests._get_view_results(self, rest, bucket, view_name, len(doc_names), extra_params=params)
        view_time = results['view_time']
        total_time += view_time
        keys = ViewBaseTests._get_keys(self, results)
    self.log.info("View time: {0} secs".format(total_time))
    # Only if the lengths are not equal, look for missing keys
    if len(keys) != len(doc_names):
        not_found = list(set(doc_names) - set(keys))
        ViewBaseTests._print_keys_not_found(self, not_found, 10)
        self.fail("map function did not return docs for {0} keys".format(len(not_found)))
示例14: do_basic_ops
def do_basic_ops(self):
    """Exercise basic memcached ops tied to known regressions:
    incr under full eviction (MB-17231), evict_key after persistence,
    and del_with_meta (MB-17289).
    """
    KEY_NAME = 'key1'
    KEY_NAME2 = 'key2'
    CAS = 1234
    self.log.info('Starting basic ops')
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)
    # MB-17231 - incr with full eviction
    rc = mcd.incr(KEY_NAME, 1)
    print 'rc for incr', rc
    # MB-17289 del with meta
    rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
    print 'set is', rc
    cas = rc[1]
    # wait for it to persist: observe() reports persisted != 0 once the
    # item hits disk, which evict_key requires.
    persisted = 0
    while persisted == 0:
        opaque, rep_time, persist_time, persisted, cas = client.observe(KEY_NAME)
    try:
        rc = mcd.evict_key(KEY_NAME)
    except MemcachedError as exp:
        self.fail("Exception with evict meta - {0}".format(exp))
    CAS = 0xabcd
    # key, value, exp, flags, seqno, remote_cas
    try:
        # key, exp, flags, seqno, cas
        rc = mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
    except MemcachedError as exp:
        self.fail("Exception with del_with meta - {0}".format(exp))
示例15: getr_rebalance_test
def getr_rebalance_test(self):
    """Perform replica reads (getr) for every generated key while a
    rebalance-in is running in the background.
    """
    gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                            start=0, end=self.num_items)
    self.perform_docs_ops(self.master, [gen], 'create')
    self.log.info("Checking replica read")
    client = VBucketAwareMemcached(RestConnection(self.master), self.default_bucket_name)
    rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                             self.servers[self.nodes_init : self.nodes_init + self.nodes_in],
                                             [])
    try:
        while gen.has_next():
            key, _ = gen.next()
            o, c, d = client.getr(key)
    finally:
        # Always collect the rebalance result, even if a getr failed.
        rebalance.result()