本文整理汇总了Python中membase.api.rest_client.RestConnection.get_vbuckets方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.get_vbuckets方法的具体用法?Python RestConnection.get_vbuckets怎么用?Python RestConnection.get_vbuckets使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection
的用法示例。
在下文中一共展示了RestConnection.get_vbuckets方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: wait_for_vbuckets_ready_state
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300, log_msg=''):
    """Poll every vbucket of `bucket` until all of them report a ready state.

    Connects directly to each memcached server that owns vbuckets (per the
    cluster's vbucket map) and queries the state of each vbucket it owns,
    retrying until either all vbuckets look ready or the timeout expires.

    :param node: cluster node used to build the RestConnection.
    :param bucket: bucket name to check.
    :param timeout_in_seconds: overall polling deadline (default 300s).
    :param log_msg: caller-supplied prefix used when logging errors.
    :return: True if every vbucket reported ready before the deadline.
    """
    log = logger.Logger.get_logger()
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    # vbucket id -> True once the vbucket has been seen in a ready state
    ready_vbuckets = {}
    rest = RestConnection(node)
    servers = rest.get_nodes()
    # Wait (up to 60s) for the bucket's vbucket map to be published.
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vbucket_count = len(rest.get_vbuckets(bucket))
    vbuckets = rest.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(rest, bucket)
    # Create dictionary with key:"ip:port" and value: a list of vbuckets
    # owned by that memcached server.
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            # Retrieve memcached ip and port and open a direct client to it.
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = rest.get_bucket(bucket)
            client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                   bucket_info.saslPassword.encode('ascii'))
            for i in server_dict[every_ip_port]:
                try:
                    # Third element of the tuple is the serialized vbucket state.
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    ex_msg = str(e)
                    # NOTE(review): this tests log_msg, not ex_msg — looks like it
                    # should probably truncate based on the exception text; confirm.
                    if "Not my vbucket" in log_msg:
                        log_msg = log_msg[:log_msg.find("vBucketMap") + 12] + "..."
                    if e.status == memcacheConstants.ERR_NOT_MY_VBUCKET:
                        # May receive this while waiting for vbuckets; continue and retry.
                        continue
                    log.error("%s: %s" % (log_msg, ex_msg))
                    continue
                except exceptions.EOFError:
                    # The client was disconnected for some reason. This can
                    # happen just after the bucket REST API is returned (before
                    # the buckets are created in each of the memcached processes.)
                    # See here for some details: http://review.couchbase.org/#/c/49781/
                    # Longer term when we don't disconnect clients in this state we
                    # should probably remove this code.
                    log.error("got disconnected from the server, reconnecting")
                    client.reconnect()
                    client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                           bucket_info.saslPassword.encode('ascii'))
                    continue
                # Presumably \x01 / \x02 mark active / replica in the serialized
                # state — TODO confirm against get_vbucket_state's wire format.
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    log.warning("vbucket state changed from active to {0}".format(c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
示例2: test_stream_all_buckets
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def test_stream_all_buckets(self):
    """Load docs into every bucket, then DCP-stream the first 16 vbuckets of
    each bucket and verify the stream reaches each vbucket's high seqno."""
    load_gen = BlobGenerator(
        'dcpdata', 'dcpdata-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, load_gen, "create", 0)
    user_name = self.input.param("user_name", None)
    password = self.input.param("password", None)
    first_node = self.servers[0]
    vb_list = RestConnection(first_node).get_vbuckets()
    # 'default' plus every standard bucket created by the test setup.
    bucket_names = ['default']
    bucket_names += ['standard_bucket' + str(n) for n in xrange(self.standard_buckets)]
    for name in bucket_names:
        if user_name is not None:
            # RBAC path: (re)create the reader user, then authenticate as it.
            self.add_built_in_server_user([{'id': user_name, 'name': user_name, 'password': password}], \
                [{'id': user_name, 'name': user_name, 'roles': 'data_dcp_reader[default]'}], self.master)
            producer = self.dcp_client(first_node, PRODUCER, bucket_name=name,
                                       auth_user=user_name, auth_password=password)
        else:
            producer = self.dcp_client(first_node, PRODUCER, bucket_name=name)
        for vb in vb_list[0:16]:
            vb_uuid, _, high_seqno = self.vb_info(first_node, vb.id, bucket=name)
            stream = producer.stream_req(vb.id, 0, 0, high_seqno, vb_uuid)
            stream.run()
            assert high_seqno == stream.last_by_seqno
示例3: all_vb_info
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def all_vb_info(self, node, table_entry=0, bucket='default', password=''):
print '*****in all vbinfo'
vbInfoMap = {}
rest = RestConnection(node)
vbuckets = rest.get_vbuckets()
mcd_client = self.mcd_client(
node, auth_user=bucket, auth_password=password)
failoverStats = mcd_client.stats(FAILOVER_STAT)
seqnoStats = mcd_client.stats(VBSEQNO_STAT)
for vb in vbuckets:
vbucket = vb.id
id_key = 'vb_{0}:{1}:id'.format(vbucket, table_entry)
seq_key = 'vb_{0}:{1}:seq'.format(vbucket, table_entry)
hi_key = 'vb_{0}:high_seqno'.format(vbucket)
vb_uuid, seqno, high_seqno = \
(long(failoverStats[id_key]),
long(failoverStats[seq_key]),
long(seqnoStats[hi_key]))
vbInfoMap[vbucket] = (vb_uuid, seqno, high_seqno)
return vbInfoMap
示例4: __init__
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def __init__(self, master, num_docs, bucket='default', prefix=''):
    """Build an mcsoda-style workload config sized to the bucket's vbuckets.

    :param master: master node (its REST port supplies the vbucket map).
    :param num_docs: number of documents to create.
    :param bucket: bucket whose vbucket count drives the config.
    :param prefix: key prefix for generated documents.
    """
    rest_conn = RestConnection(master)
    self.vbucket_count = len(rest_conn.get_vbuckets(bucket))
    self.cfg = {
        'max-items': num_docs,
        'max-creates': num_docs,
        'min-value-size': 512,
        'exit-after-creates': 1,
        'ratio-sets': 1,
        'ratio-misses': 0,
        'ratio-creates': 1,
        'ratio-deletes': 0,
        'ratio-hot': 0,
        'ratio-hot-sets': 1,
        'ratio-hot-gets': 0,
        'ratio-expirations': 0,
        'expiration': 0,
        'threads': 1,
        'json': 1,
        'batch': 1000,
        'vbuckets': self.vbucket_count,
        'doc-cache': 0,
        'prefix': prefix,
    }
    # Currently hard-wired to the membase binary protocol and port 8091;
    # protocol_parse splits it into (protocol, host:port, user, password).
    protocol = "membase-binary://{0}:8091".format(master.ip)
    self.protocol, self.host_port, self.user, self.pswd = self.protocol_parse(protocol)
示例5: wait_for_vbuckets_ready_state
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300, log_msg=''):
    """Poll every vbucket of `bucket` until all of them report a ready state.

    Variant of the helper above: on MemcachedError it truncates the verbose
    error/log text instead of inspecting the error status code.

    :param node: cluster node used to build the RestConnection.
    :param bucket: bucket name to check.
    :param timeout_in_seconds: overall polling deadline (default 300s).
    :param log_msg: caller-supplied prefix used when logging errors.
    :return: True if every vbucket reported ready before the deadline.
    """
    log = logger.Logger.get_logger()
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    # vbucket id -> True once the vbucket has been seen in a ready state
    ready_vbuckets = {}
    rest = RestConnection(node)
    servers = rest.get_nodes()
    # Wait (up to 60s) for the bucket's vbucket map to be published.
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vbucket_count = len(rest.get_vbuckets(bucket))
    vbuckets = rest.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(rest, bucket)
    # Create dictionary with key:"ip:port" and value: a list of vbuckets
    # owned by that memcached server.
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            # Retrieve memcached ip and port and open a direct client to it.
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = rest.get_bucket(bucket)
            client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                   bucket_info.saslPassword.encode('ascii'))
            for i in server_dict[every_ip_port]:
                try:
                    # Third element of the tuple is the serialized vbucket state.
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    ex_msg = str(e)
                    # NOTE(review): this first check tests log_msg, not ex_msg —
                    # likely intended to trim a verbose vBucketMap dump; confirm.
                    if "Not my vbucket" in log_msg:
                        log_msg = log_msg[:log_msg.find("vBucketMap") + 12] + "..."
                    if "Not my vbucket" in ex_msg:
                        # reduce output: keep only the "Not my vbucket" prefix
                        ex_msg = str(e)[:str(e).find('Not my vbucket') + 14] + "..."
                    log.error("%s: %s" % (log_msg, ex_msg))
                    continue
                # Presumably \x01 / \x02 mark active / replica in the serialized
                # state — TODO confirm against get_vbucket_state's wire format.
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    log.warning("vbucket state changed from active to {0}".format(c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
示例6: test_failover_swap_rebalance
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def test_failover_swap_rebalance(self):
    """Add and fail over a node, then perform a swap rebalance.

    Loads items on nodeA, rebalances nodeB in, adds nodeC, stops and fails
    over nodeA, then verifies the per-vbucket high seqnos on nodeB sum to
    the loaded item count before swap-rebalancing nodeC in.
    """
    assert len(self.servers) > 2, "not enough servers"
    nodeA = self.servers[0]
    nodeB = self.servers[1]
    nodeC = self.servers[2]
    gen_create = BlobGenerator('dcp', 'dcp-', 64, start=0, end=self.num_items)
    self._load_all_buckets(nodeA, gen_create, "create", 0)
    vbucket = 0
    vb_uuid, seqno, high_seqno = self.vb_info(nodeA, vbucket)
    # rebalance in nodeB
    assert self.cluster.rebalance([nodeA], [nodeB], [])
    # add nodeC (not rebalanced in yet)
    rest = RestConnection(nodeB)
    rest.add_node(user=nodeC.rest_username,
                  password=nodeC.rest_password,
                  remoteIp=nodeC.ip,
                  port=nodeC.port)
    # stop and failover nodeA
    assert self.stop_node(0)
    self.stopped_nodes.append(0)
    assert self.cluster.failover([nodeB], [nodeA])
    try:
        # Best-effort rebalance after failover; it may legitimately fail while
        # the failed-over node is still down, so the error is ignored.
        # (Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.)
        assert self.cluster.rebalance([nodeB], [], [])
    except Exception:
        pass
    # verify seqnos and stream mutations
    rest = RestConnection(nodeB)
    vbuckets = rest.get_vbuckets()
    total_mutations = 0
    for vb in vbuckets:
        mcd_client = self.mcd_client(nodeB)
        stats = mcd_client.stats(VBSEQNO_STAT)
        vbucket = vb.id
        key = 'vb_{0}:high_seqno'.format(vbucket)
        total_mutations += int(stats[key])
    assert total_mutations == self.num_items #/ 2 # divide by because the items are split between 2 servers
    # swap rebalance: nodeC in, nothing out (nodeA already failed over)
    task = self.cluster.async_rebalance([nodeB], [], [nodeC])
    task.result()
示例7: wait_for_memcached
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def wait_for_memcached(node, bucket, timeout_in_seconds=300):
    """Wait until the memcached instance backing `bucket` on `node` is usable.

    Waits (up to 60s) for the bucket's vbucket map to be published, sleeps a
    short grace period (the map can be published slightly before every
    memcached process has created its vbuckets), then delegates the real
    readiness check to BucketOperationHelper.wait_for_vbuckets_ready_state.

    :param node: node whose memcached should be probed.
    :param bucket: bucket name.
    :param timeout_in_seconds: deadline passed to the vbucket readiness check.
    :return: True if all vbuckets reported ready within the timeout.

    Note: a large block of dead, commented-out per-key set/verify logic (and
    the locals it used) was removed here; it was carried as a no-op
    triple-quoted string and never executed.
    """
    log = logger.Logger.get_logger()
    msg = "waiting for memcached bucket : {0} in {1} to accept set ops"
    log.info(msg.format(bucket, node.ip))
    rest = RestConnection(node)
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    time.sleep(10)
    all_vbuckets_ready = BucketOperationHelper.wait_for_vbuckets_ready_state(node,
                                                                            bucket, timeout_in_seconds)
    return all_vbuckets_ready
示例8: _get_server_by_state
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def _get_server_by_state(self, servers, bucket, vb_state):
    """Return the server hosting vbucket 0 of `bucket` in the requested state.

    `vb_state` selects the active master or one of the replicas; returns
    None when the state is unrecognized or no server matches the address.
    """
    rest = RestConnection(servers[0])
    first_vb = rest.get_vbuckets(bucket)[0]
    # Resolve the address lazily per state so a missing replica slot only
    # matters when that replica is actually requested.
    if vb_state == ACTIVE:
        addr = first_vb.master
    elif vb_state == REPLICA1:
        addr = first_vb.replica[0].encode("ascii", "ignore")
    elif vb_state == REPLICA2:
        addr = first_vb.replica[1].encode("ascii", "ignore")
    elif vb_state == REPLICA3:
        addr = first_vb.replica[2].encode("ascii", "ignore")
    else:
        return None
    wanted_ip = addr.split(':', 1)[0]
    for candidate in servers:
        if candidate.ip == wanted_ip:
            return candidate
    return None
示例9: __init__
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def __init__(
    self, master, num_docs, prefix="", bucket="default", password="", protocol="membase-binary", port=11211
):
    """Build an mcsoda-style workload config sized to the bucket's vbuckets.

    :param master: master node (its REST port supplies the vbucket map).
    :param num_docs: number of documents to create.
    :param prefix: key prefix for generated documents.
    :param bucket: bucket name; also used as the SASL user.
    :param password: bucket/SASL password.
    :param protocol: "membase-binary" or "memcached-binary".
    :param port: memcached port to target.
    """
    rest_conn = RestConnection(master)
    self.vbucket_count = len(rest_conn.get_vbuckets(bucket))
    workload = {
        "max-items": num_docs,
        "max-creates": num_docs,
        "min-value-size": 128,
        "exit-after-creates": 1,
        "ratio-sets": 1,
        "ratio-misses": 0,
        "ratio-creates": 1,
        "ratio-deletes": 0,
        "ratio-hot": 0,
        "ratio-hot-sets": 1,
        "ratio-hot-gets": 0,
        "ratio-expirations": 0,
        "expiration": 0,
        "threads": 1,
        "json": 1,
        "batch": 1000,
        "vbuckets": self.vbucket_count,
        "doc-cache": 0,
        "prefix": prefix,
    }
    self.cfg = workload
    self.protocol = protocol
    self.user = bucket
    self.pswd = password
    if protocol == "membase-binary":
        self.host_port = "{0}:{1}:{2}".format(master.ip, master.port, port)
    elif protocol == "memcached-binary":
        # NOTE(review): repeats `port` in both slots (ip:port:port) —
        # presumably intentional for the host:rest:mc triple format; confirm.
        self.host_port = "{0}:{1}:{1}".format(master.ip, port)
    self.ctl = {"run_ok": True}
示例10: load_some_data
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def load_some_data(serverInfo,
                   fill_ram_percentage=10.0,
                   bucket_name='default'):
    """Insert enough ~10KB keys into `bucket_name` to consume roughly
    `fill_ram_percentage` percent of the bucket's free RAM quota.

    :param serverInfo: node to load data against.
    :param fill_ram_percentage: target fill percentage (values <= 0 become 5.0).
    :param bucket_name: bucket to load into.
    :return: the list of keys that were successfully inserted.
    """
    log = logger.Logger.get_logger()
    if fill_ram_percentage <= 0.0:
        fill_ram_percentage = 5.0
    client = MemcachedClientHelper.direct_client(serverInfo, bucket_name)
    # populate key
    rest = RestConnection(serverInfo)
    RestHelper(rest).vbucket_map_ready(bucket_name, 60)
    vbucket_count = len(rest.get_vbuckets(bucket_name))
    testuuid = uuid.uuid4()
    info = rest.get_bucket(bucket_name)
    # Free space = RAM quota minus current usage, in bytes.
    emptySpace = info.stats.ram - info.stats.memUsed
    log.info('emptySpace : {0} fill_ram_percentage : {1}'.format(emptySpace, fill_ram_percentage))
    fill_space = (emptySpace * fill_ram_percentage) / 100.0
    log.info("fill_space {0}".format(fill_space))
    # each packet can be 10 KB
    packetSize = int(10 * 1024)
    # Despite the name, this is the number of keys to insert, not buckets.
    number_of_buckets = int(fill_space) / packetSize
    log.info('packetSize: {0}'.format(packetSize))
    log.info('memory usage before key insertion : {0}'.format(info.stats.memUsed))
    log.info('inserting {0} new keys to memcached @ {0}'.format(number_of_buckets, serverInfo.ip))
    keys = ["key_%s_%d" % (testuuid, i) for i in range(number_of_buckets)]
    inserted_keys = []
    for key in keys:
        # Route each key to its vbucket; assumes vbucket_count is a power of two.
        vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
        client.vbucketId = vbucketId
        try:
            client.set(key, 0, 0, key)
            inserted_keys.append(key)
        except mc_bin_client.MemcachedError as error:
            log.error(error)
            client.close()
            log.error("unable to push key : {0} to vbucket : {1}".format(key, client.vbucketId))
            # NOTE(review): `test` is not defined anywhere in this scope — this
            # branch would raise NameError if ever reached; presumably it was
            # meant to be an optional test-case parameter. Verify upstream.
            if test:
                test.fail("unable to push key : {0} to vbucket : {1}".format(key, client.vbucketId))
            else:
                break
    client.close()
    return inserted_keys
示例11: wait_for_vbuckets_ready_state
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300):
    """Poll all vbuckets of `bucket` through a single direct memcached client
    until every one reports a ready state or the deadline passes.

    :return: True if all vbuckets were seen ready before the timeout.
    """
    log = logger.Logger.get_logger()
    deadline = time.time() + timeout_in_seconds
    # vbucket id -> True once seen in a ready state
    ready = {}
    rest = RestConnection(node)
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    total_vbuckets = len(rest.get_vbuckets(bucket))
    client = MemcachedClientHelper.direct_client(node, bucket)
    while time.time() < deadline and len(ready) < total_vbuckets:
        for vb_id in range(0, total_vbuckets):
            try:
                _unused1, _unused2, state = client.get_vbucket_state(vb_id)
            except mc_bin_client.MemcachedError as err:
                # Give up on this sweep and retry from the while loop.
                log.error(err)
                break
            # Note: deliberately `> 0` (marker not at position 0), matching
            # the original readiness test.
            if state.find("\x01") > 0 or state.find("\x02") > 0:
                ready[vb_id] = True
            elif vb_id in ready:
                log.warning("vbucket state changed from active to {0}".format(state))
                del ready[vb_id]
    return len(ready) == total_vbuckets
示例12: proxy_client
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def proxy_client(server, bucket, timeout=30, force_ascii=False):
    """Create a memcached client for `bucket` via that node's proxy (moxi).

    Chooses an ASCII client when the test params request it (or
    `force_ascii` is set), otherwise a binary client pointed at the node's
    moxi port; authenticates via SASL when the bucket requires it.

    :param server: node object (or dict with an "ip" key) to connect through.
    :param bucket: bucket name.
    :param timeout: client socket timeout in seconds.
    :param force_ascii: force the ASCII client regardless of test params.
    :return: a connected MemcachedClient / MemcachedAsciiClient.
    :raises Exception: if no client could be created for the server.
    """
    # for this bucket on this node what is the proxy ?
    rest = RestConnection(server)
    log = logger.Logger.get_logger()
    bucket_info = rest.get_bucket(bucket)
    nodes = bucket_info.nodes
    # ASCII mode when the "ascii" test param is "true" or the caller forces it.
    if (
        TestInputSingleton.input
        and "ascii" in TestInputSingleton.input.test_params
        and TestInputSingleton.input.test_params["ascii"].lower() == "true"
    ) or force_ascii:
        ascii = True
    else:
        ascii = False
    # NOTE(review): this loop returns unconditionally on its first iteration,
    # so only the first node in bucket_info.nodes is ever used — presumably
    # intentional (any node's map works), but verify against callers.
    for node in nodes:
        RestHelper(rest).vbucket_map_ready(bucket, 60)
        vBuckets = rest.get_vbuckets(bucket)
        if ascii:
            log = logger.Logger.get_logger()
            log.info("creating ascii client {0}:{1} {2}".format(server.ip, bucket_info.port, bucket))
            client = MemcachedAsciiClient(server.ip, bucket_info.port, timeout=timeout)
        else:
            log = logger.Logger.get_logger()
            # `server` may be a plain dict instead of a node object.
            if isinstance(server, dict):
                log.info("creating proxy client {0}:{1} {2}".format(server["ip"], node.moxi, bucket))
                client = MemcachedClient(server["ip"], node.moxi, timeout=timeout)
            else:
                log.info("creating proxy client {0}:{1} {2}".format(server.ip, node.moxi, bucket))
                client = MemcachedClient(server.ip, node.moxi, timeout=timeout)
        client.vbucket_count = len(vBuckets)
        if bucket_info.authType == "sasl":
            client.sasl_auth_plain(bucket_info.name.encode("ascii"), bucket_info.saslPassword.encode("ascii"))
        return client
    # Only reached when bucket_info.nodes is empty.
    if isinstance(server, dict):
        raise Exception("unable to find {0} in get_nodes()".format(server["ip"]))
    else:
        raise Exception("unable to find {0} in get_nodes()".format(server.ip))
示例13: test_stream_all_buckets
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def test_stream_all_buckets(self):
    """Load docs into every bucket, then DCP-stream the first 16 vbuckets of
    each bucket (authenticating as the bucket) and verify each stream reaches
    the vbucket's high seqno."""
    load_gen = BlobGenerator(
        'dcpdata', 'dcpdata-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, load_gen, "create", 0)
    first_node = self.servers[0]
    vb_list = RestConnection(first_node).get_vbuckets()
    # 'default' plus every standard bucket created by the test setup.
    bucket_names = ['default']
    bucket_names += ['standard_bucket' + str(n) for n in xrange(self.standard_buckets)]
    for name in bucket_names:
        producer = self.dcp_client(first_node, PRODUCER, auth_user=name)
        for vb in vb_list[0:16]:
            vb_uuid, _, high_seqno = self.vb_info(first_node, vb.id, bucket=name)
            stream = producer.stream_req(vb.id, 0, 0, high_seqno, vb_uuid)
            stream.run()
            assert high_seqno == stream.last_by_seqno
示例14: test_ephemeral_bucket_NRU_eviction
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
def test_ephemeral_bucket_NRU_eviction(self):
rest = RestConnection(self.servers[0])
vbuckets = rest.get_vbuckets()
generate_load = BlobGenerator(EphemeralBucketsOOM.KEY_ROOT, 'param2', self.value_size, start=0,
end=self.num_items)
self._load_all_ephemeral_buckets_until_no_more_memory(self.servers[0], generate_load, "create", 0,
self.num_items, percentage=0.80)
self.log.info('Memory is full')
# get the sequence numbers so far
# This bit of code is slow with 1024 vbuckets
pre_delete_sequence_numbers = [0] * self.vbuckets
for v in range(self.vbuckets):
vb_uuid, seqno, high_seqno = self.vb_info(self.master, v)
pre_delete_sequence_numbers[v] = high_seqno
item_count = rest.get_bucket(self.buckets[0]).stats.itemCount
self.log.info('Reached OOM, the number of items is {0}'.format(item_count))
keys_that_were_accessed = []
# load some more, this should trigger some deletes
mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
dcp_client = self.dcp_client(self.servers[0], 'producer')
num = 0
deleted_keys = []
for vb in vbuckets[0:self.vbuckets]:
vbucket = vb.id
vb_uuid, _, high_seqno = self.vb_info(self.servers[0], vbucket, bucket=self.buckets[0])
stream = dcp_client.stream_req(vbucket, 0, pre_delete_sequence_numbers[vb.id], high_seqno, vb_uuid)
responses = stream.run()
for i in responses:
if i['opcode'] == constants.CMD_DELETION: #
# have a delete, get the key number
index = int(i['key'][EphemeralBucketsOOM.KEY_ROOT_LENGTH:])
deleted_keys.append(index)
num += 1
self.assertEquals([], deleted_keys)
for i in xrange(200):
key = random.randint(0, 1200)
mc_client.get(EphemeralBucketsOOM.KEY_ROOT + str(key))
keys_that_were_accessed.append(key)
# add ~20% of new items
try:
for i in range(item_count, int(item_count * 1.2)):
mc_client.set(EphemeralBucketsOOM.KEY_ROOT + str(i), 0, 0, 'a' * self.value_size)
except mc_bin_client.MemcachedError:
self.log.info ("OOM Reached NRU Started")
time.sleep(15)
item_count = rest.get_bucket(self.buckets[0]).stats.itemCount
self.log.info('The number of items is {0}'.format(item_count))
deleted_keys = []
# creating a DCP client fails, maybe an auth issue?
dcp_client = self.dcp_client(self.servers[0], 'producer')
num = 0
for vb in vbuckets[0:self.vbuckets]:
vbucket = vb.id
vb_uuid, _, high_seqno = self.vb_info(self.servers[0], vbucket, bucket=self.buckets[0])
stream = dcp_client.stream_req(vbucket, 0, pre_delete_sequence_numbers[vb.id], high_seqno, vb_uuid)
responses = stream.run()
for i in responses:
if i['opcode'] == constants.CMD_DELETION: #
# have a delete, get the key number
index = int(i['key'][EphemeralBucketsOOM.KEY_ROOT_LENGTH:])
deleted_keys.append(index)
num += 1
item_count = rest.get_bucket(self.buckets[0]).stats.itemCount
self.log.info('The number of items is {0}'.format(item_count))
self.assertEquals(set([]), set(keys_that_were_accessed).intersection(deleted_keys))
# one more iteration
deleted_keys = []
keys_that_were_accessed = []
set(keys_that_were_accessed).intersection(deleted_keys)
for i in xrange(200):
key = random.randint(0, 12000)
try:
mc_client.get(EphemeralBucketsOOM.KEY_ROOT + str(key))
keys_that_were_accessed.append(key)
except mc_bin_client.MemcachedError:
self.log.info('key %s already deleted' % key)
# add ~15% of new items
try:
for i in range(int(item_count * 1.2), int(item_count * 1.4)):
mc_client.set(EphemeralBucketsOOM.KEY_ROOT + str(i), 0, 0, 'a' * self.value_size)
keys_that_were_accessed.append(i)
except mc_bin_client.MemcachedError:
self.log.info ("OOM Reached NRU Started")
time.sleep(15)
item_count = rest.get_bucket(self.buckets[0]).stats.itemCount
#.........这里部分代码省略.........
示例15: ViewMergingTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_vbuckets [as 别名]
class ViewMergingTests(BaseTestCase):
def setUp(self):
    """Set up the view-merging suite around a single shared bucket.

    The suite reuses one cluster/bucket across all test cases: cleanup is
    skipped except on the run tagged 'first_case'/'last_case' via test
    params, and ddocs are created only on the first case.
    """
    try:
        # On every run but the first, keep the existing cluster/bucket.
        if 'first_case' not in TestInputSingleton.input.test_params:
            TestInputSingleton.input.test_params['default_bucket'] = False
            TestInputSingleton.input.test_params['skip_cleanup'] = True
            TestInputSingleton.input.test_params['skip_buckets_handle'] = True
        self.default_bucket_name = 'default'
        super(ViewMergingTests, self).setUp()
        if 'first_case' in TestInputSingleton.input.test_params:
            self.cluster.rebalance(self.servers[:], self.servers[1:], [])
        # We use only one bucket in this test suite
        self.rest = RestConnection(self.master)
        self.bucket = self.rest.get_bucket(Bucket(name=self.default_bucket_name))
        # num_docs must be a multiple of the number of vbuckets
        self.num_docs = self.input.param("num_docs_per_vbucket", 1) * \
                        len(self.bucket.vbuckets)
        self.is_dev_view = self.input.param("is_dev_view", False)
        self.map_view_name = 'mapview1'
        self.red_view_name = 'redview1'
        self.red_view_stats_name = 'redview_stats'
        self.keys_view_name = 'mapviewkeys'
        self.clients = self.init_clients()
        if 'first_case' in TestInputSingleton.input.test_params:
            # Create both dev and non-dev design docs once for the whole suite.
            self.create_ddocs(False)
            self.create_ddocs(True)
    except Exception as ex:
        # Abort the whole suite if setup fails — later cases depend on it.
        self.input.test_params["stop-on-failure"] = True
        self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
        self.fail(ex)
def tearDown(self):
    """Tear down only on the last case of the suite; otherwise just stop
    background tasks and keep the cluster/bucket for the next case."""
    params = TestInputSingleton.input.test_params
    if 'last_case' in params:
        # Re-enable cleanup so the base class tears everything down.
        params['skip_cleanup'] = False
        params['skip_buckets_handle'] = False
        super(ViewMergingTests, self).tearDown()
    else:
        self.cluster.shutdown(force=True)
        self._log_finish(self)
def test_empty_vbuckets(self):
    """A merged query against an empty bucket must return zero rows."""
    merged = self.merged_query(self.map_view_name)
    self.assertEquals(merged.get(u'total_rows', None), 0)
    self.assertEquals(len(merged.get(u'rows', None)), 0)
def test_nonexisting_views(self):
    """Querying views that were never created must raise QueryViewException."""
    for missing_view in ('mapview2', 'mapview3', 'mapview4', 'mapview5'):
        try:
            self.merged_query(missing_view)
        except QueryViewException:
            self.log.info("QueryViewException is raised as expected")
        except Exception:
            # NOTE(review): assertFail is not a standard unittest method —
            # confirm it exists on the base class.
            self.assertFail("QueryViewException is expected, but not raised")
        else:
            self.assertFail("QueryViewException is expected, but not raised")
def test_non_empty_view(self):
    """Populate every vbucket, then check the merged view returns all docs."""
    vbucket_total = len(self.rest.get_vbuckets(self.bucket))
    doc_batch = ViewMergingTests.make_docs(1, self.num_docs + 1)
    self.populate_sequenced(vbucket_total, doc_batch)
    merged = self.merged_query(self.map_view_name)
    self.assertEquals(merged.get(u'total_rows', 0), self.num_docs)
    self.assertEquals(len(merged.get(u'rows', [])), self.num_docs)
def test_queries(self):
    """Run a merged query with whatever recognized view-query parameters
    were supplied as test params and verify the results against them."""
    all_query_params = ['skip', 'limit', 'startkey', 'endkey', 'startkey_docid',
                        'endkey_docid', 'inclusive_end', 'descending', 'key']
    # Pick out only the test params that are real view-query parameters.
    current_params = dict((name, str(self.input.test_params[name]))
                          for name in self.input.test_params
                          if name in all_query_params)
    merged = self.merged_query(self.map_view_name, current_params)
    self.verify_results(merged, current_params)
def test_keys(self):
    """Exercise the `keys` query parameter: plain, with reduce disabled,
    and with grouping enabled."""
    wanted = [5, 3, 10, 39, 666666, 21]
    merged = self.merged_query(self.map_view_name, {"keys": wanted})
    self.verify_results(merged, {"keys": wanted})
    wanted = [2, 1, 4]
    merged = self.merged_query(self.keys_view_name,
                               {"keys": wanted, "reduce": "false"},
                               ddoc='test3')
    self.verify_results(merged, {"keys": wanted})
    wanted = [0, 2, 1]
    merged = self.merged_query(self.keys_view_name,
                               {"keys": wanted, "group": "true"},
                               ddoc='test3')
    self.verify_results(merged, {"keys": wanted})
def test_include_docs(self):
results = self.merged_query(self.map_view_name,
{"include_docs": "true"})
self.verify_results(results, {"include_docs": "true"})
num = 1
for row in results.get(u'rows', []):
#.........这里部分代码省略.........