This article collects typical usage examples of the fetch_bucket_stats method of the Python class membase.api.rest_client.RestConnection. If you have been wondering what RestConnection.fetch_bucket_stats does, how to call it, and what real usages look like, the curated examples below should help; you can also look further into the containing class, membase.api.rest_client.RestConnection.
The following 12 code examples of RestConnection.fetch_bucket_stats are shown, sorted by popularity by default.
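Before the individual examples, here is a minimal sketch of the call pattern they all share. It assumes a running cluster node that RestConnection can reach; `server` and the bucket name "default" are placeholders rather than names taken from the examples.

# Minimal usage sketch (assumed setup: `server` is a node object accepted by
# RestConnection, and a bucket named "default" exists on that cluster).
from membase.api.rest_client import RestConnection

rest = RestConnection(server)
stats = rest.fetch_bucket_stats(bucket="default")
# The result is nested JSON; every entry under ["op"]["samples"] is a time series,
# so indexing with [-1] picks the most recent sample, as the examples below do.
curr_items = stats["op"]["samples"]["curr_items"][-1]
mem_used = stats["op"]["samples"]["mem_used"][-1]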
Example 1: repetitive_create_delete
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def repetitive_create_delete(self):
    self.repetitions = self.input.param("repetition_count", 1)
    self.bufferspace = self.input.param("bufferspace", 600000)
    # the first front-end load
    self._load_all_buckets(self.master, self.gen_create, "create", 0,
                           batch_size=10000, pause_secs=5, timeout_secs=100)
    self._wait_for_stats_all_buckets(self.servers)
    rest = RestConnection(self.servers[0])
    max_data_sizes = {}
    initial_memory_usage = {}
    self.sleep(30)
    for bucket in self.buckets:
        max_data_sizes[bucket.name] = \
            rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["ep_max_size"][-1]
        self.log.info("Initial max_data_size of bucket '{0}': {1}".format(bucket.name, max_data_sizes[bucket.name]))
        initial_memory_usage[bucket.name] = \
            rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["mem_used"][-1]
        self.log.info("Initial memory consumption of bucket '{0}' with load: {1}"
                      .format(bucket.name, initial_memory_usage[bucket.name]))
    mem_usage = {}
    self.sleep(10)
    # the repetitions
    for i in range(0, self.repetitions):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        del self.buckets[:]
        self.log.info('About to create the buckets')
        self._bucket_creation()
        self.log.info('Done bucket creation, about to load them')
        self._load_all_buckets(self.master, self.gen_create, "create", 0,
                               batch_size=10000, pause_secs=5, timeout_secs=100)
        self.log.info('Buckets are loaded, waiting for stats')
        self._wait_for_stats_all_buckets(self.servers)
        self.log.info('Have the stats, sleeping for 60 seconds')
        self.sleep(60)
        for bucket in self.buckets:
            mem_usage[bucket.name] = rest.fetch_bucket_stats(bucket.name)["op"]["samples"]["mem_used"][-1]
            self.log.info("Memory used after attempt {0} = {1}, difference from initial snapshot: {2}"
                          .format(i + 1, mem_usage[bucket.name],
                                  mem_usage[bucket.name] - initial_memory_usage[bucket.name]))
        self.sleep(10)
    if self.repetitions > 0:
        self.log.info("After {0} repetitive deletion-creation-load cycles of the buckets, "
                      "the memory consumption difference is ..".format(self.repetitions))
        for bucket in self.buckets:
            self.log.info("{0} :: Initial: {1} :: Now: {2} :: Difference: {3}"
                          .format(bucket.name, initial_memory_usage[bucket.name], mem_usage[bucket.name],
                                  mem_usage[bucket.name] - initial_memory_usage[bucket.name]))
            msg = "Memory used now is much greater than initial usage!"
            assert mem_usage[bucket.name] <= initial_memory_usage[bucket.name] + self.bufferspace, msg
    else:
        self.log.info("Verification skipped, as there weren't any repetitions..")
Example 2: check_memory_stats
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def check_memory_stats(self):
    rest = RestConnection(self.servers[0])
    mem_stats_init = {}
    self.sleep(5)
    self.log.info("*** Check mem_used and mem_total before loading data ***")
    for bucket in self.buckets:
        mem_stats_init[bucket.name] = {}
        mem_stats_init[bucket.name]["mem_used"] = \
            rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["mem_used"][-1]
        mem_stats_init[bucket.name]["mem_total"] = \
            rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["mem_total"][-1]
        if int(mem_stats_init[bucket.name]["mem_used"]) <= 0:
            self.fail("Memory used should be greater than 0. "
                      "Memory used of this bucket is {0}"
                      .format(mem_stats_init[bucket.name]["mem_used"]))
        elif int(mem_stats_init[bucket.name]["mem_total"]) <= 0:
            self.fail("Memory total should be greater than 0. "
                      "Memory total of this bucket is {0}"
                      .format(mem_stats_init[bucket.name]["mem_total"]))
    self.log.info("*** Load data into buckets ***")
    self._load_all_buckets(self.master, self.gen_create, "create", 0,
                           batch_size=5000, pause_secs=2, timeout_secs=100)
    self._wait_for_stats_all_buckets(self.servers)
    mem_stats_load = {}
    self.log.info("*** Check mem_used and mem_total after loading data ***")
    for bucket in self.buckets:
        mem_stats_load[bucket.name] = {}
        mem_stats_load[bucket.name]["mem_used"] = \
            rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["mem_used"][-1]
        mem_stats_load[bucket.name]["mem_total"] = \
            rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["mem_total"][-1]
        if int(mem_stats_load[bucket.name]["mem_used"]) <= 0:
            self.fail("Memory used should be greater than 0. "
                      "Memory used of this bucket is {0}"
                      .format(mem_stats_load[bucket.name]["mem_used"]))
        elif int(mem_stats_load[bucket.name]["mem_total"]) <= 0:
            self.fail("Memory total should be greater than 0. "
                      "Memory total of this bucket is {0}"
                      .format(mem_stats_load[bucket.name]["mem_total"]))
        if int(mem_stats_load[bucket.name]["mem_used"]) - \
                int(mem_stats_init[bucket.name]["mem_used"]) <= 0:
            self.log.info("Initial memory used {0}"
                          .format(mem_stats_init[bucket.name]["mem_used"]))
            self.log.info("Memory used after load {0}"
                          .format(mem_stats_load[bucket.name]["mem_used"]))
            self.fail("Memory used should have increased after data was loaded.")
        else:
            self.log.info("Mem stats work as expected")
            self.log.info("Initial memory used {0}"
                          .format(mem_stats_init[bucket.name]["mem_used"]))
            self.log.info("Memory used after load {0}"
                          .format(mem_stats_load[bucket.name]["mem_used"]))
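The check is two-stage: mem_used and mem_total must both be positive before and after the load, and mem_used must additionally grow once documents have been written; otherwise the test fails with the before/after values logged for inspection.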
Example 3: wait_for_catchup
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def wait_for_catchup(self, _healthy_, _compromised_, bucket):
    start = time.time()
    _flag = False
    rest1 = RestConnection(_healthy_)
    rest2 = RestConnection(_compromised_)
    while time.time() - start < 60:
        _count1 = rest1.fetch_bucket_stats(bucket=bucket)["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(bucket=bucket)["op"]["samples"]["curr_items"][-1]
        if _count1 == _count2:
            self.log.info("Cbrecovery caught up bucket {0}... {1} == {2}".format(bucket, _count1, _count2))
            _flag = True
            break
        self.log.warn("Waiting for cbrecovery to catch up bucket {0}... {1} != {2}".format(bucket, _count1, _count2))
        self.sleep(self.wait_timeout)
    return _flag
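This helper returns a boolean instead of failing outright: it compares curr_items on the healthy and compromised nodes for roughly 60 seconds and leaves it to the caller to decide what to do if cbrecovery never catches up.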
Example 4: collect_replication_stats
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def collect_replication_stats(self):
    """Monitor remote replication job and report related stats"""
    # Start general stats collector
    test_params = {'test_time': time.time(), 'test_name': self.id(),
                   'json': 0}
    sc = self.start_stats('load', test_params=test_params, client_id=0)
    # Wait for all items to be replicated
    slave = self.input.clusters[1][0]
    rest = RestConnection(slave)
    num_clients = self.parami('num_clients', len(self.input.clients) or 1)
    target_items = self.parami('items', 0) / num_clients
    replicated_items = 0
    start_time = time.time()
    while replicated_items < target_items:
        stats = rest.fetch_bucket_stats()
        replicated_items = stats['op']['samples']['curr_items'][-1]
        print "Replicated items: {0}".format(replicated_items)
        time.sleep(10)
    # Print average rate
    end_time = time.time()
    elapsed_time = end_time - start_time
    rate = float(target_items) / elapsed_time
    print "Average replication rate: {0:.3f} items/sec".format(rate)
    # Stop general stats collector
    ops = {'start-time': start_time, 'end-time': end_time}
    self.end_stats(sc, ops, 'load')
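The reported rate is simply target_items divided by the wall-clock duration of the polling loop; because curr_items is sampled only every 10 seconds, the measured duration can overshoot by up to one polling interval, so the printed rate is a mild underestimate.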
Example 5: test_create_bucket_test_load
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def test_create_bucket_test_load(self):
    shell = RemoteMachineShellConnection(self.master)
    self.init_rebalance_cluster_create_testbucket()
    if self._os == "centos" or self._os == "ubuntu":
        self.log.info("Load {0} through cbworkloadgen ..".format(self.num_items))
        _1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
        _2 = " ./bin/cbworkloadgen -n localhost:8091"
        _3 = " -r .8 -i {0} -s 256 -b testbucket -t 1".format(self.num_items)
        _4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
        command_to_load = _1 + _2 + _3 + _4
        o, e = shell.execute_non_sudo_command(command_to_load)
        shell.log_command_output(o, e)
        time.sleep(20)
        rest = RestConnection(self.master)
        item_count = rest.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
        if item_count == self.num_items:
            self.log.info("Item count matched, {0}={1}".format(item_count, self.num_items))
        else:
            self.fail("Item count not what's expected, {0}!={1}".format(item_count, self.num_items))
        self.log.info("Deleting testbucket ..")
        _1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
        _2 = " ./bin/couchbase-cli bucket-delete -c localhost:8091"
        _3 = " --bucket=testbucket"
        _4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
        command_to_delete_bucket = _1 + _2 + _3 + _4
        o, e = shell.execute_non_sudo_command(command_to_delete_bucket)
        shell.log_command_output(o, e)
        time.sleep(10)
    elif self._os == "windows":
        # TODO: Windows support
        self.log.info("Yet to add support for windows!")
        pass
Example 6: ns_server_stats
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def ns_server_stats(self, nodes, bucket, frequency, verbose=False):
    self._task["ns_server_stats"] = []
    self._task["ns_server_stats_system"] = []
    d = {}
    for node in nodes:
        d[node] = {"snapshots": [], "system_snapshots": []}
    while not self._aborted():
        time.sleep(frequency)
        print "Collecting ns_server_stats"
        for node in nodes:
            rest = RestConnection(node)
            data_json = rest.fetch_bucket_stats(bucket=bucket, zoom='minute')
            d[node]["snapshots"].append(data_json)
            data_json = rest.fetch_system_stats()
            d[node]["system_snapshots"].append(data_json)
    for node in nodes:
        for snapshot in d[node]["snapshots"]:
            self._task["ns_server_stats"].append(snapshot)
        for snapshot in d[node]["system_snapshots"]:
            self._task["ns_server_stats_system"].append(snapshot)
    print " finished ns_server_stats"
Example 7: ns_server_stats
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def ns_server_stats(self, nodes, bucket, frequency, verbose=False):
    self._task["ns_server_stats"] = []
    self._task["ns_server_stats_system"] = []
    d = {}
    for node in nodes:
        d[node] = {"snapshots": [], "system_snapshots": []}
    not_null = lambda v: v if v is not None else 0
    while not self._aborted():
        time.sleep(frequency)
        print "Collecting ns_server_stats"
        for node in nodes:
            rest = RestConnection(node)
            data_json = rest.fetch_bucket_stats(bucket=bucket, zoom='minute')
            # normalize missing (None) values to 0 before storing the snapshot
            fixed_data = dict(
                (k, not_null(v)) for k, v in data_json["op"]["samples"].iteritems()
            )
            data_json["op"]["samples"] = fixed_data
            d[node]["snapshots"].append(data_json)
            data_json = rest.fetch_system_stats()
            d[node]["system_snapshots"].append(data_json)
    for node in nodes:
        for snapshot in d[node]["snapshots"]:
            self._task["ns_server_stats"].append(snapshot)
        for snapshot in d[node]["system_snapshots"]:
            self._task["ns_server_stats_system"].append(snapshot)
    print " finished ns_server_stats"
Example 8: wait_for_replication_to_catchup
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def wait_for_replication_to_catchup(self, src, dest, timeout, _str_):
    rest1 = RestConnection(src)
    rest2 = RestConnection(dest)
    # 20 minutes by default
    end_time = time.time() + timeout
    _count1 = rest1.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
    _count2 = rest2.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
    while _count1 != _count2 and (time.time() - end_time) < 0:
        self.log.info("Waiting for replication to catch up ..")
        time.sleep(60)
        _count1 = rest1.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
    if _count1 != _count2:
        self.fail("Not all items replicated in {0} sec for bucket: testbucket. "
                  "On source cluster: {1}, on dest: {2}".format(timeout, _count1, _count2))
    self.log.info("Replication caught up at {0}, for testbucket".format(_str_))
Example 9: test_utf_16_keys
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def test_utf_16_keys(self):
    gen_create = UTF_16_Generator('load', 'load_', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers)
    rest = RestConnection(self.servers[0])
    for bucket in self.buckets:
        item_count = rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
        if self.num_items != item_count:
            self.fail("Item count didn't match on bucket {0} => expected {1} : seen {2}"
                      .format(bucket.name, self.num_items, item_count))
        else:
            self.log.info("Expected item count seen on bucket {0}: {1} == {2}"
                          .format(bucket.name, self.num_items, item_count))
Example 10: test_rollback
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def test_rollback(self):
    bucket = self.src_cluster.get_buckets()[0]
    nodes = self.src_cluster.get_nodes()
    # Stop persistence on Node A & Node B
    for node in nodes:
        mem_client = MemcachedClientHelper.direct_client(node, bucket)
        mem_client.stop_persistence()
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                 + '/goxdcr.log*'
    self.setup_xdcr()
    self.src_cluster.pause_all_replications()
    gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(gen)
    self.src_cluster.resume_all_replications()
    # Perform mutations on the bucket
    self.async_perform_update_delete()
    rest1 = RestConnection(self.src_cluster.get_master_node())
    rest2 = RestConnection(self.dest_cluster.get_master_node())
    # Fetch count of docs in src and dest cluster
    _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
    _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
    self.log.info("Before rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))
    # Kill memcached on Node A so that Node B becomes master
    shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
    shell.kill_memcached()
    # Start persistence on Node B
    mem_client = MemcachedClientHelper.direct_client(nodes[1], bucket)
    mem_client.start_persistence()
    # Failover Node B
    failover_task = self.src_cluster.async_failover()
    failover_task.result()
    # Wait for failover & rollback to complete
    self.sleep(60)
    # Fetch count of docs in src and dest cluster
    _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
    _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
    self.log.info("After rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2))
    self.assertTrue(self.src_cluster.wait_for_outbound_mutations(),
                    "Mutations in source cluster not replicated to target after rollback")
    self.log.info("Mutations in source cluster replicated to target after rollback")
    count = NodeHelper.check_goxdcr_log(
        nodes[0],
        "Received rollback from DCP stream",
        goxdcr_log)
    self.assertGreater(count, 0, "rollback did not happen as expected")
    self.log.info("rollback happened as expected")
Example 11: test_bucket_backup_restore
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def test_bucket_backup_restore(self):
    shell = RemoteMachineShellConnection(self.master)
    self.init_rebalance_cluster_create_testbucket()
    if self._os == "centos" or self._os == "ubuntu":
        self.log.info("Load {0} through cbworkloadgen ..".format(self.num_items))
        _1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
        _2 = " ./bin/cbworkloadgen -n localhost:8091"
        _3 = " -r .8 -i {0} -s 256 -b testbucket -t 1".format(self.num_items)
        _4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
        command_to_load = _1 + _2 + _3 + _4
        o, e = shell.execute_non_sudo_command(command_to_load)
        shell.log_command_output(o, e)
        time.sleep(20)
        rest = RestConnection(self.master)
        ini_item_count = rest.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
        self.log.info("Backing up bucket 'testbucket' ..")
        _1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
        _2 = " ./bin/cbbackup http://localhost:8091"
        _3 = " /home/{0}/backup".format(self.master.ssh_username)
        _4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
        command_to_backup = _1 + _2 + _3 + _4
        o, e = shell.execute_non_sudo_command(command_to_backup)
        shell.log_command_output(o, e)
        time.sleep(10)
        self.log.info("Deleting bucket ..")
        _1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
        _2 = " ./bin/couchbase-cli bucket-delete -c localhost:8091"
        _3 = " --bucket=testbucket"
        _4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
        command_to_delete_bucket = _1 + _2 + _3 + _4
        o, e = shell.execute_non_sudo_command(command_to_delete_bucket)
        shell.log_command_output(o, e)
        time.sleep(20)
        if len(self.servers) < 2:
            rep_count = 0
        else:
            rep_count = 1
        self.log.info("Recreating bucket ..")
        _1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
        _2 = " ./bin/couchbase-cli bucket-create -c localhost:8091"
        _3 = " --bucket=testbucket --bucket-type=couchbase --bucket-port=11211"
        _4 = " --bucket-ramsize=500 --bucket-replica={0} --wait".format(rep_count)
        _5 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
        command_to_create_bucket = _1 + _2 + _3 + _4 + _5
        o, e = shell.execute_non_sudo_command(command_to_create_bucket)
        shell.log_command_output(o, e)
        time.sleep(20)
        self.log.info("Restoring bucket 'testbucket' ..")
        _1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
        _2 = " ./bin/cbrestore /home/{0}/backup http://localhost:8091".format(self.master.ssh_username)
        _3 = " -b testbucket -B testbucket"
        _4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
        command_to_restore = _1 + _2 + _3 + _4
        o, e = shell.execute_non_sudo_command(command_to_restore)
        shell.log_command_output(o, e)
        time.sleep(10)
        rest = RestConnection(self.master)
        fin_item_count = rest.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
        self.log.info("Removing backed-up folder ..")
        command_to_remove_folder = "rm -rf /home/{0}/backup".format(self.master.ssh_username)
        o, e = shell.execute_non_sudo_command(command_to_remove_folder)
        shell.log_command_output(o, e)
        if fin_item_count == ini_item_count:
            self.log.info("Item count before delete and after backup/restore matched, {0}={1}".format(
                fin_item_count, ini_item_count))
        else:
            self.fail("Item count didn't match after backup/restore, {0}!={1}".format(fin_item_count, ini_item_count))
        self.log.info("Deleting testbucket ..")
        _1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
        _2 = " ./bin/couchbase-cli bucket-delete -c localhost:8091"
        _3 = " --bucket=testbucket"
        _4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
        command_to_delete_bucket = _1 + _2 + _3 + _4
        o, e = shell.execute_non_sudo_command(command_to_delete_bucket)
        shell.log_command_output(o, e)
        time.sleep(10)
    elif self._os == "windows":
        # TODO: Windows support
        self.log.info("Yet to add support for windows!")
        pass
Example 12: test_xdcr
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_bucket_stats [as alias]
def test_xdcr(self):
    _rep_type = self.input.param("replication_type", "capi")  # capi or xmem
    _bixdcr = self.input.param("bidirectional", "false")
    _clusters_dic = self.input.clusters
    _src_nodes = copy.copy(_clusters_dic[0])
    _src_master = _src_nodes[0]
    _dest_nodes = copy.copy(_clusters_dic[1])
    _dest_master = _dest_nodes[0]
    # Build source cluster
    self.init_rebalance_cluster_create_testbucket(_src_master, _src_nodes)
    # Build destination cluster
    self.init_rebalance_cluster_create_testbucket(_dest_master, _dest_nodes)
    # Setting up XDCR
    self.setup_xdcr_start_replication(_src_master, _dest_master, _rep_type, _bixdcr)
    shell1 = RemoteMachineShellConnection(_src_master)
    shell2 = RemoteMachineShellConnection(_dest_master)
    src_item_count = 0
    dest_item_count = 0
    if self._os == "centos" or self._os == "ubuntu":
        self.log.info("Load {0} through cbworkloadgen at src..".format(self.num_items))
        _1 = "cd /home/{0}/opt/couchbase &&".format(_src_master.ssh_username)
        _2 = " ./bin/cbworkloadgen -n localhost:8091 --prefix=s_"
        _3 = " -r .8 -i {0} -s 256 -b testbucket -t 1".format(self.num_items)
        _4 = " -u {0} -p {1}".format(_src_master.rest_username, _src_master.rest_password)
        command_to_load = _1 + _2 + _3 + _4
        o, e = shell1.execute_non_sudo_command(command_to_load)
        shell1.log_command_output(o, e)
        time.sleep(20)
        rest = RestConnection(_src_master)
        src_item_count = rest.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
        if _bixdcr:
            self.log.info("Load {0} through cbworkloadgen at dest..".format(self.num_items))
            _1 = "cd /home/{0}/opt/couchbase &&".format(_dest_master.ssh_username)
            _2 = " ./bin/cbworkloadgen -n localhost:8091 --prefix=d_"
            _3 = " -r .8 -i {0} -s 256 -b testbucket -t 1".format(self.num_items)
            _4 = " -u {0} -p {1}".format(_dest_master.rest_username, _dest_master.rest_password)
            command_to_load = _1 + _2 + _3 + _4
            o, e = shell2.execute_non_sudo_command(command_to_load)
            shell2.log_command_output(o, e)
            time.sleep(20)
            rest = RestConnection(_dest_master)
            dest_item_count = rest.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
        self.wait_for_replication_to_catchup(_src_master, _dest_master, 1200, "destination")
        if _bixdcr:
            self.wait_for_replication_to_catchup(_dest_master, _src_master, 1200, "source")
        self.log.info("XDC REPLICATION caught up")
        rest1 = RestConnection(_src_master)
        rest2 = RestConnection(_dest_master)
        curr_count_on_src = rest1.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
        curr_count_on_dest = rest2.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
        assert curr_count_on_src == (src_item_count + dest_item_count), "ItemCount on source not what's expected"
        assert curr_count_on_dest == (src_item_count + dest_item_count), "ItemCount on destination not what's expected"
    elif self._os == "windows":
        # TODO: Windows support
        self.log.info("Yet to add support for windows!")
        pass
    shell1.disconnect()
    shell2.disconnect()