本文整理汇总了Python中membase.api.rest_client.RestHelper类的典型用法代码示例。如果您正苦于以下问题:Python RestHelper类的具体用法?Python RestHelper怎么用?Python RestHelper使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了RestHelper类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_stream_after_warmup
def test_stream_after_warmup(self):
    """Verify DCP streams survive a node restart (warmup).

    Loads documents into all buckets, snapshots the per-vbucket
    (vb_uuid, high_seqno) state of the last standard bucket, restarts
    node A, and then checks every original vbucket can still be
    streamed up to its pre-restart high seqno.
    """
    nodeA = self.servers[0]
    # Target the last standard bucket (buckets are named 'standard_bucket<i>').
    bucket = 'standard_bucket'+str(self.standard_buckets-1)
    originalVbInfo = self.all_vb_info(nodeA, bucket = bucket)
    expectedVbSeqno = {}
    # load all buckets
    doc_gen = BlobGenerator(
        'dcpdata', 'dcpdata-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, doc_gen, "create", 0)
    self._wait_for_stats_all_buckets([nodeA])
    # store expected vb seqnos (re-read after the load so they reflect it)
    originalVbInfo = self.all_vb_info(nodeA, bucket = bucket)
    # restart node
    assert self.stop_node(0)
    time.sleep(5)
    assert self.start_node(0)
    rest = RestHelper(RestConnection(nodeA))
    assert rest.is_ns_server_running()
    time.sleep(2)
    # verify original vbInfo can be streamed
    dcp_client = self.dcp_client(nodeA, PRODUCER, auth_user = bucket)
    for vbucket in originalVbInfo:
        vb_uuid, _, high_seqno = originalVbInfo[vbucket]
        stream = dcp_client.stream_req(vbucket, 0, 0, high_seqno, vb_uuid)
        responses = stream.run()
        # the stream must reach exactly the pre-restart high seqno
        assert high_seqno == stream.last_by_seqno
示例2: test_node_reboot
def test_node_reboot(self):
    """Reboot the failing node and verify autoreprovision recovery.

    Enables autoreprovision for one node, reboots ``self.server_fail``,
    waits for it to come back (Windows gets a longer grace period),
    asserts the cluster reports healthy-but-unbalanced, rebalances, and
    re-verifies the previously loaded data in every bucket.
    """
    wait_timeout = 120
    timeout = self.timeout / 2
    status = self.rest.update_autoreprovision_settings(True, 1)
    if not status:
        self.fail('failed to change autoreprovision_settings!')
    self.sleep(5)
    shell = RemoteMachineShellConnection(self.server_fail)
    # extract_remote_info() probes the remote machine; the original code
    # called it three times -- cache the OS type once instead.
    os_type = shell.extract_remote_info().type.lower()
    if os_type == 'windows':
        o, r = shell.execute_command("shutdown -r -f -t 0")
    elif os_type == 'linux':
        o, r = shell.execute_command("reboot")
    shell.log_command_output(o, r)
    # Windows reboots take considerably longer than Linux ones.
    if os_type == 'windows':
        time.sleep(wait_timeout * 5)
    else:
        time.sleep(wait_timeout)
    # disable firewall on the node (fresh connection: the old shell
    # session died with the reboot)
    shell = RemoteMachineShellConnection(self.server_fail)
    shell.disable_firewall()
    AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 0,
                                                        timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                        self)
    helper = RestHelper(self.rest)
    self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
    # autoreprovision brings the node back but leaves it pending rebalance
    self.assertFalse(helper.is_cluster_rebalanced(), "cluster is balanced")
    self.rest.rebalance(otpNodes=[node.id for node in self.rest.node_statuses()], ejectedNodes=[])
    self.assertTrue(self.rest.monitorRebalance())
    buckets = self.rest.get_buckets()
    for bucket in buckets:
        self.verify_loaded_data(self.master, bucket.name, self.loaded_items[bucket.name])
示例3: test_crash_entire_cluster
def test_crash_entire_cluster(self):
    """Stop every node, restart them in reverse order, and verify that
    a fresh DCP stream still reaches the current high seqno."""
    self.cluster.rebalance(
        [self.master],
        self.servers[1:], [])
    vbucket = 0
    nodeA = self.servers[0]
    num_docs = 10000
    self.load_docs(nodeA, vbucket, num_docs)
    dcp_client = self.dcp_client(nodeA, PRODUCER)
    stream = dcp_client.stream_req(vbucket, 0, 0, 2 * num_docs, 0)
    self.load_docs(nodeA, vbucket, num_docs)
    # stop all nodes
    total_nodes = len(self.servers)
    for idx in range(total_nodes):
        assert self.stop_node(idx)
    time.sleep(2)
    # start all nodes in reverse order
    for idx in reversed(range(total_nodes)):
        assert self.start_node(idx)
    rest = RestHelper(RestConnection(nodeA))
    assert rest.is_ns_server_running()
    _, _, high_seqno = self.vb_info(nodeA, vbucket)
    dcp_client = self.dcp_client(nodeA, PRODUCER)
    stream = dcp_client.stream_req(vbucket, 0, 0, high_seqno, 0)
    stream.run()
    assert stream.last_by_seqno == high_seqno
示例4: backup_restore
def backup_restore(self):
    """Run ``cbbackupmgr restore`` for the configured backupset.

    Resolves the start/end backup names (falling back to a synthesized
    name when the index is out of range), assembles the restore CLI
    flags from the backupset options, optionally pre-creates the target
    buckets on the restore cluster, executes the command on the backup
    host, and returns its (output, error) line lists.
    """
    try:
        backup_start = self.backups[int(self.backupset.start) - 1]
    except IndexError:
        # index out of range: synthesize a name from the newest backup
        backup_start = "{0}{1}".format(self.backups[-1], self.backupset.start)
    try:
        backup_end = self.backups[int(self.backupset.end) - 1]
    except IndexError:
        backup_end = "{0}{1}".format(self.backups[-1], self.backupset.end)
    # base restore command: archive/repo location plus target cluster
    args = "restore --archive {0} --repo {1} --host http://{2}:{3} --username {4} --password {5} --start {6} " \
           "--end {7}".format(self.backupset.directory, self.backupset.name,
                              self.backupset.restore_cluster_host.ip,
                              self.backupset.restore_cluster_host.port,
                              self.backupset.restore_cluster_host_username,
                              self.backupset.restore_cluster_host_password, backup_start,
                              backup_end)
    # append each optional flag only when the backupset sets it
    if self.backupset.exclude_buckets:
        args += " --exclude-buckets {0}".format(self.backupset.exclude_buckets)
    if self.backupset.include_buckets:
        args += " --include-buckets {0}".format(self.backupset.include_buckets)
    if self.backupset.disable_bucket_config:
        args += " --disable-bucket-config {0}".format(self.backupset.disable_bucket_config)
    if self.backupset.disable_views:
        args += " --disable-views {0}".format(self.backupset.disable_views)
    if self.backupset.disable_gsi_indexes:
        args += " --disable-gsi-indexes {0}".format(self.backupset.disable_gsi_indexes)
    if self.backupset.disable_ft_indexes:
        args += " --disable-ft-indexes {0}".format(self.backupset.disable_ft_indexes)
    if self.backupset.disable_data:
        args += " --disable-data {0}".format(self.backupset.disable_data)
    if self.backupset.filter_keys:
        args += " --filter_keys {0}".format(self.backupset.filter_keys)
    if self.backupset.filter_values:
        args += " --filter_values {0}".format(self.backupset.filter_values)
    if self.backupset.force_updates:
        args += " --force-updates"
    if self.no_progress_bar:
        args += " --no-progress-bar"
    if not self.skip_buckets:
        # pre-create any missing buckets on the restore cluster so the
        # restore has somewhere to land
        rest_conn = RestConnection(self.backupset.restore_cluster_host)
        rest_helper = RestHelper(rest_conn)
        for bucket in self.buckets:
            if not rest_helper.bucket_exists(bucket.name):
                self.log.info("Creating bucket {0} in restore host {1}".format(bucket.name,
                                                                               self.backupset.restore_cluster_host.ip))
                rest_conn.create_bucket(bucket=bucket.name,
                                        ramQuotaMB=512,
                                        authType=bucket.authType if bucket.authType else 'none',
                                        proxyPort=bucket.port,
                                        saslPassword=bucket.saslPassword)
                bucket_ready = rest_helper.vbucket_map_ready(bucket.name)
                if not bucket_ready:
                    self.fail("Bucket %s not created after 120 seconds." % bucket.name)
    # run cbbackupmgr on the backup host and capture its output
    remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
    command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
    output, error = remote_client.execute_command(command)
    remote_client.log_command_output(output, error)
    return output, error
示例5: cleanup_cluster
def cleanup_cluster(servers, wait_for_rebalance=True, master=None):
    """Eject every node except the master from the cluster.

    Proxy/moxi nodes (ports 9091-9990) are ejected directly, the rest
    are rebalanced out, and each ejected node is polled until it reports
    an empty 'pools' list.  Nodes that do not clean up in time are
    force-ejected; raises Exception if any node still is not clean.
    """
    log = logger.Logger.get_logger()
    if master is None:
        master = servers[0]
    rest = RestConnection(master)
    helper = RestHelper(rest)
    helper.is_ns_server_running(timeout_in_seconds=testconstants.NS_SERVER_TIMEOUT)
    nodes = rest.node_statuses()
    master_id = rest.get_nodes_self().id
    # BUG FIX: iterate a copy -- removing from the list being iterated
    # skips the element after each removal.  Also use a range comparison
    # instead of O(n) membership in xrange(9091, 9991).
    for node in nodes[:]:
        if 9091 <= int(node.port) < 9991:
            rest.eject_node(node)
            nodes.remove(node)
    if len(nodes) > 1:
        log.info("rebalancing all nodes in order to remove nodes")
        rest.log_client_error("Starting rebalance from test, ejected nodes %s" % \
                              [node.id for node in nodes if node.id != master_id])
        helper.remove_nodes(knownNodes=[node.id for node in nodes],
                            ejectedNodes=[node.id for node in nodes if node.id != master_id],
                            wait_for_rebalance=wait_for_rebalance)
    success_cleaned = []
    for removed in [node for node in nodes if (node.id != master_id)]:
        removed.rest_password = servers[0].rest_password
        removed.rest_username = servers[0].rest_username
        try:
            rest = RestConnection(removed)
        except Exception as ex:
            log.error("can't create rest connection after rebalance out for ejected nodes,\
                will retry after 10 seconds according to MB-8430: {0} ".format(ex))
            time.sleep(10)
            rest = RestConnection(removed)
        # poll up to 30s for the node to report an empty 'pools' list,
        # which means it has fully left the cluster
        start = time.time()
        while time.time() - start < 30:
            if len(rest.get_pools_info()["pools"]) == 0:
                success_cleaned.append(removed)
                break
            else:
                time.sleep(0.1)
        if time.time() - start > 10:
            log.error("'pools' on node {0}:{1} - {2}".format(
                removed.ip, removed.port, rest.get_pools_info()["pools"]))
    ejected = set([node for node in nodes if node.id != master_id])
    for node in ejected - set(success_cleaned):
        # BUG FIX: the original logged `removed` (stale variable left
        # over from the previous loop) here; report the node actually
        # being force-ejected.
        log.error("node {0}:{1} was not cleaned after removing from cluster".format(
            node.ip, node.port))
        try:
            rest = RestConnection(node)
            rest.force_eject_node()
        except Exception as ex:
            log.error("force_eject_node {0}:{1} failed: {2}".format(node.ip, node.port, ex))
    if len(ejected - set(success_cleaned)) != 0:
        raise Exception("not all ejected nodes were cleaned successfully")
    log.info("removed all the nodes from cluster associated with {0} ? {1}".format(servers[0], \
             [(node.id, node.port) for node in nodes if (node.id != master_id)]))
示例6: wait_for_bucket_creation
def wait_for_bucket_creation(bucket, rest, timeout_in_seconds=120):
    """Poll until `bucket` exists on the cluster behind `rest`.

    Returns True as soon as the bucket appears, False if it has not
    shown up within `timeout_in_seconds`."""
    log.info("waiting for bucket creation to complete....")
    helper = RestHelper(rest)
    deadline = time.time() + timeout_in_seconds
    while time.time() <= deadline:
        if helper.bucket_exists(bucket):
            return True
        time.sleep(2)
    return False
示例7: _create_default_bucket
def _create_default_bucket(self):
    """Create ``self.bucket`` when missing, sized from the node quota
    (minimum 256MB), and assert it becomes reachable."""
    helper = RestHelper(self.rest)
    if not helper.bucket_exists(self.bucket):
        ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        node_info = self.rest.get_nodes_self()
        # the bucket gets a share of the node quota, floored at 256MB
        quota_mb = max(256, int(node_info.memoryQuota * ram_ratio))
        self.rest.create_bucket(bucket=self.bucket, ramQuotaMB=quota_mb)
    ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket)
    self.testcase.assertTrue(ready, "wait_for_memcached failed")
    self.testcase.assertTrue(helper.bucket_exists(self.bucket), "unable to create {0} bucket".format(self.bucket))
示例8: wait_for_bucket_deletion
def wait_for_bucket_deletion(bucket,
                             rest,
                             timeout_in_seconds=120):
    """Poll until `bucket` disappears from the cluster behind `rest`.

    Returns True once the bucket is gone, False if it still exists
    after `timeout_in_seconds`."""
    log = logger.Logger.get_logger()
    log.info('waiting for bucket deletion to complete....')
    helper = RestHelper(rest)
    deadline = time.time() + timeout_in_seconds
    while time.time() <= deadline:
        if not helper.bucket_exists(bucket):
            return True
        time.sleep(2)
    return False
示例9: _create_default_bucket
def _create_default_bucket(self, replica=1):
    """Create the 'default' bucket on the first server if it is absent
    and assert it becomes usable."""
    name = "default"
    master = self.servers[0]
    rest = RestConnection(master)
    helper = RestHelper(RestConnection(master))
    if not helper.bucket_exists(name):
        ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        node_info = rest.get_nodes_self()
        quota_mb = int(node_info.memoryQuota * ram_ratio)
        rest.create_bucket(bucket=name, ramQuotaMB=quota_mb, replicaNumber=replica)
    ready = BucketOperationHelper.wait_for_memcached(master, name)
    self.assertTrue(ready, msg="wait_for_memcached failed")
    self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name))
示例10: cleanup_cluster
def cleanup_cluster(servers, wait_for_rebalance=True):
    """Rebalance out every node except the master (servers[0])."""
    log = logger.Logger.get_logger()
    rest = RestConnection(servers[0])
    helper = RestHelper(rest)
    helper.is_ns_server_running(timeout_in_seconds=testconstants.NS_SERVER_TIMEOUT)
    nodes = rest.node_statuses()
    master_id = rest.get_nodes_self().id
    if len(nodes) > 1:
        log.info("rebalancing all nodes in order to remove nodes")
        known = [node.id for node in nodes]
        ejected = [node.id for node in nodes if node.id != master_id]
        removed = helper.remove_nodes(knownNodes=known,
                                      ejectedNodes=ejected,
                                      wait_for_rebalance=wait_for_rebalance)
        log.info("removed all the nodes from cluster associated with {0} ? {1}".format(servers[0], removed))
示例11: _create_default_bucket
def _create_default_bucket(self):
    """Unconditionally create the 'default' bucket on the master, sized
    from the node quota, and reset the load-thread bookkeeping."""
    name = "default"
    master = self.master
    rest = RestConnection(master)
    helper = RestHelper(RestConnection(master))
    ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
    node_info = rest.get_nodes_self()
    quota_mb = int(node_info.memoryQuota * ram_ratio)
    rest.create_bucket(bucket=name, ramQuotaMB=quota_mb)
    ready = BucketOperationHelper.wait_for_memcached(master, name)
    self.assertTrue(ready, msg="wait_for_memcached failed")
    self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name))
    # no background loader is running yet
    self.load_thread = None
    self.shutdown_load_data = False
示例12: test_reset_count
def test_reset_count(self):
    """Verify that resetting the autoreprovision count works.

    Stops two servers one after the other (resetting the count between
    failovers so both remain eligible), checks the count reaches 2 once
    both nodes restart and are reprovisioned, resets it back to 0, and
    finally rebalances the recovered-but-unbalanced cluster.
    """
    timeout = self.timeout / 2
    server_fail1 = self.servers[1]
    server_fail2 = self.servers[2]
    # allow up to 2 nodes to be auto-reprovisioned
    status = self.rest.update_autoreprovision_settings(True, 2)
    if not status:
        self.fail('failed to change autoreprovision_settings!')
    self.sleep(5)
    self.log.info("stopping the first server")
    self._stop_couchbase(server_fail1)
    AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 1,
                                                        timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                        self)
    self.log.info("resetting the autoreprovision count")
    if not self.rest.reset_autoreprovision():
        self.fail('failed to reset autoreprovision count!')
    self.log.info("stopping the second server")
    self._stop_couchbase(server_fail2)
    AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 2,
                                                        timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                        self)
    # nothing has been reprovisioned yet, so the count must still be 0
    settings = self.rest.get_autoreprovision_settings()
    self.assertEquals(settings.enabled, True)
    self.assertEquals(settings.max_nodes, 2)
    self.assertEquals(settings.count, 0)
    self._start_couchbase(server_fail2)
    self._start_couchbase(server_fail1)
    self.sleep(30)
    # both nodes came back, so two reprovisions should be recorded
    settings = self.rest.get_autoreprovision_settings()
    self.assertEquals(settings.enabled, True)
    self.assertEquals(settings.max_nodes, 2)
    self.assertEquals(settings.count, 2)
    self.log.info("resetting the autoreprovision count")
    if not self.rest.reset_autoreprovision():
        self.fail('failed to reset autoreprovision count!')
    settings = self.rest.get_autoreprovision_settings()
    self.assertEquals(settings.enabled, True)
    self.assertEquals(settings.max_nodes, 2)
    self.assertEquals(settings.count, 0)
    helper = RestHelper(self.rest)
    self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
    # reprovisioned nodes remain pending rebalance
    self.assertFalse(helper.is_cluster_rebalanced(), "cluster is balanced")
    self.rest.rebalance(otpNodes=[node.id for node in self.rest.node_statuses()], ejectedNodes=[])
    self.assertTrue(self.rest.monitorRebalance())
示例13: _create_default_bucket
def _create_default_bucket(self, unittest):
    """Create the 'default' bucket on the master when missing; assert
    (via the supplied `unittest` case) that it becomes usable."""
    name = "default"
    master = self.master
    rest = RestConnection(master)
    helper = RestHelper(RestConnection(master))
    if not helper.bucket_exists(name):
        ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
        node_info = rest.get_nodes_self()
        rest.create_bucket(bucket=name, ramQuotaMB=int(node_info.memoryQuota * ram_ratio))
    ready = BucketOperationHelper.wait_for_memcached(master, name)
    # also wait for the vbucket map before asserting readiness
    BucketOperationHelper.wait_for_vbuckets_ready_state(master, name)
    unittest.assertTrue(ready, msg="wait_for_memcached failed")
    unittest.assertTrue(helper.bucket_exists(name),
                        msg="unable to create {0} bucket".format(name))
示例14: _create_default_bucket
def _create_default_bucket(self):
    """Initialise the master node's cluster credentials and memory
    quota, then create ``self.bucket`` if it does not already exist."""
    rest = RestConnection(self.master)
    helper = RestHelper(RestConnection(self.master))
    if not helper.bucket_exists(self.bucket):
        ram_ratio = BucketOperationHelper.base_bucket_ratio([self.master])
        node_info = rest.get_nodes_self()
        quota_mb = int(node_info.memoryQuota * ram_ratio)
        server_info = self.master
        rest.init_cluster(username=server_info.rest_username,
                          password=server_info.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(node_info.mcdMemoryReserved * ram_ratio))
        rest.create_bucket(bucket=self.bucket, ramQuotaMB=quota_mb)
    ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket)
    self.assertTrue(ready, msg="wait_for_memcached failed")
    self.assertTrue(helper.bucket_exists(self.bucket),
                    msg="unable to create {0} bucket".format(self.bucket))
示例15: test_node_memcached_failure
def test_node_memcached_failure(self):
    """Pause couchbase on a node and verify autoreprovision handling.

    Enables autoreprovision for one node, pauses ``self.server_fail``,
    waits for the warmup to be detected, restores the node, waits for
    failover handling to settle, then asserts the cluster is healthy
    and balanced and re-verifies the loaded data in every bucket.
    """
    timeout = self.timeout / 2
    status = self.rest.update_autoreprovision_settings(True, 1)
    if not status:
        self.fail('failed to change autoreprovision_settings!')
    self.sleep(5)
    self._pause_couchbase(self.server_fail)
    self.sleep(5)
    # one node should be detected as warming up
    AutoReprovisionBaseTest.wait_for_warmup_or_assert(self.master, 1,
                                                      timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                      self)
    # bring the paused node back to a working state
    RemoteUtilHelper.common_basic_setup([self.server_fail])
    AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 0,
                                                        timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                        self)
    helper = RestHelper(self.rest)
    self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
    # unlike a reboot, a memcached pause should not leave the cluster unbalanced
    self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
    buckets = self.rest.get_buckets()
    for bucket in buckets:
        self.verify_loaded_data(self.master, bucket.name, self.loaded_items[bucket.name])