This article collects typical usage examples of the ClusterOperationHelper class from the Python module lib.membase.helper.cluster_helper. If you have been wondering what ClusterOperationHelper is for, or how to use it, the curated class examples below should help.
Ten code examples of the ClusterOperationHelper class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
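Before the individual examples, here is a minimal sketch of the pattern most of them share: stop the cluster, perform some per-node maintenance, restart the cluster, and block until every node is healthy again. This is an illustrative sketch only, assembled from the ClusterOperationHelper calls that appear in the examples below; restart_cluster_around and do_node_maintenance are hypothetical names, not part of the library.

from lib.membase.helper.cluster_helper import ClusterOperationHelper

def restart_cluster_around(servers, testcase, do_node_maintenance):
    # Stop ns_server on every node so the per-node work sees quiescent state.
    ClusterOperationHelper.stop_cluster(servers)
    for server in servers:
        do_node_maintenance(server)  # hypothetical per-node callback
    # Bring the nodes back up.
    ClusterOperationHelper.start_cluster(servers)
    # Block until every node responds, tolerating warmup;
    # asserts through the enclosing testcase on failure.
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        servers, testcase, wait_if_warmup=True)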
Example 1: test_eventing_processes_mutations_when_mutated_through_subdoc_api_and_set_expiry_through_sdk
def test_eventing_processes_mutations_when_mutated_through_subdoc_api_and_set_expiry_through_sdk(self):
# set expiry pager interval
ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip, name=self.src_bucket_name)
bucket = Bucket(url, username="cbadminbucket", password="password")
for docid in ['customer123', 'customer1234', 'customer12345']:
bucket.insert(docid, {'some': 'value'})
body = self.create_save_function_body(self.function_name, self.handler_code,
dcp_stream_boundary="from_now")
# deploy eventing function
self.deploy_function(body)
# upserting a new sub-document
bucket.mutate_in('customer123', SD.upsert('fax', '775-867-5309'))
# inserting a sub-document
bucket.mutate_in('customer1234', SD.insert('purchases.complete', [42, True, None], create_parents=True))
# Creating and populating an array document
bucket.mutate_in('customer12345', SD.array_append('purchases.complete', ['Hello'], create_parents=True))
self.verify_eventing_results(self.function_name, 3, skip_stats_validation=True)
for docid in ['customer123', 'customer1234', 'customer12345']:
# set expiry on all the docs created using sub doc API
bucket.touch(docid, ttl=5)
self.sleep(10, "wait for expiry of the documents")
# Wait for eventing to catch up with all the expiry mutations and verify results
self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
self.undeploy_and_delete_function(body)
Example 2: reboot_server
def reboot_server(self, server):
remote_client = RemoteMachineShellConnection(server)
remote_client.reboot_node()
remote_client.disconnect()
# wait for the node to restart and warm up
self.sleep(self.wait_timeout * 5)
# disable firewall on these nodes
self.stop_firewall_on_node(server)
# wait until the node is ready after warmup
ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
Example 3: save_snapshots
def save_snapshots(self, file_base, bucket):
"""Save snapshots on all servers"""
if not self.input.servers or not bucket:
self.log.error("invalid server list or bucket name")
return False
ClusterOperationHelper.stop_cluster(self.input.servers)
for server in self.input.servers:
self._save_snapshot(server, bucket, file_base)
ClusterOperationHelper.start_cluster(self.input.servers)
return True
Example 4: kill_erlang_service
def kill_erlang_service(self, server):
remote_client = RemoteMachineShellConnection(server)
os_info = remote_client.extract_remote_info()
log.info("os_info : {0}", os_info)
if os_info.type.lower() == "windows":
remote_client.kill_erlang(os="windows")
else:
remote_client.kill_erlang()
remote_client.start_couchbase()
remote_client.disconnect()
# wait for the node to restart and warm up
self.sleep(self.wait_timeout * 2)
# wait until the node is ready after warmup
ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
Example 5: set_up_dgm
def set_up_dgm(self):
"""Download fragmented, DGM dataset onto each cluster node, if not
already locally available.
The number of vbuckets and database schema must match the
target cluster.
Shutdown all cluster nodes.
Do a cluster-restore.
Restart all cluster nodes."""
bucket = self.param("bucket", "default")
ClusterOperationHelper.stop_cluster(self.input.servers)
for server in self.input.servers:
remote = RemoteMachineShellConnection(server)
# TODO: better way to pass num_nodes and db_size?
self.get_data_files(remote, bucket, 1, 10)
remote.disconnect()
ClusterOperationHelper.start_cluster(self.input.servers)
Example 6: load_snapshots
def load_snapshots(self, file_base, bucket):
"""Load snapshots on all servers"""
if not self.input.servers or not bucket:
print "[perf: load_snapshot] invalid server list or bucket name"
return False
ClusterOperationHelper.stop_cluster(self.input.servers)
for server in self.input.servers:
if not self._load_snapshot(server, bucket, file_base):
ClusterOperationHelper.start_cluster(self.input.servers)
return False
ClusterOperationHelper.start_cluster(self.input.servers)
return True
Example 7: setUp
def setUp(self):
self.setUpBase0()
mc_threads = self.parami("mc_threads", PerfDefaults.mc_threads)
if mc_threads != PerfDefaults.mc_threads:
for node in self.input.servers:
self.set_mc_threads(node, mc_threads)
erlang_schedulers = self.param("erlang_schedulers",
PerfDefaults.erlang_schedulers)
if erlang_schedulers:
ClusterOperationHelper.set_erlang_schedulers(self.input.servers,
erlang_schedulers)
master = self.input.servers[0]
self.is_multi_node = False
self.data_path = master.data_path
# Number of items loaded by the load() method.
# Does not include items that came from set_up_dgm().
self.num_items_loaded = 0
if self.input.clusters:
for cluster in self.input.clusters.values():
master = cluster[0]
self.set_up_rest(master)
self.set_up_cluster(master)
else:
master = self.input.servers[0]
self.set_up_cluster(master)
# Rebalance
num_nodes = self.parami("num_nodes", 10)
self.rebalance_nodes(num_nodes)
if self.input.clusters:
for cluster in self.input.clusters.values():
master = cluster[0]
self.set_up_rest(master)
self.set_up_buckets()
else:
self.set_up_buckets()
self.set_up_proxy()
if self.input.clusters:
for cluster in self.input.clusters.values():
master = cluster[0]
self.set_up_rest(master)
self.reconfigure()
else:
self.reconfigure()
if self.parami("dgm", getattr(self, "dgm", 1)):
self.set_up_dgm()
time.sleep(10)
self.setUpBase1()
if self.input.clusters:
for cluster in self.input.clusters.values():
self.wait_until_warmed_up(cluster[0])
else:
self.wait_until_warmed_up()
ClusterOperationHelper.flush_os_caches(self.input.servers)
Example 8: tear_down_cluster
def tear_down_cluster(self):
self.log.info("tearing down cluster")
ClusterOperationHelper.cleanup_cluster(self.input.servers)
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.input.servers,
self)
self.log.info("Cluster teared down")
Example 9: loop
#......... part of the code omitted here .........
'cbm': self.parami('cbm', PerfDefaults.cbm),
'cbm-host': self.param('cbm_host', PerfDefaults.cbm_host),
'cbm-port': self.parami('cbm_port', PerfDefaults.cbm_port)}
cfg_params = cfg.copy()
cfg_params['test_time'] = time.time()
cfg_params['test_name'] = test_name
client_id = ''
stores = None
if is_eperf:
client_id = self.parami("prefix", 0)
sc = None
if self.parami("collect_stats", 1):
sc = self.start_stats(self.spec_reference + ".loop",
test_params=cfg_params, client_id=client_id,
collect_server_stats=collect_server_stats,
ddoc=ddoc)
self.cur = {'cur-items': num_items}
if start_at >= 0:
self.cur['cur-gets'] = start_at
if num_ops is None:
num_ops = num_items
if isinstance(num_ops, int):
cfg['max-ops'] = num_ops
else:
# Here, num_ops is a "time to run" tuple of the form
# ('seconds', integer_num_of_seconds_to_run)
cfg['time'] = num_ops[1]
# For Black box, multi node tests
# always use membase-binary
if self.is_multi_node:
protocol = self.mk_protocol(host=self.input.servers[0].ip,
port=self.input.servers[0].port)
self.log.info("mcsoda - protocol %s" % protocol)
protocol, host_port, user, pswd = \
self.protocol_parse(protocol, use_direct=use_direct)
if not user.strip():
if "11211" in host_port:
user = self.param("bucket", "default")
else:
user = self.input.servers[0].rest_username
if not pswd.strip():
if not "11211" in host_port:
pswd = self.input.servers[0].rest_password
self.log.info("mcsoda - %s %s %s %s" %
(protocol, host_port, user, pswd))
self.log.info("mcsoda - cfg: " + str(cfg))
self.log.info("mcsoda - cur: " + str(self.cur))
# For query tests always use StoreCouchbase
if protocol == "couchbase":
stores = [StoreCouchbase()]
self.cur, start_time, end_time = \
self.mcsoda_run(cfg, self.cur, protocol, host_port, user, pswd,
stats_collector=sc, ctl=ctl, stores=stores,
heartbeat=self.parami("mcsoda_heartbeat", 0),
why="loop", bucket=self.param("bucket", "default"))
ops = {'tot-sets': self.cur.get('cur-sets', 0),
'tot-gets': self.cur.get('cur-gets', 0),
'tot-items': self.cur.get('cur-items', 0),
'tot-creates': self.cur.get('cur-creates', 0),
'tot-misses': self.cur.get('cur-misses', 0),
"start-time": start_time,
"end-time": end_time}
# Wait until there are no active indexing tasks
if self.parami('wait_for_indexer', 0):
ClusterOperationHelper.wait_for_completion(self.rest, 'indexer')
# Wait until there are no active view compaction tasks
if self.parami('wait_for_compaction', 0):
ClusterOperationHelper.wait_for_completion(self.rest,
'view_compaction')
if self.parami("loop_wait_until_drained",
PerfDefaults.loop_wait_until_drained):
self.wait_until_drained()
if self.parami("loop_wait_until_repl",
PerfDefaults.loop_wait_until_repl):
self.wait_until_repl()
if self.parami("collect_stats", 1) and \
not self.parami("reb_no_fg", PerfDefaults.reb_no_fg):
self.end_stats(sc, ops, self.spec_reference + ".loop")
self.log.info(
'Finished access phase for worker: {0}:{1}.'\
.format(self.params("why", "main"), self.parami("prefix", 0))
)
return ops, start_time, end_time
Example 10: tear_down_cluster
def tear_down_cluster(self):
print "[perf.tearDown] Tearing down cluster"
ClusterOperationHelper.cleanup_cluster(self.input.servers)
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.input.servers,
self)
print "[perf.tearDown] Cluster teared down"