This article collects typical usage examples of the Python method lib.membase.api.rest_client.RestConnection.delete_all_function. If you have been wondering what RestConnection.delete_all_function does or how to use it, the curated method examples below may help. You can also read further about the class it belongs to, lib.membase.api.rest_client.RestConnection.
1 code example of the RestConnection.delete_all_function method is shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
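Before the full example, a minimal sketch of the typical call sequence may help. The server object here is an assumption (any testrunner server object, e.g. self.master in a test case), and the undeploy loop mirrors what Example 1 below does before deleting:

from lib.membase.api.rest_client import RestConnection

# "server" is assumed to be a testrunner server object (hypothetical here)
rest = RestConnection(server)
# undeploy every deployed eventing app first; Example 1 additionally waits
# for each undeployment to finish before deleting the function definitions
for app in rest.get_deployed_eventing_apps().keys():
    rest.undeploy_function(app)
rest.delete_all_function()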
Example 1: EventingBaseTest
# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import delete_all_function [as alias]
#......... some of the code is omitted here .........
"""
Push the bucket into DGM and return the number of items it took to push the bucket to DGM
"""
def push_to_dgm(self, bucket, dgm_percent):
doc_size = 1024
curr_active = self.bucket_stat('vb_active_perc_mem_resident', bucket)
total_items = self.bucket_stat('curr_items', bucket)
batch_items = 20000
# go into dgm
while curr_active > dgm_percent:
curr_items = self.bucket_stat('curr_items', bucket)
gen_create = BlobGenerator('dgmkv', 'dgmkv-', doc_size, start=curr_items + 1, end=curr_items + 20000)
total_items += batch_items
try:
self.cluster.load_gen_docs(self.master, bucket, gen_create, self.buckets[0].kvs[1],
'create', exp=0, flag=0, batch_size=1000)
except:
pass
curr_active = self.bucket_stat('vb_active_perc_mem_resident', bucket)
log.info("bucket {0} in DGM, resident_ratio : {1}%".format(bucket, curr_active))
total_items = self.bucket_stat('curr_items', bucket)
return total_items
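    # Usage sketch (hypothetical numbers): load the first bucket until its
    # active resident ratio drops below 80 percent:
    #   items_loaded = self.push_to_dgm(self.buckets[0], 80)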
    def bucket_stat(self, key, bucket):
        stats = StatsCommon.get_stats([self.master], bucket, "", key)
        # get_stats returns a node -> value mapping; take the first value
        # (list() keeps this working on both Python 2 and Python 3)
        val = list(stats.values())[0]
        if val.isdigit():
            val = int(val)
        return val
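    # Usage sketch: read a single named stat, e.g. the current item count:
    #   curr_items = self.bucket_stat('curr_items', self.buckets[0])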
    def bucket_compaction(self):
        for bucket in self.buckets:
            log.info("Compacting bucket : {0}".format(bucket.name))
            self.rest.compact_bucket(bucket=bucket.name)
    def kill_consumer(self, server):
        remote_client = RemoteMachineShellConnection(server)
        remote_client.kill_eventing_process(name="eventing-consumer")
        remote_client.disconnect()
    def kill_producer(self, server):
        remote_client = RemoteMachineShellConnection(server)
        remote_client.kill_eventing_process(name="eventing-producer")
        remote_client.disconnect()
    def kill_memcached_service(self, server):
        remote_client = RemoteMachineShellConnection(server)
        remote_client.kill_memcached()
        remote_client.disconnect()
    def kill_erlang_service(self, server):
        remote_client = RemoteMachineShellConnection(server)
        os_info = remote_client.extract_remote_info()
        log.info("os_info : {0}".format(os_info))
        if os_info.type.lower() == "windows":
            remote_client.kill_erlang(os="windows")
        else:
            remote_client.kill_erlang()
        remote_client.start_couchbase()
        remote_client.disconnect()
        # wait for restart and warmup of the node
        self.sleep(self.wait_timeout * 2)
        # wait till the node is ready after warmup
        ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
    def reboot_server(self, server):
        remote_client = RemoteMachineShellConnection(server)
        remote_client.reboot_node()
        remote_client.disconnect()
        # wait for restart and warmup of the node
        self.sleep(self.wait_timeout * 5)
        # disable the firewall on this node
        self.stop_firewall_on_node(server)
        # wait till the node is ready after warmup
        ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
    def undeploy_delete_all_functions(self):
        content = self.rest.get_deployed_eventing_apps()
        apps = content.keys()
        log.info("all deployed apps : {0}".format(apps))
        # undeploy every app first, wait for each undeployment to finish,
        # then delete all function definitions in one call
        for app in apps:
            self.rest.undeploy_function(app)
        for app in apps:
            self.wait_for_undeployment(app)
        self.rest.delete_all_function()
    def change_time_zone(self, server, timezone="UTC"):
        remote_client = RemoteMachineShellConnection(server)
        remote_client.execute_command("timedatectl set-timezone " + timezone)
        remote_client.disconnect()
    def cleanup_eventing(self):
        ev_node = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=False)
        ev_rest = RestConnection(ev_node)
        log.info("Running eventing cleanup api...")
        ev_rest.cleanup_eventing()
    def generate_docs_bigdata(self, docs_per_day, start=0, document_size=1024000):
        json_generator = JsonGenerator()
        return json_generator.generate_docs_bigdata(end=(2016 * docs_per_day), start=start,
                                                    value_size=document_size)
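To close the example out, here is a minimal, hypothetical sketch of how a test class derived from EventingBaseTest might combine these helpers during teardown. The helper names come from Example 1 above, while the test class itself is an assumption:

class EventingCleanupTest(EventingBaseTest):
    def tearDown(self):
        # hypothetical teardown: undeploy and delete all eventing functions,
        # then hit the eventing cleanup endpoint (both helpers shown above)
        self.undeploy_delete_all_functions()
        self.cleanup_eventing()
        super(EventingCleanupTest, self).tearDown()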