当前位置: 首页>>代码示例>>Python>>正文


Python RemoteMachineShellConnection.wait_till_compaction_end方法代码示例

本文整理汇总了Python中remote.remote_util.RemoteMachineShellConnection.wait_till_compaction_end方法的典型用法代码示例。如果您正苦于以下问题:Python RemoteMachineShellConnection.wait_till_compaction_end方法的具体用法?Python RemoteMachineShellConnection.wait_till_compaction_end怎么用?Python RemoteMachineShellConnection.wait_till_compaction_end使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在remote.remote_util.RemoteMachineShellConnection的用法示例。


在下文中一共展示了RemoteMachineShellConnection.wait_till_compaction_end方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _cancel_bucket_compaction

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
    def _cancel_bucket_compaction(self, rest, bucket):
        """Cancel a running compaction on `bucket` and wait for compaction to stop.

        Sets self.is_crashed and logs the error if the cancel (or the wait)
        fails; never raises.

        :param rest: RestConnection to the cluster master.
        :param bucket: name of the bucket whose compaction is cancelled.
        """
        remote_client = RemoteMachineShellConnection(self.master)
        try:
            result = self.cluster.cancel_bucket_compaction(self.master, bucket)
            self.assertTrue(result)
            # NOTE(review): waits on self.default_bucket_name rather than the
            # `bucket` argument -- presumably only used for the default bucket;
            # confirm against callers.
            remote_client.wait_till_compaction_end(rest, self.default_bucket_name, self.wait_timeout)
        except Exception as ex:  # py2.6+/py3-compatible except form
            self.is_crashed.set()
            self.log.error("Compaction cannot be cancelled: %s" % str(ex))
        finally:
            # Fix: the shell connection was leaked on every call; also removed
            # the dead local `compaction_running = False` assignment.
            remote_client.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:13,代码来源:autocompaction.py

示例2: test_auto_compaction_with_multiple_buckets

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
 def test_auto_compaction_with_multiple_buckets(self):
     """Verify auto-compaction triggers per-bucket thresholds on several buckets.

     Compaction is disabled on the "default" bucket and enabled (at
     self.autocompaction_value % DB fragmentation) on every other bucket.
     The test then loads data, keeps updating until each bucket reaches the
     fragmentation threshold, and asserts compaction runs to completion.
     """
     remote_client = RemoteMachineShellConnection(self.master)
     rest = RestConnection(self.master)
     for bucket in self.buckets:
         if bucket.name == "default":
             self.disable_compaction()
         else:
             self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, bucket=bucket.name)
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     # One shared deadline for reaching the fragmentation level on all buckets.
     end_time = time.time() + self.wait_timeout * 30
     for bucket in self.buckets:
         monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, self.autocompaction_value, bucket.name)
         while monitor_fragm.state != "FINISHED":
             if end_time < time.time():
                 # Fix: the original "%s" % self.wait_timeout * 30 repeated the
                 # formatted string 30 times ((%s result) * 30); parenthesize
                 # so the message reports the actual timeout in seconds.
                 self.fail("Fragmentation level is not reached in %s sec" % (self.wait_timeout * 30))
             try:
                 # Keep mutating data so fragmentation grows toward the threshold.
                 self._load_all_buckets(self.servers[0], self.gen_update, "update", 0)
             except Exception as ex:  # py2.6+/py3-compatible except form
                 self.log.error("Load cannot be performed: %s" % str(ex))
                 self.fail(ex)
         monitor_fragm.result()
         compact_run = remote_client.wait_till_compaction_end(rest, bucket.name,
                                                                  timeout_in_seconds=(self.wait_timeout * 5))
         if compact_run:
             self.log.info("auto compaction run successfully")
     # Fix: the shell connection was leaked.
     remote_client.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:27,代码来源:autocompaction.py

示例3: rebalance_in_with_DB_time_compaction

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
 def rebalance_in_with_DB_time_compaction(self):
     """Verify time-windowed auto DB compaction runs during a rebalance-in.

     Phase 1: the allowed compaction window is set to the future (now+1h to
     now+24h) so compaction must NOT start even after the fragmentation
     threshold is reached; this is polled 10 times.  Phase 2: the window is
     moved to cover "now", nodes are rebalanced in, and the test asserts
     compaction completes while the rebalance is running.
     """
     remote_client = RemoteMachineShellConnection(self.master)
     rest = RestConnection(self.master)
     currTime = datetime.datetime.now()
     # Window deliberately in the future: compaction may not run yet.
     fromTime = currTime + datetime.timedelta(hours=1)
     toTime = currTime + datetime.timedelta(hours=24)
     self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=fromTime.hour,
                              allowedTimePeriodFromMin=fromTime.minute, allowedTimePeriodToHour=toTime.hour, allowedTimePeriodToMin=toTime.minute,
                              allowedTimePeriodAbort="false")
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     self._monitor_DB_fragmentation()
     # Confirm repeatedly that no bucket_compaction task is active outside
     # the allowed window (wait_task=False checks without blocking).
     for i in xrange(10):
         active_tasks = self.cluster.async_monitor_active_task(self.master, "bucket_compaction", "bucket", wait_task=False)
         for active_task in active_tasks:
             result = active_task.result()
             self.assertTrue(result)
             self.sleep(2)
     currTime = datetime.datetime.now()
     # Now open a 5-minute window starting immediately so compaction can run.
     # TODO: make the window length configurable instead of hard-coded 5 min.
     newTime = currTime + datetime.timedelta(minutes=5)
     self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=currTime.hour,
                              allowedTimePeriodFromMin=currTime.minute, allowedTimePeriodToHour=newTime.hour, allowedTimePeriodToMin=newTime.minute,
                              allowedTimePeriodAbort="false")
     servs_in = self.servers[self.nodes_init:self.nodes_in + 1]
     rebalance = self.cluster.async_rebalance([self.master], servs_in, [])
     # Wait for compaction to finish while the rebalance is still in flight.
     compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
                                                                  timeout_in_seconds=(self.wait_timeout * 5))
     rebalance.result()
     if compact_run:
         self.log.info("auto compaction run successfully")
     else:
         self.fail("auto compaction does not run")
     remote_client.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:35,代码来源:autocompaction.py

示例4: test_large_file_version

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
    def test_large_file_version(self):
        """Compact a bucket whose data files carry a huge file-version suffix.

        Couchbase data files are renamed on disk from *.1/*.2 to *.65535
        (the server stopped while renaming, restarted afterwards), then
        compaction is driven five times in a row and must complete each time.
        """
        rest = RestConnection(self.master)
        shell = RemoteMachineShellConnection(self.master)
        shell.extract_remote_info()

        # Seed data and let fragmentation build with compaction turned off.
        self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
        self.disable_compaction()
        self._monitor_DB_fragmentation()

        # Bump the on-disk file-version suffix to the maximum while the
        # server is down, then bring it back up.
        shell.stop_couchbase()
        time.sleep(5)
        shell.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .1 .65535 *.1")
        shell.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .2 .65535 *.2")
        shell.start_couchbase()

        # Run several compaction rounds; the last round's outcome is checked.
        for attempt in range(5):
            self.log.info("starting a compaction iteration")
            compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
            compact_run = shell.wait_till_compaction_end(rest, self.default_bucket_name, timeout_in_seconds=self.wait_timeout)
            compaction_task.result(self.wait_timeout)

        if not compact_run:
            self.fail("auto compaction does not run")
        self.log.info("auto compaction run successfully")

        shell.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:33,代码来源:autocompaction.py

示例5: rebalance_in_out_with_auto_DB_compaction

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
 def rebalance_in_out_with_auto_DB_compaction(self):
     """Verify auto DB compaction completes during a simultaneous rebalance in/out.

     While a rebalance (nodes_in added, nodes_out removed) is running, data is
     mutated until the fragmentation threshold trips and compaction is awaited.
     Passes if compaction ran, or if fragmentation already dropped to 0
     (compaction finished before we observed it); fails otherwise.
     """
     remote_client = RemoteMachineShellConnection(self.master)
     rest = RestConnection(self.master)
     self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
                         "ERROR: Not enough nodes to do rebalance in and out")
     servs_init = self.servers[:self.nodes_init]
     servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
     servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
     result_nodes = set(servs_init + servs_in) - set(servs_out)
     self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value)
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     rebalance = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
     # Fix: compact_run was referenced after the loop but only assigned inside
     # it, raising NameError if the rebalance finished before the first
     # iteration.  Initialize it so the "already completed" branch is taken.
     compact_run = False
     while rebalance.state != "FINISHED":
         self._monitor_DB_fragmentation()
         compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
                                                              timeout_in_seconds=(self.wait_timeout * 5))
     rebalance.result()
     # Fragmentation back at 0 means compaction already ran to completion.
     monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, 0, self.default_bucket_name)
     result = monitor_fragm.result()
     if compact_run:
         self.log.info("auto compaction run successfully")
     elif result:
         self.log.info("Compaction is already completed")
     else:
         self.fail("auto compaction does not run")
     self.verify_cluster_stats(result_nodes)
     remote_client.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:29,代码来源:autocompaction.py

示例6: _cancel_bucket_compaction

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
 def _cancel_bucket_compaction(self, rest, bucket):
     """Poll active tasks until a bucket compaction appears, then cancel it.

     Keeps polling rest.active_tasks() until a "bucket_compaction" task is
     seen and successfully cancelled, or the deadline expires.  Sets
     self.is_crashed on timeout or cancel failure.

     :param rest: RestConnection to the cluster master.
     :param bucket: name of the bucket whose compaction is cancelled.
     """
     remote_client = RemoteMachineShellConnection(self.master)
     compaction_running = True
     timeout_secs = self.wait_timeout * 5
     end_time = time.time() + timeout_secs
     while compaction_running:
         if end_time < time.time():
             self.is_crashed.set()
             # Fix: the message formatted the absolute epoch timestamp
             # (end_time) as if it were a duration; report the timeout.
             self.fail("Compaction is not started in %s sec" % timeout_secs)
         tasks = rest.active_tasks()
         for task in tasks:
             if task["type"] == "bucket_compaction":
                 try:
                     result = self.cluster.cancel_bucket_compaction(self.master, bucket)
                     self.assertTrue(result)
                     # NOTE(review): waits on self.default_bucket_name rather
                     # than `bucket` -- presumably only used for the default
                     # bucket; confirm against callers.
                     remote_client.wait_till_compaction_end(rest, self.default_bucket_name, self.wait_timeout)
                     compaction_running = False
                 except Exception as ex:  # py2.6+/py3-compatible except form
                     self.is_crashed.set()
                     self.log.error("Compaction cannot be cancelled: %s" % str(ex))
     # Fix: disconnect was inside the while body, closing the shell after the
     # first pass and then reusing the closed connection on later iterations.
     remote_client.disconnect()
开发者ID:abhinavdangeti,项目名称:testrunner,代码行数:22,代码来源:autocompaction.py

示例7: test_database_fragmentation

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
    def test_database_fragmentation(self):
        """Verify auto-compaction fires at the configured DB fragmentation threshold.

        If the threshold is outside [MIN_COMPACTION_THRESHOLD,
        MAX_COMPACTION_THRESHOLD] the REST call must be rejected with an
        "Allowed range is 2 - 100" error.  Otherwise a bucket is created,
        filled, then rewritten with smaller values so that fragmentation
        climbs past the threshold, and compaction must run to completion.
        """
        percent_threshold = self.autocompaction_value
        bucket_name = "default"
        MAX_RUN = 100
        item_size = 1024
        # Smaller rewrite size chosen so the update pass leaves roughly
        # `percent_threshold` percent of each document as dead space.
        update_item_size = item_size * ((float(97 - percent_threshold)) / 100)
        serverInfo = self.servers[0]
        self.log.info(serverInfo)
        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)

        output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=100)

        # Negative case: out-of-range threshold must be rejected by the server.
        if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
            self.assertFalse(output, "it should be  impossible to set compaction value = {0}%".format(percent_threshold))
            import json
            # NOTE: py2-only dict.has_key(); py3 would use "errors" in ...
            self.assertTrue(json.loads(rq_content).has_key("errors"), "Error is not present in response")
            self.assertTrue(json.loads(rq_content)["errors"].find("Allowed range is 2 - 100") > -1, \
                            "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(json.loads(rq_content)["errors"]))

            self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))
        # Positive case: threshold accepted, compaction expected to trigger.
        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
                     and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()

            # Size the bucket to half the node quota; items sized to half-fill it.
            available_ram = info.memoryQuota * (node_ram_ratio) / 2
            items = (int(available_ram * 1000) / 2) / item_size
            rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                               saslPassword='password', replicaNumber=1, proxyPort=11211)
            BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)

            self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
            self.insert_key(serverInfo, bucket_name, items, item_size)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            # Rewriting every key with a smaller value fragments the vbucket files.
            self.log.info("start to update {0}K keys with smaller value {1} bytes/key".format(items,
                                                                             int(update_item_size)))
            self.insert_key(serverInfo, bucket_name, items, int(update_item_size))

            compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, timeout_in_seconds=180)
            if not compact_run:
                self.log.error("auto compaction does not run")
            elif compact_run:
                self.log.info("auto compaction runs successfully")
        else:
            self.log.error("Unknown error")

示例8: load_DB_fragmentation

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
 def load_DB_fragmentation(self):
     """Wait for DB fragmentation to reach the target, then for compaction to end.

     Blocks on the fragmentation monitor and then on compaction completion;
     sets self.thread_crashed and re-raises on any failure so the owning
     test thread can report it.
     """
     monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, self.fragmentation_value, self.default_bucket_name)
     rest = RestConnection(self.master)
     remote_client = RemoteMachineShellConnection(self.master)
     end_time = time.time() + self.wait_timeout * 10
     # NOTE(review): this deadline is evaluated exactly once, immediately
     # after it is computed, so it can never have expired here and does not
     # actually bound the wait -- a polling loop was presumably intended;
     # confirm before relying on the timeout.
     if end_time < time.time() and monitor_fragm.state != "FINISHED":
         self.fail("Fragmentation level is not reached in {0} sec".format(self.wait_timeout * 10))
     monitor_fragm.result()
     try:
         compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
                                                                  timeout_in_seconds=(self.wait_timeout * 5))
         self.assertTrue(compact_run, "Compaction didn't finished correctly. Please check diags")
     except Exception, ex:
         # Signal the coordinating test that this worker thread failed.
         self.thread_crashed.set()
         raise ex
开发者ID:EricACooper,项目名称:testrunner,代码行数:17,代码来源:compactionviewtests.py

示例9: test_start_stop_DB_compaction

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
 def test_start_stop_DB_compaction(self):
     """Start a DB compaction, cancel it, then restart it and verify it completes."""
     rest = RestConnection(self.master)
     remote_client = RemoteMachineShellConnection(self.master)
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     self.disable_compaction()
     # NOTE(review): spelled "_montior"; sibling tests call
     # _monitor_DB_fragmentation -- confirm which name this class defines.
     self._montior_DB_fragmentation()
     # First compaction: started, then cancelled mid-flight.
     compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
     self._cancel_bucket_compaction(rest, self.default_bucket_name)
     compaction_task.result(self.wait_timeout)
     # Second compaction: must run to completion.
     self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
     compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name, timeout_in_seconds=self.wait_timeout)
     # NOTE(review): result() is called again on the first (cancelled) task;
     # the second compaction's task handle is discarded -- confirm intent.
     compaction_task.result(self.wait_timeout)
     if compact_run:
         self.log.info("auto compaction run successfully")
     else:
         self.fail("auto compaction does not run")
     remote_client.disconnect()
开发者ID:abhinavdangeti,项目名称:testrunner,代码行数:19,代码来源:autocompaction.py

示例10: _database_fragmentation

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
    def _database_fragmentation(self, percent_threshold):
        """Drive DB fragmentation past `percent_threshold` and check compaction.

        Validates the threshold against [MIN_COMPACTION_THRESHOLD,
        MAX_COMPACTION_THRESHOLD] (logging, not failing, on rejection), then
        for an accepted in-range value creates a bucket, loads it, rewrites
        every key with a smaller value to build fragmentation, and waits for
        auto-compaction to finish.

        :param percent_threshold: DB fragmentation percentage that should
            trigger auto-compaction.
        """
        bucket_name = "default"
        MAX_RUN = 99
        item_size = 1024
        # Smaller rewrite size chosen so the update pass leaves roughly
        # `percent_threshold` percent of each document as dead space.
        update_item_size = item_size*((float(97 - percent_threshold))/100)
        serverInfo = self.servers[0]
        self.log.info(serverInfo)
        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)

        # Start from the server-default compaction settings.
        rest.reset_auto_compaction()
        parallelDBAndView = "false"
        output = rest.set_autoCompaction(parallelDBAndView, percent_threshold, 100)
        if not output and percent_threshold < MIN_COMPACTION_THRESHOLD:
            self.log.error("Need to set minimum threshold above {0}%".format(MIN_COMPACTION_THRESHOLD))
        elif not output and percent_threshold > MAX_COMPACTION_THRESHOLD:
            self.log.error("Need to set maximum threshold under {0}".format(MAX_COMPACTION_THRESHOLD))
        elif output and percent_threshold == MAX_COMPACTION_THRESHOLD:
            # 100% threshold is accepted but can never be reached.
            self.log.info("Auto compaction will not run at {0}% setting".format(MAX_COMPACTION_THRESHOLD))
        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
                     and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()

            # Size the bucket to half the node quota; items sized to half-fill it.
            available_ram = info.memoryQuota * (node_ram_ratio)/2
            items = (int(available_ram*1000)/2)/item_size
            rest.create_bucket(bucket= bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                               saslPassword='password', replicaNumber=1, proxyPort=11211)
            # NOTE(review): `ready` is assigned but never checked.
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)

            self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
            self.insert_key(serverInfo, bucket_name, items, item_size)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            # Rewriting every key with a smaller value fragments the vbucket files.
            self.log.info("start to update {0}K keys with smaller value {1} bytes/key".format(items,
                                                                             int(update_item_size)))
            self.insert_key(serverInfo, bucket_name, items, int(update_item_size))
            compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, timeout_in_seconds=180)
            if not compact_run:
                self.log.error("auto compaction does not run")
            elif compact_run:
                self.log.info("auto compaction runs successfully")
        else:
            self.log.error("Unknown error")

示例11: rebalance_in_with_auto_DB_compaction

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
 def rebalance_in_with_auto_DB_compaction(self):
     """Verify auto DB compaction completes while nodes are rebalanced in.

     Sets the DB fragmentation threshold, loads and fragments the default
     bucket, starts a rebalance-in, and waits for compaction to end.  Passes
     if compaction ran, or if fragmentation already dropped to 0 (compaction
     finished before we observed it); fails otherwise.
     """
     shell = RemoteMachineShellConnection(self.master)
     rest_conn = RestConnection(self.master)
     self.set_auto_compaction(rest_conn, dbFragmentThresholdPercentage=self.autocompaction_value)
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     self._monitor_DB_fragmentation()
     incoming = self.servers[1:self.nodes_in + 1]
     rebalance_task = self.cluster.async_rebalance([self.master], incoming, [])
     # Wait for compaction while the rebalance is still in flight.
     compaction_seen = shell.wait_till_compaction_end(
         rest_conn, self.default_bucket_name,
         timeout_in_seconds=(self.wait_timeout * 5))
     rebalance_task.result()
     # Fragmentation back at 0 means compaction already ran to completion.
     frag_monitor = self.cluster.async_monitor_db_fragmentation(
         self.master, 0, self.default_bucket_name)
     frag_cleared = frag_monitor.result()
     if compaction_seen:
         self.log.info("auto compaction run successfully")
     elif frag_cleared:
         self.log.info("Compaction is already completed")
     else:
         self.fail("auto compaction does not run")
     self.verify_cluster_stats(self.servers[:self.nodes_in + 1])
     shell.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:23,代码来源:autocompaction.py

示例12: test_database_fragmentation

# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import wait_till_compaction_end [as 别名]
    def test_database_fragmentation(self):
        """Verify auto-compaction at the configured DB fragmentation threshold.

        Out-of-range thresholds must be rejected by the REST API with an
        "Allowed range is 2 - 100" error.  For an accepted threshold, a bucket
        is created and loaded, every key is rewritten with a smaller value on
        a background thread to build fragmentation (optionally while the REST
        port or password is being changed, per self.during_ops), and
        compaction must run to completion.
        """


        self.log.info('start test_database_fragmentation')

        self.err = None
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        percent_threshold = self.autocompaction_value
        bucket_name = "default"
        MAX_RUN = 100
        item_size = 1024
        # Smaller rewrite size so the update pass leaves dead space in the files.
        update_item_size = item_size * ((float(100 - percent_threshold)) / 100)
        serverInfo = self.servers[0]
        self.log.info(serverInfo)

        rest = RestConnection(serverInfo)
        remote_client = RemoteMachineShellConnection(serverInfo)
        output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=None)

        # Negative case: out-of-range threshold must be rejected by the server.
        if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
            self.assertFalse(output, "it should be  impossible to set compaction value = {0}%".format(percent_threshold))
            import json
            # NOTE: py2-only dict.has_key(); py3 would use "errors" in ...
            self.assertTrue(json.loads(rq_content).has_key("errors"), "Error is not present in response")
            self.assertTrue(str(json.loads(rq_content)["errors"]).find("Allowed range is 2 - 100") > -1, \
                            "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(str(json.loads(rq_content)["errors"])))
            self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))

        # Positive case: threshold accepted, compaction expected to trigger.
        elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
                     and percent_threshold <= MAX_RUN):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
            info = rest.get_nodes_self()
            # Size the bucket to half the node quota; items sized to half-fill it.
            available_ram = info.memoryQuota * (node_ram_ratio) / 2
            items = (int(available_ram * 1000) / 2) / item_size
            print "ITEMS =============%s" % items

            rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                               saslPassword='password', replicaNumber=1, proxyPort=11211)
            BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)

            self.log.info("******start to load {0}K keys with {1} bytes/key".format(items, item_size))
            #self.insert_key(serverInfo, bucket_name, items, item_size)
            generator = BlobGenerator('compact', 'compact-', int(item_size), start=0, end=(items * 1000))
            self._load_all_buckets(self.master, generator, "create", 0, 1, batch_size=1000)
            self.log.info("sleep 10 seconds before the next run")
            time.sleep(10)

            # Rewriting every key with a smaller value fragments the vbucket files.
            self.log.info("********start to update {0}K keys with smaller value {1} bytes/key".format(items,
                                                                             int(update_item_size)))
            generator_update = BlobGenerator('compact', 'compact-', int(update_item_size), start=0, end=(items * 1000))
            # Optionally change the REST port or password mid-test and
            # reconnect, to exercise compaction across credential changes.
            if self.during_ops:
                if self.during_ops == "change_port":
                    self.change_port(new_port=self.input.param("new_port", "9090"))
                    self.master.port = self.input.param("new_port", "9090")
                elif self.during_ops == "change_password":
                    old_pass = self.master.rest_password
                    self.change_password(new_password=self.input.param("new_password", "new_pass"))
                    self.master.rest_password = self.input.param("new_password", "new_pass")
                rest = RestConnection(self.master)
            # Run the fragmenting update load on a background thread so the
            # main thread can wait on compaction concurrently.
            insert_thread = Thread(target=self.load,
                                   name="insert",
                                   args=(self.master, self.autocompaction_value,
                                         self.default_bucket_name, generator_update))
            try:
                self.log.info('starting the load thread')
                insert_thread.start()

                compact_run = remote_client.wait_till_compaction_end(rest, bucket_name,
                                                                     timeout_in_seconds=(self.wait_timeout * 10))

                if not compact_run:
                    self.fail("auto compaction does not run")
                elif compact_run:
                    self.log.info("auto compaction run successfully")
            except Exception, ex:
                self.log.info("exception in auto compaction")
                # Roll back the port/password change so teardown can connect.
                # NOTE(review): old_pass is only bound in the change_password
                # branch above -- confirm it cannot be referenced otherwise.
                if self.during_ops:
                     if self.during_ops == "change_password":
                         self.change_password(new_password=old_pass)
                     elif self.during_ops == "change_port":
                         self.change_port(new_port='8091',
                                          current_port=self.input.param("new_port", "9090"))
                if str(ex).find("enospc") != -1:
                    # Disk full: stop the loader and mark the run crashed,
                    # but don't fail the test on the load error itself.
                    self.is_crashed.set()
                    self.log.error("Disk is out of space, unable to load more data")
                    # NOTE(review): _Thread__stop is a CPython-private API,
                    # removed in py3 -- confirm acceptable for this py2 suite.
                    insert_thread._Thread__stop()
                else:
                    insert_thread._Thread__stop()
                    raise ex
            else:
                insert_thread.join()
                if self.err is not None:
                    self.fail(self.err)


注:本文中的remote.remote_util.RemoteMachineShellConnection.wait_till_compaction_end方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。