

Python RestConnection.flush_bucket Method Code Examples

This article collects typical usage examples of the Python method membase.api.rest_client.RestConnection.flush_bucket, gathered from open-source projects. If you are wondering what RestConnection.flush_bucket does or how to use it, the curated examples below should help. You can also explore further usage examples of membase.api.rest_client.RestConnection.


The following 13 code examples demonstrate the RestConnection.flush_bucket method, sorted by popularity by default.
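All of the examples share one basic pattern: open a RestConnection to the cluster's master node, call flush_bucket, and optionally poll get_bucket_status until the bucket reports "healthy" again. As a quick orientation, here is a minimal sketch of that pattern; the helper name flush_and_wait is hypothetical, and the server argument is assumed to be a testrunner server object carrying REST credentials, as in the test classes below.

import time

from membase.api.rest_client import RestConnection

def flush_and_wait(server, bucket_name, retries=10, delay=10):
    # Hypothetical helper distilling the flush-then-poll pattern used in
    # the examples below. Returns True once the bucket is healthy again.
    rest = RestConnection(server)
    rest.flush_bucket(bucket_name)  # delete all items in the bucket via the REST API
    for _ in range(retries):
        if rest.get_bucket_status(bucket_name) == "healthy":
            return True
        time.sleep(delay)  # the bucket is still recovering from the flush
    return False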

Example 1: test_couchbase_bucket_flush

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
 def test_couchbase_bucket_flush(self):
     pre_recovery_tasks = self.async_run_operations(phase="before")
     self._run_tasks([pre_recovery_tasks])
     self.get_dgm_for_plasma()
     kvOps_tasks = self._run_kvops_tasks()
     #Flush all buckets
     for bucket in self.buckets:
         log.info("Flushing bucket {0}...".format(bucket.name))
         rest = RestConnection(self.master)
         rest.flush_bucket(bucket.name)
         count = 0
         while rest.get_bucket_status(bucket.name) != "healthy" and \
                         count < 10:
             log.info("Bucket {0} Status is {1}. Sleeping...".format(
                 bucket.name, rest.get_bucket_status(bucket.name)))
             count += 1
             self.sleep(10)
         log.info("Bucket {0} is {1}".format(
             bucket.name, rest.get_bucket_status(bucket.name)))
     mid_recovery_tasks = self.async_run_operations(phase="in_between")
     self._run_tasks([kvOps_tasks, mid_recovery_tasks])
     #check if the nodes in cluster are healthy
     msg = "Cluster not in Healthy state"
     self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
     log.info("==== Cluster in healthy state ====")
     self.sleep(180)
     self._check_all_bucket_items_indexed()
     post_recovery_tasks = self.async_run_operations(phase="after")
     self.sleep(180)
     self._run_tasks([post_recovery_tasks])
Developer: arod1987, Project: testrunner, Lines: 32, Source: recovery_2i.py

Example 2: _load_buckets

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
 def _load_buckets(self):
     """
     1. Remove existing buckets
     2. Create 2 buckets and load documents
     3. Create full_doc_list for both buckets
     :return:
     """
     rest = RestConnection(self.master)
     json_generator = JsonGenerator()
     self.standard_gens_load = json_generator.generate_doc_for_aggregate_pushdown(docs_per_day=self.docs_per_day,
                                                                                  start=0)
     self.standard_full_docs_list = self.generate_full_docs_list(self.standard_gens_load)
     self.default_gens_load = json_generator.generate_doc_for_aggregate_pushdown(docs_per_day=self.docs_per_day,
                                                                                 start=0)
     self.default_full_docs_list = self.generate_full_docs_list(self.default_gens_load)
     for bucket in self.buckets:
         rest.flush_bucket(bucket.name)
         count = 0
         while rest.get_bucket_status(bucket.name) != "healthy" and count < 10:
             log.info("Bucket {0} Status is {1}. Sleeping...".format(bucket.name, rest.get_bucket_status(bucket.name)))
             count += 1
             self.sleep(15)
         if bucket.name.startswith("standard"):
             self.load(self.standard_gens_load, flag=self.item_flag, buckets=[bucket], verify_data=False)
         if bucket.name.startswith("default"):
             self.load(self.default_gens_load, flag=self.item_flag, buckets=[bucket], verify_data=False)
Developer: arod1987, Project: testrunner, Lines: 28, Source: n1ql_ansi_nest_unnest.py

Example 3: test_create_index_on_empty_bucket

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
 def test_create_index_on_empty_bucket(self):
     """
     Fix for MB-15329
     Create indexes on empty buckets
     :return:
     """
     rest = RestConnection(self.master)
     for bucket in self.buckets:
         log.info("Flushing bucket {0}...".format(bucket.name))
         rest.flush_bucket(bucket)
     self.sleep(30)
     self.multi_create_index(buckets=self.buckets,query_definitions=self.query_definitions)
     self._verify_bucket_count_with_index_count()
Developer: EricACooper, Project: testrunner, Lines: 15, Source: indexcreatedrop_2i.py

Example 4: test_bucketEvents

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
    def test_bucketEvents(self):
        ops = self.input.param("ops", None)
        user = self.master.rest_username
        source = 'ns_server'
        rest = RestConnection(self.master)

        if (ops in ['create']):
            expectedResults = {'bucket_name':'TestBucket', 'ram_quota':104857600, 'num_replicas':1,
                               'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \
                               'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \
                                "flush_enabled":False, "num_threads":3, "source":source, \
                               "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'', 'conflict_resolution_type':'seqno', \
                               'storage_mode':'couchstore','max_ttl':400,'compression_mode':'passive'}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \
                               '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly', maxTTL=expectedResults['max_ttl'])

        elif (ops in ['update']):
            expectedResults = {'bucket_name':'TestBucket', 'ram_quota':209715200, 'num_replicas':1, 'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \
                               'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", "flush_enabled":'true', "num_threads":3, "source":source, \
                               "user":user, "ip":self.ipAddress, "port":57457 , 'sessionid':'','storage_mode':'couchstore', 'max_ttl':400}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], '11211', 'membase', \
                               0, expectedResults['num_threads'], 0 , 'valueOnly', maxTTL=expectedResults['max_ttl'])
            expectedResults = {'bucket_name':'TestBucket', 'ram_quota':104857600, 'num_replicas':1, 'replica_index':True, 'eviction_policy':'value_only', 'type':'membase', \
                               'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", "flush_enabled":True, "num_threads":3, "source":source, \
                               "user":user, "ip":self.ipAddress, "port":57457,'storage_mode':'couchstore', 'max_ttl':200}
            rest.change_bucket_props(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \
                                     '11211', 1, 1, maxTTL=expectedResults['max_ttl'])

        elif (ops in ['delete']):
            expectedResults = {'bucket_name':'TestBucket', 'ram_quota':104857600, 'num_replicas':1, 'replica_index':True, 'eviction_policy':'value_only', 'type':'membase', \
                               'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", "flush_enabled":False, "num_threads":3, "source":source, \
                               "user":user, "ip":self.ipAddress, "port":57457}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \
                               '11211', 'membase', 1, expectedResults['num_threads'], 0 , 'valueOnly')
            rest.delete_bucket(expectedResults['bucket_name'])

        elif (ops in ['flush']):
            expectedResults = {'bucket_name':'TestBucket', 'ram_quota':100, 'num_replicas':1, 'replica_index':True, 'eviction_policy':'value_only', 'type':'membase', \
                               'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", "flush_enabled":True, "num_threads":3, "source":source, \
                               "user":user, "ip":self.ipAddress, "port":57457,'storage_mode':'couchstore'}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'], expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \
                               '11211', 'membase', 1, expectedResults['num_threads'], 1, 'valueOnly')
            self.sleep(10)
            rest.flush_bucket(expectedResults['bucket_name'])

        self.checkConfig(self.eventID, self.master, expectedResults)
Developer: mihirkamdarcouchbase, Project: testrunner, Lines: 48, Source: audittest.py
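One detail worth noting in the flush branch above: the bucket is created with the flush flag switched on (judging by the expectedResults dicts, the positional argument just before 'valueOnly' maps to flushEnabled, and it is 1 here versus 0 in the create branch). Couchbase rejects flush requests against a bucket whose flush setting is disabled, so the sleep-then-flush sequence only works because of that argument. A hedged sketch of the precondition, reusing the positional create_bucket call shape shown above:

rest = RestConnection(self.master)
# flushEnabled is assumed to be the 1 before 'valueOnly', per the dicts above
rest.create_bucket('TestBucket', 100, 'sasl', 'password', 1,
                   '11211', 'membase', 1, 3, 1, 'valueOnly')
self.sleep(10)                   # give the new bucket time to warm up
rest.flush_bucket('TestBucket')  # would fail if flush had been left disabled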

Example 5: test_couchbase_bucket_flush

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
 def test_couchbase_bucket_flush(self):
     self._run_initial_index_tasks()
     kvOps_tasks = self.kv_mutations()
     before_index_ops = self._run_before_index_tasks()
     self._run_tasks([before_index_ops])
     #Flush all buckets
     for bucket in self.buckets:
         log.info("Flushing bucket {0}...".format(bucket.name))
         rest = RestConnection(self.master)
         rest.flush_bucket(bucket.name)
         count = 0
         while rest.get_bucket_status(bucket.name) != "healthy" and count < 10:
             log.info("Bucket Status is {0}. Sleeping...".format(rest.get_bucket_status(bucket.name)))
             count += 1
             self.sleep(10)
         log.info("Bucket {0} is {0}".format(rest.get_bucket_status(bucket.name)))
     in_between_index_ops = self._run_in_between_tasks()
     self._run_tasks([kvOps_tasks, in_between_index_ops])
     self._run_after_index_tasks()
Developer: bharath-gp, Project: testrunner, Lines: 21, Source: recovery_2i.py

Example 6: test_oom_flush_bucket

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
 def test_oom_flush_bucket(self):
     """
     1. Get OOM
     2. Flush a bucket
     3. Verify if state of indexes is changed
     :return:
     """
     self.assertTrue(self._push_indexer_off_the_cliff(), "OOM Can't be achieved")
     rest = RestConnection(self.oomServer)
     for bucket in self.buckets:
         log.info("Flushing bucket {0}...".format(bucket.name))
         rest.flush_bucket(bucket)
         self.sleep(120)
         if not self._validate_indexer_status_oom():
             log.info("Indexer out of OOM...")
             break
     self.sleep(120)
     self.assertFalse(self._validate_indexer_status_oom(), "Indexer still in OOM")
     self._verify_bucket_count_with_index_count(self.load_query_definitions)
     self.multi_query_using_index(buckets=self.buckets,
                 query_definitions=self.load_query_definitions, verify_results=False)
Developer: chethanrao, Project: testrunner-archive, Lines: 23, Source: memdb_oom_2i.py

Example 7: test_couchbase_bucket_flush

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
 def test_couchbase_bucket_flush(self):
     before_tasks = self.async_run_operations(buckets=self.buckets,
                                              phase="before")
     self._run_tasks([before_tasks])
     in_between_tasks = self.async_run_operations(buckets=self.buckets,
                                              phase="in_between")
     for bucket in self.buckets:
         log.info("Flushing bucket {0}...".format(bucket.name))
         rest = RestConnection(self.master)
         rest.flush_bucket(bucket.name)
         count = 0
         while rest.get_bucket_status(bucket.name) != "healthy" and \
                         count < 10:
             log.info("Bucket Status is {0}. Sleeping...".format(
                 rest.get_bucket_status(bucket.name)))
             count += 1
             self.sleep(10)
         log.info("Bucket {0} is {0}".format(rest.get_bucket_status(
             bucket.name)))
     self._run_tasks([in_between_tasks])
     self._verify_bucket_count_with_index_count()
     after_tasks = self.async_run_operations(buckets=self.buckets,
                                         phase="after")
     self._run_tasks([after_tasks])
Developer: prasanna135, Project: testrunner, Lines: 26, Source: new_recovery_2i.py

Example 8: SecondaryIndexingClusterOpsTests

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):

    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type = "n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index=True, drop_index = False,
            query_with_explain = self.run_query_with_explain, query = self.run_query)
        #Delete the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        #Verify the result set is empty
        self.verify_index_absence(query_definitions = self.query_definitions, buckets = self.buckets)

    def test_change_bucket_properties(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = True, query = True)

        #Change Bucket Properties
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                      ramQuotaMB=None,
                      authType=None,
                      saslPassword=None,
                      replicaNumber=0,
                      proxyPort=None,
                      replicaIndex=None,
                      flushEnabled=False)

        #Run query and query explain
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = False, drop_index = True,
            query_with_explain = True, query = True)

    def test_flush_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=True, drop_index=False,
            query_with_explain=True, query=True)
        #Flush all buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        rollback_exception = True
        query_try_count = 0
        while rollback_exception and query_try_count < 10:
            self.sleep(5)
            query_try_count += 1
            #Query the buckets, expecting empty result sets
            try:
                self.multi_query_using_index_with_emptyresult(
                    query_definitions=self.query_definitions, buckets=self.buckets)
                rollback_exception = False
            except Exception, ex:
                msg = "Indexer rollback"
                if msg not in str(ex):
                    rollback_exception = False
                    self.log.info(ex)
                    raise
        self.assertFalse(rollback_exception, "Indexer still in rollback after 50 secs.")
Developer: membase, Project: testrunner, Lines: 74, Source: cluster_ops_2i.py
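The retry loop in test_flush_bucket_and_query exists because the GSI indexer can keep answering queries with an "Indexer rollback" error for a short window after a flush, while it discards its old snapshot. The same pattern, extracted into a standalone sketch (the run_query callable is hypothetical; any other exception is re-raised immediately, mirroring the test above):

import time

def query_until_rollback_clears(run_query, attempts=10, delay=5):
    # Hypothetical helper: retry a query while the indexer reports a
    # rollback, giving it up to attempts * delay seconds to catch up.
    last_error = None
    for _ in range(attempts):
        try:
            return run_query()
        except Exception as ex:
            if "Indexer rollback" not in str(ex):
                raise  # a genuine failure, not a transient rollback
            last_error = ex
            time.sleep(delay)  # indexer still rolling back; wait and retry
    raise last_error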

Example 9: SecondaryIndexArrayIndexTests

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]

#......... part of the code omitted here .........
                start = end
                end = end + len(self.full_docs_list)/len(DATATYPES)
                doc_list = self.full_docs_list[start:end]
                self.change_index_field_type(bucket.name,
                                             "travel_history",
                                             doc_list, data, query_definition)
        self.multi_create_index_using_rest(buckets=self.buckets,
                                           query_definitions=[query_definition])
        self.sleep(10)
        index_map = self.rest.get_index_id_map()
        for bucket in self.buckets:
            index_id = str(index_map[bucket.name][query_definition.index_name]["id"])
            actual_result = self.rest.full_table_scan_gsi_index_with_rest(
                        index_id, body={"stale": "false"})
            expected_result = self._get_expected_results_for_full_table_scan(
                        query_definition)
            msg = "Results don't match for index {0}. Actual number: {1}, Expected number: {2}"
            self.assertEqual(sorted(actual_result), sorted(expected_result),
                             msg.format(query_definition.index_name,
                                        actual_result, expected_result))
        self.multi_drop_index_using_rest(buckets=self.buckets,
                                         query_definitions=[query_definition])

    def test_lookup_array_index(self):
        secExpr = ["ALL DISTINCT countries"]
        log.info("Creating index index_name_1 on {0}...".format(self.buckets[0]))
        id = self._create_rest_array_index("index_name_1", self.buckets[0], secExpr)
        self.assertIsNotNone(id, "Array Index is not created.")
        log.info("Array Index index_name_1 on field {0} is created.".format(self.index_field))
        body = {"equal": "[\"Netherlands\"]"}
        content = self.rest.lookup_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Lookup not performed")

    def test_create_query_flush_bucket(self):
        self.multi_create_index_using_rest(buckets=self.buckets, query_definitions=self.query_definitions)
        log.info("Flushing bucket {0}...".format(self.buckets[0]))
        self.rest.flush_bucket(self.buckets[0])
        self.sleep(60)
        log.info("Performing Full Table Scan...")
        for query_definition in self.query_definitions:
            self.run_full_table_scan_using_rest(self.buckets[0], query_definition)

    def test_create_query_drop_bucket(self):
        self.multi_create_index_using_rest(buckets=self.buckets, query_definitions=self.query_definitions)
        log.info("Deleting bucket {0}...".format(self.buckets[0]))
        BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.restServer, bucket=self.buckets[0].name)
        log.info("Performing Full Table Scan...")
        buckets = self.buckets[1:]
        if buckets:
            for bucket in buckets:
                for query_definition in self.query_definitions:
                    self.run_full_table_scan_using_rest(bucket, query_definition)

    def test_array_item_limit(self):
        query_definition =  QueryDefinition(index_name="index_name_big_values",
                                                index_fields=["DISTINCT ARRAY t FOR t in bigValues END"],
                                                query_template="SELECT {0} FROM %s WHERE bigValues IS NOT NULL",
                                                groups=["array"], index_where_clause=" bigValues IS NOT NULL ")
        self.rest.flush_bucket(self.buckets[0])
        generators = []
        template = '{{"name":"{0}", "age":{1}, "bigValues":{2} }}'
        for i in range(10):
            name = FIRST_NAMES[random.choice(range(len(FIRST_NAMES)))]
            id = "{0}-{1}".format(name, str(i))
            age = random.choice(range(4, 19))
            bigValues = []
Developer: arod1987, Project: testrunner, Lines: 70, Source: array_index_2i.py

Example 10: SecondaryIndexingClusterOpsTests

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):

    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type = "n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = self.run_create_index, drop_index = False,
            query_with_explain = self.run_query_with_explain, query = self.run_query)
        #Delete the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        #Verify the result set is empty
        self.verify_index_absence(query_definitions = self.query_definitions, buckets = self.buckets)

    def test_change_bucket_properties(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = True, query = True)

        #Change Bucket Properties
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                      ramQuotaMB=None,
                      authType=None,
                      saslPassword=None,
                      replicaNumber=0,
                      proxyPort=None,
                      replicaIndex=None,
                      flushEnabled=False)

        #Run query and query explain
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = False, drop_index = True,
            query_with_explain = True, query = True)

    def test_flush_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = True, query = True)
        #Flush all buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        self.sleep(2)
        #Query the buckets, expecting empty result sets
        self.multi_query_using_index_with_empty_result(query_definitions = self.query_definitions,
             buckets = self.buckets)

    def test_delete_create_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = self.run_create_index, drop_index = False,
            query_with_explain = self.run_query_with_explain, query = self.run_query)
        #Delete the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        self.sleep(2)
        #Recreate the buckets
        self._bucket_creation()
        self.sleep(2)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        #Verify the result set is empty
        self.verify_index_absence(query_definitions = self.query_definitions, buckets = self.buckets)
        index_map = self.get_index_stats()
        self.assertTrue(len(index_map) == 0, "Index Stats still show {0}".format(index_map))

    def test_data_loss(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = False, query = False)
        self._verify_bucket_count_with_index_count()
        try:
            servr_out = self.servers[1:self.nodes_init]
            failover_task = self.cluster.async_failover([self.master],
                        failover_nodes = servr_out, graceful=False)
            failover_task.result()
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                                    [], servr_out)
            rebalance.result()
            # get the items in the index and check if the data loss is reflected correctly
            self.sleep(2)
        except Exception, ex:
            raise
        finally:
Developer: prasanna135, Project: testrunner, Lines: 101, Source: cluster_ops_2i.py

Example 11: SecondaryIndexArrayIndexTests

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]

#......... part of the code omitted here .........
            for bucket in self.buckets:
                for query_definition in definitions_list:
                    self.run_full_table_scan_using_rest(bucket, query_definition, verify_result=True)
            self.multi_drop_index_using_rest(buckets=self.buckets, query_definitions=definitions_list)
            self.full_docs_list = self.generate_full_docs_list(self.gens_load)

    def test_create_query_drop_index_on_mixed_datatypes(self):
        query_definition = QueryDefinition(index_name="index_name_travel_history",
                                           index_fields=["DISTINCT ARRAY t FOR t in `travel_history` END"],
                                           query_template="SELECT {0} FROM %s WHERE `travel_history` IS NOT NULL",
                                           groups=["array"], index_where_clause=" `travel_history` IS NOT NULL ")
        end = 0
        for bucket in self.buckets:
            for data in DATATYPES:
                start = end
                end = end + len(self.full_docs_list)/len(DATATYPES)
                doc_list = self.full_docs_list[start:end]
                self.change_index_field_type(bucket.name, "travel_history", doc_list, data)
        self.multi_create_index_using_rest(buckets=self.buckets, query_definitions=[query_definition])
        for bucket in self.buckets:
            self.run_full_table_scan_using_rest(bucket, query_definition, verify_result=True)
        self.multi_drop_index_using_rest(buckets=self.buckets, query_definitions=[query_definition])

    def test_lookup_array_index(self):
        secExpr = ["ALL DISTINCT {0}".format(self.index_field)]
        log.info("Creating index index_name_1 on {0}...".format(self.buckets[0]))
        id = self._create_rest_array_index("index_name_1", self.buckets[0], secExpr)
        self.assertIsNotNone(id, "Array Index is not created.")
        log.info("Array Index index_name_1 on field {0} is created.".format(self.index_field))
        body = {"equal": "[\"Netherlands\"]"}
        content = self.rest.lookup_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Lookup not performed")

    def test_create_query_flush_bucket(self):
        self.multi_create_index_using_rest(buckets=self.buckets, query_definitions=self.query_definitions)
        log.info("Flushing bucket {0}...".format(self.buckets[0]))
        self.rest.flush_bucket(self.buckets[0])
        log.info("Performing Full Table Scan...")
        for query_definition in self.query_definitions:
            self.run_full_table_scan_using_rest(self.buckets[0], query_definition)
        self.multi_drop_index_using_rest(buckets=self.buckets, query_definitions=self.query_definitions)

    def test_create_query_drop_bucket(self):
        self.multi_create_index_using_rest(buckets=self.buckets, query_definitions=self.query_definitions)
        log.info("Deleting bucket {0}...".format(self.buckets[0]))
        BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.restServer, bucket=self.buckets[0].name)
        log.info("Performing Full Table Scan...")
        for query_definition in self.query_definitions:
            self.run_full_table_scan_using_rest(self.buckets[0], query_definition)
        self.multi_drop_index_using_rest(buckets=self.buckets, query_definitions=self.query_definitions)

    def test_increase_array_size(self):
        query_definition =  QueryDefinition(index_name="index_name_big_values",
                                                index_fields=["DISTINCT ARRAY t FOR t in bigValues END"],
                                                query_template="SELECT {0} FROM %s WHERE bigValues IS NOT NULL",
                                                groups=["array"], index_where_clause=" bigValues IS NOT NULL ")
        self.rest.flush_bucket(self.buckets[0])
        sec_key_size = 1000
        self._change_array_size(sec_key_size)
        self.full_docs_list = []
        template = '{{"_id":"{0}" "name":"{1}", "age":{2}, "bigValues":{3} }}'
        for i in range(10):
            name = FIRST_NAMES[random.choice(range(len(FIRST_NAMES)))]
            id = "{0}-{1}".format(name, str(i))
            age = random.choice(range(4, 19))
            bigValues = []
Developer: prasanna135, Project: testrunner, Lines: 70, Source: array_index_2i.py

Example 12: SecondaryIndexArrayIndexTests

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]
class SecondaryIndexArrayIndexTests(BaseSecondaryIndexingTests):
    def setUp(self):
        super(SecondaryIndexArrayIndexTests, self).setUp()
        self.doc_ops = self.input.param("doc_ops", True)
        self.index_field = self.input.param("index_field", "countries")
        self.restServer = self.get_nodes_from_services_map(service_type="index")
        self.rest = RestConnection(self.restServer)

    def tearDown(self):
        super(SecondaryIndexArrayIndexTests, self).tearDown()

    def test_create_query_drop_array_index(self):
        secExpr = ["ALL DISTINCT {0}".format(self.index_field)]
        id = self._create_rest_array_index("index_name_1", self.buckets[0], secExpr)
        self.assertIsNotNone(id, "Array Index is not created.")
        log.info("Array Index index_name_1 on field {0} is created.".format(self.index_field))
        log.info("Performing Full Table Scan...")
        body = {'stale': 'ok'}
        content = self.rest.full_table_scan_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Table Scan not performed")
        log.info("Dropping 'index_name_1'...")
        self.rest.drop_index_with_rest(id)

    def test_create_query_drop_composite_index(self):
        secExpr = ["ALL DISTINCT {0}".format(self.index_field), "codes", "name"]
        for i in range(10):
            random.shuffle(secExpr)
            index_name = "index_name_{0}".format(i)
            log.info("Composite index {0} creating on {1}".format(index_name, secExpr))
            id = self._create_rest_array_index(index_name, self.buckets[0], secExpr)
            self.assertIsNotNone(id, "Array Index is not created.")
            log.info("Array Index {0} on field {1} is created.".format(index_name, secExpr))
            log.info("Performing Full Table Scan...")
            body = {'stale': 'ok'}
            content = self.rest.full_table_scan_gsi_index_with_rest(id, body)
            self.assertIsNotNone(content, "Table Scan not performed")
            log.info("Dropping '{0}'...".format(index_name))
            self.rest.drop_index_with_rest(id)

    def test_lookup_array_index(self):
        secExpr = ["ALL DISTINCT {0}".format(self.index_field)]
        log.info("Creating index index_name_1 on {0}...".format(self.buckets[0]))
        id = self._create_rest_array_index("index_name_1", self.buckets[0], secExpr)
        self.assertIsNotNone(id, "Array Index is not created.")
        log.info("Array Index index_name_1 on field Countries is created.")
        body = {"equal": "[\"Netherlands\"]"}
        content = self.rest.lookup_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Lookup not performed")

    def test_create_query_flush_bucket(self):
        secExpr = ["ALL DISTINCT {0}".format(self.index_field)]
        id = self._create_rest_array_index("index_name_1", self.buckets[0], secExpr)
        self.assertIsNotNone(id, "Array Index is not created.")
        log.info("Array Index index_name_1 on field Countries is created.")
        log.info("Performing Full Table Scan...")
        body = {'stale': 'ok'}
        content = self.rest.full_table_scan_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Table Scan not performed")
        log.info("Flushing bucket {0}...".format(self.buckets[0]))
        self.rest.flush_bucket(self.buckets[0])
        log.info("Performing Full Table Scan...")
        content = self.rest.full_table_scan_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Table Scan failed after flushing bucket {0}".format(self.buckets[0]))

    def test_create_query_drop_bucket(self):
        secExpr = ["ALL DISTINCT {0}".format(self.index_field)]
        id = self._create_rest_array_index("index_name_1", self.buckets[0], secExpr)
        self.assertIsNotNone(id, "Array Index is not created.")
        log.info("Array Index index_name_1 on field Countries is created.")
        log.info("Performing Full Table Scan...")
        body = {'stale': 'ok'}
        content = self.rest.full_table_scan_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Table Scan not performed")
        log.info("Deleting bucket {0}...".format(self.buckets[0]))
        BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.restServer, bucket=self.buckets[0].name)
        self.sleep(10)
        self.assertIsNone(self._check_index_status(id, "index_name_1"), "Index still exists after dropping the bucket.")

    def test_remove_array_field(self):
        secExpr = ["ALL DISTINCT {0}".format(self.index_field)]
        log.info("Creating index index_name_1 on {0}...".format(self.buckets[0]))
        id = self._create_rest_array_index("index_name_1", self.buckets[0], secExpr)
        self.assertIsNotNone(id, "Array Index is not created.")
        log.info("Array Index index_name_1 on field Countries is created.")
        body = {'stale': 'ok'}
        content = self.rest.full_table_scan_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Table Scan not performed")
        log.info("Removing {0} from all documents in bucket {1}...".format(self.index_field, self.buckets[0]))
        self._flush_field(self.buckets[0])
        self.sleep(10)
        log.info("Performing full table scan after removing array index field")
        content = self.rest.full_table_scan_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Table Scan failed after removing array index field")
        log.info("Dropping index_name_1...")
        self.rest.drop_index_with_rest(id)

    def test_change_array_field(self):
        secExpr = ["ALL DISTINCT {0}".format(self.index_field)]
        log.info("Creating index index_name_1 on {0}...".format(self.buckets[0]))
        id = self._create_rest_array_index("index_name_1", self.buckets[0], secExpr)
#......... part of the code omitted here .........
Developer: chethanrao, Project: testrunner-archive, Lines: 103, Source: array_index_2i.py
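Both array-index suites drive scans through the low-level GSI REST endpoints, and the scan-consistency knob lives in the request body: {'stale': 'ok'} (used throughout Example 12) permits a possibly stale scan, while {"stale": "false"} (Example 9) forces the index to catch up with KV before answering, which matters right after a flush. A sketch of the two call shapes, assuming the self.rest connection from the setUp above:

# Possibly stale scan: fast, but may miss very recent mutations
content = self.rest.full_table_scan_gsi_index_with_rest(id, {'stale': 'ok'})

# Consistent scan: waits for the index to catch up before answering
content = self.rest.full_table_scan_gsi_index_with_rest(id, {'stale': 'false'})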

Example 13: SecondaryIndexDatasizeTests

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import flush_bucket [as alias]

#......... part of the code omitted here .........
            temp = index_field.split("`")
            if len(temp) > 1:
                index_fields.append(temp[1])
            else:
                index_fields.append(temp[0])
        expected_result = []
        for doc in self.full_docs_list:
            doc_list = []
            list_param = False
            for field in index_fields:
                if isinstance(doc[field], list):
                    list_param = True
                    if not doc_list:
                        doc_list = [[arr_item] for arr_item in doc[field]]
                    else:
                        temp_doc_list = []
                        for item in doc_list:
                            for arr_item in doc[field]:
                                temp_list = copy.deepcopy(item)
                                temp_list.append(arr_item)
                                temp_doc_list.append(temp_list)
                        doc_list = temp_doc_list
                else:
                    if not doc_list:
                        doc_list.append([doc[field]])
                    else:
                        for item in doc_list:
                            item.append(doc[field])
            if not allow_large_keys:
                if list_param:
                    actual_array_size = self._get_size_of_array(doc_list)
                    if actual_array_size > array_size:
                        doc_list = []
                for doc_items in doc_list:
                    if self._get_size_of_array(doc_items) > item_size:
                        doc_list = []
                        break
            for doc_items in doc_list:
                entry = {"docid": doc["_id"], "key": doc_items}
                expected_result.append(entry)
        return expected_result

    def _create_indexes(self, buckets):
        query_definitions = []
        query_definitions.append(QueryDefinition(index_name="index_long_name",
                            index_fields=["name"]))
        query_definitions.append(QueryDefinition(index_name="index_array_encoded",
                            index_fields=["ALL ARRAY t FOR t in `encoded_array` END"]))
        query_definitions.append(QueryDefinition(index_name="index_array_encoded_bigValue",
                            index_fields=["ALL ARRAY t FOR t in `encoded_big_value_array` END"]))
        query_definitions.append(QueryDefinition(index_name="index_long_name_age",
                            index_fields=["name", "age"]))
        query_definitions.append(QueryDefinition(
            index_name="index_long_endoded_age",
            index_fields=["ALL ARRAY t FOR t in `encoded_array` END", "age"]))
        query_definitions.append(QueryDefinition(
            index_name="index_long_endoded_name",
            index_fields=["ALL ARRAY t FOR t in `encoded_array` END", "name"]))
        query_definitions.append(QueryDefinition(
            index_name="index_long_name_encoded_age",
            index_fields=["name", "ALL ARRAY t FOR t in `encoded_array` END", "age"]))
        self.multi_create_index(buckets=buckets, query_definitions=query_definitions)
        return query_definitions

    def upload_documents(self, num_items, item_size, array_size, buckets=None, update_docs=False, array_elements=3):
        if not buckets:
            buckets = self.buckets
        if not update_docs:
            for bucket in buckets:
                self.rest.flush_bucket(bucket)
            self.sleep(30)
        generators = []
        template = '{{"name":"{0}", "age":{1}, "encoded_array": {2}, "encoded_big_value_array": {3}}}'
        item_length = item_size * 4
        array_element_size = (array_size * 4)/array_elements
        if update_docs:
            num_items = len(self.full_docs_list)
        for i in range(num_items):
            if update_docs:
                index_id = str(self.full_docs_list[i]["_id"].split("-")[0])
            else:
                index_id = "unhandled_items_" + str(random.random()*100000)
            encoded_array = []
            name = "".join(random.choice(lowercase) for k in range(item_length))
            age = random.choice(range(4, 59))
            big_value_array = [name]
            for j in range(array_elements):
                element = "".join(random.choice(lowercase) for k in range(array_element_size))
                encoded_array.append(element)
            generators.append(DocumentGenerator(
                index_id, template, [name], [age], [encoded_array],
                [big_value_array], start=0, end=1))
        self.full_docs_list = self.generate_full_docs_list(generators)
        if not update_docs:
            self.load(generators, buckets=buckets, flag=self.item_flag,
                  verify_data=False, batch_size=self.batch_size)
        else:
            for bucket in buckets:
                for doc in self.full_docs_list:
                    self._update_document(bucket.name, doc["_id"], doc)
Developer: arod1987, Project: testrunner, Lines: 104, Source: plasma_data_size.py


Note: The membase.api.rest_client.RestConnection.flush_bucket method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please consult the corresponding project's license before using or redistributing this code, and do not reproduce this article without permission.