本文整理汇总了Python中membase.api.rest_client.RestConnection.get_index_id_map方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.get_index_id_map方法的具体用法?Python RestConnection.get_index_id_map怎么用?Python RestConnection.get_index_id_map使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection
的用法示例。
在下文中一共展示了RestConnection.get_index_id_map方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_increase_max_item_limits
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_index_id_map [as 别名]
def test_increase_max_item_limits(self):
    """Repeatedly raise the indexer's max-item-size limit and verify that
    a stale=false full table scan of each index still matches the
    expected results after every bump.

    Fix: documents uploaded after raising the limit are now sized at the
    new limit (``item_size_limit``) rather than the original
    ``self.max_item_size`` — otherwise the raised limit was never
    actually exercised.
    """
    self.set_allow_large_keys(self.allow_large_keys)
    self.change_max_item_size(self.max_item_size)
    self.change_max_array_size(self.max_array_size)
    self._upload_documents(num_items=self.num_docs,
                           item_size=self.max_item_size,
                           array_size=self.max_array_size)
    query_definitions = self._create_indexes()
    self.sleep(30)
    rest = RestConnection(self.master)
    index_map = rest.get_index_id_map()
    item_size_limit = self.max_item_size
    for i in range(5):
        # Bump the server-side limit, then upload docs at the new size.
        item_size_limit += self.max_item_size
        self.change_max_item_size(item_size_limit)
        self._upload_documents(
            num_items=self.num_docs, item_size=item_size_limit,
            array_size=self.max_array_size, update_docs=True)
        self.sleep(10)
        for bucket in self.buckets:
            for query_definition in query_definitions:
                index_id = str(index_map[bucket.name][query_definition.index_name]["id"])
                actual_result = self.rest.full_table_scan_gsi_index_with_rest(
                    index_id, body={"stale": "false"})
                expected_result = self._get_expected_results_for_scan(query_definition)
                msg = "Results don't match for index {0}. Actual: {1}, Expected: {2}"
                self.assertEqual(sorted(actual_result), sorted(expected_result),
                                 msg.format(query_definition.index_name,
                                            actual_result, expected_result))
        if not self.repeat:
            break
示例2: test_max_limits_indexer_restart
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_index_id_map [as 别名]
def test_max_limits_indexer_restart(self):
    """Load documents at the configured size limits, hard-restart the
    indexer node, and verify every index scan still returns the
    expected results once the cluster is healthy again."""
    self.set_allow_large_keys(self.allow_large_keys)
    self.change_max_item_size(self.max_item_size)
    self.change_max_array_size(self.max_array_size)
    self._upload_documents(num_items=self.num_docs,
                           item_size=self.max_item_size,
                           array_size=self.max_array_size)
    query_definitions = self._create_indexes()
    self.sleep(30)
    index_map = RestConnection(self.master).get_index_id_map()
    # Bounce the indexer: stop, wait, start, wait for it to come back.
    shell = RemoteMachineShellConnection(self.indexer_node)
    shell.stop_server()
    self.sleep(30)
    shell = RemoteMachineShellConnection(self.indexer_node)
    shell.start_server()
    self.sleep(30)
    self.assertTrue(self.wait_until_cluster_is_healthy(),
                    "Cluster not in Healthy state")
    msg = "Results don't match for index {0}. Actual: {1}, Expected: {2}"
    for bucket in self.buckets:
        for defn in query_definitions:
            scan_id = str(index_map[bucket.name][defn.index_name]["id"])
            got = self.rest.full_table_scan_gsi_index_with_rest(
                scan_id, body={"stale": "false"})
            want = self._get_expected_results_for_scan(defn)
            self.assertEqual(sorted(got), sorted(want),
                             msg.format(defn.index_name, got, want))
示例3: test_various_docid_keysize_combinations
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_index_id_map [as 别名]
def test_various_docid_keysize_combinations(self):
    """Exercise indexes with documents whose doc-ids and field sizes vary
    randomly up to the configured limits (including a doc-id at the
    250-char boundary), then verify stale=false scans match expectations.

    Fix: the original called ``random.randint(max_item_length)`` and
    ``random.randint(max_array_element_size)`` — ``randint`` requires
    both endpoints, so those calls raised TypeError. They are now
    ``random.randint(1, n)``. The duplicated generator-building code is
    folded into a local helper.
    """
    self.set_allow_large_keys(self.allow_large_keys)
    self.change_max_item_size(self.max_item_size)
    self.change_max_array_size(self.max_array_size)
    query_definitions = self._create_indexes()
    self.sleep(30)
    rest = RestConnection(self.master)
    index_map = rest.get_index_id_map()
    for bucket in self.buckets:
        self.rest.flush_bucket(bucket)
    template = '{{"name":"{0}", "age":{1}, "encoded_array": {2}, "encoded_big_value_array": {3}}}'
    # Field sizes are expressed in "units" elsewhere; *4 converts to chars.
    max_item_length = self.max_item_size * 4
    max_array_element_size = (self.max_array_size * 4) // 10

    def random_string(length):
        # Random lowercase string of exactly `length` characters.
        return "".join(random.choice(lowercase) for k in range(length))

    def make_generator(index_id):
        # One single-doc generator with randomly sized name/array fields.
        name = random_string(random.randint(1, max_item_length))
        age = random.choice(range(4, 59))
        big_value_array = [name]
        encoded_array = [random_string(random.randint(1, max_array_element_size))
                         for j in range(30)]
        return DocumentGenerator(index_id, template, [name], [age],
                                 [encoded_array], [big_value_array],
                                 start=0, end=1)

    generators = []
    for i in range(self.num_docs):
        generators.append(make_generator(random_string(random.randint(1, 255))))
    # One extra doc whose key sits at the 250-char doc-id boundary.
    generators.append(make_generator(random_string(250)))
    self.load(generators, buckets=self.buckets, flag=self.item_flag,
              verify_data=False, batch_size=self.batch_size)
    self.full_docs_list = self.generate_full_docs_list(generators)
    for bucket in self.buckets:
        for query_definition in query_definitions:
            index_id = str(index_map[bucket.name][query_definition.index_name]["id"])
            actual_result = self.rest.full_table_scan_gsi_index_with_rest(
                index_id, body={"stale": "false"})
            expected_result = self._get_expected_results_for_scan(
                query_definition)
            msg = "Results don't match for index {0}. Actual: {1}, Expected: {2}"
            self.assertEqual(sorted(actual_result), sorted(expected_result),
                             msg.format(query_definition.index_name,
                                        actual_result, expected_result))
示例4: test_change_doc_size
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_index_id_map [as 别名]
def test_change_doc_size(self):
    """For each iteration, rewrite the docs first with small (10-15) and
    then with large (1000-5000) item/array sizes, verifying stale=false
    scans against the expected results after each rewrite."""
    self.iterations = self.input.param("num_iterations", 1)
    buckets = self._create_plasma_buckets()
    if self.plasma_dgm:
        self.get_dgm_for_plasma(indexer_nodes=[self.dgmServer])
    query_definitions = self._create_indexes(buckets)
    self.sleep(20)
    array_size = random.choice(range(10, 15))
    item_size = random.choice(range(10, 15))
    self.upload_documents(num_items=1000, item_size=item_size,
                          array_size=array_size, buckets=buckets)
    index_map = RestConnection(self.master).get_index_id_map()

    def verify_scans():
        # Compare a stale=false full scan with expectations, per index.
        for bucket in buckets:
            for defn in query_definitions:
                scan_id = str(index_map[bucket.name][defn.index_name]["id"])
                got = self.rest.full_table_scan_gsi_index_with_rest(
                    scan_id, body={"stale": "false"})
                want = self._get_expected_results_for_scan(defn)
                msg = "Results don't match for index {0}. Actual: {1}, Expected: {2}"
                self.assertEqual(sorted(got), sorted(want),
                                 msg.format(defn.index_name, got, want))

    for iteration in range(self.iterations):
        log.info("Iteration: {0}".format(iteration))
        array_size = random.choice(range(10, 15))
        item_size = random.choice(range(10, 15))
        self.upload_documents(num_items=1000, item_size=item_size,
                              array_size=array_size, buckets=buckets,
                              update_docs=True)
        verify_scans()
        self.sleep(20)
        array_size = random.choice(range(1000, 5000))
        item_size = random.choice(range(1000, 5000))
        self.upload_documents(num_items=1000, item_size=item_size,
                              array_size=array_size, buckets=buckets,
                              update_docs=True)
        verify_scans()
示例5: test_random_increase_decrease_size_limit
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_index_id_map [as 别名]
def test_random_increase_decrease_size_limit(self):
    """Randomly grow or shrink the max item-size and array-size limits a
    few times, re-uploading documents each round, and verify stale=false
    scans still match the expected results.

    Fixes two defects in the original:
    * the "decrease" branches also *increased* the limits (both arms
      used ``+=``);
    * the array-size limit was applied via ``change_max_item_size``
      instead of ``change_max_array_size``.
    """
    self.set_allow_large_keys(self.allow_large_keys)
    self.change_max_item_size(self.max_item_size)
    self.change_max_array_size(self.max_array_size)
    self._upload_documents(num_items=self.num_docs,
                           item_size=self.max_item_size,
                           array_size=self.max_array_size)
    query_definitions = self._create_indexes()
    self.sleep(30)
    rest = RestConnection(self.master)
    index_map = rest.get_index_id_map()
    choice_list = ["increase", "decrease"]
    for i in range(random.randint(5, 10)):
        # NOTE(review): repeated decreases could drive a limit negative;
        # confirm whether the limits should be clamped to a minimum.
        delta = random.randint(1000, 3000)
        if random.choice(choice_list) == "increase":
            self.max_item_size += delta
        else:
            self.max_item_size -= delta
        self.change_max_item_size(self.max_item_size)
        delta = random.randint(1000, 3000)
        if random.choice(choice_list) == "increase":
            self.max_array_size += delta
        else:
            self.max_array_size -= delta
        self.change_max_array_size(self.max_array_size)
        self._upload_documents(
            num_items=self.num_docs, item_size=self.max_item_size,
            array_size=self.max_array_size, update_docs=True)
        for bucket in self.buckets:
            for query_definition in query_definitions:
                index_id = str(index_map[bucket.name][query_definition.index_name]["id"])
                actual_result = self.rest.full_table_scan_gsi_index_with_rest(
                    index_id, body={"stale": "false"})
                expected_result = self._get_expected_results_for_scan(
                    query_definition)
                msg = "Results don't match for index {0}. Actual: {1}, Expected: {2}"
                self.assertEqual(sorted(actual_result), sorted(expected_result),
                                 msg.format(query_definition.index_name,
                                            actual_result, expected_result))
示例6: SecondaryIndexArrayIndexTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_index_id_map [as 别名]
class SecondaryIndexArrayIndexTests(BaseSecondaryIndexingTests):
def setUp(self):
    """Open a REST connection to an index node and register one admin
    user per bucket before each test."""
    super(SecondaryIndexArrayIndexTests, self).setUp()
    self.doc_ops = self.input.param("doc_ops", True)
    self.index_field = self.input.param("index_field", "join_yr")
    self.restServer = self.get_nodes_from_services_map(service_type="index")
    self.rest = RestConnection(self.restServer)
    self.index_id_map = {}
    testuser = [{'id': bucket.name, 'name': bucket.name,
                 'password': 'password'} for bucket in self.buckets]
    rolelist = [{'id': bucket.name, 'name': bucket.name,
                 'roles': 'admin'} for bucket in self.buckets]
    self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
def tearDown(self):
    """Delegate all cleanup to the base secondary-indexing teardown."""
    super(SecondaryIndexArrayIndexTests, self).tearDown()
def test_create_query_drop_all_array_index(self):
    """Create every configured array index on every bucket, validate
    each with a verified full table scan, then drop them all."""
    self.multi_create_index_using_rest(buckets=self.buckets,
                                       query_definitions=self.query_definitions)
    for bucket in self.buckets:
        for defn in self.query_definitions:
            self.run_full_table_scan_using_rest(bucket, defn,
                                                verify_result=True)
    self.multi_drop_index_using_rest(buckets=self.buckets,
                                     query_definitions=self.query_definitions)
def test_simple_indexes_mutation(self):
    """Build one ALL-array index on `travel_history`, mutate that field
    through each supported datatype on half the docs, and check that a
    stale=false scan matches the expected results after every mutation."""
    query_definition = QueryDefinition(
        index_name="index_name_travel_history",
        index_fields=["ALL `travel_history`"],
        query_template="SELECT {0} FROM %s WHERE `travel_history` IS NOT NULL",
        groups=["array"], index_where_clause=" `travel_history` IS NOT NULL ")
    query_definitions = [query_definition]
    self.multi_create_index_using_rest(buckets=self.buckets,
                                       query_definitions=query_definitions)
    self.sleep(20)
    index_map = self.rest.get_index_id_map()
    # Only mutate the first half of the loaded documents.
    doc_list = self.full_docs_list[:len(self.full_docs_list) // 2]
    msg = "Results don't match for index {0}. Actual number: {1}, Expected number: {2}"
    for bucket in self.buckets:
        index_id = str(index_map[bucket.name][query_definition.index_name]["id"])
        for data in DATATYPES:
            self.change_index_field_type(bucket.name, "travel_history",
                                         doc_list, data, query_definition)
            actual_result = self.rest.full_table_scan_gsi_index_with_rest(
                index_id, body={"stale": "false"})
            expected_result = self._get_expected_results_for_full_table_scan(
                query_definition)
            self.assertEqual(sorted(actual_result), sorted(expected_result),
                             msg.format(query_definition.index_name,
                                        actual_result, expected_result))
    self.full_docs_list = self.generate_full_docs_list(self.gens_load)
    self.multi_drop_index_using_rest(buckets=self.buckets,
                                     query_definitions=query_definitions)
def test_composite_indexes_mutation(self):
    """For array datasets only: build composite indexes with the array
    key in leading, trailing, and middle position, mutate
    `travel_history` through each supported datatype on half the docs,
    and verify stale=false scans after every mutation.

    Fix: the original guard was ``if not self.dataset is "array": pass
    else: ...`` — an identity comparison against a string literal, whose
    result is implementation-dependent. Replaced with a value comparison
    and an early return.
    """
    if self.dataset != "array":
        return
    # (name, fields) for the three array-key positions under test.
    index_specs = [
        ("index_name_travel_history_leading",
         ["ALL `travel_history` END", "name", "age"]),
        ("index_name_travel_history_non_leading_end",
         ["name", "age", "ALL `travel_history` END"]),
        ("index_name_travel_history_non_leading_middle",
         ["name", "ALL `travel_history` END", "age"]),
    ]
    definitions_list = [
        QueryDefinition(
            index_name=index_name,
            index_fields=index_fields,
            query_template="SELECT {0} FROM %s WHERE `travel_history` IS NOT NULL",
            groups=["array"],
            index_where_clause=" `travel_history` IS NOT NULL ")
        for index_name, index_fields in index_specs]
    self.multi_create_index_using_rest(buckets=self.buckets,
                                       query_definitions=definitions_list)
    self.sleep(20)
    index_map = self.rest.get_index_id_map()
    for query_definition in definitions_list:
        for bucket in self.buckets:
            # Only mutate the first half of the loaded documents.
            doc_list = self.full_docs_list[:len(self.full_docs_list) // 2]
            index_id = str(index_map[bucket.name][query_definition.index_name]["id"])
            for data in DATATYPES:
                self.change_index_field_type(bucket.name, "travel_history",
                                             doc_list, data, query_definition)
                actual_result = self.rest.full_table_scan_gsi_index_with_rest(
                    index_id, body={"stale": "false"})
                expected_result = self._get_expected_results_for_full_table_scan(
                    query_definition)
                msg = "Results don't match for index {0}. Actual number: {1}, Expected number: {2}"
                self.assertEqual(sorted(actual_result), sorted(expected_result),
                                 msg.format(query_definition.index_name,
                                            actual_result, expected_result))
    self.full_docs_list = self.generate_full_docs_list(self.gens_load)
    self.multi_drop_index_using_rest(buckets=self.buckets,
                                     query_definitions=definitions_list)
#.........这里部分代码省略.........
示例7: str
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_index_id_map [as 别名]
self.change_max_item_size(self.max_item_size)
self.change_max_array_size(self.max_array_size)
try:
self._upload_documents(num_items=self.num_docs,
item_size=self.max_item_size,
array_size=self.max_array_size)
except Exception, ex:
msg_list = ["Too big", "Invalid"]
for msg in msg_list:
if msg in str(ex):
log.info("Document being loaded is {0}".format(msg))
else:
query_definitions = self._create_indexes()
self.sleep(10)
rest = RestConnection(self.master)
index_map = rest.get_index_id_map()
#full table scan
for bucket in self.buckets:
for query_definition in query_definitions:
index_id = str(index_map[bucket.name][query_definition.index_name]["id"])
actual_result = self.rest.full_table_scan_gsi_index_with_rest(
index_id, body={"stale": "false"})
expected_result = self._get_expected_results_for_scan(query_definition)
msg = "Results don't match for index {0}. Actual: {1}, Expected: {2}"
self.assertEqual(sorted(actual_result), sorted(expected_result),
msg.format(query_definition.index_name,
actual_result, expected_result))
def test_increase_max_item_limits(self):
self.set_allow_large_keys(self.allow_large_keys)
self.change_max_item_size(self.max_item_size)