本文整理汇总了Python中membase.api.rest_client.RestConnection.query_view方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.query_view方法的具体用法?Python RestConnection.query_view怎么用?Python RestConnection.query_view使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection的用法示例。
在下文中一共展示了RestConnection.query_view方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: check
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import query_view [as 别名]
def check(self, task_manager):
    """Poll the view until it returns self.expected_rows rows.

    Reschedules itself through task_manager until the row count
    matches, the task times out, or a stale=false query comes back
    short (which is treated as a definitive failure).
    """
    rest = RestConnection(self.server)
    try:
        # query and verify expected num of rows returned
        content = \
            rest.query_view(self.design_doc_name, self.view_name, self.bucket, self.query)
        # BUG FIX: the format arguments were swapped — the returned row
        # count was logged as "expected" and the expected count as
        # "returned".
        self.log.info("(%d rows) expected, (%d rows) returned" %
                      (self.expected_rows, len(content['rows'])))
        if len(content['rows']) == self.expected_rows:
            self.log.info("expected number of rows: '{0}' was found for view query".format(self.expected_rows))
            self.state = FINISHED
            self.set_result(True)
        else:
            if "stale" in self.query:
                # stale=false means the index was fully updated before
                # responding, so a short result cannot be a race: fail.
                if self.query["stale"].lower() == "false":
                    self.state = FINISHED
                    self.set_result(False)
            # retry until expected results or task times out
            # NOTE(review): this reschedules even after the stale=false
            # branch above marked the task FINISHED — confirm the task
            # manager ignores finished tasks.
            task_manager.schedule(self, self.retry_time)
    except QueryViewException as e:
        # subsequent query failed! exit
        self.state = FINISHED
        self.set_exception(e)
    except Exception as e:
        # catch and set all unexpected exceptions
        self.state = FINISHED
        self.log.info("Unexpected Exception Caught")
        self.set_exception(e)
示例2: execute
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import query_view [as 别名]
def execute(self, task_manager):
    """Run the view query once and decide how this task proceeds.

    On success: either finish immediately with the raw query result
    (when no expected_rows was given) or move to the CHECKING state so
    that check() verifies the row count.  A QueryViewException triggers
    a retry; any other exception fails the task.
    """
    rest = RestConnection(self.server)
    try:
        # make sure the view can be queried at all
        content = rest.query_view(
            self.design_doc_name, self.view_name, self.bucket, self.query)
        if self.expected_rows is None:
            # no verification requested -> hand back the raw result
            self.state = FINISHED
            self.set_result(content)
        else:
            # row-count verification happens later in check()
            self.state = CHECKING
            task_manager.schedule(self)
    except QueryViewException:
        # initial query failed -> try again after the retry interval
        task_manager.schedule(self, self.retry_time)
    except Exception as err:
        # catch and report every unexpected failure
        self.state = FINISHED
        self.log.info("Unexpected Exception Caught")
        self.set_exception(err)
示例3: test_create_delete_similar_views
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import query_view [as 别名]
def test_create_delete_similar_views(self):
    """Create a production and a dev ddoc holding same-named views,
    delete the dev view, and verify the production view still answers
    with the same rows.
    """
    ddoc_name_prefix = self.input.param("ddoc_name_prefix", "ddoc")
    view_name = self.input.param("view_name", "test_view")
    map_fn = 'function (doc) {if(doc.age !== undefined) { emit(doc.age, doc.name);}}'
    rest = RestConnection(self.servers[0])
    ddocs = [DesignDocument(ddoc_name_prefix + "1", [View(view_name, map_fn,
                                                          dev_view=False)],
                            options={"updateMinChanges": 0, "replicaUpdateMinChanges": 0}),
             DesignDocument(ddoc_name_prefix + "2", [View(view_name, map_fn,
                                                          dev_view=True)],
                            options={"updateMinChanges": 0, "replicaUpdateMinChanges": 0})]
    ViewBaseTests._load_docs(self, self.num_docs, "test_")
    # NOTE(review): `results` keeps only the last iteration's value, i.e.
    # the dev ddoc's creation result — confirm that is the intent.
    for ddoc in ddocs:
        results = self.create_ddoc(rest, 'default', ddoc)
    # BUG FIX: construct the Cluster *before* the try block; if Cluster()
    # itself raised inside the try, the finally clause would hit a
    # NameError on the undefined `cluster` instead of the real error.
    cluster = Cluster()
    try:
        cluster.delete_view(self.servers[0], ddocs[1].name, ddocs[1].views[0])
    finally:
        cluster.shutdown()
    results_new = rest.query_view(ddocs[0].name, ddocs[0].views[0].name, 'default',
                                  {"stale": "ok", "full_set": "true"})
    self.assertEquals(results.get(u'rows', []), results_new.get(u'rows', []),
                      "Results returned previosly %s don't match with current %s" % (
                      results.get(u'rows', []), results_new.get(u'rows', [])))
示例4: test_view_startkey_endkey_validation
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import query_view [as 别名]
def test_view_startkey_endkey_validation(self):
    """Regression test for MB-6591

    This tests makes sure that the validation of startkey/endkey works
    with view, which uses Unicode collation. Return results
    when startkey is smaller than endkey. When endkey is smaller than
    the startkey and exception should be raised. With Unicode collation
    "foo" < "Foo", with raw collation "Foo" < "foo".
    """
    bucket = "default"
    master = self.servers[0]
    rest = RestConnection(master)
    ViewBaseTests._setup_cluster(self)
    ViewBaseTests._create_view_doc_name(self, "startkey-endkey-validation")
    view_name = "dev_test_view-startkey-endkey-validation"
    # Under Unicode collation "foo" sorts before "Foo", so this range is
    # valid and the query must return rows.
    startkey = '"foo"'
    endkey = '"Foo"'
    params = {"startkey": startkey, "endkey": endkey}
    results = rest.query_view(view_name, view_name, bucket, params)
    # BUG FIX: the assertTrue message is printed when the check FAILS,
    # i.e. when no rows came back — it used to state the opposite.
    self.assertTrue('rows' in results, "Results were not returned")
    # Flip startkey and endkey: the range is now invalid and the server
    # must reject the query.
    params = {"startkey": endkey, "endkey": startkey}
    self.assertRaises(Exception, rest.query_view, view_name, view_name,
                      bucket, params)
示例5: check
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import query_view [as 别名]
def check(self, task_manager):
    """Verify the view was really deleted: querying it must raise
    QueryViewException; a successful query means deletion failed.
    """
    rest = RestConnection(self.server)
    query = {"stale": "ok"}
    try:
        # if the view still exists this call succeeds -> deletion failed
        rest.query_view(self.design_doc_name, self.view.name, self.bucket, query)
        self.state = FINISHED
        self.set_result(False)
    except QueryViewException:
        self.log.info("view : {0} was successfully deleted in ddoc: {1}".format(self.view.name, self.design_doc_name))
        self.state = FINISHED
        self.set_result(True)
示例6: ViewMergingTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import query_view [as 别名]
#.........这里部分代码省略.........
"""Initialise clients for all servers there are vBuckets on
It returns a dict with 'ip:port' as key (this information is also
stored this way in every vBucket in the `master` property) and
the MemcachedClient as the value
"""
clients = {}
for vbucket in self.bucket.vbuckets:
if vbucket.master not in clients:
ip, port = vbucket.master.split(':')
clients[vbucket.master] = MemcachedClient(ip, int(port))
return clients
def populate_alternated(self, num_vbuckets, docs):
    """Every vBucket gets a doc first

    Populating the vBuckets alternated means that every vBucket gets
    a document first, before it receives the second one and so on.
    For example if we have 6 documents named doc-1 ... doc-6 and 3
    vBuckets the result will be:
    vbucket-1: doc-1, doc-4
    vbucket-2: doc-2, doc-5
    vbucket-3: doc-3, doc-6
    """
    # round-robin: document i goes to vBucket (i mod num_vbuckets)
    for i, doc in enumerate(docs):
        self.insert_into_vbucket(i % num_vbuckets, doc)
    # block until everything is persisted to disk on the master node
    RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
def populate_sequenced(self, num_vbuckets, docs):
    """vBuckets get filled up one by one

    Populating the vBuckets sequenced means that the vBucket gets
    a certain number of documents, before the next one gets some.
    For example if we have 6 documents named doc-1 ... doc-6 and 3
    vBuckets the result will be:
    vbucket-1: doc-1, doc-2
    vbucket-2: doc-3, doc-4
    vbucket-3: doc-5, doc-6
    """
    # Python 2 integer division; if len(docs) is not a multiple of
    # num_vbuckets the remainder documents are simply not inserted.
    docs_per_vbucket = len(docs) / num_vbuckets
    for vbucket in range(num_vbuckets):
        start = vbucket * docs_per_vbucket
        end = start + docs_per_vbucket
        for doc in docs[start:end]:
            self.insert_into_vbucket(vbucket, doc)
    # block until everything is persisted to disk on the master node
    RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
def insert_into_vbucket(self, vbucket_id, doc):
    """Insert a document into a certain vBucket

    The memcached clients must already been initialised in the
    self.clients property.
    """
    vbucket = self.bucket.vbuckets[vbucket_id]
    # use the client connected to the node that masters this vBucket
    client = self.clients[vbucket.master]
    # flags=0, expiry=0; body stored JSON-encoded, non-ascii chars dropped
    client.set(doc['json']['key'], 0, 0, json.dumps(doc['json']['body']).encode("ascii", "ignore"), vbucket_id)
@staticmethod
def make_docs(start, end):
"""Create documents
`key` will be used as a key and won't end up in the final
document body.
`body` will be used as the document body
"""
docs = []
for i in range(start, end):
doc = {
'key': str(i),
'body': { 'integer': i, 'string': str(i)}}
docs.append({"meta":{"id": str(i)}, "json": doc })
return docs
def merged_query(self, view_name, params=None, ddoc='test'):
    """Run a full_set view query, defaulting stale=false.

    BUG FIX: `params` used to default to a shared mutable dict which
    this method wrote into ('full_set', 'stale'), leaking query
    parameters across calls and mutating callers' dicts.  Default to
    None and work on a copy instead.
    """
    params = dict(params) if params else {}
    params['full_set'] = 'true'
    bucket = self.default_bucket_name
    if 'stale' not in params:
        params['stale'] = 'false'
    # dev views are addressed through the "dev_" design-doc prefix
    ddoc = ("", "dev_")[self.is_dev_view] + ddoc
    return self.rest.query_view(ddoc, view_name, bucket, params)
def verify_keys_are_sorted(self, results, desc=False):
    # pull the key of every returned row and assert global ordering
    current_keys = [row['key'] for row in results['rows']]
    self.assertTrue(ViewMergingTests._verify_list_is_sorted(current_keys, desc=desc), 'keys are not sorted')
    self.log.info('rows are sorted by key')
@staticmethod
def _verify_list_is_sorted(keys, key=lambda x: x, desc=False):
    """Return True if `keys` is sorted ascending (descending when
    desc=True) under the extraction function `key`.

    Compares adjacent pairs via zip instead of xrange indexing, which
    is clearer and also works on both Python 2 and Python 3.  Empty
    and single-element lists are trivially sorted.
    """
    pairs = zip(keys, keys[1:])
    if desc:
        return all(key(a) >= key(b) for a, b in pairs)
    return all(key(a) <= key(b) for a, b in pairs)
示例7: ViewMergingTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import query_view [as 别名]
#.........这里部分代码省略.........
def init_clients(self):
    """Initialise clients for all servers there are vBuckets on

    It returns a dict with 'ip:port' as key (this information is also
    stored this way in every vBucket in the `master` property) and
    the MemcachedClient as the value
    """
    clients = {}
    for vbucket in self.bucket.vbuckets:
        endpoint = vbucket.master
        if endpoint in clients:
            # already connected to this node
            continue
        host, port = endpoint.split(':')
        clients[endpoint] = MemcachedClient(host, int(port))
    return clients
def populate_alternated(self, num_vbuckets, docs):
    """Every vBucket gets a doc first

    Populating the vBuckets alternated means that every vBucket gets
    a document first, before it receives the second one and so on.
    For example if we have 6 documents named doc-1 ... doc-6 and 3
    vBuckets the result will be:
    vbucket-1: doc-1, doc-4
    vbucket-2: doc-2, doc-5
    vbucket-3: doc-3, doc-6
    """
    # round-robin: document i goes to vBucket (i mod num_vbuckets)
    for i, doc in enumerate(docs):
        self.insert_into_vbucket(i % num_vbuckets, doc)
    # block until everything is persisted to disk on the master node
    RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
def populate_sequenced(self, num_vbuckets, docs):
    """vBuckets get filled up one by one

    Populating the vBuckets sequenced means that the vBucket gets
    a certain number of documents, before the next one gets some.
    For example if we have 6 documents named doc-1 ... doc-6 and 3
    vBuckets the result will be:
    vbucket-1: doc-1, doc-2
    vbucket-2: doc-3, doc-4
    vbucket-3: doc-5, doc-6
    """
    # Python 2 integer division; if len(docs) is not a multiple of
    # num_vbuckets the remainder documents are simply not inserted.
    docs_per_vbucket = len(docs) / num_vbuckets
    for vbucket in range(num_vbuckets):
        start = vbucket * docs_per_vbucket
        end = start + docs_per_vbucket
        for doc in docs[start:end]:
            self.insert_into_vbucket(vbucket, doc)
    # block until everything is persisted to disk on the master node
    RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
def insert_into_vbucket(self, vbucket_id, doc):
    """Insert a document into a certain vBucket

    The memcached clients must already been initialised in the
    self.clients property.
    """
    vbucket = self.bucket.vbuckets[vbucket_id]
    # use the client connected to the node that masters this vBucket
    client = self.clients[vbucket.master]
    # flags=0, expiry=0; body stored JSON-encoded, non-ascii chars dropped
    client.set(doc['json']['key'], 0, 0, json.dumps(doc['json']['body']).encode("ascii", "ignore"), vbucket_id)
@staticmethod
def make_docs(start, end):
"""Create documents
`key` will be used as a key and won't end up in the final
document body.
`body` will be used as the document body
"""
docs = []
for i in range(start, end):
doc = {
'key': str(i),
'body': { 'integer': i, 'string': str(i)}}
docs.append({"meta":{"id": str(i)}, "json": doc })
return docs
def merged_query(self, view_name, params=None, ddoc='test'):
    """Run a view query, defaulting stale=false (this variant does not
    force full_set).

    BUG FIX: `params` used to default to a shared mutable dict which
    this method wrote into ('stale'), leaking query parameters across
    calls and mutating callers' dicts.  Default to None and work on a
    copy instead.
    """
    params = dict(params) if params else {}
    bucket = self.default_bucket_name
    if 'stale' not in params:
        params['stale'] = 'false'
    # dev views are addressed through the "dev_" design-doc prefix
    ddoc = ("", "dev_")[self.is_dev_view] + ddoc
    return self.rest.query_view(ddoc, view_name, bucket, params)
def verify_keys_are_sorted(self, results, desc=False):
    # pull the key of every returned row and assert global ordering
    current_keys = [row['key'] for row in results['rows']]
    self.assertTrue(ViewMergingTests._verify_list_is_sorted(current_keys, desc=desc), 'keys are not sorted')
    self.log.info('rows are sorted by key')
@staticmethod
def _verify_list_is_sorted(keys, key=lambda x: x, desc=False):
    """Return True if `keys` is sorted ascending (descending when
    desc=True) under the extraction function `key`.

    Compares adjacent pairs via zip instead of xrange indexing, which
    is clearer and also works on both Python 2 and Python 3.  Empty
    and single-element lists are trivially sorted.
    """
    pairs = zip(keys, keys[1:])
    if desc:
        return all(key(a) >= key(b) for a, b in pairs)
    return all(key(a) <= key(b) for a, b in pairs)
示例8: SpatialQueryErrorsTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import query_view [as 别名]
class SpatialQueryErrorsTests(BaseTestCase):
    """Checks that invalid spatial view queries fail with the expected
    server-side error name.
    """

    def setUp(self):
        try:
            if 'first_case' not in TestInputSingleton.input.test_params:
                # intermediate runs reuse the cluster and bucket
                TestInputSingleton.input.test_params['default_bucket'] = False
                TestInputSingleton.input.test_params['skip_cleanup'] = True
                TestInputSingleton.input.test_params['skip_buckets_handle'] = True
            self.default_bucket_name = 'default'
            super(SpatialQueryErrorsTests, self).setUp()
            if 'first_case' in TestInputSingleton.input.test_params:
                self.cluster.rebalance(self.servers[:], self.servers[1:], [])
            # We use only one bucket in this test suite
            self.rest = RestConnection(self.master)
            self.bucket = self.rest.get_bucket(Bucket(name=self.default_bucket_name))
            # num_docs must be a multiple of the number of vbuckets
            self.num_docs = self.input.param("num_docs", 2000)
            # `testname` is used for the design document name as well as
            # the spatial function name
            self.testname = 'query-errors'
            self.helper = SpatialHelper(self, "default")
            if 'first_case' in TestInputSingleton.input.test_params:
                self.create_ddoc()
                self.helper.insert_docs(self.num_docs, self.testname)
        except Exception as ex:
            self.input.test_params["stop-on-failure"] = True
            self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
            self.fail(ex)

    def tearDown(self):
        # clean up will only performed on the last run
        if 'last_case' in TestInputSingleton.input.test_params:
            TestInputSingleton.input.test_params['skip_cleanup'] = False
            TestInputSingleton.input.test_params['skip_buckets_handle'] = False
            super(SpatialQueryErrorsTests, self).tearDown()
        else:
            self.cluster.shutdown(force=True)
            self._log_finish(self)

    def test_query_errors(self):
        """Query with the configured parameters and expect the error
        named in the 'error' test parameter."""
        all_params = ['skip', 'limit', 'stale', 'bbox', 'start_range',
                      'end_range']
        query_params = {}
        for key in self.input.test_params:
            if key in all_params:
                query_params[key] = str(self.input.test_params[key])
        try:
            self.spatial_query(query_params)
        except QueryViewException as ex:
            # the server-side error name must match the expected one
            self.assertEquals(self.input.test_params['error'],
                              json.loads(ex.reason)['error'])
        else:
            self.fail("Query did not fail, but should have. "
                      "Query parameters were: {0}".format(query_params))

    def create_ddoc(self):
        # the view emits [age, height, [bloom, shed_leaves]] -> name
        view_fn = '''function (doc) {
            if (doc.age !== undefined || doc.height !== undefined ||
                doc.bloom !== undefined || doc.shed_leaves !== undefined) {
                emit([doc.age, doc.height, [doc.bloom, doc.shed_leaves]], doc.name);
            }}'''
        self.helper.create_index_fun(self.testname, view_fn)

    def spatial_query(self, params=None, ddoc='test'):
        """Run the spatial view query, defaulting stale=false.

        BUG FIX: `params` used to default to a shared mutable dict that
        this method wrote into ('stale'), leaking state across calls and
        mutating callers' dicts; default to None and copy instead.
        `ddoc` is kept for interface compatibility but unused — the
        design doc name is always self.testname.
        """
        params = dict(params) if params else {}
        bucket = self.default_bucket_name
        if 'stale' not in params:
            params['stale'] = 'false'
        return self.rest.query_view(self.testname, self.testname, bucket,
                                    params, type="spatial")
示例9: ViewMergingTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import query_view [as 别名]
#.........这里部分代码省略.........
def init_clients(self):
    """Initialise clients for all servers there are vBuckets on

    It returns a dict with 'ip:port' as key (this information is also
    stored this way in every vBucket in the `master` property) and
    the MemcachedClient as the value
    """
    clients = {}
    for vbucket in self.bucket.vbuckets:
        endpoint = vbucket.master
        if endpoint in clients:
            # already connected to this node
            continue
        host, port = endpoint.split(":")
        clients[endpoint] = MemcachedClient(host, int(port))
    return clients
def populate_alternated(self, num_vbuckets, docs):
    """Every vBucket gets a doc first

    Populating the vBuckets alternated means that every vBucket gets
    a document first, before it receives the second one and so on.
    For example if we have 6 documents named doc-1 ... doc-6 and 3
    vBuckets the result will be:
    vbucket-1: doc-1, doc-4
    vbucket-2: doc-2, doc-5
    vbucket-3: doc-3, doc-6
    """
    # round-robin: document i goes to vBucket (i mod num_vbuckets)
    for i, doc in enumerate(docs):
        self.insert_into_vbucket(i % num_vbuckets, doc)
    # block until everything is persisted to disk on the master node
    RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
def populate_sequenced(self, num_vbuckets, docs):
    """vBuckets get filled up one by one

    Populating the vBuckets sequenced means that the vBucket gets
    a certain number of documents, before the next one gets some.
    For example if we have 6 documents named doc-1 ... doc-6 and 3
    vBuckets the result will be:
    vbucket-1: doc-1, doc-2
    vbucket-2: doc-3, doc-4
    vbucket-3: doc-5, doc-6
    """
    # Python 2 integer division; if len(docs) is not a multiple of
    # num_vbuckets the remainder documents are simply not inserted.
    docs_per_vbucket = len(docs) / num_vbuckets
    for vbucket in range(num_vbuckets):
        start = vbucket * docs_per_vbucket
        end = start + docs_per_vbucket
        for doc in docs[start:end]:
            self.insert_into_vbucket(vbucket, doc)
    # block until everything is persisted to disk on the master node
    RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
def insert_into_vbucket(self, vbucket_id, doc):
    """Insert a document into a certain vBucket

    The memcached clients must already been initialised in the
    self.clients property.
    """
    vbucket = self.bucket.vbuckets[vbucket_id]
    # use the client connected to the node that masters this vBucket
    client = self.clients[vbucket.master]
    # flags=0, expiry=0; body stored JSON-encoded, non-ascii chars dropped
    client.set(doc["json"]["key"], 0, 0, json.dumps(doc["json"]["body"]).encode("ascii", "ignore"), vbucket_id)
@staticmethod
def make_docs(start, end):
"""Create documents
`key` will be used as a key and won't end up in the final
document body.
`body` will be used as the document body
"""
docs = []
for i in range(start, end):
doc = {"key": str(i), "body": {"integer": i, "string": str(i)}}
docs.append({"meta": {"id": str(i)}, "json": doc})
return docs
def merged_query(self, view_name, params=None, ddoc="test"):
    """Run a full_set view query, defaulting stale=false.

    BUG FIX: `params` used to default to a shared mutable dict which
    this method wrote into ('full_set', 'stale'), leaking query
    parameters across calls and mutating callers' dicts.  Default to
    None and work on a copy instead.
    """
    params = dict(params) if params else {}
    params["full_set"] = "true"
    bucket = self.default_bucket_name
    if "stale" not in params:
        params["stale"] = "false"
    # dev views are addressed through the "dev_" design-doc prefix
    ddoc = ("", "dev_")[self.is_dev_view] + ddoc
    return self.rest.query_view(ddoc, view_name, bucket, params)
def verify_keys_are_sorted(self, results, desc=False):
    # pull the key of every returned row and assert global ordering
    current_keys = [row["key"] for row in results["rows"]]
    self.assertTrue(ViewMergingTests._verify_list_is_sorted(current_keys, desc=desc), "keys are not sorted")
    self.log.info("rows are sorted by key")
@staticmethod
def _verify_list_is_sorted(keys, key=lambda x: x, desc=False):
    """Return True if `keys` is sorted ascending (descending when
    desc=True) under the extraction function `key`.

    Compares adjacent pairs via zip instead of xrange indexing, which
    is clearer and also works on both Python 2 and Python 3.  Empty
    and single-element lists are trivially sorted.
    """
    pairs = zip(keys, keys[1:])
    if desc:
        return all(key(a) >= key(b) for a, b in pairs)
    return all(key(a) <= key(b) for a, b in pairs)