This article collects typical usage examples of the Python couchbase.cluster.Cluster class. If you are wondering what the Cluster class is for or how to use it, the curated examples below should help.
The sections that follow show 15 code examples of the Cluster class, ordered by popularity.
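Before the examples, a minimal sketch of the SDK 2.x connection pattern most of these snippets build on may be useful; the host, credentials, and document below are placeholders rather than values taken from the examples:

from couchbase.cluster import Cluster, PasswordAuthenticator

# Connect, authenticate (RBAC-style, Couchbase Server 5.0+), and open a bucket.
cluster = Cluster('couchbase://127.0.0.1')
cluster.authenticate(PasswordAuthenticator('Administrator', 'password'))
bucket = cluster.open_bucket('default')

# Simple key-value round trip through the opened bucket.
bucket.upsert('demo_key', {'value': 1})
print(bucket.get('demo_key').value)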
Example 1: test_create_delete_similar_views
def test_create_delete_similar_views(self):
    ddoc_name_prefix = self.input.param("ddoc_name_prefix", "ddoc")
    view_name = self.input.param("view_name", "test_view")
    map_fn = 'function (doc) {if(doc.age !== undefined) { emit(doc.age, doc.name);}}'
    rest = RestConnection(self.servers[0])
    ddocs = [DesignDocument(ddoc_name_prefix + "1", [View(view_name, map_fn,
                                                          dev_view=False)],
                            options={"updateMinChanges": 0, "replicaUpdateMinChanges": 0}),
             DesignDocument(ddoc_name_prefix + "2", [View(view_name, map_fn,
                                                          dev_view=True)],
                            options={"updateMinChanges": 0, "replicaUpdateMinChanges": 0})]
    ViewBaseTests._load_docs(self, self.num_docs, "test_")
    for ddoc in ddocs:
        results = self.create_ddoc(rest, 'default', ddoc)
    # Create the task-manager Cluster before the try block so that
    # shutdown() in the finally clause always has a valid reference.
    cluster = Cluster()
    try:
        cluster.delete_view(self.servers[0], ddocs[1].name, ddocs[1].views[0])
    finally:
        cluster.shutdown()
    results_new = rest.query_view(ddocs[0].name, ddocs[0].views[0].name, 'default',
                                  {"stale": "ok", "full_set": "true"})
    self.assertEquals(results.get(u'rows', []), results_new.get(u'rows', []),
                      "Results returned previously %s don't match with current %s" % (
                          results.get(u'rows', []), results_new.get(u'rows', [])))
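Note that the Cluster() used in this example is testrunner's task-manager helper rather than the SDK class imported in later examples; it spawns worker threads, which is why the snippet shuts it down in a finally block (Example 12 below does the same).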
Example 2: create_xattr_data
def create_xattr_data(self, type="system"):
    cluster = Cluster('couchbase://' + str(self.master.ip))
    authenticator = PasswordAuthenticator(self.username, self.password)
    cluster.authenticate(authenticator)
    cb = cluster.open_bucket('default')
    docs = self.get_meta_ids()
    self.log.info("Docs: " + str(docs[0:5]))
    xattr_data = []
    self.log.info("Adding xattrs to data")
    val = 0
    for doc in docs:
        if type == "system":
            rv = cb.mutate_in(doc["id"], SD.upsert('_system1', val, xattr=True, create_parents=True))
            xattr_data.append({'_system1': val})
            rv = cb.mutate_in(doc["id"], SD.upsert('_system2', {'field1': val, 'field2': val*val}, xattr=True, create_parents=True))
            xattr_data.append({'_system2': {'field1': val, 'field2': val*val}})
            rv = cb.mutate_in(doc["id"], SD.upsert('_system3', {'field1': {'sub_field1a': val, 'sub_field1b': val*val}, 'field2': {'sub_field2a': 2*val, 'sub_field2b': 2*val*val}}, xattr=True, create_parents=True))
            xattr_data.append({'_system3': {'field1': {'sub_field1a': val, 'sub_field1b': val*val}, 'field2': {'sub_field2a': 2*val, 'sub_field2b': 2*val*val}}})
        if type == "user":
            rv = cb.mutate_in(doc["id"], SD.upsert('user1', val, xattr=True, create_parents=True))
            xattr_data.append({'user1': val})
            rv = cb.mutate_in(doc["id"], SD.upsert('user2', {'field1': val, 'field2': val*val}, xattr=True, create_parents=True))
            xattr_data.append({'user2': {'field1': val, 'field2': val*val}})
            rv = cb.mutate_in(doc["id"], SD.upsert('user3', {'field1': {'sub_field1a': val, 'sub_field1b': val*val}, 'field2': {'sub_field2a': 2*val, 'sub_field2b': 2*val*val}}, xattr=True, create_parents=True))
            xattr_data.append({'user3': {'field1': {'sub_field1a': val, 'sub_field1b': val*val}, 'field2': {'sub_field2a': 2*val, 'sub_field2b': 2*val*val}}})
        val = val + 1
    self.log.info("Completed adding " + type + " xattrs to " + str(val) + " docs")
    return xattr_data
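To verify what create_xattr_data wrote, a reader could fetch an extended attribute back through the same sub-document API. The sketch below is an assumption built from this example's names (cb and the '_system1' path); doc_id stands for one of the ids returned by get_meta_ids() and is not part of the original test:

import couchbase.subdocument as SD

# Look up one system xattr from a previously mutated document; xattr=True is
# required because extended attributes live outside the document body.
rv = cb.lookup_in(doc_id, SD.get('_system1', xattr=True))
print(rv[0])  # the value stored for '_system1'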
Example 3: _createConn
def _createConn(self):
    try:
        cluster = Cluster(self.connection_string, bucket_class=CouchbaseBucket)
        cluster.authenticate(PasswordAuthenticator(self.bucket, 'password'))
        self.cb = cluster.open_bucket(self.bucket)
    except BucketNotFoundError:
        raise
Example 4: test_PYCBC_488
def test_PYCBC_488(self):
    cluster = Cluster('couchbases://10.142.175.101?certpath=/Users/daschl/tmp/ks/chain.pem&keypath=/Users/daschl/tmp/ks/pkey.key')
    with self.assertRaises(MixedAuthError) as maerr:
        cluster.open_bucket("pixels",
                            password=self.cluster_info.bucket_password)
    exception = maerr.exception
    self.assertIsInstance(exception, MixedAuthError)
    self.assertRegex(exception.message, r'.*CertAuthenticator.*password.*')
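PYCBC-488 asserts that combining a certificate-style connection string with a bucket password raises MixedAuthError. For contrast, a hedged sketch of the non-mixed variant, assuming the certificate paths in the connection string are sufficient on their own, would simply omit the password:

# Same certificate-bearing connection string, but no password on open_bucket,
# so the two authentication styles are not mixed.
cluster = Cluster('couchbases://10.142.175.101?certpath=/Users/daschl/tmp/ks/chain.pem&keypath=/Users/daschl/tmp/ks/pkey.key')
bucket = cluster.open_bucket('pixels')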
Example 5: SwapRebalanceBase
class SwapRebalanceBase(unittest.TestCase):

    @staticmethod
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from a previous invalid run
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, previous test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info("============== SwapRebalanceBase setup was started for test #{0} {1}=============="\
                          .format(self.case_number, self._testMethodName))
            SwapRebalanceBase.reset(self)
            self.cluster_helper = Cluster()
            # Make sure the test is set up correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))
            self.log.info('picking server : {0} as the master'.format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info("============== SwapRebalanceBase setup was finished for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
            SwapRebalanceBase._log_start(self)
        except Exception, e:  # Python 2 syntax; on Python 3 this would be "except Exception as e"
            self.cluster_helper.shutdown()
            self.fail(e)
Example 6: test_PYCBC_489
def test_PYCBC_489(self):
    from couchbase.cluster import Cluster
    with self.assertRaises(MixedAuthError) as maerr:
        cluster = Cluster('couchbases://10.142.175.101?certpath=/Users/daschl/tmp/ks/chain.pem&keypath=/Users/daschl/tmp/ks/pkey.key')
        cb = cluster.open_bucket('pixels', password='foo')
        # The e-mail address below was mangled by the source page; the value from the
        # stock Couchbase "hello world" document is assumed here.
        cb.upsert('u:king_arthur', {'name': 'Arthur', 'email': 'kingarthur@couchbase.com',
                                    'interests': ['Holy Grail', 'African Swallows']})
    exception = maerr.exception
    self.assertIsInstance(exception, MixedAuthError)
    self.assertRegex(exception.message, r'.*CertAuthenticator-style.*password.*')
Example 7: setUp
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.buckets = []
    self.master = self.servers[0]
    self.cluster = Cluster()
    self.wait_timeout = self.input.param("wait_timeout", 60)
    # number of the case performed by testrunner (incremented each time)
    self.case_number = self.input.param("case_number", 0)
    self.default_bucket = self.input.param("default_bucket", True)
    if self.default_bucket:
        self.default_bucket_name = "default"
    self.standard_buckets = self.input.param("standard_buckets", 0)
    self.sasl_buckets = self.input.param("sasl_buckets", 0)
    self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
    self.num_servers = self.input.param("servers", len(self.servers))
    # initial number of nodes in the cluster
    self.nodes_init = self.input.param("nodes_init", 1)
    self.num_replicas = self.input.param("replicas", 1)
    self.num_items = self.input.param("items", 1000)
    self.dgm_run = self.input.param("dgm_run", False)
    # max number of items to verify in ValidateDataTask, None - verify all
    self.max_verify = self.input.param("max_verify", None)
    # we don't change consistent_view on the server by default
    self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
    self.log.info("============== basetestcase setup was started for test #{0} {1}=============="\
                  .format(self.case_number, self._testMethodName))
    # avoid cleanup if the previous test has already been torn down
    if not self.input.param("skip_cleanup", True) or self.case_number == 1:
        self.tearDown()
        self.cluster = Cluster()
    if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1:
        # rebalance all nodes into the cluster before each test
        self.cluster.rebalance(self.servers[:self.num_servers], self.servers[1:self.num_servers], [])
    elif self.nodes_init > 1:
        self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
    self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
    if self.dgm_run:
        self.quota = 256
    if self.total_buckets > 0:
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
    if self.default_bucket:
        self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
        self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                   num_replicas=self.num_replicas, bucket_size=self.bucket_size))
    self._create_sasl_buckets(self.master, self.sasl_buckets)
    self._create_standard_buckets(self.master, self.standard_buckets)
    self.log.info("============== basetestcase setup was finished for test #{0} {1} =============="\
                  .format(self.case_number, self._testMethodName))
    self._log_start(self)
Example 8: _create_cluster
def _create_cluster(self):
    connargs = self.make_connargs()
    connstr = ConnectionString.parse(str(connargs.pop('connection_string')))
    connstr.clear_option('username')
    bucket = connstr.bucket
    connstr.bucket = None
    password = connargs.get('password', '')
    # Can I open a new bucket via open_bucket?
    cluster = Cluster(connstr, bucket_class=self.factory)
    cluster.authenticate(ClassicAuthenticator(buckets={bucket: password},
                                              cluster_password=self.cluster_info.admin_password,
                                              cluster_username=self.cluster_info.admin_username))
    return cluster, bucket
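A hypothetical call site for this helper (the probe document and the surrounding names are illustrative, not taken from the test suite):

# Open the bucket through the cluster the helper authenticated, then use it.
cluster, bucket_name = self._create_cluster()
cb = cluster.open_bucket(bucket_name)
cb.upsert('probe_key', {'ok': True})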
Example 9: _create_cluster_clean
def _create_cluster_clean(self, authenticator):
    connargs = self.make_connargs()
    connstr = ConnectionString.parse(str(connargs.pop('connection_string')))
    connstr.clear_option('username')
    bucket = connstr.bucket
    connstr.bucket = None
    password = connargs.get('password', None)
    keys_to_skip = authenticator.get_credentials(bucket)['options'].keys()
    for entry in keys_to_skip:
        connstr.clear_option(entry)
    cluster = Cluster(connstr, bucket_class=self.factory)
    cluster.authenticate(ClassicAuthenticator(buckets={bucket: password}))
    return cluster, bucket
Example 10: setUp
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.cluster = Cluster()
    self.servers = self.input.servers
    self.buckets = {}
    self.default_bucket = self.input.param("default_bucket", True)
    self.standard_buckets = self.input.param("standard_buckets", 0)
    self.sasl_buckets = self.input.param("sasl_buckets", 0)
    self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
    self.num_servers = self.input.param("servers", len(self.servers))
    self.num_replicas = self.input.param("replicas", 1)
    self.num_items = self.input.param("items", 1000)
    self.dgm_run = self.input.param("dgm_run", False)
    if not self.input.param("skip_cleanup", False):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)
    self.quota = self._initialize_nodes(self.cluster, self.servers)
    if self.dgm_run:
        self.quota = 256
    self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
    if self.default_bucket:
        self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
        self.buckets['default'] = {1: KVStore()}
    self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
Example 11: create_connections
def create_connections(self):
    """
    Create bucket connections. 5 bucket connections are created per instance.
    :return: Nothing
    """
    for i in range(0, self.threads):
        if self.cb_version > '5':
            cluster = Cluster(self.spec)
            auth = PasswordAuthenticator(self.user, self.password)
            cluster.authenticate(auth)
            bucket = cluster.open_bucket(self.bucket_name, lockmode=LOCKMODE_WAIT)
            bucket.timeout = self.timeout
            self.connections.append(bucket)
        else:
            bucket = Bucket('{0}/{1}'.format(self.spec, self.bucket_name), lockmode=LOCKMODE_WAIT)
            bucket.timeout = self.timeout
            self.connections.append(bucket)
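Once create_connections has filled self.connections, a worker would typically pick one pooled bucket handle per operation; a minimal sketch, where the random choice and the document written are illustrative assumptions:

import random

# Grab one of the pooled bucket connections and perform a write with it.
conn = random.choice(self.connections)
conn.upsert('load_test_key', {'threads': self.threads})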
Example 12: wait_for_replication
def wait_for_replication(servers, cluster_helper=None, timeout=600):
    if cluster_helper is None:
        cluster = Cluster()
    else:
        cluster = cluster_helper
    tasks = []
    rest = RestConnection(servers[0])
    buckets = rest.get_buckets()
    for server in servers:
        for bucket in buckets:
            for server_repl in list(set(servers) - set([server])):
                tasks.append(
                    cluster.async_wait_for_stats(
                        [server],
                        bucket,
                        "tap",
                        # The stat-name prefix here was mangled by the source page's
                        # e-mail obfuscation; "replication_ns_1@" is assumed, matching
                        # the usual TAP replication queue stat name.
                        "eq_tapq:replication_ns_1@" + server_repl.ip + ":idle",
                        "==",
                        "true",
                    )
                )
                tasks.append(
                    cluster.async_wait_for_stats(
                        [server],
                        bucket,
                        "tap",
                        "eq_tapq:replication_ns_1@" + server_repl.ip + ":backfill_completed",
                        "==",
                        "true",
                    )
                )
    try:
        for task in tasks:
            task.result(timeout)
    finally:
        if cluster_helper is None:
            # stop all newly created task manager threads
            cluster.shutdown()
    return True
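A hypothetical call to this helper that reuses an existing task-manager Cluster, so the helper does not start and stop its own threads (the surrounding names are assumptions):

# Wait up to five minutes for the TAP replication queues on every node to
# drain for every bucket, reusing the test's existing Cluster helper.
wait_for_replication(self.servers, cluster_helper=self.cluster, timeout=300)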
Example 13: setUp
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.cluster = Cluster()
    self.servers = self.input.servers
    self.buckets = {}
    self.wait_timeout = self.input.param("wait_timeout", 60)
    # number of the case performed by testrunner (incremented each time)
    self.case_number = self.input.param("case_number", 0)
    self.default_bucket = self.input.param("default_bucket", True)
    if self.default_bucket:
        self.default_bucket_name = "default"
    self.standard_buckets = self.input.param("standard_buckets", 0)
    self.sasl_buckets = self.input.param("sasl_buckets", 0)
    self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
    self.num_servers = self.input.param("servers", len(self.servers))
    self.num_replicas = self.input.param("replicas", 1)
    self.num_items = self.input.param("items", 1000)
    self.dgm_run = self.input.param("dgm_run", False)
    self.log.info("============== basetestcase setup was started for test #{0} {1}=============="\
                  .format(self.case_number, self._testMethodName))
    # avoid cleanup if the previous test has already been torn down
    if not self.input.param("skip_cleanup", True) or self.case_number == 1:
        self.tearDown()
        self.cluster = Cluster()
    self.quota = self._initialize_nodes(self.cluster, self.servers)
    if self.dgm_run:
        self.quota = 256
    self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
    if self.default_bucket:
        self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
        self.buckets[self.default_bucket_name] = {1: KVStore()}
    self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
    self.log.info("============== basetestcase setup was finished for test #{0} {1} =============="\
                  .format(self.case_number, self._testMethodName))
    # TODO (Mike): Create Standard buckets
    self._log_start(self)
Example 14: _createConn
def _createConn(self):
    try:
        cluster = Cluster(self.connection_string, bucket_class=CouchbaseBucket)
        cluster.authenticate(PasswordAuthenticator(self.bucket, 'password'))
        self.cb = cluster.open_bucket(self.bucket)
    except BucketNotFoundError:
        raise
    except AuthError:
        # Try using the default user created by the tests, if any, in case there is
        # no user with the bucket name in the cluster.
        try:
            cluster = Cluster(self.connection_string, bucket_class=CouchbaseBucket)
            cluster.authenticate(PasswordAuthenticator("cbadminbucket", 'password'))
            self.cb = cluster.open_bucket(self.bucket)
        except AuthError:
            raise
Example 15: common_setup
def common_setup(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    serverInfo = self.servers[0]
    rest = RestConnection(serverInfo)
    self.case_number = self.input.param("case_number", 0)
    # Clear the state from a previous invalid run
    rest.stop_rebalance()
    self.load_started = False
    self.loaders = []
    self.log.info("============== SwapRebalanceBase setup was started for test #{0} {1}=============="\
                  .format(self.case_number, self._testMethodName))
    SwapRebalanceBase.reset(self)
    self.cluster_helper = Cluster()
    # Initialize test params
    self.replica = self.input.param("replica", 1)
    self.keys_count = self.input.param("keys-count", 100000)
    self.load_ratio = self.input.param("load-ratio", 1)
    self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
    self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
    self.num_buckets = self.input.param("num-buckets", 1)
    self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
    self.num_initial_servers = self.input.param("num-initial-servers", 3)
    self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
    self.skip_cleanup = self.input.param("skip-cleanup", False)
    self.do_access = self.input.param("do-access", True)
    # Make sure the test is set up correctly
    min_servers = int(self.num_initial_servers) + int(self.num_swap)
    msg = "minimum {0} nodes required for running swap rebalance"
    self.assertTrue(len(self.servers) >= min_servers,
                    msg=msg.format(min_servers))
    self.log.info('picking server : {0} as the master'.format(serverInfo))
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    self.log.info("============== SwapRebalanceBase setup was finished for test #{0} {1} =============="
                  .format(self.case_number, self._testMethodName))
    SwapRebalanceBase._log_start(self)