This article collects typical usage examples of the Python class membase.api.rest_client.RestConnection. If you have been wondering what the RestConnection class does in practice, or how to use it, the curated examples below should help.
The following presents 15 code examples of the RestConnection class, sorted by popularity by default.
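Before the individual examples, here is a minimal sketch (not taken from the examples themselves) of the pattern they all share: construct a RestConnection from a server descriptor, then query or mutate cluster state through it. The `server` object is an assumed testrunner-style descriptor; only methods that actually appear in the examples below are called.

    from membase.api.rest_client import RestConnection

    # `server` is an assumed testrunner-style server descriptor exposing
    # ip, port, rest_username and rest_password attributes.
    rest = RestConnection(server)
    nodes = rest.node_statuses()      # one status object (ip, port, ...) per node
    for node in nodes:
        print node.ip, node.port      # Python 2 print statement, as in the examples
    info = rest.get_nodes_self()      # details of the node we connected to
    print info.memoryQuota            # used by example 10 below to size a bucket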
Example 1: pick_node
def pick_node(master):
    rest = RestConnection(master)
    nodes = rest.node_statuses()
    node_picked = None
    nodes_on_same_ip = True
    firstIp = nodes[0].ip
    for node in nodes:
        if node.ip != firstIp:
            nodes_on_same_ip = False
            break
    for node in nodes:
        node_picked = node
        if not nodes_on_same_ip:
            if node_picked.ip != master.ip:
                log.info("Picked node ... {0}:{1}".format(node_picked.ip, node_picked.port))
                break
        else:
            # temp fix - when all nodes share the machine ip (localhost:9000-style
            # clusters), compare port numbers instead of ips
            if int(node_picked.port) == int(master.port):
                log.info(
                    "Not picking the master node {0}:{1}.. try again...".format(node_picked.ip, node_picked.port)
                )
            else:
                log.info("Picked node {0}:{1}".format(node_picked.ip, node_picked.port))
                break
    return node_picked
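A hypothetical call site for pick_node, sketching how a test might choose a non-master node as a failover or rebalance target (`master` and `log` are assumed to be set up as in the function above):

    # Hypothetical usage of pick_node(); `master` is the cluster's entry server.
    victim = pick_node(master)
    if victim is not None:
        log.info("acting on node {0}:{1}".format(victim.ip, victim.port))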
Example 2: _verify_zone
def _verify_zone(self, name):
    serverInfo = self.servers[0]
    rest = RestConnection(serverInfo)
    if rest.is_zone_exist(name.strip()):
        self.log.info("verified! zone '{0}' exists".format(name.strip()))
    else:
        raise Exception("There is no zone with name: %s in cluster" % name)
Example 3: test_change_mem_quota_when_index_building
def test_change_mem_quota_when_index_building(self):
    rest = RestConnection(self.oomServer)
    log.info("Setting indexer memory quota to 700 MB...")
    rest.set_indexer_memoryQuota(indexMemoryQuota=700)
    self.sleep(30)
    query_definitions = []
    for x in range(3):
        index_name = "index_" + str(x)
        query_definition = QueryDefinition(index_name=index_name, index_fields=["job_title"],
                                           query_template=self.query_template, groups=["simple"])
        query_definitions.append(query_definition)
    create_tasks = []
    build_tasks = []
    index_info = {}
    for bucket in self.buckets:
        if bucket not in index_info.keys():
            index_info[bucket] = []
        for query_definition in query_definitions:
            index_info[bucket].append(query_definition.index_name)
            task = self.async_create_index(bucket.name, query_definition)
            create_tasks.append(task)
    for task in create_tasks:
        task.result()
    if self.defer_build:
        log.info("Building Indexes...")
        for key, val in index_info.iteritems():
            task = self.async_build_index(bucket=key, index_list=val)
            build_tasks.append(task)
    self.sleep(10)
    log.info("Setting indexer memory quota to 500 MB...")
    rest.set_indexer_memoryQuota(indexMemoryQuota=500)
    self.sleep(30)
    for task in build_tasks:
        task.result()
Example 4: rebalance_in_out_at_once_persistence_stopped
def rebalance_in_out_at_once_persistence_stopped(self):
    num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
    servs_init = self.servers[:self.nodes_init]
    servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
    servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
    rest = RestConnection(self.master)
    self._wait_for_stats_all_buckets(servs_init)
    for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
        shell = RemoteMachineShellConnection(server)
        for bucket in self.buckets:
            shell.execute_cbepctl(bucket, "stop", "", "", "")
    self.sleep(5)
    self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
    gen_extra = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items / 2,
                              end=self.num_items / 2 + self.num_items_without_persistence)
    self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
    self.log.info("adding nodes {0} to cluster".format(servs_in))
    self.log.info("removing nodes {0} from cluster".format(servs_out))
    tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
    result_nodes = set(servs_init + servs_in) - set(servs_out)
    # wait timeout of 60 minutes because of MB-7386 (rebalance can get stuck)
    self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
    for task in tasks:
        task.result()
    self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out],
                                     ep_queue_size=self.num_items_without_persistence * 0.9, ep_queue_size_cond='>')
    self._wait_for_stats_all_buckets(servs_in)
    self._verify_all_buckets(self.master, timeout=None)
    self._verify_stats_all_buckets(result_nodes)
    # verify that curr_items_tot corresponds to the sum of curr_items from all nodes
    verified = True
    for bucket in self.buckets:
        verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
    self.assertTrue(verified, "Lost items!!! Replication completed but sum(curr_items) does not match curr_items_total")
Example 5: test_delete_empty_defautl_zone
def test_delete_empty_defautl_zone(self):
    zone_name = "test1"
    default_zone = "Group 1"
    moved_node = []
    serverInfo = self.servers[0]
    moved_node.append(serverInfo.ip)
    rest = RestConnection(serverInfo)
    try:
        self.log.info("create zone {0}".format(zone_name))
        rest.add_zone(zone_name)
        if rest.is_zone_exist(zone_name):
            self.log.info("Move node {0} from zone {1} to zone {2}"
                          .format(moved_node, default_zone, zone_name))
            status = rest.shuffle_nodes_in_zones(moved_node, default_zone, zone_name)
            if status:
                rest.delete_zone(default_zone)
            else:
                self.fail("Failed to move node {0} from zone {1} to zone {2}"
                          .format(moved_node, default_zone, zone_name))
            if not rest.is_zone_exist(default_zone):
                self.log.info("successfully deleted default zone")
            else:
                raise Exception("Failed to delete default zone")
            rest.rename_zone(zone_name, default_zone)
    except Exception, e:
        print e
Example 6: test_rotateInterval
def test_rotateInterval(self):
    intervalSec = self.input.param("intervalSec", None)
    auditIns = audit(host=self.master)
    rest = RestConnection(self.master)
    originalInt = auditIns.getAuditRotateInterval()
    try:
        firstEventTime = self.getTimeStampForFile(auditIns)
        self.log.info("first event time is {0}".format(firstEventTime))
        auditIns.setAuditRotateInterval(intervalSec)
        self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
        status, content = rest.validateLogin(self.master.rest_username, self.master.rest_password, True, getContent=True)
        self.sleep(120)
        shell = RemoteMachineShellConnection(self.master)
        try:
            hostname = shell.execute_command("hostname")
            archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
            self.log.info("Archive File Name is {0}".format(archiveFile))
            result = shell.file_exists(auditIns.pathLogFile, archiveFile)
            self.assertTrue(result, "Archive Audit.log is not created on time interval")
            self.log.info("Validation of archive file created is True, audit archive file is created {0}".format(archiveFile))
            result = shell.file_exists(auditIns.pathLogFile, auditIns.AUDITLOGFILENAME)
            self.assertTrue(result, "Audit.log is not created as per the roll over time specified")
        finally:
            shell.disconnect()
    finally:
        auditIns.setAuditRotateInterval(originalInt)
Example 7: test_folderMisMatchCluster
def test_folderMisMatchCluster(self):
    auditIns = audit(host=self.master)
    originalPath = auditIns.getAuditLogPath()
    newPath = originalPath + 'testFolderMisMatch'
    shell = RemoteMachineShellConnection(self.servers[0])
    try:
        shell.create_directory(newPath)
        command = 'chown couchbase:couchbase ' + newPath
        shell.execute_command(command)
    finally:
        shell.disconnect()
    auditIns.setAuditLogPath(newPath)
    for server in self.servers:
        rest = RestConnection(server)
        # Create an event for bucket creation
        expectedResults = {'name': 'TestBucket ' + server.ip, 'ram_quota': 536870912, 'num_replicas': 1,
                           'replica_index': False, 'eviction_policy': 'value_only', 'type': 'membase',
                           'auth_type': 'sasl', "autocompaction": 'false', "purge_interval": "undefined",
                           "flush_enabled": False, "num_threads": 3, "source": source,
                           "user": user, "ip": self.ipAddress, "port": 57457, 'sessionid': ''}
        rest.create_bucket(expectedResults['name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                           '11211', 'membase', 0, expectedResults['num_threads'], expectedResults['flush_enabled'], 'valueOnly')
        # Check on events
        try:
            self.checkConfig(self.eventID, self.servers[0], expectedResults)
        except:
            self.log.info("Issue reading the file at Node {0}".format(server.ip))
Example 8: common_test_body
def common_test_body(self, replica, load_ratio, timeout=10):
    log = logger.Logger.get_logger()
    start_time = time.time()
    log.info("replica : {0}".format(replica))
    log.info("load_ratio : {0}".format(load_ratio))
    master = self._servers[0]
    log.info('picking server : {0} as the master'.format(master))
    rest = RestConnection(master)
    while time.time() < (start_time + 60 * timeout):
        # rebalance in up to `delta` nodes if the cluster is not yet full
        nodes = rest.node_statuses()
        delta = len(self._servers) - len(nodes)
        if delta > 0:
            if delta > 1:
                how_many_add = Random().randint(1, delta)
            else:
                how_many_add = 1
            self.log.info("going to add {0} nodes".format(how_many_add))
            self.rebalance_in(how_many=how_many_add)
        else:
            self.log.info("all nodes already joined the cluster")
            time.sleep(30 * 60)
        # only rebalance out when the cluster still has at least 3/4 of all servers
        if len(nodes) >= (3.0 / 4.0 * len(self._servers)):
            nodes = rest.node_statuses()
            how_many_out = Random().randint(1, len(nodes) - 1)
            self.log.info("going to remove {0} nodes".format(how_many_out))
            self.rebalance_out(how_many=how_many_out)
Example 9: backup
def backup(self):
    while True:
        try:
            x = self.queue.get_nowait()
            self.log.info("get_nowait : {0}".format(x))
            break
        # things are normal, just do another backup after
        # waiting for self.interval
        except Exception:
            master = self.servers[0]
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            map = self.node_server_map(nodes, self.servers)
            self.log.info("cluster has {0} nodes".format(len(nodes)))
            for node in nodes:
                try:
                    from Crypto.Random import atfork
                    atfork()
                    BackupHelper(map[node]).backup('default', "/tmp")
                    BackupHelper(map[node]).backup('default', "/tmp")
                except Exception as ex:
                    print ex
                self.log.info("backed up the data into ")
            time.sleep(self.interval)
Example 10: create_bucket
def create_bucket(serverInfo, name='default', replica=1, port=11210, test_case=None, bucket_ram=-1, password=None):
    log = logger.Logger.get_logger()
    rest = RestConnection(serverInfo)
    if bucket_ram < 0:
        info = rest.get_nodes_self()
        bucket_ram = info.memoryQuota * 2 / 3
    if password is None:
        authType = "sasl"
    else:
        authType = "none"
    rest.create_bucket(bucket=name,
                       ramQuotaMB=bucket_ram,
                       replicaNumber=replica,
                       proxyPort=port,
                       authType=authType,
                       saslPassword=password)
    msg = 'create_bucket succeeded but bucket "{0}" does not exist'
    bucket_created = BucketOperationHelper.wait_for_bucket_creation(name, rest)
    if not bucket_created:
        log.error(msg.format(name))
        if test_case:
            test_case.fail(msg=msg.format(name))
    return bucket_created
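A hedged usage sketch for the create_bucket helper above; the bucket name is hypothetical, and passing test_case=self assumes the caller is a unittest-style test so the helper can fail it directly. Note that password=None selects the "sasl" authType branch shown above:

    # Hypothetical call site for the create_bucket() helper above.
    created = create_bucket(serverInfo=self.master,  # assumed server descriptor
                            name='example-bucket',   # hypothetical bucket name
                            replica=1,
                            password=None,           # None -> authType="sasl" branch
                            test_case=self)          # helper calls test_case.fail() on error
    assert created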
Example 11: create_primary_index_for_3_0_and_greater
def create_primary_index_for_3_0_and_greater(self):
    self.log.info("CREATE PRIMARY INDEX using %s" % self.primary_indx_type)
    rest = RestConnection(self.master)
    versions = rest.get_nodes_versions()
    if versions[0].startswith("4") or versions[0].startswith("3") or versions[0].startswith("5"):
        for bucket in self.buckets:
            if self.primary_indx_drop:
                self.log.info("Dropping primary index for %s using %s ..." % (bucket.name, self.primary_indx_type))
                self.query = "DROP PRIMARY INDEX ON %s USING %s" % (bucket.name, self.primary_indx_type)
                #self.run_cbq_query()
                self.sleep(3, 'Sleep for some time after index drop')
            self.query = 'select * from system:indexes where name="#primary" and keyspace_id = "%s"' % bucket.name
            res = self.run_cbq_query()
            self.sleep(10)
            if self.monitoring:
                self.query = "delete from system:completed_requests"
                self.run_cbq_query()
            if not self.skip_primary_index:
                if res['metrics']['resultCount'] == 0:
                    self.query = "CREATE PRIMARY INDEX ON %s USING %s" % (bucket.name, self.primary_indx_type)
                    self.log.info("Creating primary index for %s ..." % bucket.name)
                    try:
                        self.run_cbq_query()
                        self.primary_index_created = True
                        if self.primary_indx_type.lower() == 'gsi':
                            self._wait_for_index_online(bucket, '#primary')
                    except Exception, ex:
                        self.log.info(str(ex))
Example 12: _create_buckets
def _create_buckets(self, nodes):
    master_node = nodes[0]
    num_buckets = 0
    if self._default_bucket:
        num_buckets += 1
    num_buckets += self._sasl_buckets + self._standard_buckets
    bucket_size = self._get_bucket_size(master_node, nodes, self._mem_quota_int, num_buckets)
    rest = RestConnection(master_node)
    master_id = rest.get_nodes_self().id
    if self._default_bucket:
        if self._default_quota != 0:
            bucket_size = self._default_quota
        rest = RestConnection(nodes[0])
        rest.create_bucket(bucket=self.default_bucket_name,
                           ramQuotaMB=bucket_size,
                           replicaNumber=self._num_replicas,
                           proxyPort=11211,
                           authType="none",
                           saslPassword=None)
        self._buckets.append(self.default_bucket_name)
    if self._sasl_buckets > 0:
        if self._sasl_quota != 0:
            bucket_size = self._sasl_quota
        self._create_sasl_buckets(master_node, master_id, bucket_size, password="password")
    if self._standard_buckets > 0:
        if self._standard_quota != 0:
            bucket_size = self._standard_quota
        self._create_standard_buckets(master_node, master_id, bucket_size)
Example 13: test_invalidLogPathCluster
def test_invalidLogPathCluster(self):
    auditIns = audit(host=self.master)
    newPath = auditIns.getAuditLogPath() + 'test'
    rest = RestConnection(self.master)
    status, content = rest.setAuditSettings(logPath=newPath)
    self.assertFalse(status, "Audit is able to set invalid path")
    self.assertEqual(content['errors']['logPath'], 'The value must be a valid directory', 'No error or error changed')
Example 14: setUp
def setUp(self):
    super(RbacTestMemcached, self).setUp()
    rest = RestConnection(self.master)
    self.auth_type = self.input.param('auth_type', 'builtin')
    self.user_id = self.input.param("user_id", None)
    self.user_role = self.input.param("user_role", None)
    self.bucket_name = self.input.param("bucket_name", None)
    rest.create_bucket(bucket=self.bucket_name, ramQuotaMB=100, lww=True)
    self.role_map = self.input.param("role_map", None)
    self.incorrect_bucket = self.input.param("incorrect_bucket", False)
    self.new_role = self.input.param("new_role", None)
    self.new_role_map = self.input.param("new_role_map", None)
    self.no_bucket_access = self.input.param("no_bucket_access", False)
    self.no_access_bucket_name = self.input.param("no_access_bucket_name", "noaccess")
    self.all_buckets = self.input.param("all_buckets", None)
    self.ldap_users = rbacmain().returnUserList(self.user_id)
    if self.no_bucket_access:
        rest.create_bucket(bucket=self.no_access_bucket_name, ramQuotaMB=100, lww=True)
    if self.auth_type == 'ldap':
        rbacmain(self.master, 'builtin')._delete_user('cbadminbucket')
    if self.auth_type == 'ldap':
        rbacmain().setup_auth_mechanism(self.servers, 'ldap', rest)
        for user in self.ldap_users:
            testuser = [{'id': user[0], 'name': user[0], 'password': user[1]}]
            RbacBase().create_user_source(testuser, 'ldap', self.master)
            self.sleep(10)
    elif self.auth_type == "pam":
        rbacmain().setup_auth_mechanism(self.servers, 'pam', rest)
        rbacmain().add_remove_local_user(self.servers, self.ldap_users, 'deluser')
        rbacmain().add_remove_local_user(self.servers, self.ldap_users, 'adduser')
    elif self.auth_type == "builtin":
        for user in self.ldap_users:
            testuser = [{'id': user[0], 'name': user[0], 'password': user[1]}]
            RbacBase().create_user_source(testuser, 'builtin', self.master)
            self.sleep(10)
Example 15: test_rotateIntervalCluster
def test_rotateIntervalCluster(self):
    intervalSec = self.input.param("intervalSec", None)
    nodes_init = self.input.param("nodes_init", 2)
    auditIns = audit(host=self.master)
    auditIns.setAuditEnable('true')
    originalInt = auditIns.getAuditRotateInterval()
    auditIns.setAuditRotateInterval(intervalSec)
    firstEventTime = []
    try:
        for i in range(len(self.servers[:nodes_init])):
            auditTemp = audit(host=self.servers[i])
            firstEventTime.append(self.getTimeStampForFile(auditTemp))
        self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
        for i in range(len(self.servers[:nodes_init])):
            shell = RemoteMachineShellConnection(self.servers[i])
            rest = RestConnection(self.servers[i])
            status, content = rest.validateLogin(self.master.rest_username, self.master.rest_password, True, getContent=True)
            self.sleep(120, "sleeping for log file creation")
            try:
                hostname = shell.execute_command("hostname")
                self.log.info("print firstEventTime {0}".format(firstEventTime[i]))
                archiveFile = hostname[0][0] + '-' + firstEventTime[i] + "-audit.log"
                self.log.info("Archive File Name is {0}".format(archiveFile))
                result = shell.file_exists(auditIns.pathLogFile, archiveFile)
                self.assertTrue(result, "Archive Audit.log is not created on time interval")
                self.log.info("Validation of archive file created is True, audit archive file is created {0}".format(archiveFile))
                result = shell.file_exists(auditIns.pathLogFile, auditIns.AUDITLOGFILENAME)
                self.assertTrue(result, "Audit.log is not created as per the roll over time specified")
            finally:
                shell.disconnect()
    finally:
        auditIns.setAuditRotateInterval(originalInt)