

Python rest_client.RestConnection Class Code Examples

This article collects typical usage examples of the Python class lib.membase.api.rest_client.RestConnection. If you are unsure what the RestConnection class is for, or how to use it in practice, the curated class code examples below should help.


The sections below present 15 code examples of the RestConnection class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
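
As a quick orientation before diving in: the recurring pattern across all 15 examples is to wrap a single cluster node in a RestConnection and then issue REST calls through it. The sketch below is a minimal illustration of that pattern and is not part of any example; `master_node` (a server object) and `bucket` (a bucket object) are placeholders, while the two method calls are taken from Examples 3 and 6 below.

    from lib.membase.api.rest_client import RestConnection

    rest = RestConnection(master_node)        # one connection wraps one cluster node
    items = rest.get_active_key_count(bucket) # active item count for a bucket (Example 6)
    tasks = rest.active_tasks()               # currently running cluster tasks (Example 3)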

Example 1: execute_statement_on_cbas_via_rest

    def execute_statement_on_cbas_via_rest(self, statement, mode=None, rest=None, timeout=120, client_context_id=None, username=None, password=None):
        """
        Executes a statement on CBAS via the REST API, using the given RestConnection client.
        """
        pretty = "true"
        if not rest:
            rest = RestConnection(self.cbas_node)
        try:
            self.log.info("Running query on cbas: %s"%statement)
            response = rest.execute_statement_on_cbas(statement, mode, pretty,
                                                      timeout, client_context_id, username, password)
            response = json.loads(response)
            if "errors" in response:
                errors = response["errors"]
            else:
                errors = None

            if "results" in response:
                results = response["results"]
            else:
                results = None

            if "handle" in response:
                handle = response["handle"]
            else:
                handle = None
            
            return response["status"], response[
                "metrics"], errors, results, handle

        except Exception as e:
            raise Exception(str(e))
Developer: ritamcouchbase, Project: testrunner, Lines of code: 32, Source file: cbas_base.py
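
A hypothetical call site for the method above, shown only to make the five-value return explicit; the statement text, the context id, and the surrounding test-class context are assumptions, not part of the original example:

    status, metrics, errors, results, handle = self.execute_statement_on_cbas_via_rest(
        "SELECT VALUE 1;", client_context_id="example-query-1")
    assert status == "success", errors  # hypothetical check on the returned status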

Example 2: get_bucket_size

 def get_bucket_size(self, interval=60):
     self._task["bucket_size"] = []
     retries = 0
     nodes_iterator = (node for node in self.nodes)
     node = next(nodes_iterator)
     rest = RestConnection(node)
     while not self._aborted():
         time.sleep(interval)
         log.info("collecting bucket size stats")
         try:
             status, db_size = rest.get_database_disk_size(self.bucket)
             if status:
                 self._task["bucket_size"].append(db_size)
         except IndexError as e:
             retries += 1
             log.error("unable to get bucket size {0}: {1}"
                       .format(self.bucket, e))
             log.warning("retries: {0} of {1}".format(retries, RETRIES))
             if retries == RETRIES:
                 try:
                     node = next(nodes_iterator)
                     rest = RestConnection(node)
                     retries = 0
                 except StopIteration:
                     log.error("no nodes available: stop collecting bucket_size")
                     return
Developer: xiejunyi, Project: testrunner, Lines of code: 26, Source file: stats.py

Example 3: measure_indexing_throughput

    def measure_indexing_throughput(self, nodes):
        self._task['indexer_info'] = list()
        indexers = defaultdict(dict)
        while not self._aborted():
            time.sleep(15)  # 15 seconds by default

            # Grab indexer tasks from all nodes
            tasks = list()
            for node in nodes:
                rest = RestConnection(node)
                tasks.extend(filter(lambda t: t['type'] == 'indexer',
                                    rest.active_tasks()))

            # Calculate throughput for every unique PID
            thr = 0
            for task in tasks:
                uiid = task['pid'] + str(task['started_on'])

                changes_delta = \
                    task['changes_done'] - indexers[uiid].get('changes_done', 0)
                time_delta = \
                    task['updated_on'] - indexers[uiid].get('updated_on',
                                                            task['started_on'])
                if time_delta:
                    thr += changes_delta / time_delta
                indexers[uiid]['changes_done'] = task['changes_done']
                indexers[uiid]['updated_on'] = task['updated_on']

            # Average throughput
            self._task['indexer_info'].append({
                'indexing_throughput': thr,
                'timestamp': time.time()
            })
Developer: mschoch, Project: testrunner, Lines of code: 33, Source file: stats.py

Example 4: ns_server_stats

    def ns_server_stats(self, interval=60):
        self._task["ns_server_stats"] = []
        self._task["ns_server_stats_system"] = []
        nodes_iterator = (node for node in self.nodes)
        node = next(nodes_iterator)
        retries = 0
        not_null = lambda v: v if v is not None else 0

        rest = RestConnection(node)
        while not self._aborted():
            time.sleep(interval)
            log.info("collecting ns_server_stats")
            try:
                # Bucket stats
                ns_server_stats = rest.fetch_bucket_stats(bucket=self.bucket)
                for key, value in ns_server_stats["op"]["samples"].items():
                    ns_server_stats["op"]["samples"][key] = not_null(value)
                self._task["ns_server_stats"].append(ns_server_stats)
                # System stats
                ns_server_stats_system = rest.fetch_system_stats()
                self._task["ns_server_stats_system"].append(ns_server_stats_system)
            except (ValueError, TypeError) as e:
                retries += 1
                log.error("unable to parse json object {0}: {1}".format(node, e))
                log.warning("retries: {0} of {1}".format(retries, RETRIES))
                if retries == RETRIES:
                    try:
                        node = next(nodes_iterator)
                        rest = RestConnection(node)
                        retries = 0
                    except StopIteration:
                        log.error("no nodes available: stop collecting ns_server_stats")
                        return
Developer: xiejunyi, Project: testrunner, Lines of code: 33, Source file: stats.py

Example 5: get_and_validate_latest_checkpoint

    def get_and_validate_latest_checkpoint(self):
        rest_con = RestConnection(self.src_master)
        try:
            checkpoint_record = rest_con.get_recent_xdcr_vb_ckpt('default')
            self.log.info("Checkpoint record : {}".format(checkpoint_record))
            self.chkpt_records.append(checkpoint_record)
        except Exception as e:
            raise XDCRCheckpointException("Error retrieving last checkpoint document - {}".format(e))

        failover_uuid = checkpoint_record["failover_uuid"]
        seqno = checkpoint_record["seqno"]

        self.log.info ("Verifying commitopaque/remote failover log ...")
        if seqno != 0:
            self.validate_remote_failover_log(checkpoint_record["target_vb_opaque"]["target_vb_uuid"], checkpoint_record["target_seqno"])
            self.log.info ("Verifying local failover uuid ...")
            local_vb_uuid, _ = self.get_failover_log(self.src_master)
            self.assertTrue((int(failover_uuid) == int(local_vb_uuid)) or
                            (int(failover_uuid) == 0),
                        "local failover_uuid is wrong in checkpoint record! Expected: {0} seen: {1}".
                        format(local_vb_uuid,failover_uuid))
            self.log.info("Checkpoint record verified")
        else:
            self.log.info("Skipping checkpoint record checks for checkpoint-0")
        return True
Developer: lichia, Project: testrunner, Lines of code: 25, Source file: checkpointXDCR.py

Example 6: test_maxttl_with_doc_updates

    def test_maxttl_with_doc_updates(self):
        """
        1. Create a bucket with ttl = 60s
        2. Upload 1000 docs with exp = 40s
        3. After 20s, Update docs with exp = 60s
        4. After 40s, run expiry pager again and get item count, must be 1000
        5. After 20s, run expiry pager again and get item count, must be 0
        """
        rest = RestConnection(self.master)
        for bucket in self.buckets:
            self._load_json(bucket, self.num_items, exp=40)

        self.sleep(20, "waiting to update docs with exp=60s...")

        for bucket in self.buckets:
            self._load_json(bucket, self.num_items, exp=60)

        self.sleep(40, "waiting before running expiry pager...")
        self.expire_pager(self.servers)
        for bucket in self.buckets:
            items = rest.get_active_key_count(bucket)
            self.log.info("Items: {0}".format(items))
            if items != self.num_items:
                self.fail("FAIL: Docs with updated expiry deleted unexpectedly!")

        self.sleep(20, "waiting before running expiry pager...")
        self.expire_pager(self.servers)
        self.sleep(20, "waiting for item count to come down...")
        for bucket in self.buckets:
            items = rest.get_active_key_count(bucket)
            self.log.info("Items: {0}".format(items))
            if items != 0:
                self.fail("FAIL: Docs with updated expiry not deleted after new exp has elapsed!")
Developer: arod1987, Project: testrunner, Lines of code: 33, Source file: expiry_maxttl.py

Example 7: test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout

 def test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout(self):
     # Note to self: never use SDKs unless you really have to. It is difficult to upgrade or maintain correct
     # SDK versions on the slaves, and scripts will be notoriously unreliable when run on Jenkins slaves.
     num_docs = 10
     values = ['1', '10']
     # create 10 non json docs on source bucket
     gen_load_non_json = JSONNonDocGenerator('non_json_docs', values, start=0, end=num_docs)
     self.cluster.load_gen_docs(self.master, self.src_bucket_name, gen_load_non_json, self.buckets[0].kvs[1],
                                'create', compression=self.sdk_compression)
     # create a function which sleeps for 5 secs and set execution_timeout to 1s
     body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.EXECUTION_TIME_MORE_THAN_TIMEOUT,
                                           execution_timeout=1)
     # deploy the function
     self.deploy_function(body)
     # This is intentionally added so that we wait for some mutations to process and we decide none are processed
     self.sleep(60)
     # No docs should be present in dst_bucket, as all the function executions should have timed out
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     exec_timeout_count = 0
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         # get sum of all timeout_count
         exec_timeout_count += out[0]["failure_stats"]["timeout_count"]
     # check whether all the function executions timed out and is equal to number of docs created
     if exec_timeout_count != num_docs:
         self.fail("Not all event executions timed out : Expected : {0} Actual : {1}".format(num_docs,
                                                                                             exec_timeout_count))
     self.undeploy_and_delete_function(body)
Developer: arod1987, Project: testrunner, Lines of code: 30, Source file: eventing_negative.py

Example 8: customize_xdcr_settings

    def customize_xdcr_settings(self):
        """Set custom XDCR environment variables"""
        max_concurrent_reps_per_doc = self.param('max_concurrent_reps_per_doc', None)
        xdcr_doc_batch_size_kb = self.param('xdcr_doc_batch_size_kb', None)
        xdcr_checkpoint_interval = self.param('xdcr_checkpoint_interval', None)
        xdcr_latency_optimization = self.param('xdcr_latency_optimization', None)

        if max_concurrent_reps_per_doc:
            param = 'xdcrMaxConcurrentReps'
            value = max_concurrent_reps_per_doc
        elif xdcr_doc_batch_size_kb:
            param = 'xdcrDocBatchSizeKb'
            value = xdcr_doc_batch_size_kb
        elif xdcr_checkpoint_interval:
            param = 'xdcrCheckpointInterval'
            value = xdcr_checkpoint_interval
        else:
            return

        self.log.info("changing {0} to {1}".format(param, value))

        for servers in self.input.clusters.values():
            rest_conn = RestConnection(servers[0])
            replications = rest_conn.get_replications()
            for repl in replications:
                src_bucket = repl.get_src_bucket()
                dst_bucket = repl.get_dest_bucket()
                rest_conn.set_xdcr_param(src_bucket.name, dst_bucket.name, param, value)
Developer: EricACooper, Project: testrunner, Lines of code: 28, Source file: perf.py
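
For reference, each pass through the inner loop above boils down to one REST call per existing replication; a standalone sketch of that call, with hypothetical bucket names and interval value:

    rest_conn.set_xdcr_param('src_bucket', 'dst_bucket', 'xdcrCheckpointInterval', 60)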

Example 9: start_replication

    def start_replication(self, master, slave, replication_type='continuous',
                          buckets=None, bidir=False, suffix='A'):
        """Add remote cluster and start replication"""

        master_rest_conn = RestConnection(master)
        remote_reference = 'remote_cluster_' + suffix

        master_rest_conn.add_remote_cluster(slave.ip, slave.port,
                                            slave.rest_username,
                                            slave.rest_password,
                                            remote_reference)

        if not buckets:
            buckets = self.get_buckets()
        else:
            buckets = self.get_buckets(reversed=True)

        for bucket in buckets:
            master_rest_conn.start_replication(replication_type, bucket,
                                               remote_reference)

        if self.parami('num_buckets', 1) > 1 and suffix == 'A':
            self.start_replication(slave, master, replication_type, buckets,
                                   suffix='B')

        if bidir:
            self.start_replication(slave, master, replication_type, buckets,
                                   suffix='B')
Developer: ronniedada, Project: testrunner, Lines of code: 28, Source file: iperf.py

Example 10: test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout

 def test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout(self):
     keys = ['customer123', 'customer1234', 'customer12345']
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip, name=self.src_bucket_name)
     bucket = Bucket(url, username="cbadminbucket", password="password")
     for doc_id in keys:
         bucket.upsert(doc_id, {'name' : doc_id})
     # create a function which sleeps for 5 secs and set execution_timeout to 1s
     body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.EXECUTION_TIME_MORE_THAN_TIMEOUT,
                                           execution_timeout=1)
     # deploy the function
     self.deploy_function(body)
     # This is intentionally added so that we wait for some mutations to process and we decide none are processed
     self.sleep(60)
     # No docs should be present in dst_bucket, as all the function executions should have timed out
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     exec_timeout_count = 0
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         # get sum of all timeout_count
         exec_timeout_count += out[0]["failure_stats"]["timeout_count"]
     # check whether all the function executions timed out and is equal to number of docs created
     if exec_timeout_count != len(keys):
         self.fail("Not all event executions timed out : Expected : {0} Actual : {1}".format(len(keys),
                                                                                             exec_timeout_count))
     self.undeploy_and_delete_function(body)
Developer: membase, Project: testrunner, Lines of code: 27, Source file: eventing_negative.py

Example 11: get_and_validate_latest_checkpoint

    def get_and_validate_latest_checkpoint(self):
        rest_con = RestConnection(self.src_master)
        try:
            checkpoint_record = rest_con.get_recent_xdcr_vb_ckpt('default', 'default', '0')
            self.log.info("Checkpoint record : {}".format(checkpoint_record))
            self.chkpt_records.append(checkpoint_record)
        except Exception as e:
            raise XDCRCheckpointException("Error retrieving last checkpoint document - {}".format(e))

        commit_opaque = checkpoint_record["commitopaque"]
        failover_uuid = checkpoint_record["failover_uuid"]

        upr_snap_seqno = checkpoint_record["upr_snapshot_seqno"]
        seqno = checkpoint_record["seqno"]
        start_time = checkpoint_record["start_time"]
        total_docs_checked = checkpoint_record["total_docs_checked"]
        total_docs_written = checkpoint_record["total_docs_written"]
        total_data_repl = checkpoint_record["total_data_replicated"]
        end_time = checkpoint_record["end_time"]

        self.log.info ("Verifying commitopaque/remote failover log ...")
        if seqno != 0:
            self.validate_remote_failover_log(commit_opaque[0], commit_opaque[1])
            self.log.info ("Verifying last checkpointed seqno ...")
            self.validate_last_checkpointed_seqno(int(seqno))
            self.log.info ("Verifying local failover uuid ...")
            local_vb_uuid, _ = self.get_failover_log(self.src_master)
            self.assertTrue(int(local_vb_uuid) == int(failover_uuid),
                        "local failover_uuid is wrong in checkpoint record! Expected: {0} seen: {1}".
                        format(local_vb_uuid,failover_uuid))
            self.log.info("Checkpoint record verified")
        else:
            self.log.info("Skipping checkpoint record checks for checkpoint-0")
        return True
Developer: uvenum, Project: testrunner, Lines of code: 34, Source file: checkpointXDCR.py

Example 12: print_eventing_stats_from_all_eventing_nodes

 def print_eventing_stats_from_all_eventing_nodes(self):
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                   indent=4)))
Developer: arod1987, Project: testrunner, Lines of code: 7, Source file: eventing_base.py

Example 13: test_gsi_with_flush_bucket_redaction_enabled

    def test_gsi_with_flush_bucket_redaction_enabled(self):
        # load bucket and do some ops
        self.set_indexer_logLevel("trace")
        json_generator = JsonGenerator()
        gen_docs = json_generator.generate_all_type_documents_for_gsi(docs_per_day=self.doc_per_day, start=0)
        full_docs_list = self.generate_full_docs_list(gen_docs)
        n1ql_helper = N1QLHelper(use_rest=True, buckets=self.buckets, full_docs_list=full_docs_list,
                                      log=log, input=self.input, master=self.master)
        self.load(gen_docs)
        n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        query_definition_generator = SQLDefinitionGenerator()
        query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
        query_definitions = query_definition_generator.filter_by_group("all", query_definitions)
        # set log redaction level, collect logs, verify log files exist and verify them for redaction
        self.set_redaction_level()
        self.start_logs_collection()
        # Create partial Index
        for query_definition in query_definitions:
            for bucket in self.buckets:
                create_query = query_definition.generate_index_create_query(bucket.name)
                n1ql_helper.run_cbq_query(query=create_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        rest = RestConnection(self.master)
        rest.flush_bucket(self.buckets[0].name)

        self.sleep(10)
        self.load(gen_docs, buckets=[self.buckets[0]])

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                drop_query = query_definition.generate_index_drop_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=drop_query, server=n1ql_node)
        result = self.monitor_logs_collection()
        log.info(result)
        try:
            logs_path = result["perNode"]["[email protected]" + str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["[email protected]"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        log_file = self.input.param("log_file_name", "indexer.log")
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.{0}".format(log_file))
Developer: arod1987, Project: testrunner, Lines of code: 59, Source file: log_redaction_tests.py

Example 14: wait_for_xdc_replication

 def wait_for_xdc_replication(self):
     rest = RestConnection(self.input.servers[0])
     bucket = self.param('bucket', 'default')
     while True:  # we have to wait at least once
         print "Waiting for XDC replication to finish"
         time.sleep(15)
         if not rest.get_xdc_queue_size(bucket):
             break
Developer: mschoch, Project: testrunner, Lines of code: 8, Source file: iperf.py

Example 15: print_go_routine_dump_from_all_eventing_nodes

 def print_go_routine_dump_from_all_eventing_nodes(self):
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_eventing_go_routine_dumps()
         log.info("Go routine dumps for Node {0} is \n{1} ======================================================"
                  "============================================================================================="
                  "\n\n".format(eventing_node.ip, out))
Developer: arod1987, Project: testrunner, Lines of code: 8, Source file: eventing_base.py


Note: The lib.membase.api.rest_client.RestConnection class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from projects contributed by open-source developers; copyright of the source code remains with the original authors, and any use or distribution should follow the corresponding project's license. Do not reproduce without permission.