

Python SimpleConsumer.stop Method Code Examples

This article collects typical usage examples of Python's kafka.SimpleConsumer.stop method. If you are wondering what SimpleConsumer.stop does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, kafka.SimpleConsumer.


The following presents 5 code examples of the SimpleConsumer.stop method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
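Before diving into the examples, here is a minimal, hedged sketch of the typical SimpleConsumer lifecycle around stop(); the broker address, group, and topic names are placeholders, not drawn from any example below:

# A minimal sketch assuming kafka-python's legacy SimpleConsumer API
# and a broker reachable at localhost:9092 (placeholder address).
from kafka import SimpleClient, SimpleConsumer

client = SimpleClient('localhost:9092')
consumer = SimpleConsumer(client, 'my-group', 'my-topic')  # hypothetical group/topic
try:
    for message in consumer:              # yields OffsetAndMessage tuples
        print(message.message.value)
finally:
    consumer.stop()    # commits offsets (when auto_commit is on) and shuts the consumer down
    client.close()

Wrapping consumption in try/finally so that stop() always runs is the pattern the examples below follow as well.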

Example 1: assert_message_count

# Required import: from kafka import SimpleConsumer [as alias]
# Or: from kafka.SimpleConsumer import stop [as alias]
    def assert_message_count(self, topic, check_count, timeout=10,
                             partitions=None, at_least=False):
        hosts = ','.join(['%s:%d' % (broker.host, broker.port)
                          for broker in self.brokers])

        client = SimpleClient(hosts, timeout=2)
        consumer = SimpleConsumer(client, None, topic,
                                  partitions=partitions,
                                  auto_commit=False,
                                  iter_timeout=timeout)

        started_at = time.time()
        pending = -1
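        # Poll pending() until enough messages arrive; FailedPayloadsError can
        # occur transiently during broker failover, so it is swallowed and retried.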
        while pending < check_count and (time.time() - started_at < timeout):
            try:
                pending = consumer.pending(partitions)
            except FailedPayloadsError:
                pass
            time.sleep(0.5)

        consumer.stop()
        client.close()

        if pending < check_count:
            self.fail('Too few pending messages: found %d, expected %d' %
                      (pending, check_count))
        elif pending > check_count and not at_least:
            self.fail('Too many pending messages: found %d, expected %d' %
                      (pending, check_count))
        return True
Developer ID: jianbin-wei, Project: kafka-python, Lines of code: 32, Source file: test_failover_integration.py

Example 2: assert_message_count

# Required import: from kafka import SimpleConsumer [as alias]
# Or: from kafka.SimpleConsumer import stop [as alias]
    def assert_message_count(self, topic, check_count, timeout=10,
                             partitions=None, at_least=False):
        hosts = ','.join(['%s:%d' % (broker.host, broker.port)
                          for broker in self.brokers])

        client = KafkaClient(hosts)
        consumer = SimpleConsumer(client, None, topic,
                                  partitions=partitions,
                                  auto_commit=False,
                                  iter_timeout=timeout)

        started_at = time.time()
        pending = consumer.pending(partitions)

        # Keep checking if it isn't immediately correct, subject to timeout
        while pending < check_count and (time.time() - started_at < timeout):
            pending = consumer.pending(partitions)
            time.sleep(0.5)

        consumer.stop()
        client.close()

        if pending < check_count:
            self.fail('Too few pending messages: found %d, expected %d' %
                      (pending, check_count))
        elif pending > check_count and not at_least:
            self.fail('Too many pending messages: found %d, expected %d' %
                      (pending, check_count))
        return True
Developer ID: 249550148, Project: kafka-python, Lines of code: 31, Source file: test_failover_integration.py

Example 3: assert_message_count

# Required import: from kafka import SimpleConsumer [as alias]
# Or: from kafka.SimpleConsumer import stop [as alias]
    def assert_message_count(self, topic, check_count, timeout=10, partitions=None):
        hosts = ",".join(["%s:%d" % (broker.host, broker.port) for broker in self.brokers])

        client = KafkaClient(hosts)
        group = random_string(10)
        consumer = SimpleConsumer(client, group, topic, partitions=partitions, auto_commit=False, iter_timeout=timeout)

        started_at = time.time()
        pending = consumer.pending(partitions)

        # Keep checking if it isn't immediately correct, subject to timeout
        while pending != check_count and (time.time() - started_at < timeout):
            pending = consumer.pending(partitions)
            time.sleep(0.5)  # avoid a tight busy-wait between polls

        consumer.stop()
        client.close()

        self.assertEqual(pending, check_count)
Developer ID: anyway1021, Project: incubator-eagle, Lines of code: 20, Source file: test_failover_integration.py

Example 4: consume_topic

# Required import: from kafka import SimpleConsumer [as alias]
# Or: from kafka.SimpleConsumer import stop [as alias]
        def consume_topic(callback_url, consumer_group, topic):
            consumer = None
            try:
                consumer = SimpleConsumer(self.kafka, consumer_group, topic, auto_commit=False)
                messages_read = 0

                # we can't read messages infinitely here as we have
                # a lot of topics/subscribers (much more than threadpool size)
                while messages_read < self.max_read_messages_per_cycle:

                    # get one message and monitor the time
                    start = monitoring.start_time_measure()
                    message = consumer.get_message(block=False)
                    ms_elapsed = monitoring.stop_time_measure(start)
                    self.metrics['kafka_read'].add({'topic': topic}, ms_elapsed)

                    # if we don't have messages for this topic/subscriber - quit and give chance to others
                    if message is None:
                        logging.info('No messages for topic: %s and callback: %s, quitting the thread', topic, callback_url)
                        break

                    try:
                        event = json.loads(message.message.value.decode('utf-8'))
                        response_status = self.forward_event(callback_url, event, topic)

                        # if status is success - mark message as consumed by this subscriber
                        if 200 <= response_status < 300:
                            consumer.commit()
                        else:
                            logging.info('Received error response from consumer: %s', response_status)
                    except:
                        logging.error("Exception while sending event to consumer")
                        logging.error(traceback.format_exc())
                    finally:
                        messages_read += 1
                return messages_read

            except UnknownTopicOrPartitionError:
                logging.error('Adding %s to skip list', topic)
            except:
                logging.exception('failed to create kafka client')
            finally:
                if consumer is not None:
                    consumer.stop()
Developer ID: AlexanderYastrebov, Project: nakadi, Lines of code: 46, Source file: background.py
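
Example 4's commit-on-success loop is the heart of its at-least-once delivery: a message is committed only after the subscriber has confirmed it. A stripped-down sketch of that pattern (kafka_client, the group/topic names, and handle() are hypothetical placeholders, not part of the example above):

# A minimal sketch of the commit-on-success pattern from Example 4.
# kafka_client, 'my-group', 'events', and handle() are hypothetical.
consumer = SimpleConsumer(kafka_client, 'my-group', 'events', auto_commit=False)
try:
    while True:
        msg = consumer.get_message(block=False)  # non-blocking fetch
        if msg is None:
            break                                # queue drained; yield the thread
        if handle(msg.message.value):            # returns True on successful delivery
            consumer.commit()                    # mark consumed only on success
finally:
    consumer.stop()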

Example 5: __init__

# Required import: from kafka import SimpleConsumer [as alias]
# Or: from kafka.SimpleConsumer import stop [as alias]
class KafkaDriver:

    def __init__(self, driver_args, event_loop):
        self.logger = logging.getLogger('KafkaDriver') # possible TODO: get logger from invoker
        self.logger.setLevel(logging.INFO)
        console_log_handler = logging.StreamHandler(sys.stdout)
        self.logger.addHandler(console_log_handler)

        self.logger.info("KafkaDriver initialized; driver_args=%s" % (driver_args))
        self.event_loop = event_loop
        if driver_args == "":
            kafka_server_addr = "localhost:9092"
        else:
            kafka_server_addr = driver_args
        client_id = "KafkaDriver-%d-%d" % (time.time(), os.getpid()) # generate a unique client ID so that Kafka doesn't confuse us with a different instance
        self.kafka = KafkaClient(kafka_server_addr, client_id=client_id)

        self.queue_name = None
        ## APPEND direction
        self.get_message_stream = None
        # how frequently to check for messages and (space permitting) add them to the GET message stream, in seconds
        self.MESSAGE_CHECK_FREQ = 0.010
        self.producer = None
        ## GET direction
        self.consumer = None
        self.get_message_count = 0  # how many messages we have sent from various queues
        self.MAX_KAFKA_REQ_BATCH_MSGS = 200 # most number of messages that we will request from Kafka at a time

    ######## APPEND direction ########

    # called to tell driver of a new stream of appends that are going to come in; these should go to the end of the named queue
    def prepare_for_append_stream(self, queue_name):
        self.logger.info("KafkaDriver prepare_for_append_stream got: queue_name=%s" % (queue_name))
        self.queue_name = str(queue_name)
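        # async producer: buffers messages and sends them in batches (every
        # batch_send_every_n messages or batch_send_every_t interval), acking
        # after the leader's local write only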
        self.producer = SimpleProducer(
            self.kafka,
            async=True,
            req_acks=SimpleProducer.ACK_AFTER_LOCAL_WRITE,
            ack_timeout=5000,
            batch_send=True,
            batch_send_every_n=100,
            batch_send_every_t=1000,
            random_start=True
        )

    def append(self, payload, ttl):
        ttl = int(ttl)
        self.logger.debug("KafkaDriver append got: ttl=%d, payload='%s'" % (ttl, payload))
        try:
            self.producer.send_messages(self.queue_name, payload)
        except UnknownTopicOrPartitionError:
            self.logger.warn("Kafka reports unknown topic or invalid partition number: " + str(sys.exc_info()))
            return 500
        except:
            self.logger.warn("Got exception from kafka-python SimpleProducer:" + str(sys.exc_info()))
            return 500

        # if random.uniform(0,1) < self.FRACTION_MSGS_TO_FAKE_APPEND_ERROR:
        #     self.logger.debug("faking error")
        #     return 400
        return 100

    def cancel_append_stream(self):
        self.logger.info("KafkaDriver cancel_append_stream got called")
        self.producer.stop()
        self.producer = None
        self.queue_name = None

    ######## GET direction ########

    # called to tell the driver that a new stream of messages is needed for return to a client. get_message_stream is an instance of MessageStream into which the driver puts the messages it has available as a response to this request. The other arguments have the same meaning as in the Marconi API.
    def init_get_stream(self, get_message_stream, queue_name_spec, starting_marker, echo_requested, include_claimed):
        self.logger.info("KafkaDriver init_get_stream got: queue_name=%s, echo_requested=%s, include_claimed=%s, starting_marker=%s" % (queue_name_spec, str(echo_requested), str(include_claimed), starting_marker))
        self.logger.info("warning: KafkaDriver ignores echo_requested and include_claimed in GET requests")
        self.consume_group = "cg1" # default consume group
        if len(starting_marker) > 0:
            self.consume_group = starting_marker
        self.logger.info("consume group="+self.consume_group)

        # if the queue name ends with "/N", we interpret N as the partition to read from
        if "/" in queue_name_spec:
            queue_name, partition_part = queue_name_spec.split("/", 1)
            partition = int(partition_part)
            self.logger.info("limiting topic %s to partition %d" % (queue_name, partition))
        else:
            queue_name = queue_name_spec
            partition = None

        self.get_message_stream = get_message_stream
        self.queue_name = str(queue_name)
        self.consumer = SimpleConsumer(
            client=self.kafka,
            group=self.consume_group,
            topic=self.queue_name,
            partitions=[partition] if partition is not None else None,
            auto_commit=False, # we cannot commit when using kafka-python 0.9.1 against Kafka brokers older than 0.8.1, because kafka-python sends an OffsetFetchRequest (request type 9) or OffsetCommitRequest (request type 8) that those brokers do not support
            fetch_size_bytes=self.MAX_KAFKA_REQ_BATCH_MSGS * 4096,  # in Marconi, messages can be up to 4k
            iter_timeout=None,
        )
#......... remainder of the code omitted .........
Developer ID: openstacker, Project: streaming-zaqar-poc, Lines of code: 103, Source file: KafkaDriver.py


Note: The kafka.SimpleConsumer.stop method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.