

Python pykafka.KafkaClient usage examples

This article collects typical usage examples of pykafka.KafkaClient in Python. If you are struggling with questions such as what pykafka.KafkaClient does, how to use it, or where to find working examples, the curated snippets below may help. You can also explore further usage examples from the pykafka package.


Below are 15 code examples of pykafka.KafkaClient, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
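
Before the individual examples, here is a minimal end-to-end sketch of the workflow nearly all of them share: connect to a broker, look up a topic, produce a message, and consume it back. The broker address and topic name are placeholders for illustration, not values from any of the projects cited below.

from pykafka import KafkaClient

# Placeholder broker address and topic name -- adjust for your cluster.
client = KafkaClient(hosts="127.0.0.1:9092")
topic = client.topics[b"demo-topic"]  # topic names are bytes

# Produce one message synchronously.
with topic.get_producer(min_queued_messages=1) as producer:
    producer.produce(b"hello, kafka")

# Read it back with a simple (non-balanced) consumer.
consumer = topic.get_simple_consumer(consumer_timeout_ms=5000)
for message in consumer:
    print(message.offset, message.value)
    break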

Example 1: setUpClass

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
def setUpClass(cls):
        cls.kafka = get_cluster()
        cls.topic_name = b'test-data'
        cls.kafka.create_topic(cls.topic_name, 3, 2)
        cls.client = KafkaClient(cls.kafka.brokers)
        topic = cls.client.topics[cls.topic_name]
        cls.producer = topic.get_producer(min_queued_messages=1)
        cls.total_messages = 99
        for i in range(cls.total_messages):
            cls.producer.produce("message {}".format(i).encode()) 
Author: sunqb, Project: oa_qian, Lines: 12, Source: test_partition.py

Example 2: setUpClass

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: from uuid import uuid4
def setUpClass(cls):
        cls.kafka = get_cluster()
        cls.topic_name = uuid4().hex.encode()
        cls.n_partitions = 3
        cls.kafka.create_topic(cls.topic_name, cls.n_partitions, 2)
        cls.client = KafkaClient(cls.kafka.brokers, use_greenlets=cls.USE_GEVENT)
        cls.prod = cls.client.topics[cls.topic_name].get_producer(
            min_queued_messages=1
        )
        for i in range(1000):
            cls.prod.produce('msg {num}'.format(num=i).encode()) 
Author: sunqb, Project: oa_qian, Lines: 13, Source: test_balancedconsumer.py

Example 3: fetch_offsets

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: import calendar; import datetime as dt
def fetch_offsets(client, topic, offset):
    """Fetch raw offset data from a topic.

    :param client: KafkaClient connected to the cluster.
    :type client:  :class:`pykafka.KafkaClient`
    :param topic:  Topic to fetch offsets for.
    :type topic:  :class:`pykafka.topic.Topic`
    :param offset: Offset to reset to. Can be ``'earliest'``, ``'latest'`` or a
        datetime string in ``%Y-%m-%dT%H:%M:%S`` format. Using a datetime will
        reset the offset to the latest message published *before* the datetime.
    :type offset: :class:`str`
    :returns: {partition_id: :class:`pykafka.protocol.OffsetPartitionResponse`}
    """
    if offset.lower() == 'earliest':
        return topic.earliest_available_offsets()
    elif offset.lower() == 'latest':
        return topic.latest_available_offsets()
    else:
        offset = dt.datetime.strptime(offset, "%Y-%m-%dT%H:%M:%S")
        offset = int(calendar.timegm(offset.utctimetuple())*1000)
        return topic.fetch_offset_limits(offset) 
Author: sunqb, Project: oa_qian, Lines: 24, Source: kafka_tools.py
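
A hedged sketch of how fetch_offsets might be invoked; the broker address and topic name are placeholders, and the result is a dict keyed by partition id, as the docstring above states.

from pykafka import KafkaClient

client = KafkaClient(hosts='127.0.0.1:9092')
topic = client.topics[b'test-data']  # placeholder topic

earliest = fetch_offsets(client, topic, 'earliest')
latest = fetch_offsets(client, topic, 'latest')
# Latest message published *before* the given timestamp:
by_time = fetch_offsets(client, topic, '2019-01-01T00:00:00')

for partition_id, response in latest.items():
    print(partition_id, response.offset)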

Example 4: desc_topic

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: import tabulate
def desc_topic(client, args):
    """Print detailed information about a topic.

    :param client: KafkaClient connected to the cluster.
    :type client:  :class:`pykafka.KafkaClient`
    :param args:  Parsed command-line arguments; ``args.topic`` holds the topic name.
    :type args:  :class:`argparse.Namespace` (or any object with a ``topic`` attribute)
    """
    # Don't auto-create topics.
    if args.topic not in client.topics:
        raise ValueError('Topic {} does not exist.'.format(args.topic))
    topic = client.topics[args.topic]
    print('Topic: {}'.format(topic.name))
    print('Partitions: {}'.format(len(topic.partitions)))
    print('Replicas: {}'.format(len(list(topic.partitions.values())[0].replicas)))
    print(tabulate.tabulate(
        [(p.id, p.leader.id, [r.id for r in p.replicas], [r.id for r in p.isr])
         for p in topic.partitions.values()],
        headers=['Partition', 'Leader', 'Replicas', 'ISR'],
        numalign='center',
    )) 
Author: sunqb, Project: oa_qian, Lines: 23, Source: kafka_tools.py

Example 5: send_to_kafka

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: import json
def send_to_kafka(topic_name, msg):
    kafka_host = get_config('KAFKA_HOST')

    if not kafka_host:
        raise Exception('Unable to get Kafka host address')

    client = KafkaClient(hosts=kafka_host)
    topic = client.topics[topic_name]

    with topic.get_producer(delivery_reports=True) as producer:
        producer.produce(json.dumps(msg, sort_keys=True).encode('utf-8'))  # produce() expects bytes

        msg, exc = producer.get_delivery_report(block=True)

        if exc is not None:
            raise exc 
Author: marklit, Project: mass-ipv4-whois, Lines: 18, Source: tasks.py
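
A hedged usage sketch; the topic name and payload below are placeholders, and get_config is assumed to resolve KAFKA_HOST from the project's configuration as the function expects.

# Placeholder call -- topic and payload are illustrative only.
send_to_kafka(b'whois-results', {'ip': '192.0.2.1', 'registrar': 'example'})

Blocking on get_delivery_report after each message trades throughput for a hard per-call delivery guarantee.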

Example 6: getKafkaTopic

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: the endorser_util helper module from the fabric-test project
def getKafkaTopic(kafkaBrokers=["0.0.0.0:9092"], channel=endorser_util.SYS_CHANNEL_ID):
    kafkas = ",".join(kafkaBrokers)
    client = KafkaClient(hosts=kafkas)
    if client.topics == {} and channel is None:
        topic = client.topics[endorser_util.TEST_CHANNEL_ID]
    elif client.topics == {} and channel is not None:
        topic = client.topics[channel]
    elif channel is not None and channel in client.topics:
        topic = client.topics[channel]
    elif channel is None and client.topics != {}:
        topic_list = list(client.topics.keys())  # keys() is a view in Python 3
        topic = client.topics[topic_list[0]]
    else:
        # Previously unhandled case (channel set but not yet in the topic list);
        # accessing it lets pykafka fetch or create the topic.
        topic = client.topics[channel]

    # Print brokers in the ISR
    isr_set = ["kafka{}".format(broker.id) for broker in topic.partitions[0].isr]
    print("ISR: {}".format(isr_set))
    return topic, isr_set 
Author: hyperledger, Project: fabric-test, Lines: 19, Source: orderer_util.py

Example 7: loops

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: from pykafka.exceptions import SocketDisconnectedError
def loops():
    logger.info(sub_addr)
    logger.info(topic)
    logger.info(request_addr)
    client_msg = KafkaClient(hosts=sub_addr)
    topic_id = client_msg.topics[topic]
    consumer = topic_id.get_simple_consumer(consumer_group=group_id, auto_commit_enable=True,
                                            auto_commit_interval_ms=1, consumer_id=consumer_id)
    logger.info("==========={topic}_consumer_run=================".format(topic=topic))
    while True:
        try:
            for message in consumer:
                if message.value:
                    logger.info("{topic}_consumer--->{message}".format(topic=topic, message=message.value))
                    _send(request_addr, message.value)

        except SocketDisconnectedError:
            # Connection to the broker dropped; recreate the consumer and retry.
            consumer = topic_id.get_simple_consumer(consumer_group=group_id, auto_commit_enable=True,
                                                    auto_commit_interval_ms=1, consumer_id=consumer_id)
            logger.error("{topic}_connect_again.....".format(topic=topic))

        logger.debug("{topic}_loop_run".format(topic=topic)) 
Author: bufubaoni, Project: antitools, Lines: 24, Source: consumer.py

Example 8: write_partition_to_kafka

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: import json, logging
def write_partition_to_kafka(df, zookeeper_hosts, kafka_topic):
    """Write a partition of a dataframe to Kafka.
    This runs in the worker processes."""
    # We must start our own logging for this worker process.
    # We will also see PyKafka messages in this log.
    logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT)
    logger = logging.getLogger('write_partition_to_kafka')
    logger.info('BEGIN')
    client = KafkaClient(zookeeper_hosts=zookeeper_hosts)
    topic = client.topics[kafka_topic]
    # Note that with the default linger_ms=5000 there would be a minimum delay of
    # 10 seconds to complete this function. The small linger_ms used here keeps
    # the delay down to a few milliseconds.
    with topic.get_producer(delivery_reports=False, linger_ms=10.0) as producer:
        logger.info('Kafka client connected')
        for row in df:
            msg = json.dumps(row.asDict())
            producer.produce(msg)
        logger.info('Produced all messages')
    logger.info('END') 
Author: claudiofahey, Project: global_anomaly_detection_demo, Lines: 21, Source: spark_streaming_processor.py
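
For context, a hedged sketch of how a partition writer like this is typically attached to a Spark stream via foreachRDD/foreachPartition; the addresses, topic, and the dstream variable are placeholders, not taken from the original project.

from functools import partial

zookeeper_hosts = 'zk1:2181,zk2:2181'  # placeholder ZooKeeper connect string
kafka_topic = b'output-topic'          # placeholder topic

writer = partial(write_partition_to_kafka,
                 zookeeper_hosts=zookeeper_hosts,
                 kafka_topic=kafka_topic)

# Each micro-batch RDD is written partition-by-partition on the executors.
dstream.foreachRDD(lambda rdd: rdd.foreachPartition(writer))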

Example 9: pykafka_producer_performance

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: import time
def pykafka_producer_performance(use_rdkafka=False, topic=topic):

    # Setup client
    client = KafkaClient(hosts=bootstrap_servers)
    topic = client.topics[topic.encode('UTF-8')]
    producer = topic.get_producer(use_rdkafka=use_rdkafka)
    print("\n>>> Connect Kafka in {} by pykafka as producer".
          format(bootstrap_servers))

    produce_start = time.time()
    # Start producing
    for i in range(msg_count):
        producer.produce(msg_payload)

    producer.stop() # Will flush background queue
    print("produce {} message".format(msg_count))
    return time.time() - produce_start 
Author: sucitw, Project: benchmark-python-client-for-kafka, Lines: 20, Source: pykafka.py

Example 10: pykafka_consumer_performance

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: import time
def pykafka_consumer_performance(use_rdkafka=False, topic=topic):
    # Setup client
    client = KafkaClient(hosts=bootstrap_servers)
    topic = client.topics[topic.encode('UTF-8')]
    print("\n>>> Connect Kafka in {} by pykafka as consumer".
          format(bootstrap_servers))

    msg_consumed_count = 0

    consumer_start = time.time()
    # The consumer polls in a background thread, so the timer must start before it is created
    consumer = topic.get_simple_consumer(use_rdkafka=use_rdkafka)

    while True:
        msg = consumer.consume()
        if msg:
            msg_consumed_count += 1

        if msg_consumed_count >= msg_count:
            break

    consumer_timing = time.time() - consumer_start
    consumer.stop()
    return consumer_timing 
Author: sucitw, Project: benchmark-python-client-for-kafka, Lines: 26, Source: pykafka.py
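
Both benchmark functions above rely on module-level globals (bootstrap_servers, topic, msg_count, msg_payload) defined elsewhere in the benchmark script; a hedged driver sketch with placeholder values:

# Placeholder configuration -- the values are assumptions for illustration.
bootstrap_servers = '127.0.0.1:9092'
topic = 'benchmark-topic'
msg_count = 100000
msg_payload = b'benchmark message payload'

producer_secs = pykafka_producer_performance()
consumer_secs = pykafka_consumer_performance()
print('producer: {:.0f} msgs/s'.format(msg_count / producer_secs))
print('consumer: {:.0f} msgs/s'.format(msg_count / consumer_secs))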

Example 11: kafka_info

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: Flask helpers (render_template, flash, g), logging, and KAFKA_HOSTS from the app's config
def kafka_info(Topic=None):
    try:
        kafka_client = KafkaClient(hosts=KAFKA_HOSTS)
        # Kafka brokers
        BROKERS = [kafka_client.brokers[id].host for id in kafka_client.brokers]
        # Kafka topics
        TOPICS = [t for t in kafka_client.topics]
        if Topic:
            TOPIC = kafka_client.topics[str(Topic)]
            # kafka ISR
            ISR = {id:[host.host for host in TOPIC.partitions[id].isr] for id in TOPIC.partitions}
            # Kafka replicas
            replicas = {id:[host.host for host in TOPIC.partitions[id].replicas] for id in TOPIC.partitions}
            # Kafka leader
            LEADER = {id:TOPIC.partitions[id].leader.host for id in TOPIC.partitions}
            # kafka latest_offset
            latest_offset = {id:TOPIC.partitions[id].latest_available_offset() for id in TOPIC.partitions}
            # kafka earliest_offset
            earliest_offset = {id:TOPIC.partitions[id].earliest_available_offset() for id in TOPIC.partitions}
            return render_template('kafka_partition_show.html',Main_Infos=g.main_infos,Topic=Topic,ISR = ISR,replicas=replicas,LEADER=LEADER,latest_offset=latest_offset,earliest_offset=earliest_offset)
        else:
            return render_template('kafka_show.html',Main_Infos=g.main_infos,topics = TOPICS,brokes = BROKERS )
    except Exception as e:
        logging.error(e)
        flash('Failed to fetch Kafka info!')
        return render_template('Message_static.html', Main_Infos=g.main_infos) 
Author: wylok, Project: opsweb, Lines: 28, Source: kafka_info.py

Example 12: setUpClass

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
def setUpClass(cls):
        cls.kafka = get_cluster()
        cls.client = KafkaClient(cls.kafka.brokers) 
Author: sunqb, Project: oa_qian, Lines: 5, Source: test_cluster.py

Example 13: test_exclude_internal_topics

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: from pykafka.topic import Topic
def test_exclude_internal_topics(self):
        """Test exclude_internal_topics setting

        See also #277 for a related bug.
        """
        topic_name = b"__starts_with_underscores"
        with self.assertRaises(KeyError):
            topic = self.client.topics[topic_name]

        client = KafkaClient(self.kafka.brokers, exclude_internal_topics=False)
        topic = client.topics[topic_name]
        self.assertTrue(isinstance(topic, Topic)) 
Author: sunqb, Project: oa_qian, Lines: 14, Source: test_cluster.py
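
Outside a test harness, the same setting looks like this (the broker address is a placeholder):

from pykafka import KafkaClient

client = KafkaClient(hosts='127.0.0.1:9092', exclude_internal_topics=False)
# Internal topics (names starting with '__') are now visible:
internal = [t for t in client.topics.keys() if t.startswith(b'__')]
print(internal)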

Example 14: test_zk_connect

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also uses an itervalues helper (e.g. from six import itervalues)
def test_zk_connect(self):
        """Clusters started with broker lists and zk connect strings should get same brokers"""
        zk_client = KafkaClient(zookeeper_hosts=self.kafka.zookeeper)
        kafka_client = KafkaClient(hosts=self.kafka.brokers)
        zk_brokers = ["{}:{}".format(b.host, b.port)
                      for b in itervalues(zk_client.brokers)]
        kafka_brokers = ["{}:{}".format(b.host, b.port)
                         for b in itervalues(kafka_client.brokers)]
        self.assertEqual(zk_brokers, kafka_brokers) 
Author: sunqb, Project: oa_qian, Lines: 11, Source: test_cluster.py

Example 15: test_ca_only

# Required import: import pykafka [as alias]
# Or: from pykafka import KafkaClient [as alias]
# Also requires: from pykafka import SslConfig
def test_ca_only(self):
        """Connect with CA cert only (ie no client cert)"""
        config = SslConfig(cafile=self.kafka.certs.root_cert)
        client = KafkaClient(self.kafka.brokers_ssl, ssl_config=config)
        self.roundtrip_test(client) 
Author: sunqb, Project: oa_qian, Lines: 7, Source: test_ssl.py
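
For completeness, a hedged sketch of the same connection using a client certificate as well (mutual TLS); the file paths and broker address are placeholders, not values from the original test suite.

from pykafka import KafkaClient, SslConfig

# Placeholder PEM paths -- substitute your own files.
config = SslConfig(cafile='/certs/ca.pem',
                   certfile='/certs/client.pem',  # client certificate
                   keyfile='/certs/client.key')   # client private key
client = KafkaClient(hosts='broker1:9093', ssl_config=config)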


Note: The pykafka.KafkaClient examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.