

Python KafkaConsumer.unsubscribe Method Code Examples

This article collects typical usage examples of the Python method kafka.KafkaConsumer.unsubscribe. If you have been wondering what KafkaConsumer.unsubscribe does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, kafka.KafkaConsumer.


Three code examples of the KafkaConsumer.unsubscribe method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
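Before diving into the full examples, here is a minimal sketch of the subscribe/poll/unsubscribe lifecycle they all build on. The broker address, group id, and topic name are placeholders, not values taken from the examples below.

from kafka import KafkaConsumer

# Minimal sketch (broker, group and topic are placeholders): subscribe to a
# topic, poll once, then unsubscribe before closing so the consumer leaves
# its consumer group cleanly.
consumer = KafkaConsumer(bootstrap_servers=["localhost:9092"],
                         group_id="demo.group")
consumer.subscribe(["demo-topic"])
# poll() returns a dict mapping TopicPartition to a list of records
records = consumer.poll(timeout_ms=1000, max_records=10)
for tp, batch in records.items():
    for record in batch:
        print(tp.topic, record.offset, record.value)
consumer.unsubscribe()  # drop all subscriptions and assigned partitions
consumer.close()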

Example 1: MessagehubStreamingAdapter

# Required import: from kafka import KafkaConsumer [as alias]
# Or: from kafka.KafkaConsumer import unsubscribe [as alias]
# The excerpt below also relies on these imports from its source module:
import json
import ssl
from six import iteritems, string_types, integer_types
from kafka import KafkaConsumer
from pixiedust.display.streaming import StreamingDataAdapter
class MessagehubStreamingAdapter(StreamingDataAdapter):
    def __init__(self, topic, username, password, prod=True):
        # Create a new context using system defaults, then disable TLSv1 and
        # TLSv1.1 so that only TLS 1.2+ is negotiated
        context = ssl.create_default_context()
        context.options |= ssl.OP_NO_TLSv1
        context.options |= ssl.OP_NO_TLSv1_1
        conf = {
            'client_id': 'pixieapp.client.id',
            'group_id': 'pixieapp.group',
            'sasl_mechanism': 'PLAIN',
            'security_protocol': 'SASL_SSL',
            'ssl_context': context,
            "bootstrap_servers": [ "kafka0{}-{}.messagehub.services.us-south.bluemix.net:9093".format(i, "prod01" if prod else "stage1") for i in range(1,6)],
            "sasl_plain_username": username,
            "sasl_plain_password": password,
            "auto_offset_reset":"latest"
        }
        self.consumer = KafkaConsumer(**conf)
        self.consumer.subscribe([topic])
        self.schema = {}
        self.sampleDocCount = 0
        
    def close(self):
        self.consumer.unsubscribe()
        self.consumer.close() 
        
    def tryCast(self, value, t):
        def _innerTryCast(value, t):
            try:
                return t(value)
            except (ValueError, TypeError):
                return None

        if isinstance(t, tuple):
            for a in t:
                ret = _innerTryCast(value, a)
                if ret is not None:
                    return ret
            return None
        
        return _innerTryCast(value, t)
        
    def inferType(self, value):
        if isinstance(value, string_types):
            value = self.tryCast(value, integer_types) or self.tryCast(value, float) or value
        return "integer" if value.__class__==int else "float" if value.__class__ == float else "string"
        
    def inferSchema(self, eventJSON):
        # Infer the schema from the first 20 sampled events only
        if self.sampleDocCount > 20:
            return
        for key, value in iteritems(eventJSON):
            if key not in self.schema:
                self.schema[key] = self.inferType(value)
        self.sampleDocCount += 1
    
    def doGetNextData(self):
        msgs = []
        # poll() returns a dict mapping TopicPartition to a list of records
        msg = self.consumer.poll(1000, max_records=10)
        if msg is not None:
            for topicPartition, records in iteritems(msg):
                for record in records:
                    if record.value is not None:
                        jsonValue = json.loads(record.value.decode('utf-8'))
                        self.inferSchema(jsonValue)
                        msgs.append(jsonValue)
        return msgs
    
Developer ID: ibm-cds-labs, Project: pixiedust, Lines of code: 71, Source file: messageHub.py
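
A hypothetical usage sketch of the adapter above. The topic name and credentials are placeholders (real values come from an IBM Message Hub service instance), and doGetNextData/close are called directly only for illustration:

# Hypothetical usage; topic and credentials are placeholders.
adapter = MessagehubStreamingAdapter(
    topic="clickstream",
    username="<service-username>",
    password="<service-password>",
    prod=True,
)
try:
    batch = adapter.doGetNextData()  # up to 10 decoded JSON events per poll
    print(adapter.schema)            # schema inferred from the sampled events
finally:
    adapter.close()                  # unsubscribes, then closes the consumer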

Example 2: cylc_kafka_consumer

# Required import: from kafka import KafkaConsumer [as alias]
# Or: from kafka.KafkaConsumer import unsubscribe [as alias]
# (json, LOG, CONSUMER_TIMEOUT_MS and _match_msg are defined elsewhere in the
# module this function is excerpted from)
def cylc_kafka_consumer(kafka_server, kafka_topic, group_id, message, debug):
    r"""Look for a matching message in a Kafka topic.

    ARGUMENTS:
     * kafka_server - Kafka server URL, e.g. "localhost:9092".
     * kafka_topic - the Kafka topic to check, e.g. "data-avail".
     * group_id - determines Kafka offset ownership (see below).
     * message - string-ified dict with optional pattern elements (see below).
     * debug - boolean; set by daemon debug mode; prints to suite err log.

    The topic is first consumed from the beginning, then from the previously
    committed offset. If the message is not found by the end of the topic,
    commit the offset and return (it will be retried on the next call). If
    found, return the result.

    Kafka commits offsets per "consumer group" so the group_id argument
    must be unique per distinct trigger in the suite - this allows each trigger
    to separately consume the topic from the beginning, looking for its own
    messages (otherwise, with shared offsets, one trigger could move the offset
    beyond the messages of another trigger). This goes for successive instances
    of an externally-triggered cycling task too, because out-of-order triggering
    may sometimes be required. So this argument should typically be, e.g.:

        group_id=x%(id)s  # "id" is the ID of the dependent task

    where "x" is an arbitrary string you can use to change the group name if
    you need to re-run the suite, and the messages, from the start again,
    without re-running the producer suite. Note this also serves to make the
    function signature cycle-point-specific for Cylc even if the message does
    not contain the cycle point (although it probably should).

    The "message" argument is a stringified dict, e.g.:
        {'system': 'prod', 'point': '2025', 'data': '<nwp.*\.nc>'}
    should be represented as:
        "system:prod point:2025 data:<nwp.*\.nc>"

    A match occurs if all message dict items match, and the result returned
    is the sub-dict of the actual values for the items that contained
    angle-bracket-delimited regex patterns, e.g. {'data': 'nwp-2025.nc'} for
    the example above.

    """

    consumer = KafkaConsumer(kafka_topic, bootstrap_servers=[kafka_server],
                             value_deserializer=json.loads,
                             consumer_timeout_ms=CONSUMER_TIMEOUT_MS,
                             auto_offset_reset='earliest',
                             group_id=group_id)

    # Construct a dict from the message argument "key1:val1 key2:val2 ...".
    cylc_msg = dict(m.split(':') for m in message.split())

    result = (False, {})
    n_cons = 0
    for kafka_msg in consumer:
        n_cons += 1
        m = _match_msg(cylc_msg, kafka_msg)
        if m:
            result = (True, m)
            break
        # (else consume and compare next message)
    consumer.commit()
    # Unsubscribe before exit, otherwise next call will be slow while
    # Kafka times out waiting for this original consumer connection.
    consumer.unsubscribe()
    if debug:
        if result[0]:
            res = "\n  MATCHED: %s" % result[1]
        else:
            res = "no match."
        LOG.debug('Kafka: "%s" (consumed %d) ... %s', message, n_cons, res)
    return result
Developer ID: dpmatthews, Project: cylc, Lines of code: 72, Source file: cylc_kafka_consumer.py
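
A hypothetical invocation of the function above, using the docstring's own example message. The server, topic, and group id values are placeholders:

# Hypothetical invocation; the argument values are placeholders that follow
# the conventions described in the docstring.
matched, values = cylc_kafka_consumer(
    kafka_server="localhost:9092",
    kafka_topic="data-avail",
    group_id="x20250101T00",  # unique per trigger, e.g. "x" + dependent task ID
    message=r"system:prod point:2025 data:<nwp.*\.nc>",
    debug=False,
)
if matched:
    print(values)  # e.g. {'data': 'nwp-2025.nc'} for the regex item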

Example 3: FeedConsumer

# Required import: from kafka import KafkaConsumer [as alias]
# Or: from kafka.KafkaConsumer import unsubscribe [as alias]

#......... some of the code is omitted here .........

        # curb over-optimism
        handler_timeout_ms = min(handler_timeout_ms, 60000)

        try:
            self.cons = KafkaConsumer(
                bootstrap_servers=broker,
                auto_offset_reset=offset,
                enable_auto_commit=self.async_commit,
                auto_commit_interval_ms=commit_every_t_ms,
                group_id=group,
                session_timeout_ms=handler_timeout_ms,
                **kwargs)

        except KafkaTimeoutError as e:
            log.error(
                "[feedconsumer log] INIT KafkaTimeoutError  %s. Please check broker string %s \n",
                str(e), broker)
            raise e
        except Exception as e1:
            log.error("[feedconsumer log] INIT err %s \n", str(e1))
            raise e1

        self.callbacks = {}
        super(FeedConsumer, self).__init__()

    def add_topic(self, topic, todo, parts=None):
        """
        Set the topic/partitions to consume.

        todo (callable) : callback for the topic.
        NOTE: the callback applies to the entire topic; if you call this for
        multiple partitions of the same topic with different callbacks, only
        the last callback is retained.

        topic : topic to listen to

        parts (list) : partitions to listen to (manual partition assignment
        is not supported, so this must be None)

        """
        try:
            self.callbacks[topic] = todo

            if parts is None:
                log.info("[feedconsumer log] : adding topic %s ", topic)
            else:
                raise WoofNotSupported(
                    "manual partition assignement not supported")

            self.cons.subscribe(topics=list(self.callbacks.keys()))
        except Exception as e:
            log.error("[feedconsumer log] add_topic err %s \n", str(e))
            raise e

    def remove_topic(self, topic, parts=None):
        if parts is not None:
            raise WoofNotSupported(
                "manual partition assignement not supported")

        try:
            # Unsubscribe from everything, drop the callback, then
            # re-subscribe to the remaining topics
            self.cons.unsubscribe()
            del self.callbacks[topic]
            self.cons.subscribe(topics=list(self.callbacks.keys()))
        except Exception as e:
            log.error("[feedconsumer log] remove_topic err %s \n", str(e))
            raise e

    def create_kill_signal_handler(self):
        def set_stop_signal(signal, frame):
            self.exit_consumer = True

        signal.signal(self.kill_signal, set_stop_signal)

    def check_for_exit_criteria(self):
        if self.exit_consumer:
            self.cons.commit()
            time.sleep(self.wait_time_before_exit)
            exit(0)

    def run(self):
        while True:
            try:
                for m in self.cons:
                    self.callbacks[m.topic](m.key, m.value)
                    # It looks like in kafka-python the generator
                    # (fetcher._message_generator) stores the offset on the
                    # next call to __next__
                    # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/fetcher.py
                    # TODO verify

                    # "extra" safety
                    if not self.async_commit:
                        self.cons.commit()

                    self.check_for_exit_criteria()
                self.check_for_exit_criteria()
            except Exception as e:
                log.error(
                    "[feedconsumer log] thread run  err %s ..continuing..\n",
                    str(e))
                time.sleep(1)
Developer ID: goibibo, Project: woof, Lines of code: 104, Source file: consumer.py
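
A hypothetical usage sketch of FeedConsumer. The constructor's signature is elided in the excerpt above, so the keyword arguments below are assumptions inferred from how the __init__ body uses them; the (key, value) callback contract comes from run():

# Hypothetical usage; the constructor kwargs are assumptions, since the real
# signature is elided in the excerpt above.
def on_event(key, value):
    # run() invokes each callback as callbacks[m.topic](m.key, m.value)
    print("got", key, value)

fc = FeedConsumer(
    broker="localhost:9092",  # assumed kwarg: passed to bootstrap_servers
    group="my.group",         # assumed kwarg: passed through as group_id
)
fc.add_topic("my-topic", on_event)
# The super().__init__() call and overridden run() suggest a Thread subclass,
# so starting the consume loop would look like:
fc.start()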


Note: the kafka.KafkaConsumer.unsubscribe examples in this article were compiled from open-source code and documentation hosted on GitHub/MSDocs and similar platforms; the snippets were selected from open-source projects contributed by their developers. The source code is copyright its original authors; consult each project's License before redistributing or reusing it, and do not republish without permission.