

Java Consumer.assign Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.Consumer.assign. If you are wondering what Consumer.assign does, how to use it, or where to find usage examples, the hand-picked code samples below should help. You can also explore further usage examples of org.apache.kafka.clients.consumer.Consumer, the interface this method belongs to.


Six code examples of Consumer.assign are shown below, ordered by popularity.
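Before the project snippets, here is a minimal, self-contained sketch of the typical assign-then-seek pattern. The broker address and topic name ("localhost:9092", "demo-topic") are placeholders rather than values taken from any project below, and the sketch targets the 0.10/0.11-era client API that these examples use:

import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class AssignExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("enable.auto.commit", "false"); // manually assigned consumers usually manage offsets themselves

        Consumer<String, String> consumer = new KafkaConsumer<>(props);

        // assign() pins the consumer to explicit partitions. Unlike subscribe(),
        // it bypasses consumer-group management, so no rebalancing occurs and
        // the caller is responsible for positioning the consumer.
        List<TopicPartition> partitions = Collections.singletonList(new TopicPartition("demo-topic", 0));
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);

        // poll(long) matches the 0.10/0.11-era clients; newer clients prefer poll(Duration).
        ConsumerRecords<String, String> records = consumer.poll(1000L);
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset=%d key=%s value=%s%n", record.offset(), record.key(), record.value());
        }
        consumer.close();
    }
}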

Example 1: createConsumer

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
/**
 * createConsumer - create a new consumer assigned to partition 0 of each topic
 * @return the assigned, positioned consumer
 * @throws Exception if the consumer properties cannot be loaded
 */
private Consumer<String, String> createConsumer() throws Exception {
    Properties props = ConfUtils.getProps(CONSUMER_PROPS);
    Consumer<String, String> consumer = new KafkaConsumer<>(props);

    // Assign partition 0 of each topic explicitly, then position the consumer:
    // seek to the end when no rollback is requested, otherwise rewind by rollBack offsets.
    List<TopicPartition> pts = topics.stream().map(s -> new TopicPartition(s, 0)).collect(Collectors.toList());
    consumer.assign(pts);
    if (rollBack == 0) {
        consumer.seekToEnd(pts);
    } else {
        for (TopicPartition topicPartition : pts) {
            consumer.seek(topicPartition, consumer.position(topicPartition) - rollBack);
            logger.info("Consumer seeked back " + rollBack + " offsets to: " + consumer.position(topicPartition));
        }
    }
    return consumer;
}
 
Developer: BriData, Project: DBus, Lines of code: 23, Source: FullPullerPerfChecker.java

Example 2: createConsumer

import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
/**
 * createConsumer - create a new consumer assigned to partition 0 of the data topic
 * @return the assigned, positioned consumer
 * @throws Exception if the consumer properties cannot be loaded
 */
private Consumer<String, String> createConsumer() throws Exception {

    // Assign partition 0 of the data topic explicitly (no consumer-group management)
    TopicPartition dataTopicPartition = new TopicPartition(topicName, 0);
    List<TopicPartition> topics = Arrays.asList(dataTopicPartition);

    Properties props = ConfUtils.getProps(CONSUMER_PROPS);
    Consumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.assign(topics);

    if (offset == -1) {
        // -1 means "start from the latest message"
        consumer.seekToEnd(topics);
        logger.info("Consumer seek to end");
    } else {
        consumer.seek(dataTopicPartition, offset);
        logger.info(String.format("Consumer seek to offset: %s", consumer.position(dataTopicPartition)));
    }
    return consumer;
}
 
Developer: BriData, Project: DBus, Lines of code: 25, Source: KafkaReader.java

Example 3: NakadiKafkaConsumer

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
import org.apache.kafka.common.TopicPartition;
// KafkaCursor and Timeline are Nakadi-internal classes; their imports are omitted here
public NakadiKafkaConsumer(
        final Consumer<byte[], byte[]> kafkaConsumer,
        final List<KafkaCursor> kafkaCursors,
        final Map<TopicPartition, Timeline> timelineMap,
        final long pollTimeout) {
    this.kafkaConsumer = kafkaConsumer;
    this.pollTimeout = pollTimeout;
    this.timelineMap = timelineMap;
    // define topic/partitions to consume from
    final Map<TopicPartition, KafkaCursor> topicCursors = kafkaCursors.stream().collect(
            Collectors.toMap(
                    cursor -> new TopicPartition(cursor.getTopic(), cursor.getPartition()),
                    cursor -> cursor,
                    (cursor1, cursor2) -> cursor2 // on duplicate partitions, the later cursor wins
            ));
    kafkaConsumer.assign(new ArrayList<>(topicCursors.keySet()));
    topicCursors.forEach((topicPartition, cursor) -> kafkaConsumer.seek(topicPartition, cursor.getOffset()));
}
 
Developer: zalando, Project: nakadi, Lines of code: 19, Source: NakadiKafkaConsumer.java

Example 4: createConsumer

import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
public static Consumer<String, byte[]> createConsumer(Properties props, String subscribeTopic) throws Exception {
    TopicPartition topicPartition = new TopicPartition(subscribeTopic, 0);
    List<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
    Consumer<String, byte[]> consumer = new KafkaConsumer<>(props);
    // consumer.subscribe(Arrays.asList(subscribeTopics.split(","))); // group-managed alternative, replaced by explicit assignment
    consumer.assign(topicPartitions);
    // Consumers are created when the topology starts. On a topology restart, the current
    // policy is to drop any messages left unprocessed in Kafka rather than re-consume them,
    // hence the seek to end.
    consumer.seekToEnd(topicPartitions);
    return consumer;
}
 
Developer: BriData, Project: DBus, Lines of code: 11, Source: DbusHelper.java

Example 5: CommandStore

import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.TopicPartition;
// CommandId, Command and CommandIdAssigner are KSQL-internal classes; their imports are omitted here
public CommandStore(
    String commandTopic,
    Consumer<CommandId, Command> commandConsumer,
    Producer<CommandId, Command> commandProducer,
    CommandIdAssigner commandIdAssigner) {
    this.commandTopic = commandTopic;
    this.commandConsumer = commandConsumer;
    this.commandProducer = commandProducer;
    this.commandIdAssigner = commandIdAssigner;

    // assign partition 0 of the command topic directly (no consumer-group management)
    commandConsumer.assign(Collections.singleton(new TopicPartition(commandTopic, 0)));

    closed = new AtomicBoolean(false);
}
 
Developer: confluentinc, Project: ksql, Lines of code: 15, Source: CommandStore.java

Example 6: ProcessorTopologyTestDriver

import org.apache.kafka.clients.consumer.Consumer; // import the package/class the method depends on
// (the remaining imports are Kafka Streams internals from the 0.11.0.0 source tree and are omitted here)
/**
 * Create a new test driver instance.
 * @param config the stream configuration for the topology
 * @param builder the topology builder that will be used to create the topology instance
 */
public ProcessorTopologyTestDriver(final StreamsConfig config,
                                   final TopologyBuilder builder) {
    topology = builder.setApplicationId(APPLICATION_ID).build(null);
    final ProcessorTopology globalTopology  = builder.buildGlobalStateTopology();

    // Set up the consumer and producer ...
    final Consumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    final Serializer<byte[]> bytesSerializer = new ByteArraySerializer();
    producer = new MockProducer<byte[], byte[]>(true, bytesSerializer, bytesSerializer) {
        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            return Collections.singletonList(new PartitionInfo(topic, PARTITION_ID, null, null, null));
        }
    };

    // Identify internal topics for forwarding in process ...
    for (final TopologyBuilder.TopicsInfo topicsInfo : builder.topicGroups().values()) {
        internalTopics.addAll(topicsInfo.repartitionSourceTopics.keySet());
    }

    // Set up all of the topic+partition information and assign the consumer to each ...
    for (final String topic : topology.sourceTopics()) {
        final TopicPartition tp = new TopicPartition(topic, PARTITION_ID);
        partitionsByTopic.put(topic, tp);
        offsetsByTopicPartition.put(tp, new AtomicLong());
    }

    consumer.assign(offsetsByTopicPartition.keySet());

    final StateDirectory stateDirectory = new StateDirectory(APPLICATION_ID, TestUtils.tempDirectory().getPath(), Time.SYSTEM);
    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    final ThreadCache cache = new ThreadCache("mock", 1024 * 1024, streamsMetrics);

    if (globalTopology != null) {
        final MockConsumer<byte[], byte[]> globalConsumer = createGlobalConsumer();
        for (final String topicName : globalTopology.sourceTopics()) {
            final List<PartitionInfo> partitionInfos = new ArrayList<>();
            partitionInfos.add(new PartitionInfo(topicName, 1, null, null, null));
            globalConsumer.updatePartitions(topicName, partitionInfos);
            final TopicPartition partition = new TopicPartition(topicName, 1);
            globalConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L));
            globalPartitionsByTopic.put(topicName, partition);
            offsetsByTopicPartition.put(partition, new AtomicLong());
        }
        final GlobalStateManagerImpl stateManager = new GlobalStateManagerImpl(globalTopology, globalConsumer, stateDirectory);
        globalStateTask = new GlobalStateUpdateTask(globalTopology,
                                                    new GlobalProcessorContextImpl(config, stateManager, streamsMetrics, cache),
                                                    stateManager
        );
        globalStateTask.initialize();
    }

    if (!partitionsByTopic.isEmpty()) {
        task = new StreamTask(TASK_ID,
                              APPLICATION_ID,
                              partitionsByTopic.values(),
                              topology,
                              consumer,
                              new StoreChangelogReader(
                                  createRestoreConsumer(topology.storeToChangelogTopic()),
                                  Time.SYSTEM,
                                  5000),
                              config,
                              streamsMetrics, stateDirectory,
                              cache,
                              new MockTime(),
                              producer);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 75, Source: ProcessorTopologyTestDriver.java


Note: The org.apache.kafka.clients.consumer.Consumer.assign examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from community-contributed open-source projects. Copyright of the source code remains with the original authors; for distribution and use, please consult the corresponding project's License. Do not reproduce without permission.