

Java Consumer.close Method Code Examples

This article collects typical code examples of the Java method org.apache.kafka.clients.consumer.Consumer.close. If you are wondering what Consumer.close does, how to call it, or what it looks like in real code, the selected examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.clients.consumer.Consumer.


Seven code examples of the Consumer.close method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.

Example 1: run

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Override
public void run() {
    Consumer<String, Property> consumer = ConsumerFactory.getPropertyConsumer();
    consumer.subscribe(Arrays.asList(Topics.SAVE_PROPERTY));

    try {
        while (true) {
            ConsumerRecords<String, Property> records = consumer.poll(POLL_DELAY);
            LOGGER.log(Level.INFO, "records fetched to persist {0}", records.count());
            for (ConsumerRecord<String, Property> record : records) {
                Property property = record.value();
                propertyService.save(property);
            }
        }
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, null, e);
    } finally {
        consumer.close();
    }
}
 
Developer: joaobmonteiro, Project: mobclip, Lines: 21, Source: PropertyPersister.java
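
The loop in Example 1 only exits through an exception, so consumer.close() in the finally block is what ultimately releases the consumer. A common variation of that pattern, sketched below with placeholder broker, group, and topic names (this is not code from the mobclip project), calls Consumer.wakeup() from a shutdown hook: the blocked poll() then throws WakeupException and the loop ends deliberately before closing.

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;

public class GracefulCloseExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");            // placeholder broker
        props.put("group.id", "graceful-close-demo");                // placeholder group
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        Consumer<String, String> consumer = new KafkaConsumer<>(props);

        // wakeup() is the only Consumer method that is safe to call from another thread;
        // it makes a blocked poll() throw WakeupException so the loop below can end.
        final Thread mainThread = Thread.currentThread();
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            consumer.wakeup();
            try {
                mainThread.join();               // give the main loop time to close() cleanly
            } catch (InterruptedException ignored) {
            }
        }));

        try {
            consumer.subscribe(Arrays.asList("demo_topic"));         // placeholder topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("record = " + record);
                }
            }
        } catch (WakeupException e) {
            // expected during shutdown; fall through to close()
        } finally {
            // with auto-commit left at its default (true), close() also commits the
            // latest offsets before leaving the group and releasing sockets
            consumer.close();
        }
    }
}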

Example 2: main

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
public static void main(String[] args) {

    // build the consumer
    Properties props = new Properties();
    props.put("bootstrap.servers", "127.0.0.1:9092");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");    // deserializer class for message values
    props.setProperty("group.id", "0");                 // group.id identifies a consumer group (use it to get queue-style or broadcast-style consumption)
    props.setProperty("enable.auto.commit", "true");
    props.setProperty("auto.offset.reset", "earliest");

    Consumer<String, String> consumer = new KafkaConsumer<String, String>(props);

    // subscribe to the topic
    String topic = "demo_topic";
    consumer.subscribe(Arrays.asList(topic));

    for (int i = 0; i < 100; i++) {
        ConsumerRecords<String, String> records = consumer.poll(1000);
        System.out.println(records.count());
        for (ConsumerRecord<String, String> record : records) {
            System.out.println("record = " + record);
        }
    }

    // close the consumer
    consumer.close();
}
 
Developer: xuxueli, Project: xxl-incubator, Lines: 30, Source: KafkaConsumerTest.java
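
Example 2 uses the no-argument close(), which blocks for a default timeout (30 seconds in recent client versions) while outstanding offset commits complete and the consumer leaves its group. On kafka-clients 2.0 or newer (an assumption about the dependency in use, not something stated by the project above), the overload that takes a java.time.Duration bounds that wait explicitly. A minimal sketch with placeholder settings:

import java.time.Duration;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class BoundedCloseExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");                    // placeholder broker
        props.put("group.id", "bounded-close-demo");                         // placeholder group
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        Consumer<String, String> consumer = new KafkaConsumer<>(props);
        // ... subscribe and poll as in Example 2 ...

        // Wait at most five seconds for offset commits and the group-leave request,
        // then release network resources regardless.
        consumer.close(Duration.ofSeconds(5));
    }
}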

Example 3: testSimpleConsumer

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Test
public void testSimpleConsumer() throws Exception {
    String topic = "persistent://sample/standalone/ns/testSimpleConsumer";

    Properties props = new Properties();
    props.put("bootstrap.servers", lookupUrl.toString());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    Consumer<String, String> consumer = new PulsarKafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    Producer pulsarProducer = pulsarClient.createProducer(topic);

    for (int i = 0; i < 10; i++) {
        Message msg = MessageBuilder.create().setKey(Integer.toString(i)).setContent(("hello-" + i).getBytes())
                .build();
        pulsarProducer.send(msg);
    }

    for (int i = 0; i < 10; i++) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        assertEquals(records.count(), 1);
        int idx = i;
        records.forEach(record -> {
            assertEquals(record.key(), Integer.toString(idx));
            assertEquals(record.value(), "hello-" + idx);
        });

        consumer.commitSync();
    }

    consumer.close();
}
 
Developer: apache, Project: incubator-pulsar, Lines: 37, Source: KafkaConsumerTest.java

Example 4: run

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Override
public void run() {

    Consumer<String, String> consumer = ConsumerFactory.getPageConsumer();
    consumer.subscribe(Arrays.asList(PAGE_EXTRACTOR));

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(POLL_DELAY);
            LOGGER.log(Level.INFO, "records fetched {0}", records.count());

            Producer<String, Property> propertyProducer = ProducerFactory.getPropertyProducer();
            
            for (ConsumerRecord<String, String> record : records) {
                String url = record.value();
                int type = 2;

                List<Property> properties = extractPageData(url, type);
                for (Property property : properties) {
                    propertyProducer.send(new ProducerRecord<>(PAGE_DETAIL_EXTRACTOR, property));
                }
            }
            consumer.commitAsync((Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) -> {
                if (exception != null) {
                    LOGGER.log(Level.SEVERE, null, exception);
                } else {
                    offsets.entrySet().stream()
                            .forEach(entry -> LOGGER.log(Level.INFO, "records committed: partition {0}, offset {1}",
                                    Arrays.asList(entry.getKey().partition(), entry.getValue().offset())));
                }
            });
        }
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, null, e);
    } finally {
        consumer.close();
    }
}
 
Developer: joaobmonteiro, Project: mobclip, Lines: 39, Source: PageExtractor.java

Example 5: testSimpleProducerConsumer

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Test(timeOut = 30000)
public void testSimpleProducerConsumer() throws Exception {
    String topic = "persistent://sample/standalone/ns/testSimpleProducerConsumer";

    Properties producerProperties = new Properties();
    producerProperties.put("bootstrap.servers", lookupUrl.toString());
    producerProperties.put("key.serializer", IntegerSerializer.class.getName());
    producerProperties.put("value.serializer", StringSerializer.class.getName());
    Producer<Integer, String> producer = new KafkaProducer<>(producerProperties);

    Properties consumerProperties = new Properties();
    consumerProperties.put("bootstrap.servers", lookupUrl.toString());
    consumerProperties.put("group.id", "my-subscription-name");
    consumerProperties.put("key.deserializer", IntegerDeserializer.class.getName());
    consumerProperties.put("value.deserializer", StringDeserializer.class.getName());
    consumerProperties.put("enable.auto.commit", "true");
    Consumer<Integer, String> consumer = new KafkaConsumer<>(consumerProperties);
    consumer.subscribe(Arrays.asList(topic));

    List<Long> offsets = new ArrayList<>();

    for (int i = 0; i < 10; i++) {
        RecordMetadata md = producer.send(new ProducerRecord<Integer, String>(topic, i, "hello-" + i)).get();
        offsets.add(md.offset());
        log.info("Published message at {}", Long.toHexString(md.offset()));
    }

    producer.flush();
    producer.close();

    for (int i = 0; i < 10; i++) {
        ConsumerRecords<Integer, String> records = consumer.poll(1000);
        assertEquals(records.count(), 1);

        int idx = i;
        records.forEach(record -> {
            log.info("Received record: {}", record);
            assertEquals(record.key().intValue(), idx);
            assertEquals(record.value(), "hello-" + idx);
            assertEquals(record.offset(), offsets.get(idx).longValue());
        });
    }

    consumer.close();
}
 
Developer: apache, Project: incubator-pulsar, Lines: 46, Source: KafkaApiTest.java

Example 6: read

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
/**
 * Reads all JSON messages from the replicate info topic and creates the
 * latest catalog for the PLOG monitor to use when controlling Kafka task
 * configuration. This is only done during source connector startup,
 * therefore a new consumer is created on each call.
 * 
 * @return synchronized sorted catalog of replicate info
 * @throws Exception for any consumer errors
 */
public synchronized Map <String, ReplicateInfo> read () throws Exception {
    /* short lived consumer, no need to keep it alive seeing that 
     * it always consumes all messages again, the volume of messages
     * is low and the consumer doesn't need to be coordinated */
    Consumer<String, String> consumer = new KafkaConsumer<String, String> (
        consumerProps
    );
    consumer.subscribe(Arrays.asList(topicName));
    
    Map<String, ReplicateInfo> catalog = Collections.synchronizedMap(
        new LinkedHashMap<String, ReplicateInfo> ()
    );
    try {
        logger.debug (
            "Polling topic: " + topicName + " for existing replicate " +
            "catalog"
        );
        
        /* greedy implementation, always fetch all replicate info 
         * messages when source connector is started */
        ConsumerRecords<String, String> records = consumer.poll(
            REPLICATE_INFO_POLL_TIMEOUT
        );
        consumer.commitSync();
        
        if (records != null) {
            for (ConsumerRecord<String, String> record : records) {
                logger.trace (
                    "Read " + topicName + " record, key: " + 
                    record.key() + " value: " + record.value()
                );
                String identifier  = record.key();
                ReplicateInfo info = ReplicateInfo.fromJSONString(
                    record.value()
                );
                /* all messages are consumed in order, always overwrite 
                 * with the latest info */
                catalog.put (identifier, info);
            }
        }
    }
    catch (Exception e) {
        throw new Exception (
            "Failed to read replicate info records from topic: " + 
            topicName + ", reason: " + e.getMessage(),
            e
        );
    }
    finally {
        consumer.close();
    }
    
    logger.debug (
        "Read replicate catalog: " + catalog.toString() + " " +
        "from topic: " + topicName
    );
    return catalog;
}
 
Developer: dbvisitsoftware, Project: replicate-connector-for-kafka, Lines: 68, Source: ReplicateInfoTopic.java

Example 7: read

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Override
public synchronized Map <String, ReplicateInfo> read () 
    throws Exception {
    Map<String, ReplicateInfo> catalog = Collections.synchronizedMap(
        new LinkedHashMap<String, ReplicateInfo> ()
    );
    Consumer<String, String> consumer = 
        new MockConsumer<String, String>(
            OffsetResetStrategy.EARLIEST
        );
    consumer.subscribe(Arrays.asList(mockTopicName));
    
    try {
        /* greedy implementation, always fetch all replicate info 
         * messages when source connector is started */
        ConsumerRecords<String, String> records = consumer.poll(
            100
        );
        if (records != null) {
            for (ConsumerRecord<String, String> record : records) {
                String identifier  = record.key();
                ReplicateInfo info = ReplicateInfo.fromJSONString(
                    record.value()
                );
                /* all messages are consumed in order, always overwrite 
                 * with the latest info */
                catalog.put (identifier, info);
            }
        }
    }
    catch (Exception e) {
        throw new Exception (
            "Failed to read replicate info records from topic: " + 
            mockTopicName + ", reason: " + e.getMessage(),
            e
        );
    }
    finally {
        consumer.close();
    }
    
    return catalog;
}
 
Developer: dbvisitsoftware, Project: replicate-connector-for-kafka, Lines: 44, Source: ReplicateTestConfig.java


Note: The org.apache.kafka.clients.consumer.Consumer.close examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; the source code remains copyrighted by its original authors, and its distribution and use are subject to the corresponding project's license. Please do not republish without permission.