

Java Consumer.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.Consumer.close. If you are wondering what Consumer.close does, how to use it, or where to find real-world examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.kafka.clients.consumer.Consumer.


Seven code examples of Consumer.close are shown below, sorted by popularity by default.
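Before diving into the project snippets, here is a minimal, self-contained sketch of the usual close pattern. It is not taken from any of the projects cited below; the broker address, group id, and topic name are placeholders, and it assumes a kafka-clients version (2.0 or later) that provides poll(Duration). Because Consumer extends Closeable, try-with-resources is an alternative to the explicit try/finally used in several of the examples.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerCloseSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");                   // placeholder broker address
        props.put("group.id", "close-demo");                                // placeholder consumer group
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        // Consumer extends Closeable, so try-with-resources guarantees close()
        // runs even if poll() or record handling throws.
        try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic"));    // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.key() + " -> " + record.value());
            }
        } // close() leaves the consumer group cleanly and releases network/buffer resources
    }
}

If auto-commit is enabled, close() also commits the latest consumed offsets before leaving the group, which is why the examples below call it in a finally block (or at the end of a bounded loop) rather than relying on process exit.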

Example 1: run

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Override
public void run() {
    Consumer<String, Property> consumer = ConsumerFactory.getPropertyConsumer();
    consumer.subscribe(Arrays.asList(Topics.SAVE_PROPERTY));

    try {
        while (true) {
            ConsumerRecords<String, Property> records = consumer.poll(POLL_DELAY);
            LOGGER.log(Level.INFO, "records fetched to persist {0}", records.count());
            for (ConsumerRecord<String, Property> record : records) {
                Property property = record.value();
                propertyService.save(property);
            }
        }
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, null, e);
    } finally {
        consumer.close();
    }
}
 
Author: joaobmonteiro, Project: mobclip, Lines: 21, Source: PropertyPersister.java

Example 2: main

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
public static void main(String[] args) {

		// build consumer
		Properties props = new Properties();
		props.put("bootstrap.servers", "127.0.0.1:9092");
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");		// 配置value的序列化類
		props.setProperty("group.id", "0");				// group 代表一個消費組(可根據之,實現queue隊列或者topic廣播)
		props.setProperty("enable.auto.commit", "true");
		props.setProperty("auto.offset.reset", "earliest");

		Consumer<String, String> consumer = new KafkaConsumer<String, String>(props);

		// sub msg
		String topic = "demo_topic";
		consumer.subscribe(Arrays.asList(topic));

		for (int i = 0; i < 100; i++) {
			ConsumerRecords<String, String> records = consumer.poll(1000);
			System.out.println(records.count());
			for (ConsumerRecord<String, String> record : records) {
				System.out.println("record = " + record);
			}
		}

		//  close consumer
		consumer.close();

	}
 
Author: xuxueli, Project: xxl-incubator, Lines: 30, Source: KafkaConsumerTest.java

Example 3: testSimpleConsumer

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Test
public void testSimpleConsumer() throws Exception {
    String topic = "persistent://sample/standalone/ns/testSimpleConsumer";

    Properties props = new Properties();
    props.put("bootstrap.servers", lookupUrl.toString());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    Consumer<String, String> consumer = new PulsarKafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    Producer pulsarProducer = pulsarClient.createProducer(topic);

    for (int i = 0; i < 10; i++) {
        Message msg = MessageBuilder.create().setKey(Integer.toString(i)).setContent(("hello-" + i).getBytes())
                .build();
        pulsarProducer.send(msg);
    }

    for (int i = 0; i < 10; i++) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        assertEquals(records.count(), 1);
        int idx = i;
        records.forEach(record -> {
            assertEquals(record.key(), Integer.toString(idx));
            assertEquals(record.value(), "hello-" + idx);
        });

        consumer.commitSync();
    }

    consumer.close();
}
 
Author: apache, Project: incubator-pulsar, Lines: 37, Source: KafkaConsumerTest.java

Example 4: run

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Override
public void run() {

    Consumer<String, String> consumer = ConsumerFactory.getPageConsumer();
    consumer.subscribe(Arrays.asList(PAGE_EXTRACTOR));

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(POLL_DELAY);
            LOGGER.log(Level.INFO, "records fetched {0}", records.count());

            Producer<String, Property> propertyProducer = ProducerFactory.getPropertyProducer();
            
            for (ConsumerRecord<String, String> record : records) {
                String url = record.value();
                int type = 2;

                List<Property> properties = extractPageData(url, type);
                for (Property property : properties) {
                    propertyProducer.send(new ProducerRecord<>(PAGE_DETAIL_EXTRACTOR, property));
                }
            }
            consumer.commitAsync((Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) -> {
                if (exception != null) {
                    LOGGER.log(Level.SEVERE, null, exception);
                } else {
                    offsets.entrySet().stream()
                            .forEach(entry -> LOGGER.log(Level.INFO, "records committed: partition {0}, offset {1}",
                                    Arrays.asList(entry.getKey().partition(), entry.getValue().offset())));
                }
            });
        }
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, null, e);
    } finally {
        consumer.close();
    }
}
 
Author: joaobmonteiro, Project: mobclip, Lines: 39, Source: PageExtractor.java

Example 5: testSimpleProducerConsumer

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Test(timeOut = 30000)
public void testSimpleProducerConsumer() throws Exception {
    String topic = "persistent://sample/standalone/ns/testSimpleProducerConsumer";

    Properties producerProperties = new Properties();
    producerProperties.put("bootstrap.servers", lookupUrl.toString());
    producerProperties.put("key.serializer", IntegerSerializer.class.getName());
    producerProperties.put("value.serializer", StringSerializer.class.getName());
    Producer<Integer, String> producer = new KafkaProducer<>(producerProperties);

    Properties consumerProperties = new Properties();
    consumerProperties.put("bootstrap.servers", lookupUrl.toString());
    consumerProperties.put("group.id", "my-subscription-name");
    consumerProperties.put("key.deserializer", IntegerDeserializer.class.getName());
    consumerProperties.put("value.deserializer", StringDeserializer.class.getName());
    consumerProperties.put("enable.auto.commit", "true");
    Consumer<Integer, String> consumer = new KafkaConsumer<>(consumerProperties);
    consumer.subscribe(Arrays.asList(topic));

    List<Long> offsets = new ArrayList<>();

    for (int i = 0; i < 10; i++) {
        RecordMetadata md = producer.send(new ProducerRecord<Integer, String>(topic, i, "hello-" + i)).get();
        offsets.add(md.offset());
        log.info("Published message at {}", Long.toHexString(md.offset()));
    }

    producer.flush();
    producer.close();

    for (int i = 0; i < 10; i++) {
        ConsumerRecords<Integer, String> records = consumer.poll(1000);
        assertEquals(records.count(), 1);

        int idx = i;
        records.forEach(record -> {
            log.info("Received record: {}", record);
            assertEquals(record.key().intValue(), idx);
            assertEquals(record.value(), "hello-" + idx);
            assertEquals(record.offset(), offsets.get(idx).longValue());
        });
    }

    consumer.close();
}
 
Author: apache, Project: incubator-pulsar, Lines: 46, Source: KafkaApiTest.java

Example 6: read

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
/**
 * Reads all JSON messages from the replicate info topic and builds the
 * latest catalog for the PLOG monitor to use when controlling Kafka task
 * configuration. This is only done during source connector startup,
 * so a new consumer is created on each call.
 *
 * @return synchronized sorted catalog of replicate info
 * @throws Exception for any consumer errors
 */
public synchronized Map <String, ReplicateInfo> read () throws Exception {
    /* short lived consumer, no need to keep it alive seeing that 
     * it always consumes all messages again, the volume of messages
     * is low and the consumer doesn't need to be coordinated */
    Consumer<String, String> consumer = new KafkaConsumer<String, String> (
        consumerProps
    );
    consumer.subscribe(Arrays.asList(topicName));
    
    Map<String, ReplicateInfo> catalog = Collections.synchronizedMap(
        new LinkedHashMap<String, ReplicateInfo> ()
    );
    try {
        logger.debug (
            "Polling topic: " + topicName + " for existing replicate " +
            "catalog"
        );
        
        /* greedy implementation, always fetch all replicate info 
         * messages when source connector is started */
        ConsumerRecords<String, String> records = consumer.poll(
            REPLICATE_INFO_POLL_TIMEOUT
        );
        consumer.commitSync();
        
        if (records != null) {
            for (ConsumerRecord<String, String> record : records) {
                logger.trace (
                    "Read " + topicName + " record, key: " + 
                    record.key() + " value: " + record.value()
                );
                String identifier  = record.key();
                ReplicateInfo info = ReplicateInfo.fromJSONString(
                    record.value()
                );
                /* all messages are consumed in order, always overwrite 
                 * with the latest info */
                catalog.put (identifier, info);
            }
        }
    }
    catch (Exception e) {
        throw new Exception (
            "Failed to read replicate info records from topic: " + 
            topicName + ", reason: " + e.getMessage(),
            e
        );
    }
    finally {
        consumer.close();
    }
    
    logger.debug (
        "Read replicate catalog: " + catalog.toString() + " " +
        "from topic: " + topicName
    );
    return catalog;
}
 
Author: dbvisitsoftware, Project: replicate-connector-for-kafka, Lines: 68, Source: ReplicateInfoTopic.java

Example 7: read

import org.apache.kafka.clients.consumer.Consumer; // import the package/class this method depends on
@Override
public synchronized Map <String, ReplicateInfo> read () 
    throws Exception {
    Map<String, ReplicateInfo> catalog = Collections.synchronizedMap(
        new LinkedHashMap<String, ReplicateInfo> ()
    );
    Consumer<String, String> consumer = 
        new MockConsumer<String, String>(
            OffsetResetStrategy.EARLIEST
        );
    consumer.subscribe(Arrays.asList(mockTopicName));
    
    try {
        /* greedy implementation, always fetch all replicate info 
         * messages when source connector is started */
        ConsumerRecords<String, String> records = consumer.poll(
            100
        );
        if (records != null) {
            for (ConsumerRecord<String, String> record : records) {
                String identifier  = record.key();
                ReplicateInfo info = ReplicateInfo.fromJSONString(
                    record.value()
                );
                /* all messages are consumed in order, always overwrite 
                 * with the latest info */
                catalog.put (identifier, info);
            }
        }
    }
    catch (Exception e) {
        throw new Exception (
            "Failed to read replicate info records from topic: " + 
            mockTopicName + ", reason: " + e.getMessage(),
            e
        );
    }
    finally {
        consumer.close();
    }
    
    return catalog;
}
 
Author: dbvisitsoftware, Project: replicate-connector-for-kafka, Lines: 44, Source: ReplicateTestConfig.java


Note: The org.apache.kafka.clients.consumer.Consumer.close examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and the copyright of the source code remains with those authors. Please consult each project's license before distributing or using the code; do not republish without permission.