

Java ConsumerConnector.shutdown Method Code Examples

This article collects typical usage examples of the Java method kafka.javaapi.consumer.ConsumerConnector.shutdown. If you are unsure what ConsumerConnector.shutdown does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, kafka.javaapi.consumer.ConsumerConnector.


Below are 15 code examples of the ConsumerConnector.shutdown method, ordered by popularity by default.
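Before the individual examples, here is a minimal, self-contained sketch of the old high-level consumer lifecycle that every shutdown() call below belongs to: create a connector, read from a stream, commit offsets, then shut down. The ZooKeeper address, group id, topic name, and timeout used here are illustrative assumptions, not values taken from any example on this page.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class ShutdownSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder ZooKeeper address
        props.put("group.id", "example-group");           // placeholder consumer group
        props.put("consumer.timeout.ms", "5000");         // make hasNext() throw instead of blocking forever

        ConsumerConnector consumer =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        try {
            Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                    consumer.createMessageStreams(Collections.singletonMap("example-topic", 1));
            ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();
            while (it.hasNext()) {
                System.out.println(new String(it.next().message()));
            }
        } catch (ConsumerTimeoutException expected) {
            // no more messages within the timeout
        } finally {
            consumer.commitOffsets(); // persist consumed offsets before tearing down
            consumer.shutdown();      // stop fetcher threads and release the ZooKeeper connection
        }
    }
}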

Example 1: release

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
@Override
public void release() {
	try {
		for(ConsumerConnector consumer : consumerConnMap.values()){
			consumer.commitOffsets(true);
			consumer.shutdown();
		}
		for(ExecutorService executor : executorMap.values()){
			executor.shutdownNow();
		}

		if(scheduleExecutor != null){
			scheduleExecutor.shutdownNow();
		}

		this.zkDistributed.realse(); // "realse" (sic) is the method name defined by the upstream class
	} catch (Exception e) {
		logger.error(ExceptionUtil.getErrorMessage(e));
	}
}
 
Developer: DTStack, Project: jlogstash-input-plugin, Lines: 22, Source: KafkaDistributed.java

Example 2: close

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
@Override
public synchronized void close() throws IOException {
    logger.debug("Stop kafka fetcher. [topic: {}]", topics);
    ConsumerConnector connector = this.connector;
    this.connector = null;
    if (connector != null) {
        connector.commitOffsets();
        connector.shutdown();
    }

    IOUtil.closeQuietly(eventItr);
    // Some events may still exist in the buffer; try to save them.
    List<byte[]> remaining = new ArrayList<>();
    try {
        while (eventItr.hasNext()) {
            remaining.add(eventItr.next());
        }
    } catch (Exception e) {
        // Ignore
    }
    eventItr = null;
    if (!remaining.isEmpty()) {
        this.remaining = remaining;
    }
}
 
Developer: shunfei, Project: indexr, Lines: 26, Source: Kafka08Fetcher.java

Example 3: reconnConsumer

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
public void reconnConsumer(String topicName){
		
		// shut down the connector for this topic
		ConsumerConnector consumerConn = consumerConnMap.get(topicName);
		consumerConn.commitOffsets(true);
		consumerConn.shutdown();
		consumerConnMap.remove(topicName);
		
		// stop the stream-consuming threads for this topic
		ExecutorService es = executorMap.get(topicName);
		es.shutdownNow();
		executorMap.remove(topicName);

		Properties prop = geneConsumerProp();
		ConsumerConnector newConsumerConn = kafka.consumer.Consumer
				.createJavaConsumerConnector(new ConsumerConfig(prop));
		consumerConnMap.put(topicName, newConsumerConn);

		addNewConsumer(topicName, topic.get(topicName));
}
 
Developer: DTStack, Project: jlogstash-input-plugin, Lines: 21, Source: KafkaDistributed.java

Example 4: reconnConsumer

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
public void reconnConsumer(String topicName){
	
	// shut down the connector for this topic
	ConsumerConnector consumerConn = consumerConnMap.get(topicName);
	consumerConn.commitOffsets(true);
	consumerConn.shutdown();
	consumerConnMap.remove(topicName);
	
	// stop the stream-consuming threads for this topic
	ExecutorService es = executorMap.get(topicName);
	es.shutdownNow();	
	executorMap.remove(topicName);
	
	Properties prop = geneConsumerProp();
	ConsumerConnector newConsumerConn = kafka.consumer.Consumer
			.createJavaConsumerConnector(new ConsumerConfig(prop));
	consumerConnMap.put(topicName, newConsumerConn);
	
	addNewConsumer(topicName, topic.get(topicName));
}
 
Developer: DTStack, Project: jlogstash-input-plugin, Lines: 21, Source: Kafka.java

Example 5: getVehicleStartPoints

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
private static Map<String, Location> getVehicleStartPoints() {
	Map<String, Location> vehicleStartPoint = new HashMap<String, Location>();
	Properties props = new Properties();
	props.put("zookeeper.connect", ZOOKEEPER_CONNECTION_STRING);
	props.put("group.id", "DataLoader" + r.nextInt(100));
	// Note: the two deserializer entries below are new-consumer settings; the old high-level consumer ignores them.
	props.put("key.deserializer", StringDeserializer.class.getName());
	props.put("value.deserializer", StringDeserializer.class.getName());
	props.put("auto.offset.reset", "smallest");

	ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(KAFKA_TOPIC_STATIC_DATA, new Integer(1)); 

	KafkaStream<byte[], byte[]> stream = consumer.createMessageStreams(topicCountMap).get(KAFKA_TOPIC_STATIC_DATA)
			.get(0);

	ConsumerIterator<byte[], byte[]> it = stream.iterator();

	while (it.hasNext()) {
		String message = new String(it.next().message());
		try {
			vehicleStartPoint = objectMapper.readValue(message, new TypeReference<Map<String, Location>>() {
			});
		} catch (IOException e) {
			e.printStackTrace();
		}
		break;
	}
	consumer.shutdown();
	return vehicleStartPoint;
}
 
Developer: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 33, Source: VehicleDataGeneration.java

Example 6: run

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
@Override
public void run() {
  long relayed = 0;
  
  LOG.info("Starting relay");
  final ConsumerConnector consumer = Consumer.createJavaConsumerConnector(createConsumerConfig());
  final KafkaStream<byte[], byte[]> stream = createConsumerStream(consumer);
  
  final Producer<byte[], byte[]> producer = new Producer<>(createProducerConfig());
  final ConsumerIterator<byte[], byte[]> it = stream.iterator();
  while (it.hasNext()) {
    final MessageAndMetadata<byte[], byte[]> rx = it.next();
    relayed++;
    if (LOG.isTraceEnabled()) LOG.trace("Relaying {}/{}: key={}, value={}",
                                        relayed,
                                        maxRecords != 0 ? maxRecords : "\u221E",
                                        new String(rx.key()),
                                        new String(rx.message()));
    final KeyedMessage<byte[], byte[]> tx = new KeyedMessage<>(config.sink.topic, rx.key(), rx.message());
    producer.send(tx);
    
    if (maxRecords != 0 && relayed >= maxRecords) {
      LOG.info("Shutting down");
      break;
    }
  }

  producer.close();
  consumer.shutdown();
}
 
Developer: William-Hill-Community, Project: rekafka, Lines: 31, Source: Relay.java

Example 7: release

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
@Override
public void release() {
	
	for(ConsumerConnector consumer : consumerConnMap.values()){
		consumer.commitOffsets(true);
		consumer.shutdown();
	}
	
	for(ExecutorService executor : executorMap.values()){
		executor.shutdownNow();
	}
	
	scheduleExecutor.shutdownNow();
}
 
Developer: DTStack, Project: jlogstash-input-plugin, Lines: 15, Source: Kafka.java

Example 8: close

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
@Override
public void close()
{
  if (standardConsumer != null && standardConsumer.values() != null) {
    for (ConsumerConnector consumerConnector : standardConsumer.values()) {
      consumerConnector.shutdown();
    }
  }
  if (consumerThreadExecutor != null) {
    consumerThreadExecutor.shutdown();
  }
}
 
Developer: apache, Project: apex-malhar, Lines: 13, Source: HighlevelKafkaConsumer.java

Example 9: shutdownConsummer

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
static void shutdownConsummer(String groupId){
	String consumerKey = groupId + "|" + Thread.currentThread().getName();
	consumerLock.lock(); // acquire the lock before the try block so unlock() in finally is always balanced
	try{
		ConsumerConnector consumerConnector = groupConsumers.get(consumerKey);
		if (consumerConnector != null) {
			consumerConnector.shutdown();
			groupConsumers.remove(consumerKey);
		}
	}finally{
		consumerLock.unlock();
	}
}
 
Developer: linzhaoming, Project: easyframe-msg, Lines: 16, Source: KafkaHelper.java

Example 10: main

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
public static void main(String[] args) {

	// String group = args[0];
	String group = "manoj";

	Properties props = new Properties();
	props.put("zookeeper.connect", "localhost:2181");
	props.put("group.id", group);
	props.put("zookeeper.session.timeout.ms", "413");
	props.put("zookeeper.sync.time.ms", "203");
	props.put("auto.commit.interval.ms", "1000");
	// props.put("auto.offset.reset", "smallest");

	ConsumerConfig cf = new ConsumerConfig(props);

	ConsumerConnector consumer = Consumer.createJavaConsumerConnector(cf);

	String topic = "mjtopic";

	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, new Integer(1));
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
	List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

	KafkaStream<byte[], byte[]> stream = streams.get(0);

	ConsumerIterator<byte[], byte[]> it = stream.iterator();
	int i = 1;
	// Without a "consumer.timeout.ms" setting this loop blocks indefinitely,
	// so the shutdown() call below is only reached if the iterator fails.
	while (it.hasNext()) {
		System.out.println(i + ": " + new String(it.next().message()));
		++i;
	}

	consumer.shutdown();
}
 
Developer: mdkhanga, Project: my-blog-code, Lines: 38, Source: KafkaConsumer.java

Example 11: main

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

        final MetricRegistry metricRegistry = new MetricRegistry();
        final Meter meter = metricRegistry.meter("throughput");
        final ConsoleReporter reporter = ConsoleReporter.forRegistry(metricRegistry)
                .convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.MILLISECONDS)
                .build();
        reporter.start(1, TimeUnit.MINUTES);

        final Properties properties = new Properties();
        properties.setProperty("zookeeper.connect", "localhost:2181");
        properties.setProperty("group.id", "myclient");
        properties.setProperty("zookeeper.session.timeout.ms", "400");
        properties.setProperty("zookeeper.sync.time.ms", "200");
        properties.setProperty("auto.commit.interval.ms", "1000");
        properties.setProperty("auto.offset.reset", "smallest");
        properties.setProperty("consumer.timeout.ms", "10000");

        final ConsumerConfig consumerConfig = new ConsumerConfig(properties);

        final String topic = "csc8101";
        final int numThreads = 4;

        final ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
        final Map<String, Integer> topicCountMap = new HashMap<>();
        topicCountMap.put(topic, numThreads);
        final Decoder<String> decoder = new StringDecoder(new VerifiableProperties());
        final Map<String, List<KafkaStream<String, String>>> streamsMap =
                consumerConnector.createMessageStreams(topicCountMap, decoder, decoder);

        final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

        for(final KafkaStream<String, String> stream : streamsMap.get(topic)) {
            final MessageHandler messageHandler = new MessageHandler();
            final RunnableConsumer runnableConsumer = new RunnableConsumer(stream, messageHandler, meter);
            executorService.submit(runnableConsumer);
        }

        executorService.shutdown();
        executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.HOURS);

        consumerConnector.shutdown();

        MessageHandler.close();

        reporter.report();
        reporter.stop();

        // we use the newer version of metrics, which shuts itself down cleanly. But...
        // kafka still uses the old one and doesn't shut it down properly,
        // leaving some metrics-meter-tick-thread lying around. So to avoid
        // java.lang.IllegalThreadStateException from mvn:exec wrapper we terminate it explicitly here
        com.yammer.metrics.Metrics.defaultRegistry().shutdown();

        // bin/kafka-run-class.sh kafka.tools.ConsumerOffsetChecker --zkconnect localhost:2181 --group myclient
    }
 
Developer: jhalliday, Project: csc8101, Lines: 58, Source: KafkaConsumer.java

Example 12: consumeFromTopic

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
public <T> void consumeFromTopic(
		final String formatPluginName,
		final AvroFormatPlugin<T, ?> avroFormatPlugin,
		final KafkaIngestRunData ingestRunData,
		final List<String> queue ) {

	final ConsumerConnector consumer = buildKafkaConsumer();
	if (consumer == null) {
		throw new RuntimeException(
				"Kafka consumer connector is null, unable to create message streams");
	}
	try {
		LOGGER.debug("Kafka consumer setup for format [" + formatPluginName + "] against topic ["
				+ formatPluginName + "]");
		final Map<String, Integer> topicCount = new HashMap<>();
		topicCount.put(
				formatPluginName,
				1);

		final Map<String, List<KafkaStream<byte[], byte[]>>> consumerStreams = consumer
				.createMessageStreams(topicCount);
		final List<KafkaStream<byte[], byte[]>> streams = consumerStreams.get(formatPluginName);

		queue.remove(formatPluginName);
		consumeMessages(
				formatPluginName,
				avroFormatPlugin,
				ingestRunData,
				streams.get(0));
	}
	finally {
		consumer.shutdown();
	}
}
 
Developer: locationtech, Project: geowave, Lines: 35, Source: IngestFromKafkaDriver.java

Example 13: consume

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
@GET
@Timed
public Response consume(
        @QueryParam("topic") String topic,
        @QueryParam("timeout") Integer timeout
) {
    if (Strings.isNullOrEmpty(topic))
        return Response.status(400)
                .entity(new String[]{"Undefined topic"})
                .build();

    Properties props = (Properties) consumerCfg.clone();
    if (timeout != null) props.put("consumer.timeout.ms", "" + timeout);

    ConsumerConfig config = new ConsumerConfig(props);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

    Map<String, Integer> streamCounts = Collections.singletonMap(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(streamCounts);
    KafkaStream<byte[], byte[]> stream = streams.get(topic).get(0);

    List<Message> messages = new ArrayList<>();
    try {
        for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : stream)
            messages.add(new Message(messageAndMetadata));
    } catch (ConsumerTimeoutException ignore) {
    } finally {
        connector.commitOffsets();
        connector.shutdown();
    }

    return Response.ok(messages).build();
}
 
Developer: elodina, Project: dropwizard-kafka-http, Lines: 34, Source: MessageResource.java

Example 14: main

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
public static void main(String[] argv) {

        System.out.println("Start smoke test for kafka");

        // Basic Kafka configuration; change these values for your cluster
        String broker = "localhost:9092"; 
        String zookeeper = "localhost:2181";
        String topic = "test-topic";
        
        // New kafka producer
        Properties props = new Properties();
        props.put("metadata.broker.list", broker);
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("partitioner.class", "cn.chendihao.SimplePartitioner");
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);


        // Produce data in kafka
        long events = 5;
        Random random = new Random();
        for (long nEvents = 0; nEvents < events; nEvents++) {
            long runtime = new Date().getTime();
            String ip = "192.168.2." + random.nextInt(255);
            String msg = runtime + ", www.example.com, " + ip;
            KeyedMessage<String, String> data = new KeyedMessage<String, String>(topic, ip, msg);
            producer.send(data);
            System.out.println("Success to insert message " + msg);
        }

        // Close producer
        producer.close();

        // New consumer
        String groupId = "test-group";
        Properties props2 = new Properties();
        props2.put("zookeeper.connect", zookeeper);
        props2.put("group.id", groupId);
        props2.put("zookeeper.session.timeout.ms", "400");
        props2.put("zookeeper.sync.time.ms", "200");
        props2.put("auto.commit.interval.ms", "1000");
        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props2));
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();

        // Consume data from kafka
        int threads = 1;
        topicCountMap.put(topic, threads);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
        //now launch all threads
        ExecutorService executor = Executors.newFixedThreadPool(threads);
        //now create an object to consume the messages
        int threadNum = 0;
        for (final KafkaStream stream : streams) {
            executor.submit(new ConsumerTest(stream, threadNum));
            threadNum++;
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        // Release the resources
        if (consumer != null) {
            consumer.shutdown();
        }
        if (executor != null) {
            executor.shutdown();
        }

        System.out.println("Stop smoke test for kafka");

    }
 
Developer: tobegit3hub, Project: smoke-kafka, Lines: 76, Source: SmokeKafka.java

Example 15: testMultithread

import kafka.javaapi.consumer.ConsumerConnector; // import the package/class the method depends on
@Test
public void testMultithread() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "    \"batchSize\": 10,\n" +
            "    \"jobQueueSize\": 3\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    int msgCount = 10000;
    for (int i = 0; i < msgCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", Integer.toString(i))
                .put("value", "message:" + i).build();
        sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);

    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
    for (int i = 0; i < msgCount; ++i) {
        stream.iterator().next();
    }

    try {
        stream.iterator().next();
        fail();
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
 
Developer: Netflix, Project: suro, Lines: 52, Source: TestKafkaSinkV2.java


Note: The kafka.javaapi.consumer.ConsumerConnector.shutdown examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright; consult each project's License before distributing or using the code. Do not reproduce without permission.