This article collects typical usage examples of the ConsumerConnector.shutdown method from the Java class kafka.javaapi.consumer.ConsumerConnector. If you have been wondering what exactly ConsumerConnector.shutdown does, how to call it, or where to find examples of it, the curated code samples below may help. You can also explore further usage of the enclosing class kafka.javaapi.consumer.ConsumerConnector.
Below are 14 code examples of the ConsumerConnector.shutdown method, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the system recommend better Java code samples.
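Before the collected examples, here is a minimal orientation sketch of the typical lifecycle around shutdown(): create the connector, consume, then commit offsets and shut down. This sketch is not drawn from any repository below; the ZooKeeper address, group id, and topic name are placeholder assumptions.
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class ShutdownSketch {
    public static void main(String[] args) {
        // Placeholder connection settings; adjust for your environment.
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181");
        props.put("group.id", "example-group");
        props.put("consumer.timeout.ms", "5000"); // let hasNext() throw instead of blocking forever

        ConsumerConnector consumer =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        try {
            Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                    consumer.createMessageStreams(Collections.singletonMap("example-topic", 1));
            ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();
            while (it.hasNext()) {
                System.out.println(new String(it.next().message()));
            }
        } catch (ConsumerTimeoutException expected) {
            // No more messages arrived within the timeout window.
        } finally {
            consumer.commitOffsets(); // persist consumed offsets before tearing down
            consumer.shutdown();      // release ZooKeeper and fetcher resources
        }
    }
}
Most of the examples that follow repeat this commit-then-shutdown pattern in some form.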
Example 1: release
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
@Override
public void release() {
    try {
        for (ConsumerConnector consumer : consumerConnMap.values()) {
            consumer.commitOffsets(true);
            consumer.shutdown();
        }
        for (ExecutorService executor : executorMap.values()) {
            executor.shutdownNow();
        }
        if (scheduleExecutor != null) {
            scheduleExecutor.shutdownNow();
        }
        this.zkDistributed.realse();
    } catch (Exception e) {
        logger.error(ExceptionUtil.getErrorMessage(e));
    }
}
Example 2: close
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
@Override
public synchronized void close() throws IOException {
    logger.debug("Stop kafka fetcher. [topic: {}]", topics);
    ConsumerConnector connector = this.connector;
    this.connector = null;
    if (connector != null) {
        connector.commitOffsets();
        connector.shutdown();
    }
    IOUtil.closeQuietly(eventItr);
    // Some events may still exist in the iterator's buffer; try to save them.
    List<byte[]> remaining = new ArrayList<>();
    try {
        while (eventItr.hasNext()) {
            remaining.add(eventItr.next());
        }
    } catch (Exception e) {
        // Ignore: stop draining once the closed iterator gives out.
    }
    eventItr = null;
    if (!remaining.isEmpty()) {
        this.remaining = remaining;
    }
}
Example 3: reconnConsumer
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
public void reconnConsumer(String topicName) {
    // Shut down the connector associated with this topic
    ConsumerConnector consumerConn = consumerConnMap.get(topicName);
    consumerConn.commitOffsets(true);
    consumerConn.shutdown();
    consumerConnMap.remove(topicName);
    // Stop the stream-consuming threads associated with this topic
    ExecutorService es = executorMap.get(topicName);
    es.shutdownNow();
    executorMap.remove(topicName);
    Properties prop = geneConsumerProp();
    ConsumerConnector newConsumerConn = kafka.consumer.Consumer
            .createJavaConsumerConnector(new ConsumerConfig(prop));
    consumerConnMap.put(topicName, newConsumerConn);
    addNewConsumer(topicName, topic.get(topicName));
}
Example 4: getVehicleStartPoints
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
private static Map<String, Location> getVehicleStartPoints() {
    Map<String, Location> vehicleStartPoint = new HashMap<String, Location>();
    Properties props = new Properties();
    props.put("zookeeper.connect", ZOOKEEPER_CONNECTION_STRING);
    props.put("group.id", "DataLoader" + r.nextInt(100));
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    props.put("auto.offset.reset", "smallest");
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(KAFKA_TOPIC_STATIC_DATA, new Integer(1));
    KafkaStream<byte[], byte[]> stream = consumer.createMessageStreams(topicCountMap).get(KAFKA_TOPIC_STATIC_DATA)
            .get(0);
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        String message = new String(it.next().message());
        try {
            vehicleStartPoint = objectMapper.readValue(message, new TypeReference<Map<String, Location>>() {
            });
        } catch (IOException e) {
            e.printStackTrace();
        }
        break;
    }
    consumer.shutdown();
    return vehicleStartPoint;
}
Author: PacktPublishing; Project: Practical-Real-time-Processing-and-Analytics; Lines: 33; Source: VehicleDataGeneration.java
Example 5: run
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
@Override
public void run() {
    long relayed = 0;
    LOG.info("Starting relay");
    final ConsumerConnector consumer = Consumer.createJavaConsumerConnector(createConsumerConfig());
    final KafkaStream<byte[], byte[]> stream = createConsumerStream(consumer);
    final Producer<byte[], byte[]> producer = new Producer<>(createProducerConfig());
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        final MessageAndMetadata<byte[], byte[]> rx = it.next();
        relayed++;
        if (LOG.isTraceEnabled()) LOG.trace("Relaying {}/{}: key={}, value={}",
                relayed,
                maxRecords != 0 ? maxRecords : "\u221E",
                new String(rx.key()),
                new String(rx.message()));
        final KeyedMessage<byte[], byte[]> tx = new KeyedMessage<>(config.sink.topic, rx.key(), rx.message());
        producer.send(tx);
        if (maxRecords != 0 && relayed >= maxRecords) {
            LOG.info("Shutting down");
            break;
        }
    }
    producer.close();
    consumer.shutdown();
}
Example 6: release
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
@Override
public void release() {
    for (ConsumerConnector consumer : consumerConnMap.values()) {
        consumer.commitOffsets(true);
        consumer.shutdown();
    }
    for (ExecutorService executor : executorMap.values()) {
        executor.shutdownNow();
    }
    scheduleExecutor.shutdownNow();
}
Example 7: close
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
@Override
public void close()
{
    if (standardConsumer != null && standardConsumer.values() != null) {
        for (ConsumerConnector consumerConnector : standardConsumer.values()) {
            consumerConnector.shutdown();
        }
    }
    if (consumerThreadExecutor != null) {
        consumerThreadExecutor.shutdown();
    }
}
Example 8: shutdownConsummer
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
static void shutdownConsummer(String groupId) {
    String consumerKey = groupId + "|" + Thread.currentThread().getName();
    consumerLock.lock();
    try {
        // Read under the lock so we act on a current reference.
        ConsumerConnector consumerConnector = groupConsumers.get(consumerKey);
        if (consumerConnector != null) {
            consumerConnector.shutdown();
            groupConsumers.remove(consumerKey);
        }
    } finally {
        consumerLock.unlock();
    }
}
Example 9: main
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
public static void main(String[] args) {
    // String group = args[0];
    String group = "manoj";
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:2181");
    props.put("group.id", group);
    props.put("zookeeper.session.timeout.ms", "413");
    props.put("zookeeper.sync.time.ms", "203");
    props.put("auto.commit.interval.ms", "1000");
    // props.put("auto.offset.reset", "smallest");
    ConsumerConfig cf = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(cf);
    String topic = "mjtopic";
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, new Integer(1));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    KafkaStream<byte[], byte[]> stream = streams.get(0);
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    int i = 1;
    // Note: without consumer.timeout.ms set, hasNext() blocks indefinitely,
    // so the shutdown() below is only reached if the iterator throws.
    while (it.hasNext()) {
        System.out.println(i + ": " + new String(it.next().message()));
        ++i;
    }
    consumer.shutdown();
}
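As noted in Example 9, hasNext() blocks forever when consumer.timeout.ms is unset, so the trailing shutdown() is effectively unreachable. A common remedy, shown here as a minimal sketch not drawn from that repository (the connection settings are placeholders), is a JVM shutdown hook that commits offsets and shuts the connector down when the process is terminated:
import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.javaapi.consumer.ConsumerConnector;

public class ShutdownHookSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder address
        props.put("group.id", "example-group");           // placeholder group id
        final ConsumerConnector consumer =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        // Commit and shut down cleanly when the JVM receives SIGINT/SIGTERM.
        Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
            @Override
            public void run() {
                consumer.commitOffsets();
                consumer.shutdown();
            }
        }));
        // ... consume from streams here; the hook handles teardown on exit.
    }
}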
Example 10: main
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    final MetricRegistry metricRegistry = new MetricRegistry();
    final Meter meter = metricRegistry.meter("throughput");
    final ConsoleReporter reporter = ConsoleReporter.forRegistry(metricRegistry)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build();
    reporter.start(1, TimeUnit.MINUTES);
    final Properties properties = new Properties();
    properties.setProperty("zookeeper.connect", "localhost:2181");
    properties.setProperty("group.id", "myclient");
    properties.setProperty("zookeeper.session.timeout.ms", "400");
    properties.setProperty("zookeeper.sync.time.ms", "200");
    properties.setProperty("auto.commit.interval.ms", "1000");
    properties.setProperty("auto.offset.reset", "smallest");
    properties.setProperty("consumer.timeout.ms", "10000");
    final ConsumerConfig consumerConfig = new ConsumerConfig(properties);
    final String topic = "csc8101";
    final int numThreads = 4;
    final ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
    final Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, numThreads);
    final Decoder<String> decoder = new StringDecoder(new VerifiableProperties());
    final Map<String, List<KafkaStream<String, String>>> streamsMap =
            consumerConnector.createMessageStreams(topicCountMap, decoder, decoder);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (final KafkaStream<String, String> stream : streamsMap.get(topic)) {
        final MessageHandler messageHandler = new MessageHandler();
        final RunnableConsumer runnableConsumer = new RunnableConsumer(stream, messageHandler, meter);
        executorService.submit(runnableConsumer);
    }
    executorService.shutdown();
    executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.HOURS);
    consumerConnector.shutdown();
    MessageHandler.close();
    reporter.report();
    reporter.stop();
    // We use the newer version of metrics, which shuts itself down cleanly. But
    // kafka still uses the old one and doesn't shut it down properly,
    // leaving some metrics-meter-tick-thread lying around. So to avoid
    // java.lang.IllegalThreadStateException from mvn:exec wrapper we terminate it explicitly here.
    com.yammer.metrics.Metrics.defaultRegistry().shutdown();
    // bin/kafka-run-class.sh kafka.tools.ConsumerOffsetChecker --zkconnect localhost:2181 --group myclient
}
Example 11: consumeFromTopic
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
public <T> void consumeFromTopic(
        final String formatPluginName,
        final AvroFormatPlugin<T, ?> avroFormatPlugin,
        final KafkaIngestRunData ingestRunData,
        final List<String> queue) {
    final ConsumerConnector consumer = buildKafkaConsumer();
    if (consumer == null) {
        throw new RuntimeException(
                "Kafka consumer connector is null, unable to create message streams");
    }
    try {
        LOGGER.debug("Kafka consumer setup for format [" + formatPluginName + "] against topic ["
                + formatPluginName + "]");
        final Map<String, Integer> topicCount = new HashMap<>();
        topicCount.put(
                formatPluginName,
                1);
        final Map<String, List<KafkaStream<byte[], byte[]>>> consumerStreams = consumer
                .createMessageStreams(topicCount);
        final List<KafkaStream<byte[], byte[]>> streams = consumerStreams.get(formatPluginName);
        queue.remove(formatPluginName);
        consumeMessages(
                formatPluginName,
                avroFormatPlugin,
                ingestRunData,
                streams.get(0));
    }
    finally {
        consumer.shutdown();
    }
}
Example 12: consume
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
@GET
@Timed
public Response consume(
        @QueryParam("topic") String topic,
        @QueryParam("timeout") Integer timeout) {
    if (Strings.isNullOrEmpty(topic))
        return Response.status(400)
                .entity(new String[]{"Undefined topic"})
                .build();
    Properties props = (Properties) consumerCfg.clone();
    if (timeout != null) props.put("consumer.timeout.ms", "" + timeout);
    ConsumerConfig config = new ConsumerConfig(props);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);
    Map<String, Integer> streamCounts = Collections.singletonMap(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(streamCounts);
    KafkaStream<byte[], byte[]> stream = streams.get(topic).get(0);
    List<Message> messages = new ArrayList<>();
    try {
        for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : stream)
            messages.add(new Message(messageAndMetadata));
    } catch (ConsumerTimeoutException ignore) {
    } finally {
        connector.commitOffsets();
        connector.shutdown();
    }
    return Response.ok(messages).build();
}
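In Example 12, the consumer.timeout.ms override makes the stream iterator throw ConsumerTimeoutException once the topic is drained, after which the connector commits offsets and shuts down. The snippet does not show the class-level @Path, so as a purely hypothetical invocation, assuming the resource were mounted at /consume on localhost:8080, a call might look like:
curl 'http://localhost:8080/consume?topic=test-topic&timeout=1000'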
Example 13: main
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
public static void main(String[] argv) {
    System.out.println("Start smoke test for kafka");
    // Basic kafka configuration; change for your kafka cluster
    String broker = "localhost:9092";
    String zookeeper = "localhost:2181";
    String topic = "test-topic";
    // New kafka producer
    Properties props = new Properties();
    props.put("metadata.broker.list", broker);
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("partitioner.class", "cn.chendihao.SimplePartitioner");
    props.put("request.required.acks", "1");
    ProducerConfig config = new ProducerConfig(props);
    Producer<String, String> producer = new Producer<String, String>(config);
    // Produce data in kafka
    long events = 5;
    Random random = new Random();
    for (long nEvents = 0; nEvents < events; nEvents++) {
        long runtime = new Date().getTime();
        String ip = "192.168.2." + random.nextInt(255);
        String msg = runtime + ", www.example.com, " + ip;
        KeyedMessage<String, String> data = new KeyedMessage<String, String>(topic, ip, msg);
        producer.send(data);
        System.out.println("Success to insert message " + msg);
    }
    // Close producer
    producer.close();
    // New consumer
    String groupId = "test-group";
    Properties props2 = new Properties();
    props2.put("zookeeper.connect", zookeeper);
    props2.put("group.id", groupId);
    props2.put("zookeeper.session.timeout.ms", "400");
    props2.put("zookeeper.sync.time.ms", "200");
    props2.put("auto.commit.interval.ms", "1000");
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props2));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    // Consume data from kafka
    int threads = 1;
    topicCountMap.put(topic, threads);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    // Now launch all threads
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    // Now create an object to consume the messages
    int threadNum = 0;
    for (final KafkaStream stream : streams) {
        executor.submit(new ConsumerTest(stream, threadNum));
        threadNum++;
    }
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    // Recycle the resources
    if (consumer != null) {
        consumer.shutdown();
    }
    if (executor != null) {
        executor.shutdown();
    }
    System.out.println("Stop smoke test for kafka");
}
Example 14: testMultithread
import kafka.javaapi.consumer.ConsumerConnector; // import the package/class this method depends on
@Test
public void testMultithread() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            " \"type\": \"kafka\",\n" +
            " \"client.id\": \"kafkasink\",\n" +
            " \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            " \"request.required.acks\": 1,\n" +
            " \"batchSize\": 10,\n" +
            " \"jobQueueSize\": 3\n" +
            "}";
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>() {});
    sink.open();
    int msgCount = 10000;
    for (int i = 0; i < msgCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", Integer.toString(i))
                .put("value", "message:" + i).build();
        sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
    for (int i = 0; i < msgCount; ++i) {
        stream.iterator().next();
    }
    try {
        stream.iterator().next();
        fail();
    } catch (ConsumerTimeoutException e) {
        // this is expected
        consumer.shutdown();
    }
}