This article collects typical usage examples of the Java method kafka.consumer.Consumer.createJavaConsumerConnector. If you have been wondering what Consumer.createJavaConsumerConnector does and how to use it in practice, the curated code samples below may help. You can also read more about its enclosing class, kafka.consumer.Consumer.
The sections below present 15 code examples of Consumer.createJavaConsumerConnector, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
Example 1: newConsumerConnector
import kafka.consumer.Consumer; // import the package/class this method depends on
@Override
public ConsumerConnector newConsumerConnector(String name, ConsumerConfig configOverrides) {
Properties mergedProps = new Properties();
Map<String, String> config = configs.get(name);
if (config != null) {
mergedProps.putAll(config);
}
if (configOverrides != null) {
mergedProps.putAll(configOverrides.createConsumerConfig());
}
return Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(mergedProps));
}
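A hypothetical call site for this factory method, showing how the stored per-name defaults and explicit overrides combine (the factory instance, the "analytics" configuration name, and the overrides object are illustrative assumptions, not part of the original project):
// merge the named defaults with explicit overrides, then build the connector
ConsumerConnector connector = factory.newConsumerConnector("analytics", overrides);
Map<String, Integer> topicCountMap = new HashMap<>();
topicCountMap.put("events", 1); // one stream for the hypothetical "events" topic
Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);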
Example 2: open
import kafka.consumer.Consumer; // import the package/class this method depends on
public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
_collector = spoutOutputCollector;
Properties props = new Properties();
props.put("zookeeper.connect", conf.get(OSMIngest.ZOOKEEPERS));
props.put("group.id", groupId);
props.put("zookeeper.sync.time.ms", "200");
props.put("auto.commit.interval.ms", "1000");
ConsumerConfig consumerConfig = new ConsumerConfig(props);
ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(topic, 1);
Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, new StringDecoder(new VerifiableProperties()), new StringDecoder(new VerifiableProperties()));
List<KafkaStream<String, String>> streams = consumerMap.get(topic);
if (streams.size() != 1) {
log.error("Streams should be of size 1");
throw new IllegalStateException("Expected exactly one stream for topic " + topic + " but got " + streams.size());
}
KafkaStream<String, String> stream = streams.get(0);
kafkaIterator = stream.iterator();
}
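For context, a spout initialized this way would typically drain kafkaIterator from nextTuple. A minimal sketch, under the assumption that consumer.timeout.ms is configured so that hasNext() cannot block Storm's event loop indefinitely (the Values layout is illustrative):
@Override
public void nextTuple() {
try {
if (kafkaIterator.hasNext()) {
MessageAndMetadata<String, String> entry = kafkaIterator.next();
_collector.emit(new Values(entry.key(), entry.message()));
}
} catch (ConsumerTimeoutException e) {
// no message arrived within consumer.timeout.ms; return control to Storm
}
}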
Example 3: readTopicToList
import kafka.consumer.Consumer; // import the package/class this method depends on
/**
* Read topic to list, only using Kafka code.
*/
private static List<MessageAndMetadata<byte[], byte[]>> readTopicToList(String topicName, ConsumerConfig config, final int stopAfter) {
ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(config);
// we request only one stream per consumer instance. Kafka will make sure that each consumer group
// will see each message only once.
Map<String,Integer> topicCountMap = Collections.singletonMap(topicName, 1);
Map<String, List<KafkaStream<byte[], byte[]>>> streams = consumerConnector.createMessageStreams(topicCountMap);
if (streams.size() != 1) {
throw new RuntimeException("Expected only one message stream but got "+streams.size());
}
List<KafkaStream<byte[], byte[]>> kafkaStreams = streams.get(topicName);
if (kafkaStreams == null) {
throw new RuntimeException("Requested stream not available. Available streams: "+streams.toString());
}
if (kafkaStreams.size() != 1) {
throw new RuntimeException("Requested 1 stream from Kafka, bot got "+kafkaStreams.size()+" streams");
}
LOG.info("Opening Consumer instance for topic '{}' on group '{}'", topicName, config.groupId());
ConsumerIterator<byte[], byte[]> iteratorToRead = kafkaStreams.get(0).iterator();
List<MessageAndMetadata<byte[], byte[]>> result = new ArrayList<>();
int read = 0;
while(iteratorToRead.hasNext()) {
read++;
result.add(iteratorToRead.next());
if (read == stopAfter) {
LOG.info("Read "+read+" elements");
return result;
}
}
return result;
}
Example 4: KtGroup
import kafka.consumer.Consumer; // import the package/class this method depends on
public KtGroup(Config config) {
// Because we are not pushing names to ZooKeeper, random names should be fine
String groupId = config.getGroupId();
if (groupId == null) {
// default to a unique group id
groupId = "Kt-" + UUID.randomUUID();
}
String offset = "largest";
if (config.getLocation().equals("tail")) {
offset = "smallest";
}
log.info("Starting consumer at '{}' offset", offset);
consumer = Consumer.createJavaConsumerConnector(createConsumerConfig(config.getZookeeper(), groupId, offset));
this.topic = config.getTopic();
}
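createConsumerConfig is not shown in this snippet. A minimal sketch of what such a helper for the old ZooKeeper-based consumer usually looks like (the exact property values here are assumptions, not the original project's):
private static ConsumerConfig createConsumerConfig(String zookeeper, String groupId, String offset) {
Properties props = new Properties();
props.put("zookeeper.connect", zookeeper);
props.put("group.id", groupId);
// "smallest" replays the topic from the beginning, "largest" only reads new messages
props.put("auto.offset.reset", offset);
props.put("zookeeper.sync.time.ms", "200");
props.put("auto.commit.interval.ms", "1000");
return new ConsumerConfig(props);
}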
Example 5: createKafkaStream
import kafka.consumer.Consumer; // import the package/class this method depends on
public List<KafkaStream<byte[], byte[]>> createKafkaStream(
String zookeeperConnectString,
String topic,
int partitions
) {
//create consumer
Properties consumerProps = new Properties();
consumerProps.put("zookeeper.connect", zookeeperConnectString);
consumerProps.put("group.id", "testClient");
consumerProps.put("zookeeper.session.timeout.ms", "6000");
consumerProps.put("zookeeper.sync.time.ms", "200");
consumerProps.put("auto.commit.interval.ms", "1000");
consumerProps.put("consumer.timeout.ms", "500");
ConsumerConfig consumerConfig = new ConsumerConfig(consumerProps);
ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
Map<String, Integer> topicCountMap = new HashMap<>();
topicCountMap.put(topic, partitions);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
return consumerMap.get(topic);
}
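Because consumer.timeout.ms is set to 500, iterators over the returned streams throw kafka.consumer.ConsumerTimeoutException once no message arrives within half a second. A minimal sketch of draining the first returned stream under that behavior:
List<byte[]> messages = new ArrayList<>();
ConsumerIterator<byte[], byte[]> it = streams.get(0).iterator();
try {
while (it.hasNext()) { // throws ConsumerTimeoutException after 500 ms of silence
messages.add(it.next().message());
}
} catch (ConsumerTimeoutException e) {
// expected once the topic has been drained
}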
Example 6: startConsumers
import kafka.consumer.Consumer; // import the package/class this method depends on
@Override
public CompletionService<Histogram> startConsumers() {
final ConsumerConfig consumerConfig = new ConsumerConfig(props);
consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
// Create message streams
final Map<String, Integer> topicMap = new HashMap<>();
topicMap.put(topic, numThreads);
final Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicMap);
final List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
// Pass each stream to a consumer that will read from the stream in its own thread.
for (final KafkaStream<byte[], byte[]> stream : streams) {
executorCompletionService.submit(new BlockingKafkaMessageConsumer(stream));
}
return executorCompletionService;
}
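BlockingKafkaMessageConsumer is project-specific and not shown above. A plausible shape, assuming the Codahale metrics Histogram and UniformReservoir classes and that each task simply records message sizes until its stream ends (every detail below is an illustrative assumption, not the original implementation):
public class BlockingKafkaMessageConsumer implements Callable<Histogram> {
private final KafkaStream<byte[], byte[]> stream;

public BlockingKafkaMessageConsumer(KafkaStream<byte[], byte[]> stream) {
this.stream = stream;
}

@Override
public Histogram call() {
Histogram histogram = new Histogram(new UniformReservoir());
ConsumerIterator<byte[], byte[]> it = stream.iterator();
// blocks on the iterator until the connector is shut down or a consumer timeout fires
while (it.hasNext()) {
histogram.update(it.next().message().length);
}
return histogram;
}
}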
Example 7: kafkaStream
import kafka.consumer.Consumer; // import the package/class this method depends on
@Bean
protected KafkaStream<String, float[]> kafkaStream() {
final String topicName = retrieveTopicNameFromGatewayAddress(gatewayUrl());
ConsumerConnector consumerConnector =
Consumer.createJavaConsumerConnector(consumerConfig());
Map<String, Integer> topicCounts = new HashMap<>();
topicCounts.put(topicName, 1);
VerifiableProperties emptyProps = new VerifiableProperties();
StringDecoder keyDecoder = new StringDecoder(emptyProps);
FeatureVectorDecoder valueDecoder = new FeatureVectorDecoder();
Map<String, List<KafkaStream<String, float[]>>> streams =
consumerConnector.createMessageStreams(topicCounts, keyDecoder, valueDecoder);
List<KafkaStream<String, float[]>> streamsByTopic = streams.get(topicName);
Preconditions.checkNotNull(streamsByTopic, String.format("Topic %s not found in streams map.", topicName));
Preconditions.checkElementIndex(0, streamsByTopic.size(),
String.format("List of streams of topic %s is empty.", topicName));
return streamsByTopic.get(0);
}
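FeatureVectorDecoder is a custom decoder from the original project. A minimal sketch of such a decoder, assuming the payload is a packed sequence of big-endian 4-byte floats (the wire format is an assumption):
import java.nio.ByteBuffer;
import kafka.serializer.Decoder;

public class FeatureVectorDecoder implements Decoder<float[]> {
@Override
public float[] fromBytes(byte[] bytes) {
// interpret the payload as consecutive 4-byte IEEE-754 floats
ByteBuffer buffer = ByteBuffer.wrap(bytes);
float[] vector = new float[bytes.length / Float.BYTES];
for (int i = 0; i < vector.length; i++) {
vector[i] = buffer.getFloat();
}
return vector;
}
}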
Example 8: createKafkaStream
import kafka.consumer.Consumer; // import the package/class this method depends on
public static List<KafkaStream<byte[], byte[]>> createKafkaStream(String zookeeperConnectString, String topic, int partitions) {
//create consumer
Properties consumerProps = new Properties();
consumerProps.put("zookeeper.connect", zookeeperConnectString);
consumerProps.put("group.id", "testClient");
consumerProps.put("zookeeper.session.timeout.ms", "6000");
consumerProps.put("zookeeper.sync.time.ms", "200");
consumerProps.put("auto.commit.interval.ms", "1000");
consumerProps.put("consumer.timeout.ms", "500");
ConsumerConfig consumerConfig = new ConsumerConfig(consumerProps);
ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
Map<String, Integer> topicCountMap = new HashMap<>();
topicCountMap.put(topic, partitions);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
return consumerMap.get(topic);
}
Example 9: initialize
import kafka.consumer.Consumer; // import the package/class this method depends on
/**
* {@inheritDoc}
*/
@Override
public void initialize()
throws StreamingException
{
ConsumerConfig consumerConfig = new ConsumerConfig(kafkaProperties);
consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
Map<String, Integer> topicCountMap = Maps.newHashMap();
topicCountMap.put(topic, TOPIC_COUNT);
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
consumerConnector.createMessageStreams(topicCountMap);
KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
consumerIterator = stream.iterator();
}
Example 10: getConsumer
import kafka.consumer.Consumer; // import the package/class this method depends on
public static ConsumerConnector getConsumer(String groupId) {
// The thread name is part of the key so that each thread holds at most one Consumer, while every thread still gets its own independent Consumer and can therefore consume different partitions
String consumerKey = groupId + "|" + Thread.currentThread().getName();
ConsumerConnector msgConnector = groupConsumers.get(consumerKey);
if (msgConnector == null) {
try {
consumerLock.lock();
msgConnector = groupConsumers.get(consumerKey);
if (msgConnector == null) {
msgConnector = Consumer.createJavaConsumerConnector(getConsumerRealConfig(groupId));
groupConsumers.put(consumerKey, msgConnector);
}
} finally {
consumerLock.unlock();
}
}
return msgConnector;
}
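A hypothetical call site, showing that repeated calls from the same thread reuse one connector while different threads each get their own (the group and topic names are illustrative):
// inside a worker thread
ConsumerConnector connector = getConsumer("my-group"); // cached under the key "my-group|<thread name>"
Map<String, Integer> topicCountMap = new HashMap<>();
topicCountMap.put("my-topic", 1);
Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);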
Example 11: open
import kafka.consumer.Consumer; // import the package/class this method depends on
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
logger.info("Opened");
this.collector = collector;
logger.info(" topic = " + kafkaSpoutConfig.kafkaConsumerConfiguration.getTopic());
this.spoutName = String.format("%s-%d", context.getThisComponentId(), context.getThisTaskId());
Properties kafkaProperties =
KafkaConsumerProperties.createKafkaProperties(kafkaSpoutConfig.kafkaConsumerConfiguration);
// Have to use a different consumer.id for each spout so use the storm taskId. Otherwise,
// zookeeper complains about a conflicted ephemeral node when there is more than one spout
// reading from a topic
kafkaProperties.setProperty("consumer.id", String.valueOf(context.getThisTaskId()));
ConsumerConfig consumerConfig = new ConsumerConfig(kafkaProperties);
this.consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
}
Example 12: KafkaConsumerSuite
import kafka.consumer.Consumer; // import the package/class this method depends on
KafkaConsumerSuite(String zkConnectString, String topic)
{
_topic = topic;
Properties consumeProps = new Properties();
consumeProps.put("zookeeper.connect", zkConnectString);
consumeProps.put("group.id", _topic+"-"+System.nanoTime());
consumeProps.put("zookeeper.session.timeout.ms", "10000");
consumeProps.put("zookeeper.sync.time.ms", "10000");
consumeProps.put("auto.commit.interval.ms", "10000");
consumeProps.put("_consumer.timeout.ms", "10000");
_consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
_consumer.createMessageStreams(ImmutableMap.of(this._topic, 1));
List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this._topic);
_stream = streams.get(0);
_iterator = _stream.iterator();
}
Example 13: MessageReader
import kafka.consumer.Consumer; // import the package/class this method depends on
public MessageReader(SecorConfig config, OffsetTracker offsetTracker) throws
UnknownHostException {
mConfig = config;
mOffsetTracker = offsetTracker;
mConsumerConnector = Consumer.createJavaConsumerConnector(createConsumerConfig());
if (!mConfig.getKafkaTopicBlacklist().isEmpty() && !mConfig.getKafkaTopicFilter().isEmpty()) {
throw new RuntimeException("Topic filter and blacklist cannot be both specified.");
}
TopicFilter topicFilter = !mConfig.getKafkaTopicBlacklist().isEmpty()
? new Blacklist(mConfig.getKafkaTopicBlacklist())
: new Whitelist(mConfig.getKafkaTopicFilter());
LOG.debug("Use TopicFilter {}({})", topicFilter.getClass(), topicFilter);
List<KafkaStream<byte[], byte[]>> streams =
mConsumerConnector.createMessageStreamsByFilter(topicFilter);
KafkaStream<byte[], byte[]> stream = streams.get(0);
mIterator = stream.iterator();
mLastAccessTime = new HashMap<TopicPartition, Long>();
StatsUtil.setLabel("secor.kafka.consumer.id", IdUtil.getConsumerId());
mTopicPartitionForgetSeconds = mConfig.getTopicPartitionForgetSeconds();
mCheckMessagesPerSecond = mConfig.getMessagesPerSecond() / mConfig.getConsumerThreads();
mKafkaMessageTimestampFactory = new KafkaMessageTimestampFactory(mConfig.getKafkaMessageTimestampClass());
}
Example 14: openKafkaStream
import kafka.consumer.Consumer; // import the package/class this method depends on
/**
* Initialize the Kafka consumer client and obtain the stream for the topic.
*/
private void openKafkaStream() {
logger.info("Initializing the Kafka consumer client");
this.consumer = Consumer.createJavaConsumerConnector(getConsumerConfig());
StringDecoder decoder = new StringDecoder(null);
Map<String, Integer> topicCountMap = Maps.of(topic, 1);
Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap,
decoder, decoder);
List<KafkaStream<String, String>> streams = consumerMap.get(topic);
this.stream = streams.get(0);
Assert.notNull(stream);
}
Example 15: open
import kafka.consumer.Consumer; // import the package/class this method depends on
/**
* Create a Kafka consumer.
*/
@Override
public void open() {
// these consumers use ZooKeeper for commit, offset and segment consumption tracking
// TODO: consider using SimpleConsumer the same way the Hadoop consumer job does to avoid ZK dependency
// TODO: use the task details from TopologyContext in the normal open method
ConsumerConfig consumerConfig = new ConsumerConfig(kafkaProperties);
consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
// consumer with just one thread since the real parallelism is handled by Storm already
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(topic, 1);
Map<String, List<KafkaMessageStream>> consumerMap = consumerConnector.createMessageStreams(topicCountMap);
KafkaMessageStream stream = consumerMap.get(topic).get(0);
consumerIterator = stream.iterator();
}