This article collects typical usage examples of the Java class kafka.api.OffsetRequest. If you have been wondering what OffsetRequest is for, how to use it, or where to find it used in real code, the hand-picked samples below should help.
The OffsetRequest class belongs to the kafka.api package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
Example 1: commitOffset
import kafka.api.OffsetRequest; // import the required package/class
private boolean commitOffset(SimpleConsumer consumer, long offset, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, OffsetAndMetadata> requestInfo = new HashMap<TopicAndPartition, OffsetAndMetadata>();
    OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(
            new OffsetMetadata(offset, OffsetMetadata.NoMetadata()), offset, -1L);
    requestInfo.put(topicAndPartition, offsetAndMetadata);
    OffsetCommitRequest commitRequest = new OffsetCommitRequest(groupid, requestInfo, correlationId, clientName,
            OffsetRequest.CurrentVersion());
    OffsetCommitResponse response = null;
    while (true) {
        try {
            logger.debug("partition {} commit offset", partition);
            response = consumer.commitOffsets(commitRequest);
            if (response != null)
                break;
        } catch (Exception e) {
            logger.error("an error occurred while committing offsets", e);
            try {
                Thread.sleep(EXCEPTION_SLEEP_TIME);
            } catch (InterruptedException e1) {
                e1.printStackTrace();
            }
        }
    }
    // true means the commit response reported an error
    return response.hasError();
}
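A minimal usage sketch (the broker host, port, client id, and the nextOffsetToProcess variable are illustrative placeholders, not taken from the source project). Note that the helper returns response.hasError(), so true means the commit failed:

// Sketch only: host/port/client id are placeholders.
SimpleConsumer consumer = new SimpleConsumer("broker-host", 9092, 100000, 64 * 1024, "myClient");
boolean failed = commitOffset(consumer, nextOffsetToProcess, "myClient");
if (failed) {
    logger.warn("offset commit for partition {} reported an error", partition);
}
consumer.close();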
Example 2: KafkaSourceProcessor
import kafka.api.OffsetRequest; // import the required package/class
public KafkaSourceProcessor(UserConfig userConfig) {
    String topic = userConfig.getString(KAFKA_TOPIC_PROPERTY).get();
    String zookeeperQuorum = userConfig.getString(KAFKA_ZOOKEEPER_PROPERTY).get();
    String serverUri = userConfig.getString(KAFKA_URI_PROPERTY).get();
    Properties zookeeperProperties = new Properties();
    zookeeperProperties.setProperty("zookeeper.connect", zookeeperQuorum);
    zookeeperProperties.setProperty("group.id", "gearpump");
    // TODO what is the default storage on TAP?
    zookeeperProperties.setProperty("offsets.storage", "kafka");
    KafkaSourceConfig sourceConfig = new KafkaSourceConfig(zookeeperProperties)
            .withConsumerStartOffset(OffsetRequest.LatestTime());
    Properties kafkaProperties = new Properties();
    kafkaProperties.setProperty("bootstrap.servers", serverUri);
    KafkaStorageFactory offsetStorageFactory = new KafkaStorageFactory(sourceConfig.consumerProps(), kafkaProperties);
    kafkaSource = new KafkaSource(topic, sourceConfig.consumerProps(), offsetStorageFactory);
    context = ClientContext.apply();
}
Example 3: initKafkaProperties
import kafka.api.OffsetRequest; // import the required package/class
private Properties initKafkaProperties(StreamingConfig conf)
    throws StreamingException
{
    Properties kafkaProperties = new Properties();
    kafkaProperties.put(KafkaConfig.KAFKA_CON_ZK_CONNECT,
            conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_ZOOKEEPERS));
    kafkaProperties.put(KafkaConfig.KAFKA_GROUP_ID, conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_GROUPID));
    kafkaProperties.put(KafkaConfig.KAFKA_SERIAL_CLASS,
            conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_MESSAGESERIALIZERCLASS));
    kafkaProperties.put(KafkaConfig.KAFKA_SESSION_TIME,
            conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_ZKSESSIONTIMEOUT));
    kafkaProperties.put(KafkaConfig.KAFKA_SYNC_TIME,
            conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_ZKSYNCTIME));
    if (conf.getBooleanValue(StreamingConfig.OPERATOR_KAFKA_READ_FROMBEGINNING))
    {
        kafkaProperties.put(KafkaConfig.KAFKA_OFFSET_RESET, OffsetRequest.SmallestTimeString());
    }
    else
    {
        kafkaProperties.put(KafkaConfig.KAFKA_OFFSET_RESET, OffsetRequest.LargestTimeString());
    }
    return kafkaProperties;
}
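For reference, the two time strings used above come straight from the Scala kafka.api.OffsetRequest object; a quick sketch of what they resolve to in the 0.8.x client:

// Constants on the kafka.api.OffsetRequest object (Scala accessors, callable from Java):
String fromBeginning = OffsetRequest.SmallestTimeString(); // "smallest" - start from the earliest offset
String newOnly = OffsetRequest.LargestTimeString();        // "largest"  - consume only newly arriving messages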
Example 4: verifyMessage
import kafka.api.OffsetRequest; // import the required package/class
private boolean verifyMessage(String key, String message) {
    long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
    MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
    Message kafkaMessage = messageAndOffset.message();
    ByteBuffer messageKeyBuffer = kafkaMessage.key();
    String keyString = null;
    String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
    if (messageKeyBuffer != null) {
        keyString = new String(Utils.toByteArray(messageKeyBuffer));
    }
    assertEquals(key, keyString);
    assertEquals(message, messageString);
    return true;
}
Example 5: getLastOffset
import kafka.api.OffsetRequest; // import the required package/class
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime,
        String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), clientName);
    kafka.javaapi.OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        LOG.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
        return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}
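A hedged usage sketch (host, port, topic, and client id are placeholders): LatestTime() is the sentinel -1L and resolves to the next offset that will be written, while EarliestTime() is -2L and resolves to the oldest offset still held by the broker.

// Sketch only: connection parameters are placeholders.
SimpleConsumer consumer = new SimpleConsumer("broker-host", 9092, 100000, 64 * 1024, "offsetLookup");
long latest = getLastOffset(consumer, "my-topic", 0, kafka.api.OffsetRequest.LatestTime(), "offsetLookup");
long earliest = getLastOffset(consumer, "my-topic", 0, kafka.api.OffsetRequest.EarliestTime(), "offsetLookup");
consumer.close();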
Example 6: generateTsOffsetsNoPartitions
import kafka.api.OffsetRequest; // import the required package/class
private List<long[]> generateTsOffsetsNoPartitions(final long scanRange,
        final int partitionNum)
{
    // we want to increase the number of splits so that we can achieve maximum parallelism;
    // the ideal number would be splits == cores
    // TODO make this configurable
    final int numHosts = 40;
    final int numCorePerHosts = 32;
    final int splitsWanted = numHosts * numCorePerHosts;
    // scanRange is in milliseconds, since the start point comes from currentTimeMillis()
    final long start = System.currentTimeMillis() - scanRange;
    long secondsPerSplit = scanRange / (splitsWanted / partitionNum);
    List<long[]> offsetList = new ArrayList<long[]>();
    for (int i = 0; i < splitsWanted / partitionNum; ++i) {
        long[] offsets = new long[2];
        offsets[0] = start + secondsPerSplit * i;
        offsets[1] = start + secondsPerSplit * (i + 1);
        offsetList.add(offsets);
    }
    // the last split is left open-ended via the LatestTime() sentinel (-1L)
    offsetList.get(offsetList.size() - 1)[1] = OffsetRequest.LatestTime();
    return offsetList;
}
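A hedged call sketch (the six-hour range and partition count are made up): scanRange is in milliseconds, and the final range ends at the open-ended LatestTime() marker.

// Sketch only: arguments are illustrative.
List<long[]> ranges = generateTsOffsetsNoPartitions(TimeUnit.HOURS.toMillis(6), 8);
long[] last = ranges.get(ranges.size() - 1);
// last[1] == OffsetRequest.LatestTime(), i.e. -1L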
Example 7: createKafkaSpout
import kafka.api.OffsetRequest; // import the required package/class
/**
 * Creates a Kafka spout.
 *
 * @param topic   Kafka topic
 * @param spoutId spout id
 * @return {@link KafkaSpout}
 */
protected org.apache.storm.kafka.KafkaSpout createKafkaSpout(String topic, String spoutId) {
    String zkRoot = String.format("/%s/%s", getTopologyName(), topic);
    ZkHosts hosts = new ZkHosts(config.getZookeeperHosts());
    SpoutConfig cfg = new SpoutConfig(hosts, topic, zkRoot, spoutId);
    cfg.startOffsetTime = OffsetRequest.EarliestTime();
    cfg.scheme = new SchemeAsMultiScheme(new StringScheme());
    cfg.bufferSizeBytes = 1024 * 1024 * 4;
    cfg.fetchSizeBytes = 1024 * 1024 * 4;
    return new org.apache.storm.kafka.KafkaSpout(cfg);
}
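A sketch of how such a spout is typically wired into a Storm topology (the component ids and the downstream bolt are hypothetical, not from the source project):

// Sketch only: MyProcessingBolt is a placeholder for whatever consumes the tuples.
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("kafka-reader", createKafkaSpout("my-topic", "kafka-reader"), 1);
builder.setBolt("processor", new MyProcessingBolt()).shuffleGrouping("kafka-reader");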
Example 8: fetchNextOffset
import kafka.api.OffsetRequest; // import the required package/class
private long fetchNextOffset(SimpleConsumer consumer, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    List<TopicAndPartition> requestInfo = new ArrayList<>();
    requestInfo.add(topicAndPartition);
    OffsetFetchRequest fetchRequest = new OffsetFetchRequest(groupid, requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), correlationId, clientName);
    OffsetFetchResponse response = null;
    while (true) {
        try {
            logger.debug("partition {} fetch offset request", partition);
            response = consumer.fetchOffsets(fetchRequest);
            if (response != null)
                break;
        } catch (Exception e) {
            logger.error("an error occurred while fetching offsets", e);
            try {
                Thread.sleep(EXCEPTION_SLEEP_TIME);
            } catch (InterruptedException e1) {
                e1.printStackTrace();
            }
        }
    }
    OffsetMetadataAndError offset = response.offsets().get(topicAndPartition);
    if (offset.error() == 0)
        return offset.offset();
    else
        return 0;
}
Example 9: requestAndSetEarliestOrLatestOffsetsFromKafka
import kafka.api.OffsetRequest; // import the required package/class
/**
 * For a set of partitions, if a partition is set with the special offsets {@link OffsetRequest#EarliestTime()}
 * or {@link OffsetRequest#LatestTime()}, replace them with actual offsets requested via a Kafka consumer.
 *
 * @param consumer The consumer connected to the lead broker
 * @param partitions The list of partitions we need offsets for
 */
private static void requestAndSetEarliestOrLatestOffsetsFromKafka(
        SimpleConsumer consumer,
        List<KafkaTopicPartitionState<TopicAndPartition>> partitions) throws Exception {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
        if (part.getOffset() == OffsetRequest.EarliestTime() || part.getOffset() == OffsetRequest.LatestTime()) {
            requestInfo.put(part.getKafkaPartitionHandle(), new PartitionOffsetRequestInfo(part.getOffset(), 1));
        }
    }
    requestAndSetOffsetsFromKafka(consumer, partitions, requestInfo);
}
Example 10: getInvalidOffsetBehavior
import kafka.api.OffsetRequest; // import the required package/class
/**
 * Retrieves the behaviour of "auto.offset.reset" from the config properties.
 * A partition needs to fall back to "auto.offset.reset" as the default offset when
 * we can't find offsets in ZK to start from in {@link StartupMode#GROUP_OFFSETS} startup mode.
 *
 * @param config kafka consumer properties
 * @return either OffsetRequest.LatestTime() or OffsetRequest.EarliestTime()
 */
private static long getInvalidOffsetBehavior(Properties config) {
    final String val = config.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "largest");
    if (val.equals("largest") || val.equals("latest")) { // "largest" is kafka 0.8, "latest" is kafka 0.9
        return OffsetRequest.LatestTime();
    } else {
        return OffsetRequest.EarliestTime();
    }
}
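A quick sketch of the resulting mapping: "largest" or "latest" yields OffsetRequest.LatestTime() (-1L); anything else, e.g. "smallest" or "earliest", falls through to OffsetRequest.EarliestTime() (-2L).

Properties props = new Properties();
props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
long behavior = getInvalidOffsetBehavior(props); // OffsetRequest.EarliestTime(), i.e. -2L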
Example 11: getInvalidOffsetBehavior
import kafka.api.OffsetRequest; // import the required package/class
private static long getInvalidOffsetBehavior(Properties config) {
    final String val = config.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "largest");
    if (val.equals("none")) {
        throw new IllegalArgumentException("Cannot use '" + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG
                + "' value 'none'. Possible values: 'latest', 'largest', or 'earliest'.");
    }
    else if (val.equals("largest") || val.equals("latest")) { // "largest" is kafka 0.8, "latest" is kafka 0.9
        return OffsetRequest.LatestTime();
    } else {
        return OffsetRequest.EarliestTime();
    }
}
Example 12: main
import kafka.api.OffsetRequest; // import the required package/class
public static void main(String[] args) throws ClassNotFoundException, SQLException, UnsupportedEncodingException
{
    Class.forName("cn.edu.ruc.kafka.Driver");
    Properties info = new Properties();
    info.setProperty("role", "consumer");
    info.setProperty("client.id", "12345");
    Connection conn = DriverManager.getConnection("kafka://log04:9092/test", info);
    Statement st = conn.createStatement();
    ResultSet rs = st.executeQuery(Query.GET_LEADER_PARTITIONS_METADATA);
    while (rs.next())
    {
        System.out.println(((Broker) rs.getObject("leader")).host() + ", " + rs.getInt("partitionId") + ", " + rs.getInt("sizeInBytes"));
        ResultSet rs1 = st.executeQuery(Query.GET_LAST_OFFSET(rs.getInt("partitionId"), OffsetRequest.LatestTime()));
        while (rs1.next())
        {
            System.out.println("\t" + rs1.getLong("offset"));
            long offset = rs1.getLong("offset");
            // tails the partition indefinitely; the close() calls below are never reached
            while (true)
            {
                ResultSet rs2 = st.executeQuery(Query.GET_MESSAGE_FROM_PARTITION(rs.getInt("partitionId"), offset));
                while (rs2.next())
                {
                    offset = rs2.getLong("nextOffset");
                    System.out.println("\t\t" + rs2.getLong("offset") + ", " + rs2.getLong("nextOffset") + ", " + new String(rs2.getBytes("message"), "UTF-8"));
                }
            }
        }
    }
    st.close();
    conn.close();
}
Example 13: getTopicLogSize
import kafka.api.OffsetRequest; // import the required package/class
private long getTopicLogSize(String topic, int pid) {
    Option<Object> o = ZkUtils.getLeaderForPartition(zkClient, topic, pid);
    if (o.isEmpty() || o.get() == null) {
        log.error("No broker for partition %s - %s", topic, pid);
        return 0;
    }
    Integer leaderId = Int.unbox(o.get());
    SimpleConsumer consumer = consumerMap.get(leaderId);
    if (consumer == null) {
        consumer = createSimpleConsumer(leaderId);
    }
    // createSimpleConsumer may fail.
    if (consumer == null) {
        return 0;
    }
    consumerMap.put(leaderId, consumer);
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, pid);
    PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(OffsetRequest.LatestTime(), 1);
    OffsetRequest request = new OffsetRequest(
            new Map1<TopicAndPartition, PartitionOffsetRequestInfo>(topicAndPartition, requestInfo),
            0,
            Request.OrdinaryConsumerId()
    );
    OffsetResponse response = consumer.getOffsetsBefore(request);
    PartitionOffsetsResponse offsetsResponse = response.partitionErrorAndOffsets().get(topicAndPartition).get();
    return scala.Long.unbox(offsetsResponse.offsets().head());
}
Example 14: brokerIsDown
import kafka.api.OffsetRequest; // import the required package/class
@Test(expected = FailedFetchException.class)
public void brokerIsDown() throws Exception {
    int port = broker.getPort();
    broker.shutdown();
    SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", port, 100, 1024, "testClient");
    KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), OffsetRequest.LatestTime());
}
Example 15: fetchMessage
import kafka.api.OffsetRequest; // import the required package/class
@Test
public void fetchMessage() throws Exception {
    String value = "test";
    createTopicAndSendMessage(value);
    long offset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offset);
    String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
    assertThat(message, is(equalTo(value)));
}