

Java RecordMetadata Class Code Examples

This article compiles typical usage examples of the Java class org.apache.kafka.clients.producer.RecordMetadata. If you have been wondering what exactly the RecordMetadata class does, how to use it, or where to find usage examples, the curated class code examples below may be just what you need.


The RecordMetadata class belongs to the org.apache.kafka.clients.producer package. Fifteen code examples of the RecordMetadata class are presented below, sorted by popularity by default.
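Before diving in, a quick primer: RecordMetadata is what the producer hands back (through a Future or a Callback) once the broker has acknowledged a record, and it describes where the record landed. The following minimal, self-contained sketch is illustrative only, assuming a broker at localhost:9092 and a hypothetical topic my-topic:

import java.util.Properties;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class RecordMetadataPrimer {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: a local broker
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            Future<RecordMetadata> future =
                    producer.send(new ProducerRecord<>("my-topic", "key", "value"));
            RecordMetadata meta = future.get(); // blocks until the broker acknowledges the record

            // RecordMetadata reports where the record was written
            System.out.printf("topic=%s partition=%d offset=%d timestamp=%d%n",
                    meta.topic(), meta.partition(), meta.offset(), meta.timestamp());
        }
    }
}

Every example below is a variation on this handshake: obtain a Future<RecordMetadata> (or receive the metadata in a Callback) and inspect the topic, partition, offset, or timestamp.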

Example 1: send

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
  /*
  // Create wrappedRecord because headers become read-only on the original record (if the record is sent a second time)
  ProducerRecord<K, V> wrappedRecord = new ProducerRecord<>(record.topic(),
      record.partition(),
      record.timestamp(),
      record.key(),
      record.value(),
      record.headers());
  */

  try (Scope scope = buildAndInjectSpan(record)) {
    Callback wrappedCallback = new TracingCallback(callback, scope);
    return producer.send(record, wrappedCallback);
  }
}
 
Developer: opentracing-contrib, Project: java-kafka-client, Lines: 18, Source: TracingKafkaProducer.java

Example 2: testBatchCannotCompleteTwice

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
@Test
public void testBatchCannotCompleteTwice() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now);
    batch.done(500L, 10L, null);

    try {
        batch.done(1000L, 20L, null);
        fail("Expected exception from done");
    } catch (IllegalStateException e) {
        // expected
    }

    RecordMetadata recordMetadata = future.get();
    assertEquals(500L, recordMetadata.offset());
    assertEquals(10L, recordMetadata.timestamp());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 18, Source: ProducerBatchTest.java

Example 3: publish

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
public void publish(BrokerStats brokerStats) throws IOException {
  try {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    BinaryEncoder binaryEncoder = avroEncoderFactory.binaryEncoder(stream, null);

    avroEventWriter.write(brokerStats, binaryEncoder);
    binaryEncoder.flush();
    IOUtils.closeQuietly(stream);

    String key = brokerStats.getName() + "_" + System.currentTimeMillis();
    int numPartitions = kafkaProducer.partitionsFor(destTopic).size();
    int partition = brokerStats.getId() % numPartitions;

    Future<RecordMetadata> future = kafkaProducer.send(
        new ProducerRecord(destTopic, partition, key.getBytes(), stream.toByteArray()));
    future.get();

    OpenTsdbMetricConverter.incr("kafka.stats.collector.success", 1, "host=" + HOSTNAME);
  } catch (Exception e) {
    LOG.error("Failure in publish stats", e);
    OpenTsdbMetricConverter.incr("kafka.stats.collector.failure", 1, "host=" + HOSTNAME);
    throw new RuntimeException("Avro serialization failure", e);
  }
}
 
Developer: pinterest, Project: doctorkafka, Lines: 25, Source: KafkaAvroPublisher.java

Example 4: send

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
@Override
public void send(Long k, byte[] v) {
    KafkaProducer<Long, byte[]> p = getWorker();
    p.initTransactions();
    p.beginTransaction();
    Future<RecordMetadata> res = p.send(new ProducerRecord<Long, byte[]>(topic, k, v));
    RecordMetadata record;
    try {
        record = res.get();
        offsets.clear();
        offsets.put(new TopicPartition(topic, record.partition()), new OffsetAndMetadata(record.offset()));
        p.sendOffsetsToTransaction(offsets, MallConstants.ORDER_GROUP);
        p.commitTransaction();
    } catch (InterruptedException | ExecutionException e) {
        p.abortTransaction();
    }
}
 
Developer: jiumao-org, Project: wechat-mall, Lines: 18, Source: OrderProducer.java
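A side note on Example 4: in the producer API, initTransactions() is intended to be called once per producer instance (it fences off earlier producers with the same transactional.id), not before every send as above. The sketch below shows the conventional pattern; the transactional.id and topic name are hypothetical:

import java.util.Properties;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class TransactionalSendSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // assumption: a local broker
        props.put("transactional.id", "order-producer-1"); // hypothetical id
        props.put("key.serializer", "org.apache.kafka.common.serialization.LongSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

        KafkaProducer<Long, byte[]> producer = new KafkaProducer<>(props);
        producer.initTransactions(); // once per producer instance, not per send

        try {
            producer.beginTransaction();
            Future<RecordMetadata> res =
                    producer.send(new ProducerRecord<>("orders", 1L, "payload".getBytes()));
            RecordMetadata meta = res.get(); // partition and offset assigned by the broker
            System.out.printf("wrote to %s-%d @ offset %d%n", meta.topic(), meta.partition(), meta.offset());
            producer.commitTransaction();
        } catch (Exception e) {
            producer.abortTransaction(); // not valid after a ProducerFencedException
            throw e;
        } finally {
            producer.close();
        }
    }
}

sendOffsetsToTransaction(), as used in Example 4, additionally commits consumer offsets atomically within the same transaction.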

Example 5: emit

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
/**
 * Emit messages to the relevant kafka topics
 * @return true if the messages were successfully delivered to the kafka topics; false otherwise
 */
public Boolean emit() {

    try {
        final List<Future<RecordMetadata>> results = new ArrayList<>();

        if (async) {
            results.add(producer.send(new ProducerRecord<>(TOPIC_CACHE, identifier.getIRIString(),
                            serialize(dataset))));
        }

        // Update the containment triples of the parent resource if this is a delete or create operation
        parent.ifPresent(emitToParent(identifier, dataset, results));

        for (final Future<RecordMetadata> result : results) {
            final RecordMetadata res = result.get();
            LOGGER.debug("Send record to topic: {}, {}", res, res.timestamp());
        }

        return true;
    } catch (final InterruptedException | ExecutionException ex) {
        LOGGER.error("Error sending record to kafka topic: {}", ex.getMessage());
        return false;
    }
}
 
Developer: trellis-ldp, Project: trellis-rosid, Lines: 29, Source: EventProducer.java

Example 6: sendMessage

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
public void sendMessage(MetaVersion ver, MetaWrapper newMeta, MetaCompareResult result) {
    ControlMessage message = new ControlMessage(System.currentTimeMillis(), ControlType.G_META_SYNC_WARNING.toString(), "dbus-appender");

    message.addPayload("datasource", GlobalCache.getDatasource().getDsName());
    message.addPayload("schema", ver.getSchema());
    message.addPayload("tableId", ver.getTableId());
    message.addPayload("table", ver.getTable());
    message.addPayload("before", ver.getMeta());
    message.addPayload("after", newMeta);
    message.addPayload("compare-result", JSON.toJSON(result));
    message.addPayload("version", ver.getVersion());

    String topic = PropertiesHolder.getProperties(Constants.Properties.CONFIGURE, Constants.ConfigureKey.GLOBAL_EVENT_TOPIC);
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, message.getType(), message.toJSONString());
    Future<RecordMetadata> future = producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            logger.error("Send global event error.{}", exception.getMessage());
        }
    });
    try {
        future.get(10000, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
    }
}
 
Developer: BriData, Project: DBus, Lines: 26, Source: MetaEventWarningSender.java

Example 7: sendAckInfoToCtrlTopic

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
private static void sendAckInfoToCtrlTopic(String dataSourceInfo, String completedTime, String pullStatus) {
    try {
        // Based on the original dataSourceInfo, update the full-pull status fields, then send the message back to the src topic
        JSONObject jsonObj = JSONObject.parseObject(dataSourceInfo);
        jsonObj.put(DataPullConstants.FullPullInterfaceJson.FROM_KEY, DataPullConstants.FullPullInterfaceJson.FROM_VALUE);
        jsonObj.put(DataPullConstants.FullPullInterfaceJson.TYPE_KEY, DataPullConstants.FullPullInterfaceJson.TYPE_VALUE);
        // notifyFullPullRequestor
        JSONObject payloadObj = jsonObj.getJSONObject(DataPullConstants.FullPullInterfaceJson.PAYLOAD_KEY);
        // completion time
        payloadObj.put(DataPullConstants.FullPullInterfaceJson.COMPLETE_TIME_KEY, completedTime);
        // flag indicating whether the pull succeeded
        payloadObj.put(DataPullConstants.FullPullInterfaceJson.DATA_STATUS_KEY, pullStatus);
        jsonObj.put(DataPullConstants.FullPullInterfaceJson.PAYLOAD_KEY, payloadObj);
        String ctrlTopic = getFullPullProperties(Constants.ZkTopoConfForFullPull.COMMON_CONFIG, true)
            .getProperty(Constants.ZkTopoConfForFullPull.FULL_PULL_SRC_TOPIC);
        Producer producer = DbusHelper
                .getProducer(getFullPullProperties(Constants.ZkTopoConfForFullPull.BYTE_PRODUCER_CONFIG, true));
        ProducerRecord record = new ProducerRecord<>(ctrlTopic, DataPullConstants.FullPullInterfaceJson.TYPE_VALUE, jsonObj.toString().getBytes());
        Future<RecordMetadata> future = producer.send(record);
        RecordMetadata meta = future.get();
    }
    catch (Exception e) {
        Log.error("Error occurred when report full data pulling status.", e);
        throw new RuntimeException(e);
    }
}
 
Developer: BriData, Project: DBus, Lines: 27, Source: FullPullHelper.java

Example 8: sendMessageToKafka

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
@SuppressWarnings("unchecked")
private void sendMessageToKafka(String key, DbusMessage dbusMessage, AtomicLong sendCnt, AtomicLong recvCnt, AtomicBoolean isError) throws Exception {
    if (stringProducer == null) {
        throw new Exception("producer is null, can't send to kafka!");
    }

    ProducerRecord record = new ProducerRecord<>(resultTopic, key, dbusMessage.toString());
    sendCnt.getAndIncrement();
    stringProducer.send(record, new Callback() {
        public void onCompletion(RecordMetadata metadata, Exception e) {
            if (e != null) {
                e.printStackTrace();
                isError.set(true);
            } else {
                recvCnt.getAndIncrement();
            }
        }
    });
}
 
Developer: BriData, Project: DBus, Lines: 20, Source: PagedBatchDataFetchingBolt.java

Example 9: postTopicData

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
@POST
@Path("/topics/{topicName}")
public Response postTopicData(@PathParam("topicName") String topicName,
                              String data) {
    try {
        ImmutableMap<String, String> params = parseUrlEncodedParams(data);
        String key = params.get("key");
        String value = params.get("value");

        if (key == null || value == null) {
            return responseFactory.createBadRequestResponse("One of the required post params 'key' " +
                    "or 'value' are missing");
        }

        if (!doesTopicExist(topicName)) {
            return responseFactory.createNotFoundResponse(String.format("No topic exists with the name [%s]", topicName));
        }

        RecordMetadata metadata = kafkaProducerWrapper.publish(key, value, topicName);
        return responseFactory.createOkResponse(metadata);
    } catch (Exception e) {
        return responseFactory.createServerErrorResponse(e);
    }
}
 
Developer: enthusiast94, Project: kafka-visualizer, Lines: 25, Source: RestResource.java

Example 10: send

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
@Override
public <K, V, E> boolean send(Producer<K, V> producer, ProducerRecord<K, V> record, final E event,
                              final FailedDeliveryCallback<E> failedDeliveryCallback) {
    try {
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    failedDeliveryCallback.onFailedDelivery(event, exception);
                }
            }
        });
        return true;
    } catch (BufferExhaustedException e) {
        failedDeliveryCallback.onFailedDelivery(event, e);
        return false;
    }
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 19, Source: AsynchronousDeliveryStrategy.java

Example 11: main

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
public static void main(String[] args) {
    Map<String, Object> config = new HashMap<String, Object>();
    config.put("partitioner.class", "com.wngn.kafka.SimpleKeyPartition");
    LatestProducer producer = LatestProducer.getInstance(ProducerConstants.TOPIC_KAFKA_TEST, config);
    ProducerRecord<String, String> record = null;
    long index = 0L;
    boolean controller = true;
    while (controller) {
        controller = false;
        index++;
        System.out.println(index + "------------");
        try {
            String message = "message_" + index;
            RecordMetadata recordMetadata = producer.sendWithSync("1", message);
            System.out.format("PARTITION: %d OFFSET: %d\n", recordMetadata.partition(), recordMetadata.offset());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    producer.close();
}
 
Developer: wngn123, Project: wngn-jms-kafka, Lines: 23, Source: LatestProducer.java

Example 12: emitToParent

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
private Consumer<Resource> emitToParent(final IRI identifier, final Dataset dataset,
        final List<Future<RecordMetadata>> results) {
    final Boolean isCreate = dataset.contains(of(PreferAudit), null, type, Create);
    final Boolean isDelete = dataset.contains(of(PreferAudit), null, type, Delete);
    final String containmentTopic = isDelete ? TOPIC_LDP_CONTAINMENT_DELETE : TOPIC_LDP_CONTAINMENT_ADD;
    final String membershipTopic = isDelete ? TOPIC_LDP_MEMBERSHIP_DELETE : TOPIC_LDP_MEMBERSHIP_ADD;

    return container -> {
        if (isDelete || isCreate) {
            try {
                LOGGER.info("Sending to parent: {}", container.getIdentifier());
                results.add(producer.send(buildContainmentMessage(containmentTopic, identifier, container,
                                dataset)));

                buildMembershipMessage(membershipTopic, identifier, container, dataset).ifPresent(msg -> {
                        LOGGER.info("Sending to member resource: {}", container.getMembershipResource());
                        results.add(producer.send(msg));
                });
            } catch (final Exception ex) {
                LOGGER.error("Error processing dataset: {}", ex.getMessage());
            }
        }
    };
}
 
Developer: trellis-ldp, Project: trellis-rosid, Lines: 25, Source: EventProducer.java

Example 13: main

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
public static void main(final String[] args) {
    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", "localhost:9092");
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("acks", "all");
    producerProps.put("retries", 1);
    producerProps.put("batch.size", 20000);
    producerProps.put("linger.ms", 1);
    producerProps.put("buffer.memory", 24568545);
    KafkaProducer<String, String> producer = new KafkaProducer<String, String>(producerProps);

    for (int i = 0; i < 2000; i++) {
        ProducerRecord data = new ProducerRecord<String, String>("test1", "Hello this is record " + i);
        Future<RecordMetadata> recordMetadata = producer.send(data);
    }
    producer.close();
}
 
Developer: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 19, Source: DemoProducer.java

Example 14: resendFailedProduceRequestAfterAbortableError

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
@Test
public void resendFailedProduceRequestAfterAbortableError() throws Exception {
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();

    transactionManager.maybeAddPartitionToTransaction(tp0);

    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;

    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, pid);
    prepareProduceResponse(Errors.NOT_LEADER_FOR_PARTITION, pid, epoch);
    sender.run(time.milliseconds()); // AddPartitions
    sender.run(time.milliseconds()); // Produce

    assertFalse(responseFuture.isDone());

    transactionManager.transitionToAbortableError(new KafkaException());
    prepareProduceResponse(Errors.NONE, pid, epoch);

    sender.run(time.milliseconds());
    assertTrue(responseFuture.isDone());
    assertNotNull(responseFuture.get());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: TransactionManagerTest.java

Example 15: testSimple

import org.apache.kafka.clients.producer.RecordMetadata; // import the required package/class
@Test
public void testSimple() throws Exception {
    long offset = 0;
    Future<RecordMetadata> future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds()); // connect
    sender.run(time.milliseconds()); // send produce request
    assertEquals("We should have a single produce request in flight.", 1, client.inFlightRequestCount());
    assertTrue(client.hasInFlightRequests());
    client.respond(produceResponse(tp0, offset, Errors.NONE, 0));
    sender.run(time.milliseconds());
    assertEquals("All requests completed.", 0, client.inFlightRequestCount());
    assertFalse(client.hasInFlightRequests());
    sender.run(time.milliseconds());
    assertTrue("Request should be completed", future.isDone());
    assertEquals(offset, future.get().offset());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 17, Source: SenderTest.java


Note: The org.apache.kafka.clients.producer.RecordMetadata class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult the corresponding project's License before distributing or using the code. Do not reproduce without permission.