This page collects typical usage examples of the Java class org.apache.kafka.clients.producer.Producer. If you are wondering what the Producer class is for, how to use it, or want to see it in real code, the curated class examples below may help.
The Producer class belongs to the org.apache.kafka.clients.producer package. 15 code examples of the Producer class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
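Before working through the examples, note the basic Producer lifecycle they all follow: build a Properties configuration, create a KafkaProducer, send ProducerRecord instances, and close the producer. A minimal sketch is shown here for orientation; the broker address and topic name are placeholders, not taken from any example below.

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
try (Producer<String, String> producer = new KafkaProducer<>(props)) {
    producer.send(new ProducerRecord<>("demo-topic", "key", "value")); // placeholder topic
} // try-with-resources flushes and closes the producer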
Example 1: start
import org.apache.kafka.clients.producer.Producer; //import the required package/class
public void start() throws InterruptedException {
    RandomGenerator random = RandomManager.getRandom();
    Properties props = ConfigUtils.keyValueToProperties(
        "bootstrap.servers", "localhost:" + kafkaPort,
        "key.serializer", "org.apache.kafka.common.serialization.StringSerializer",
        "value.serializer", "org.apache.kafka.common.serialization.StringSerializer",
        "compression.type", "gzip",
        "batch.size", 0,
        "acks", 1,
        "max.request.size", 1 << 26 // TODO
    );
    try (Producer<String, String> producer = new KafkaProducer<>(props)) {
        for (int i = 0; i < howMany; i++) {
            Pair<String, String> datum = datumGenerator.generate(i, random);
            ProducerRecord<String, String> record =
                new ProducerRecord<>(topic, datum.getFirst(), datum.getSecond());
            producer.send(record);
            log.debug("Sent datum {} = {}", record.key(), record.value());
            if (intervalMsec > 0) {
                Thread.sleep(intervalMsec);
            }
        }
    }
}
Example 2: nullKey
import org.apache.kafka.clients.producer.Producer; //import the required package/class
@Test
public void nullKey() throws Exception {
    Producer<Integer, String> producer = createProducer();
    // the two-argument constructor leaves the record key null
    ProducerRecord<Integer, String> record = new ProducerRecord<>("messages", "test");
    producer.send(record);
    final Map<String, Object> consumerProps = KafkaTestUtils
        .consumerProps("sampleRawConsumer", "false", embeddedKafka);
    consumerProps.put("auto.offset.reset", "earliest");
    final CountDownLatch latch = new CountDownLatch(1);
    createConsumer(latch, null);
    producer.close();
}
Example 3: setUp
import org.apache.kafka.clients.producer.Producer; //import the required package/class
@Before
public void setUp() {
    super.setUp();
    Properties props = new Properties();
    props.setProperty(ProducerConfig.ACKS_CONFIG, "-1"); // acks=-1 is equivalent to acks=all
    AtomicInteger failed = new AtomicInteger(0);
    try (Producer<String, String> producer = createProducer(props)) {
        for (int i = 0; i < 10; i++) {
            producer.send(new ProducerRecord<>("TestTopic", Integer.toString(i)), new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        failed.incrementAndGet();
                    }
                }
            });
        }
    } // closing the producer flushes all pending sends, so every callback has run by now
    assertEquals(0, failed.get());
}
Example 4: AbstractResourceService
import org.apache.kafka.clients.producer.Producer; //import the required package/class
/**
 * Create an AbstractResourceService with the given producer.
 *
 * @param baseUrl the base URL
 * @param producer the kafka producer
 * @param curator the zookeeper curator
 * @param notifications the event service
 * @param idSupplier a supplier of new identifiers
 * @param async write cached resources asynchronously if true, synchronously if false
 */
public AbstractResourceService(final String baseUrl, final Producer<String, String> producer,
        final CuratorFramework curator, final EventService notifications, final Supplier<String> idSupplier,
        final Boolean async) {
    this.baseUrl = baseUrl;
    this.notifications = notifications;
    this.async = async;
    this.idSupplier = idSupplier;
    this.producer = producer;
    this.curator = curator;
    try {
        this.curator.createContainers(ZNODE_COORDINATION);
    } catch (final Exception ex) {
        LOGGER.error("Could not create zk session node: {}", ex.getMessage());
        throw new RuntimeTrellisException(ex);
    }
}
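To make the constructor contract concrete, here is a hedged wiring sketch for its collaborators. MyResourceService is a hypothetical concrete subclass, the connection strings are placeholders, and an EventService instance (notifications) is assumed to exist elsewhere; only the KafkaProducer and Curator client calls are standard APIs.

// hypothetical wiring for the constructor above
final Producer<String, String> producer = new KafkaProducer<>(kafkaProps);
final CuratorFramework curator = CuratorFrameworkFactory.newClient(
    "localhost:2181", new ExponentialBackoffRetry(1000, 3)); // placeholder ZooKeeper address
curator.start();
final Supplier<String> idSupplier = () -> UUID.randomUUID().toString();
final AbstractResourceService service = new MyResourceService(
    "http://localhost:8080/", producer, curator, notifications, idSupplier, true);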
Example 5: sendAckInfoToCtrlTopic
import org.apache.kafka.clients.producer.Producer; //import the required package/class
private static void sendAckInfoToCtrlTopic(String dataSourceInfo, String completedTime, String pullStatus) {
    try {
        // Update the full-pull fields on top of the original dataSourceInfo, then send it back to the src topic
        JSONObject jsonObj = JSONObject.parseObject(dataSourceInfo);
        jsonObj.put(DataPullConstants.FullPullInterfaceJson.FROM_KEY, DataPullConstants.FullPullInterfaceJson.FROM_VALUE);
        jsonObj.put(DataPullConstants.FullPullInterfaceJson.TYPE_KEY, DataPullConstants.FullPullInterfaceJson.TYPE_VALUE);
        // notifyFullPullRequestor
        JSONObject payloadObj = jsonObj.getJSONObject(DataPullConstants.FullPullInterfaceJson.PAYLOAD_KEY);
        // completion time
        payloadObj.put(DataPullConstants.FullPullInterfaceJson.COMPLETE_TIME_KEY, completedTime);
        // flag indicating whether the pull succeeded
        payloadObj.put(DataPullConstants.FullPullInterfaceJson.DATA_STATUS_KEY, pullStatus);
        jsonObj.put(DataPullConstants.FullPullInterfaceJson.PAYLOAD_KEY, payloadObj);
        String ctrlTopic = getFullPullProperties(Constants.ZkTopoConfForFullPull.COMMON_CONFIG, true)
            .getProperty(Constants.ZkTopoConfForFullPull.FULL_PULL_SRC_TOPIC);
        Producer<String, byte[]> producer = DbusHelper
            .getProducer(getFullPullProperties(Constants.ZkTopoConfForFullPull.BYTE_PRODUCER_CONFIG, true));
        ProducerRecord<String, byte[]> record =
            new ProducerRecord<>(ctrlTopic, DataPullConstants.FullPullInterfaceJson.TYPE_VALUE, jsonObj.toString().getBytes());
        producer.send(record).get(); // block until the send is acknowledged
    } catch (Exception e) {
        Log.error("Error occurred when reporting full data pulling status.", e);
        throw new RuntimeException(e);
    }
}
Example 6: loadRunningConf
import org.apache.kafka.clients.producer.Producer; //import the required package/class
private void loadRunningConf(String reloadMsgJson) {
    String notifyEvtName = reloadMsgJson == null ? "loaded" : "reloaded";
    String loadResultMsg = null;
    try {
        this.confMap = FullPullHelper.loadConfProps(zkconnect, topologyId, zkTopoRoot, null);
        this.commonProps = (Properties) confMap.get(FullPullHelper.RUNNING_CONF_KEY_COMMON);
        this.dsName = commonProps.getProperty(Constants.ZkTopoConfForFullPull.DATASOURCE_NAME);
        this.byteProducer = (Producer) confMap.get(FullPullHelper.RUNNING_CONF_KEY_BYTE_PRODUCER);
        this.zkService = (ZkService) confMap.get(FullPullHelper.RUNNING_CONF_KEY_ZK_SERVICE);
        loadResultMsg = "Running Config is " + notifyEvtName + " successfully for DataShardsSplittingBolt!";
        LOG.info(loadResultMsg);
    } catch (Exception e) {
        loadResultMsg = e.getMessage();
        // pass the exception itself so the stack trace is logged, not just its message
        LOG.error(notifyEvtName + "ing running configuration encountered an exception!", e);
    } finally {
        if (reloadMsgJson != null) {
            FullPullHelper.saveReloadStatus(reloadMsgJson, "splitting-bolt", false, zkconnect);
        }
    }
}
Example 7: main
import org.apache.kafka.clients.producer.Producer; //import the required package/class
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "192.168.77.7:9094,192.168.77.7:9093,192.168.77.7:9092");
    props.put("retries", 0);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Producer<String, String> producer = new KafkaProducer<>(props);
    for (int i = 0; i < 10; i++) {
        producer.send(new ProducerRecord<>("test", Long.toString(System.currentTimeMillis()), Integer.toString(i)));
        System.out.println("Sent message: " + i);
    }
    producer.close();
}
Example 8: publishDataToKafka
import org.apache.kafka.clients.producer.Producer; //import the required package/class
/**
 * Publish 'numMessages' arbitrary events from live users with the provided delay, to a
 * Kafka topic.
 */
public static void publishDataToKafka(int numMessages, int delayInMillis)
        throws IOException {
    Producer<String, String> producer = new KafkaProducer<>(kafkaProps);
    for (int i = 0; i < Math.max(1, numMessages); i++) {
        Long currTime = System.currentTimeMillis();
        String message = generateEvent(currTime, delayInMillis);
        producer.send(new ProducerRecord<String, String>("game", null, message)); //TODO(fjp): Generalize
        // TODO(fjp): How do we get late data working?
        // if (delayInMillis != 0) {
        //     System.out.println(pubsubMessage.getAttributes());
        //     System.out.println("late data for: " + message);
        // }
        // pubsubMessages.add(pubsubMessage);
    }
    producer.close();
}
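A possible call site, assuming kafkaProps is populated elsewhere in the class; the argument values are illustrative only:

publishDataToKafka(100, 0); // publish 100 events to the hard-coded "game" topic with no extra delay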
Example 9: TestStreamTask
import org.apache.kafka.clients.producer.Producer; //import the required package/class
TestStreamTask(final TaskId id,
               final String applicationId,
               final Collection<TopicPartition> partitions,
               final ProcessorTopology topology,
               final Consumer<byte[], byte[]> consumer,
               final Producer<byte[], byte[]> producer,
               final Consumer<byte[], byte[]> restoreConsumer,
               final StreamsConfig config,
               final StreamsMetrics metrics,
               final StateDirectory stateDirectory) {
    super(id,
          applicationId,
          partitions,
          topology,
          consumer,
          new StoreChangelogReader(restoreConsumer, Time.SYSTEM, 5000),
          config,
          metrics,
          stateDirectory,
          null,
          new MockTime(),
          producer);
}
Example 10: FileResourceService
import org.apache.kafka.clients.producer.Producer; //import the required package/class
/**
 * Create a File-based repository service.
 *
 * @param partitionData the partition data configuration
 * @param partitionUrls the partition URL configuration
 * @param curator the curator framework
 * @param producer the kafka producer
 * @param notifications the notification service
 * @param idSupplier an identifier supplier for new resources
 * @param async generate cached resources asynchronously if true, synchronously if false
 * @throws IOException if the directory is not writable
 */
public FileResourceService(final Map<String, String> partitionData, final Map<String, String> partitionUrls,
        final CuratorFramework curator, final Producer<String, String> producer, final EventService notifications,
        final Supplier<String> idSupplier, final Boolean async) throws IOException {
    super(partitionUrls, producer, curator, notifications, idSupplier, async);
    requireNonNull(partitionData, "partition data configuration may not be null!");
    RESERVED_PARTITION_NAMES.stream().filter(partitionData::containsKey).findAny().ifPresent(name -> {
        throw new IllegalArgumentException("Invalid partition name: " + name);
    });
    this.partitionData = partitionData;
    init();
}
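A hedged construction sketch: the partition name and paths below are invented for illustration and must not collide with RESERVED_PARTITION_NAMES, while curator, producer, notifications, and idSupplier are assumed to be built as in the Example 4 sketch.

final Map<String, String> partitionData = Collections.singletonMap("repo1", "/var/lib/trellis/repo1"); // hypothetical data directory
final Map<String, String> partitionUrls = Collections.singletonMap("repo1", "http://localhost:8080/repo1/"); // hypothetical base URL
final FileResourceService service = new FileResourceService(
    partitionData, partitionUrls, curator, producer, notifications, idSupplier, true);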
Example 11: sendStringMessage
import org.apache.kafka.clients.producer.Producer; //import the required package/class
public static void sendStringMessage() throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", servers);
    props.put("acks", "all");
    props.put("retries", 0);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Producer<String, String> producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props);
    // no partition is specified, so the topic defaults to a single partition; send the messages
    int i = 0;
    while (i < 1000) {
        Thread.sleep(1000L);
        String message = "zhangsan" + i;
        producer.send(new ProducerRecord<>("NL_U_APP_ALARM_APP_STRING", message));
        i++;
        producer.flush();
    }
    producer.close();
}
Example 12: main
import org.apache.kafka.clients.producer.Producer; //import the required package/class
// assumes static imports of the ProducerConfig.* constants and Math.round/Math.random
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ACKS_CONFIG, "all");
    props.put(RETRIES_CONFIG, 0);
    props.put(BATCH_SIZE_CONFIG, 16384);
    props.put(LINGER_MS_CONFIG, 0);
    props.put(BUFFER_MEMORY_CONFIG, 33554432);
    props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongSerializer");
    props.put(VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    org.apache.kafka.clients.producer.Producer<Long, String> producer = new KafkaProducer<>(props);
    System.out.println("Start sending!");
    for (int i = 1; i <= 12; i++) {
        producer.send(new ProducerRecord<>("produktion", round(random() * 6) + 1, "Message: " + i));
    }
    System.out.println("done!");
    producer.close();
}
Example 13: getTransporter
import org.apache.kafka.clients.producer.Producer; //import the required package/class
/**
 * Prepare a Transporter for production based on the delivery type {@link DeliveryType}.
 * <p/>
 * Provides the {@link Producer} that the transporter will use to communicate with
 * and send data to the kafka broker.
 *
 * @param deliveryType the delivery mode that selects the transporter implementation
 * @param producer the kafka producer backing the transporter
 * @return a Transporter matching the delivery type (a NormalTransporter by default)
 */
public static Transporter getTransporter(DeliveryType deliveryType, Producer<byte[], byte[]> producer) {
    Transporter transporter;
    switch (deliveryType) {
        case NORMAL:
            transporter = new NormalTransporter(producer);
            break;
        case YIElD: // enum constant spelled this way in the source project
            transporter = new YieldTransporter(producer);
            break;
        default:
            transporter = new NormalTransporter(producer);
            break;
    }
    return transporter;
}
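Usage is a single call; DeliveryType.NORMAL is one of the constants handled in the switch above, and producer is any Producer<byte[], byte[]>:

Transporter transporter = getTransporter(DeliveryType.NORMAL, producer); // yields a NormalTransporter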
Example 14: afterPropertiesSet
import org.apache.kafka.clients.producer.Producer; //import the required package/class
/** {@inheritDoc} */
@Override public void afterPropertiesSet() throws Exception {
    bindings.put(Exporter.class, FileExporter.class);
    bindings.put(Serializer.class, JavaSerializer.class);
    bindings.put(KeyValueManager.class, KeyValueManagerImpl.class);
    bindings.put(MetadataProvider.class, MetadataProviderImpl.class);
    bindings.put(MetadataManager.class, InMemoryMetadataManager.class);
    bindings.put(KeyValueProvider.class, QuasiKafkaKeyValueProvider.class);
    bindings.put(KeyValueReader.class, SnapshotAwareKeyValueReaderListener.class);
    bindings.put(IdSequencer.class, InMemoryIdSequencer.class);
    // reuse an existing producer if one was injected, otherwise fall back to the configured factory
    if (producer != null) {
        factories.put(Producer.class, factoryOf((Serializable) producer));
    } else {
        factories.put(Producer.class, producerFactory);
    }
    List classes = Collections.singletonList(SnapshotAwareKeyValueReaderListener.class);
    factories.put(List.class, new Injection.ListOf<>(classes));
}
Example 15: main
import org.apache.kafka.clients.producer.Producer; //import the required package/class
public static void main(String[] args) throws Exception {
    // set producer properties
    Properties prop = PropertyFileReader.readPropertyFile();
    Properties properties = new Properties();
    properties.put("bootstrap.servers", prop.getProperty("kafka.bootstrap.servers"));
    properties.put("acks", prop.getProperty("kafka.acks"));
    properties.put("retries", prop.getProperty("kafka.retries"));
    properties.put("batch.size", prop.getProperty("kafka.batch.size"));
    properties.put("linger.ms", prop.getProperty("kafka.linger.ms"));
    properties.put("max.request.size", prop.getProperty("kafka.max.request.size"));
    properties.put("compression.type", prop.getProperty("kafka.compression.type"));
    properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    // generate event
    Producer<String, String> producer = new KafkaProducer<String, String>(properties);
    generateIoTEvent(producer, prop.getProperty("kafka.topic"), prop.getProperty("camera.id"), prop.getProperty("camera.url"));
}