

Java SourceRecord Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.connect.source.SourceRecord. If you are wondering what the SourceRecord class does, how to use it, or where to find real-world examples of it, the curated code samples below should help.


The SourceRecord class belongs to the org.apache.kafka.connect.source package. Fifteen code examples of the class are shown below, sorted by popularity.
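Before the examples, here is a minimal sketch of constructing a SourceRecord directly (not taken from any of the projects below; the topic name and map keys are illustrative). The source partition and offset maps are the free-form bookkeeping that Connect persists so a task can resume where it left off:

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class SourceRecordSketch {
    public static SourceRecord minimalRecord() {
        // Free-form maps identifying the source and the position within it;
        // Connect stores the offset map keyed by the partition map.
        Map<String, ?> sourcePartition = Collections.singletonMap("filename", "example.txt");
        Map<String, ?> sourceOffset = Collections.singletonMap("position", 0L);
        return new SourceRecord(sourcePartition, sourceOffset,
                "my-topic",             // destination Kafka topic
                Schema.STRING_SCHEMA,   // value schema
                "hello, world");        // value
    }
}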

Example 1: testSendRecordsPropagatesTimestamp

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void testSendRecordsPropagatesTimestamp() throws Exception {
    final Long timestamp = System.currentTimeMillis();

    createWorkerTask();

    List<SourceRecord> records = Collections.singletonList(
            new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp)
    );

    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();

    PowerMock.replayAll();

    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(timestamp, sent.getValue().timestamp());

    PowerMock.verifyAll();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 21, Source: WorkerSourceTaskTest.java

Example 2: oneFilePerFs

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void oneFilePerFs() throws IOException, InterruptedException {
    for (Path dir : directories) {
        Path dataFile = new Path(dir, System.nanoTime() + ".txt");
        createDataFile(dataFile);
        //this file does not match the regexp
        fs.createNewFile(new Path(dir, String.valueOf(System.nanoTime())));
    }

    task.start(taskConfig);
    List<SourceRecord> records = task.poll();
    assertEquals(10, records.size());
    checkRecords(records);
    //policy has ended
    assertNull(task.poll());
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 17, Source: FsSourceTaskTestBase.java

Example 3: testSendRecordsConvertsData

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void testSendRecordsConvertsData() throws Exception {
    createWorkerTask();

    List<SourceRecord> records = new ArrayList<>();
    // Can just use the same record for key and value
    records.add(new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD));

    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();

    PowerMock.replayAll();

    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(SERIALIZED_KEY, sent.getValue().key());
    assertEquals(SERIALIZED_RECORD, sent.getValue().value());

    PowerMock.verifyAll();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 20, Source: WorkerSourceTaskTest.java

Example 4: poll

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
/**
 * Poll this SourceTask for new records. This method should block if no data is currently
 * available.
 *
 * @return a list of source records
 */
@Override public List<SourceRecord> poll() throws InterruptedException {
    final List<SourceRecord> msgs = new ArrayList<>();
    int messageCount = 0;

    log.info("Polling for records");
    SourceRecord src;
    do {
        // For the first message in the batch, wait indefinitely
        src = reader.receive(messageCount == 0);
        if (src != null) {
            msgs.add(src);
            messageCount++;
        }
    } while ((src != null) && (messageCount < BATCH_SIZE));

    log.trace("Poll returning {} records", messageCount);
    return msgs;
}
 
Developer: ibm-messaging, Project: kafka-connect-mq-source, Lines: 25, Source: MQSourceTask.java
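For orientation, a minimal SourceTask skeleton (the class name is hypothetical) showing where poll() sits relative to start() and stop() in the Connect task lifecycle:

import java.util.List;
import java.util.Map;

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public class SkeletonSourceTask extends SourceTask {
    @Override
    public String version() {
        return "0.0.1"; // illustrative version string
    }

    @Override
    public void start(Map<String, String> props) {
        // open connections/readers here using the task configuration
    }

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // block until data is available (or return null) and hand back a batch
        return null;
    }

    @Override
    public void stop() {
        // signal poll() to unblock and release resources
    }
}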

Example 5: poll

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Override
public List<SourceRecord> poll() throws InterruptedException {
    while (stop != null && !stop.get() && !policy.hasEnded()) {
        log.trace("Polling for new data");

        final List<SourceRecord> results = new ArrayList<>();
        List<FileMetadata> files = filesToProcess();
        files.forEach(metadata -> {
            try (FileReader reader = policy.offer(metadata, context.offsetStorageReader())) {
                log.info("Processing records for file {}", metadata);
                while (reader.hasNext()) {
                    results.add(convert(metadata, reader.currentOffset(), reader.next()));
                }
            } catch (ConnectException | IOException e) {
                //when an exception happens reading a file, the connector continues
                log.error("Error reading file from FS: " + metadata.getPath() + ". Keep going...", e);
            }
        });
        return results;
    }

    return null;
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 24, Source: FsSourceTask.java

Example 6: sourceRecord

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
SourceRecord sourceRecord(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] bytes) {
  Struct key = MessageConverter.key(basicProperties);
  Struct value = MessageConverter.value(consumerTag, envelope, basicProperties, bytes);
  final String topic = this.config.kafkaTopic.execute(RabbitMQSourceConnectorConfig.KAFKA_TOPIC_TEMPLATE, value);

  return new SourceRecord(
      ImmutableMap.of("routingKey", envelope.getRoutingKey()),
      ImmutableMap.of("deliveryTag", envelope.getDeliveryTag()),
      topic,
      null,
      key.schema(),
      key,
      value.schema(),
      value,
      null == basicProperties.getTimestamp() ? this.time.milliseconds() : basicProperties.getTimestamp().getTime()
  );
}
 
Developer: jcustenborder, Project: kafka-connect-rabbitmq, Lines: 18, Source: SourceRecordBuilder.java
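As a follow-up to this builder, a sketch (class and method names are illustrative) of how the same sourcePartition map keyed by "routingKey" lets a restarted task recover its last committed offset through the standard Connect API:

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.source.SourceTaskContext;
import org.apache.kafka.connect.storage.OffsetStorageReader;

public class OffsetLookupSketch {
    // Returns the last committed offset map (e.g. containing "deliveryTag"),
    // or null if nothing has been committed for this partition yet.
    Map<String, Object> lastOffset(SourceTaskContext context, String routingKey) {
        OffsetStorageReader reader = context.offsetStorageReader();
        return reader.offset(Collections.singletonMap("routingKey", routingKey));
    }
}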

Example 7: expectPolls

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
private CountDownLatch expectPolls(int minimum, final AtomicInteger count) throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(minimum);
    // Note that we stub these to allow any number of calls because the thread will continue to
    // run. The count passed in + latch returned just makes sure we get *at least* that number of
    // calls
    EasyMock.expect(sourceTask.poll())
            .andStubAnswer(new IAnswer<List<SourceRecord>>() {
                @Override
                public List<SourceRecord> answer() throws Throwable {
                    count.incrementAndGet();
                    latch.countDown();
                    return RECORDS;
                }
            });
    // Fallout of the poll() call
    expectSendRecordAnyTimes();
    return latch;
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 19, Source: WorkerSourceTaskTest.java

Example 8: expectApplyTransformationChain

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
private void expectApplyTransformationChain(boolean anyTimes) {
    final Capture<SourceRecord> recordCapture = EasyMock.newCapture();
    IExpectationSetters<SourceRecord> convertKeyExpect = EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture)));
    if (anyTimes)
        convertKeyExpect.andStubAnswer(new IAnswer<SourceRecord>() {
            @Override
            public SourceRecord answer() {
                return recordCapture.getValue();
            }
        });
    else
        convertKeyExpect.andAnswer(new IAnswer<SourceRecord>() {
            @Override
            public SourceRecord answer() {
                return recordCapture.getValue();
            }
        });
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 19, Source: WorkerSourceTaskTest.java

Example 9: oneRecordWithNullValue

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void oneRecordWithNullValue() throws InterruptedException, IOException {
    Map<String, String> config = new HashMap<>();
    config.put(FluentdSourceConnectorConfig.FLUENTD_SCHEMAS_ENABLE, "false");
    task.start(config);
    Map<String, Object> record = new HashMap<>();
    record.put("message", null);
    fluency.emit("test", record);
    Thread.sleep(1000);
    List<SourceRecord> sourceRecords = task.poll();
    assertEquals(1, sourceRecords.size());
    SourceRecord sourceRecord = sourceRecords.get(0);
    assertNull(sourceRecord.key());
    assertNull(sourceRecord.valueSchema());
    assertEquals(record, sourceRecord.value());
}
 
Developer: fluent, Project: kafka-connect-fluentd, Lines: 17, Source: FluentdSourceTaskTest.java

Example 10: noSourceOffsets

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void noSourceOffsets() throws InterruptedException {
  when(this.kinesisClient.getShardIterator(any())).thenReturn(
      new GetShardIteratorResult().withShardIterator("dfasdfsadfasdf")
  );
  this.task.start(settings);

  GetRecordsResult recordsResult = new GetRecordsResult()
      .withNextShardIterator("dsfargadsfasdfasda")
      .withRecords(TestData.record())
      .withMillisBehindLatest(0L);

  when(this.kinesisClient.getRecords(any())).thenReturn(recordsResult);

  List<SourceRecord> records = this.task.poll();

  assertNotNull(records, "records should not be null.");
  assertFalse(records.isEmpty(), "records should not be empty.");

  verify(this.kinesisClient, atLeastOnce()).getShardIterator(any());
  verify(this.kinesisClient, atLeastOnce()).getRecords(any());
}
 
Developer: jcustenborder, Project: kafka-connect-kinesis, Lines: 23, Source: KinesisSourceTaskTest.java

Example 11: multipleRecords

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void multipleRecords() throws InterruptedException, IOException {
    Map<String, String> config = new HashMap<>();
    config.put(FluentdSourceConnectorConfig.FLUENTD_SCHEMAS_ENABLE, "false");
    task.start(config);
    Map<String, Object> record1 = new HashMap<>();
    record1.put("message", "This is a test message1");
    Map<String, Object> record2 = new HashMap<>();
    record2.put("message", "This is a test message2");
    fluency.emit("test", record1);
    fluency.emit("test", record2);
    Thread.sleep(1000);
    List<SourceRecord> sourceRecords = task.poll();
    assertEquals(2, sourceRecords.size());
    assertNull(sourceRecords.get(0).valueSchema());
    Map<String, Object> value1 = (Map<String, Object>) sourceRecords.get(0).value();
    assertEquals("This is a test message1", value1.get("message"));
    assertNull(sourceRecords.get(1).valueSchema());
    Map<String, Object> value2 = (Map<String, Object>) sourceRecords.get(1).value();
    assertEquals("This is a test message2", value2.get("message"));
}
 
Developer: fluent, Project: kafka-connect-fluentd, Lines: 22, Source: FluentdSourceTaskTest.java

Example 12: noRecords

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void noRecords() throws InterruptedException {
  final String SEQUENCE_NUMBER = "asdfasdfddsa";
  Map<String, Object> sourceOffset = ImmutableMap.of(RecordConverter.FIELD_SEQUENCE_NUMBER, SEQUENCE_NUMBER);
  when(this.offsetStorageReader.offset(anyMap())).thenReturn(sourceOffset);
  when(this.kinesisClient.getShardIterator(any())).thenReturn(
      new GetShardIteratorResult().withShardIterator("dfasdfsadfasdf")
  );
  this.task.start(settings);

  GetRecordsResult recordsResult = new GetRecordsResult()
      .withNextShardIterator("dsfargadsfasdfasda")
      .withRecords(Arrays.asList())
      .withMillisBehindLatest(0L);

  when(this.kinesisClient.getRecords(any())).thenReturn(recordsResult);

  List<SourceRecord> records = this.task.poll();
  assertNotNull(records, "records should not be null");
  assertTrue(records.isEmpty(), "records should be empty.");

  verify(this.task.time, atLeastOnce()).sleep(this.config.kinesisEmptyRecordsBackoffMs);
}
 
Developer: jcustenborder, Project: kafka-connect-kinesis, Lines: 24, Source: KinesisSourceTaskTest.java

Example 13: schemalessKeyValue

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void schemalessKeyValue() {
    Map<String, String> map = new HashMap<>();
    map.put(FluentdSourceConnectorConfig.FLUENTD_SCHEMAS_ENABLE, "false");
    FluentdSourceConnectorConfig config = new FluentdSourceConnectorConfig(map);
    EventEntry eventEntry = EventEntry.of(
            Instant.now(),
            ValueFactory.newMap(
                    ValueFactory.newString("message"),
                    ValueFactory.newString("This is a message.")));

    MessagePackConverver converter = new MessagePackConverver(config);
    SourceRecord sourceRecord = converter.convert("topic", "tag", 0L, eventEntry);

    Assert.assertNull(sourceRecord.keySchema());
    Assert.assertNull(sourceRecord.key());
    Assert.assertNull(sourceRecord.valueSchema());
    Map<String, Object> value = (Map<String, Object>) sourceRecord.value();
    Assert.assertEquals("This is a message.", value.get("message"));
}
 
Developer: fluent, Project: kafka-connect-fluentd, Lines: 21, Source: MessagePackConverterTest.java

Example 14: oneRecord

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void oneRecord() throws InterruptedException, IOException {
    Map<String, String> config = new HashMap<>();
    config.put(FluentdSourceConnectorConfig.FLUENTD_SCHEMAS_ENABLE, "false");
    task.start(config);
    Map<String, Object> record = new HashMap<>();
    record.put("message", "This is a test message");
    fluency.emit("test", record);
    Thread.sleep(1000);
    List<SourceRecord> sourceRecords = task.poll();
    assertEquals(1, sourceRecords.size());
    SourceRecord sourceRecord = sourceRecords.get(0);
    assertNull(sourceRecord.key());
    assertNull(sourceRecord.valueSchema());
    assertEquals(record, sourceRecord.value());
}
 
Developer: fluent, Project: kafka-connect-fluentd, Lines: 17, Source: FluentdSourceTaskTest.java

Example 15: schemalessInsertConfiguredFields

import org.apache.kafka.connect.source.SourceRecord; // import the required package/class
@Test
public void schemalessInsertConfiguredFields() {
    final Map<String, Object> props = new HashMap<>();
    props.put("topic.field", "topic_field!");
    props.put("partition.field", "partition_field");
    props.put("timestamp.field", "timestamp_field?");
    props.put("static.field", "instance_id");
    props.put("static.value", "my-instance-id");

    final InsertField<SourceRecord> xform = new InsertField.Value<>();
    xform.configure(props);

    final SourceRecord record = new SourceRecord(null, null, "test", 0,
            null, Collections.singletonMap("magic", 42L));

    final SourceRecord transformedRecord = xform.apply(record);

    assertEquals(42L, ((Map) transformedRecord.value()).get("magic"));
    assertEquals("test", ((Map) transformedRecord.value()).get("topic_field"));
    assertEquals(0, ((Map) transformedRecord.value()).get("partition_field"));
    assertEquals(null, ((Map) transformedRecord.value()).get("timestamp_field"));
    assertEquals("my-instance-id", ((Map) transformedRecord.value()).get("instance_id"));
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: InsertFieldTest.java
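For reference, the same transform as it would typically be declared in connector configuration properties rather than in code (the alias InsertFields is arbitrary; per the InsertField transform's conventions, a trailing ! marks the field as required and ? as optional, and the suffix is stripped from the inserted field name, as the assertions above show):

transforms=InsertFields
transforms.InsertFields.type=org.apache.kafka.connect.transforms.InsertField$Value
transforms.InsertFields.topic.field=topic_field!
transforms.InsertFields.partition.field=partition_field
transforms.InsertFields.timestamp.field=timestamp_field?
transforms.InsertFields.static.field=instance_id
transforms.InsertFields.static.value=my-instance-id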


Note: The org.apache.kafka.connect.source.SourceRecord examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before using or redistributing the code. Do not reproduce this article without permission.