

Java SinkRecord Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.connect.sink.SinkRecord. If you are wondering what the SinkRecord class does, how to use it, or what real SinkRecord code looks like, the curated examples below may help.


The SinkRecord class belongs to the org.apache.kafka.connect.sink package. Fifteen code examples of the class are shown below, sorted by popularity by default.
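Before the examples, here is a minimal sketch of how a SinkRecord is typically constructed and inspected. It is not taken from any of the projects below; the class name, topic, partition, key, value, and offset are placeholder values chosen purely for illustration.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.sink.SinkRecord;

public class SinkRecordSketch {
    public static void main(String[] args) {
        // Constructor arguments: topic, partition, keySchema, key, valueSchema, value, kafkaOffset
        SinkRecord record = new SinkRecord(
                "example-topic",      // topic the record came from (placeholder)
                0,                    // Kafka partition (placeholder)
                Schema.STRING_SCHEMA, // key schema
                "record-key",         // key (placeholder)
                Schema.STRING_SCHEMA, // value schema
                "record-value",       // value (placeholder)
                42L                   // offset within the topic partition (placeholder)
        );

        // Accessors used throughout the examples below
        System.out.println(record.topic());          // "example-topic"
        System.out.println(record.kafkaPartition()); // 0
        System.out.println(record.kafkaOffset());    // 42
        System.out.println(record.key());            // "record-key"
        System.out.println(record.value());          // "record-value"
    }
}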

Example 1: shouldWrite

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
@Test
public void shouldWrite() throws ClientProtocolException, IOException, URISyntaxException{
	
	final List<SinkRecord> documents = new ArrayList<SinkRecord>();
	final Account account = new Account("A1");
	final Client client = new Client("C1", account);
	final QuoteRequest quoteRequest = new QuoteRequest("Q1", "APPL", 100, client, new Date());
	
	documents.add(new SinkRecord("topic", 1, null, null, null, MAPPER.convertValue(quoteRequest, Map.class), 0));
	writer.write(documents);
	
	final HttpResponse response = super.get("/C1/A1/Q1.json");
	final QuoteRequest qr = MAPPER.readValue(response.getEntity().getContent(), QuoteRequest.class);
	assertEquals("APPL", qr.getSymbol());
	super.delete("/C1/A1/Q1.json");
}
 
Developer ID: sanjuthomas, Project: kafka-connect-marklogic, Lines of code: 17, Source: TestMarkLogicDefaultWriter.java

Example 2: expectPollInitialAssignment

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
private void expectPollInitialAssignment() throws Exception {
    final List<TopicPartition> partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);

    sinkTask.open(partitions);
    EasyMock.expectLastCall();

    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            return ConsumerRecords.empty();
        }
    });
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);

    sinkTask.put(Collections.<SinkRecord>emptyList());
    EasyMock.expectLastCall();
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 21, Source: WorkerSinkTaskThreadedTest.java

Example 3: shouldWrite

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
@Test
public void shouldWrite() throws ClientProtocolException, IOException, URISyntaxException{
       
       final List<SinkRecord> documents = new ArrayList<SinkRecord>();
       final QuoteRequest quoteRequest1 = new QuoteRequest("Q4", "IBM", 100, new Client("C4", new Account("A4")), new Date());
       final QuoteRequest quoteRequest2 = new QuoteRequest("Q5", "GS", 100, new Client("C5", new Account("A5")), new Date());
       
       documents.add(new SinkRecord("topic", 1, null, null, null, MAPPER.convertValue(quoteRequest1, Map.class), 0));
       documents.add(new SinkRecord("topic", 1, null, null, null, MAPPER.convertValue(quoteRequest2, Map.class), 0));
       writer.write(documents);
       
       HttpResponse response = super.get("/C4/A4/Q4.json");
       QuoteRequest qr = MAPPER.readValue(response.getEntity().getContent(), QuoteRequest.class);
       assertEquals("IBM", qr.getSymbol());
       response = super.get("/C5/A5/Q5.json");
       qr = MAPPER.readValue(response.getEntity().getContent(), QuoteRequest.class);
       assertEquals("GS", qr.getSymbol());
       super.delete("/C5/A5/Q5.json");
       super.delete("/C4/A4/Q4.json");
   }
 
Developer ID: sanjuthomas, Project: kafka-connect-marklogic, Lines of code: 21, Source: TestMarkLogicAsyncWriter.java

Example 4: shouldPut

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
@Test
public void shouldPut() throws ClientProtocolException, IOException, URISyntaxException{

    List<SinkRecord> documents = new ArrayList<SinkRecord>();
    final Account account = new Account("A1");
    final Client client = new Client("C1", account);
    final QuoteRequest quoteRequest = new QuoteRequest("Q1", "APPL", 100, client, new Date());

    documents.add(new SinkRecord("trades", 1, null, null, null, MAPPER.convertValue(quoteRequest, Map.class), 0));
    markLogicSinkTask.put(documents);

    final HttpResponse response = super.get("/C1/A1/Q1.json");
    final QuoteRequest qr = MAPPER.readValue(response.getEntity().getContent(), QuoteRequest.class);
    assertEquals("APPL", qr.getSymbol());
    super.delete("/C1/A1/Q1.json");
}
 
Developer ID: sanjuthomas, Project: kafka-connect-marklogic, Lines of code: 17, Source: TestMarkLogicSincTask.java

Example 5: shouldWrite

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
@Test
public void shouldWrite() throws ClientProtocolException, IOException, URISyntaxException{
    
    final List<SinkRecord> documents = new ArrayList<SinkRecord>();
    final QuoteRequest quoteRequest1 = new QuoteRequest("Q2", "IBM", 100, new Client("C2", new Account("A2")), new Date());
    final QuoteRequest quoteRequest2 = new QuoteRequest("Q3", "GS", 100, new Client("C3", new Account("A3")), new Date());
    
    documents.add(new SinkRecord("topic", 1, null, null, null, MAPPER.convertValue(quoteRequest1, Map.class), 0));
    documents.add(new SinkRecord("topic", 1, null, null, null, MAPPER.convertValue(quoteRequest2, Map.class), 0));
    writer.write(documents);
    
    HttpResponse response = super.get("/C2/A2/Q2.json");
    QuoteRequest qr = MAPPER.readValue(response.getEntity().getContent(), QuoteRequest.class);
    assertEquals("IBM", qr.getSymbol());
    response = super.get("/C3/A3/Q3.json");
    qr = MAPPER.readValue(response.getEntity().getContent(), QuoteRequest.class);
    assertEquals("GS", qr.getSymbol());
    super.delete("/C3/A3/Q3.json");
    super.delete("/C2/A2/Q2.json");
}
 
Developer ID: sanjuthomas, Project: kafka-connect-marklogic, Lines of code: 21, Source: TestMarkLogicBufferedWriter.java

Example 6: delete

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
@Test
public void delete() {
  final Schema keySchema = SchemaBuilder.struct()
      .field("id", Schema.STRING_SCHEMA)
      .build();
  final Struct key = new Struct(keySchema)
      .put("id", "asdf");
  final SinkRecord record = new SinkRecord(
      "testing",
      1,
      keySchema,
      key,
      null,
      null,
      123L
  );

  this.task.put(Arrays.asList(record));
}
 
Developer ID: jcustenborder, Project: kafka-connect-cassandra, Lines of code: 20, Source: CassandraSinkTaskTest.java

Example 7: convert

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
public FluentdEventRecord convert(SinkRecord sinkRecord) {
    logger.debug("SinkRecord: {}", sinkRecord);
    FluentdEventRecord eventRecord = null;

    if (sinkRecord.value() != null) {
        eventRecord = getRecordConverter(sinkRecord.valueSchema(), sinkRecord.value())
                .convert(sinkRecord.valueSchema(), sinkRecord.value());
    }
    eventRecord.setTag(sinkRecord.topic());

    if (config.getFluentdClientTimestampInteger()) {
        eventRecord.setTimestamp(sinkRecord.timestamp() / 1000);
    } else {
        eventRecord.setEventTime(EventTime.fromEpochMilli(sinkRecord.timestamp()));
    }

    return eventRecord;
}
 
Developer ID: fluent, Project: kafka-connect-fluentd, Lines of code: 19, Source: SinkRecordConverter.java

Example 8: putTest

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
@Test
public void putTest() throws IOException {
    MySqlSinkTask task = startTask();
    final Collection<SinkRecord> records = new ArrayList<>();

    String topic = "estation.db_ez.t_box";
    int partition = 0;
    long kafkaOffset = 100;

    String key = "{\"database\":\"db_ez\",\"table\":\"t_parcel\",\"pk.id\":\"100000A01120160629150390\"}";
    String val = "{\"commit\":true,\"data\":{\"book_code\":\"0\",\"book_expire_time\":\"2016-06-01 11:55:39\",\"box_id\":\"EZ004016145026\",\"box_type\":\"1\",\"business_type\":\"1\",\"channel_id\":\"0\",\"code_expire_time\":\"2016-07-22 10:19:40\",\"company_id\":\"433\",\"create_time\":\"2016-08-10 09:26:14\",\"delivery_time\":\"2016-06-29 15:04:17\",\"expire_time\":\"2016-07-01 15:04:17\",\"id\":\"100000A01120160629150390\",\"is_old\":\"2\",\"package_id\":\"sxFuckFuck\",\"partner_cid\":\"0\",\"postman_mobile\":\"18618307356\",\"postman_name\":\"乔德康\",\"postman_uid\":\"5357\",\"reminder_num\":\"15\",\"retrieve_code\":\"000000\",\"sn\":\"100000A013\",\"status\":\"exceptional\",\"take_code\":\"281587\",\"take_mobile\":\"13801174051\"},\"database\":\"test\",\"old\":{\"package_id\":\"sxDSDSDS\"},\"table\":\"t_parcel\",\"ts\":1502884124,\"type\":\"update\",\"xid\":11524421}";

    SinkRecord record = new SinkRecord(topic, partition, null, key, null, val, kafkaOffset);

    records.add(record);

    task.put(records);
}
 
Developer ID: songxin1990, Project: maxwell-sink, Lines of code: 19, Source: MySqlSinkTaskTest.java

Example 9: getWriter

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
private RecordWriter<SinkRecord> getWriter(SinkRecord record, String encodedPartition)
    throws ConnectException {
  try {
    if (writers.containsKey(encodedPartition)) {
      return writers.get(encodedPartition);
    }
    String tempFile = getTempFile(encodedPartition);
    RecordWriter<SinkRecord> writer = writerProvider.getRecordWriter(conf, tempFile, record, avroData);
    writers.put(encodedPartition, writer);
    if (hiveIntegration && !hivePartitions.contains(encodedPartition)) {
      addHivePartition(encodedPartition);
      hivePartitions.add(encodedPartition);
    }
    return writer;
  } catch (IOException e) {
    throw new ConnectException(e);
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 19, Source: TopicPartitionWriter.java

Example 10: project

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
public static SinkRecord project(SinkRecord record, Schema currentSchema, Compatibility compatibility) {
  switch (compatibility) {
    case BACKWARD:
    case FULL:
    case FORWARD:
      Schema sourceSchema = record.valueSchema();
      Object value = record.value();
      if (sourceSchema == currentSchema || sourceSchema.equals(currentSchema)) {
        return record;
      }
      Object projected = SchemaProjector.project(sourceSchema, value, currentSchema);
      return new SinkRecord(record.topic(), record.kafkaPartition(), record.keySchema(),
                            record.key(), currentSchema, projected, record.kafkaOffset());
    default:
      return record;
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 18, Source: SchemaUtils.java

Example 11: put

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
public void put(SinkRecord record) {
  try {
    ByteArrayOutputStream resultStream = new ByteArrayOutputStream();
    Writer writer = new OutputStreamWriter(resultStream);

    Object key = record.key();
    if (key != null) {
      writer.write(key.toString());
      writer.write(',');
    }

    Object value = record.value();
    if (value != null) {
      writer.write(value.toString());
    }

    writer.write('\n');
    writer.close();

    this.buffer.put(resultStream.toByteArray());
  } catch (IOException exception) {
    //TODO: check exception
    throw new RuntimeException(exception);
  }
}
 
Developer ID: yuuzi41, Project: kafka-connect-swift, Lines of code: 26, Source: KeyValueCsvSinkRecordBulker.java

Example 12: prepareData

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
private void prepareData(String topic, int partition) throws Exception {
  TopicPartition tp = new TopicPartition(topic, partition);
  DataWriter hdfsWriter = createWriter(context, avroData);
  hdfsWriter.recover(tp);
  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(topic, partition, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 20, Source: AvroHiveUtilTest.java

Example 13: expectPollInitialAssignment

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
private void expectPollInitialAssignment() {
    final List<TopicPartition> partitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2);

    sinkTask.open(partitions);
    EasyMock.expectLastCall();

    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            return ConsumerRecords.empty();
        }
    });
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);

    sinkTask.put(Collections.<SinkRecord>emptyList());
    EasyMock.expectLastCall();
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 20, Source: WorkerSinkTaskTest.java

Example 14: prepareData

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
private void prepareData(String topic, int partition) throws Exception {
  TopicPartition tp = new TopicPartition(topic, partition);
  DataWriter hdfsWriter = createWriter(context, avroData);
  hdfsWriter.recover(tp);
  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(topic, partition, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }
  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 19, Source: ParquetHiveUtilTest.java

Example 15: testTimestampPropagation

import org.apache.kafka.connect.sink.SinkRecord; //import the required package/class
@Test
public void testTimestampPropagation() throws Exception {
    final Long timestamp = System.currentTimeMillis();
    final TimestampType timestampType = TimestampType.CREATE_TIME;

    expectInitializeTask();
    expectConsumerPoll(1, timestamp, timestampType);
    expectConversionAndTransformation(1);

    Capture<Collection<SinkRecord>> records = EasyMock.newCapture(CaptureType.ALL);

    sinkTask.put(EasyMock.capture(records));

    PowerMock.replayAll();

    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    workerTask.iteration();

    SinkRecord record = records.getValue().iterator().next();

    assertEquals(timestamp, record.timestamp());
    assertEquals(timestampType, record.timestampType());

    PowerMock.verifyAll();
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 27, Source: WorkerSinkTaskTest.java


Note: The org.apache.kafka.connect.sink.SinkRecord class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.