

Java Schema.STRING_SCHEMA Property Code Examples

This article collects typical usage examples of the Java property org.apache.kafka.connect.data.Schema.STRING_SCHEMA. If you are wondering how Schema.STRING_SCHEMA is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.kafka.connect.data.Schema.


The sections below present 15 code examples of the Schema.STRING_SCHEMA property, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Java code examples.
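
Before the examples, here is a minimal, self-contained sketch of the pattern they all share: Schema.STRING_SCHEMA serves as the key schema of a Connect record whose key is a plain string, and as the schema of a required string field inside a Struct. The topic name, field name, and offset below are made up purely for illustration.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;

public class StringSchemaExample {
  public static void main(String[] args) {
    // Schema.STRING_SCHEMA is a predefined, non-optional schema for String values.
    Schema valueSchema = SchemaBuilder.struct().name("example.Value")
        .field("message", Schema.STRING_SCHEMA)   // required string field
        .build();

    Struct value = new Struct(valueSchema).put("message", "hello");

    // Hypothetical topic, partition, and offset, for illustration only.
    SinkRecord record = new SinkRecord(
        "example-topic",       // topic
        0,                     // partition
        Schema.STRING_SCHEMA,  // key schema: the key is a plain string
        "key",                 // key
        valueSchema,           // value schema
        value,                 // value
        0L);                   // kafka offset

    System.out.println(record.keySchema().type()); // prints STRING
  }
}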

Example 1: prepareData

private void prepareData(String topic, int partition) throws Exception {
  TopicPartition tp = new TopicPartition(topic, partition);
  DataWriter hdfsWriter = createWriter(context, avroData);
  hdfsWriter.recover(tp);
  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(topic, partition, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 19, Source file: AvroHiveUtilTest.java

Example 2: prepareData

private void prepareData(String topic, int partition) throws Exception {
  TopicPartition tp = new TopicPartition(topic, partition);
  DataWriter hdfsWriter = createWriter(context, avroData);
  hdfsWriter.recover(tp);
  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(topic, partition, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }
  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 18, Source file: ParquetHiveUtilTest.java

Example 3: test

@Test
public void test() throws InterruptedException {
    Map<String, String> sinkProperties = new HashMap<>();
    FluentdSinkTask task = new FluentdSinkTask();
    task.initialize(PowerMock.createMock(SinkTaskContext.class));
    //sinkProperties.put(FluentdSinkConnectorConfig.FLUENTD_CLIENT_MAX_BUFFER_BYTES, "100000");
    task.start(sinkProperties);
    final String topic = "testtopic";
    final String value = "{\"message\":\"This is a test message\"}";
    SinkRecord sinkRecord = new SinkRecord(
            topic,
            1,
            Schema.STRING_SCHEMA,
            topic,
            null,
            value,
            0,
            System.currentTimeMillis(),
            TimestampType.NO_TIMESTAMP_TYPE
    );
    task.put(Collections.singleton(sinkRecord));
    TimeUnit.SECONDS.sleep(1);
    EventEntry eventEntry = queue.poll();
    Assert.assertNotNull(eventEntry);
    Assert.assertEquals(value, eventEntry.getRecord().asMapValue().toJson());
}
 
Developer ID: fluent, Project: kafka-connect-fluentd, Lines of code: 26, Source file: FluentdSinkTaskTest.java

Example 4: testWriteRecordNonZeroInitailOffset

@Test
public void testWriteRecordNonZeroInitailOffset() throws Exception {
  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  Partitioner partitioner = hdfsWriter.getPartitioner();
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 3; offset < 10; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  String directory = partitioner.generatePartitionedPath(TOPIC, "partition=" + String.valueOf(PARTITION));

  // Last file (offset 9) doesn't satisfy size requirement and gets discarded on close
  long[] validOffsets = {2, 5, 8};
  for (int i = 1; i < validOffsets.length; i++) {
    long startOffset = validOffsets[i - 1] + 1;
    long endOffset = validOffsets[i];
    Path path = new Path(FileUtils.committedFileName(url, topicsDir, directory,
                                                     TOPIC_PARTITION, startOffset, endOffset,
                                                     extension, ZERO_PAD_FMT));
    Collection<Object> records = schemaFileReader.readData(conf, path);
    long size = endOffset - startOffset + 1;
    assertEquals(size, records.size());
    for (Object avroRecord : records) {
      assertEquals(avroData.fromConnectData(schema, record), avroRecord);
    }
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 39, Source file: DataWriterAvroTest.java

Example 5: createSinkRecords

private ArrayList<SinkRecord> createSinkRecords(Struct[] records, String key, Schema schema) {
  ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
  long offset = 0;
  for (Struct record : records) {
    for (long count = 0; count < 3; count++) {
      SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record,
                                             offset + count);
      sinkRecords.add(sinkRecord);
    }
    offset = offset + 3;
  }
  return sinkRecords;
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 13, Source file: TopicPartitionWriterTest.java

Example 6: testWriteRecord

@Test
public void testWriteRecord() throws Exception {
  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  Partitioner partitioner = hdfsWriter.getPartitioner();
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);

    sinkRecords.add(sinkRecord);
  }
  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  String encodedPartition = "partition=" + String.valueOf(PARTITION);
  String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition);

  // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close
  long[] validOffsets = {-1, 2, 5};
  for (int i = 1; i < validOffsets.length; i++) {
    long startOffset = validOffsets[i - 1] + 1;
    long endOffset = validOffsets[i];
    Path path = new Path(
        FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffset,
                                    endOffset, extension, ZERO_PAD_FMT));
    Collection<Object> records = schemaFileReader.readData(conf, path);
    long size = endOffset - startOffset + 1;
    assertEquals(size, records.size());
    for (Object avroRecord : records) {
      assertEquals(avroData.fromConnectData(schema, record), avroRecord);
    }
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 40, Source file: DataWriterParquetTest.java

Example 7: convert

public SourceRecord convert(String topic, String tag, Long timestamp, EventEntry entry) {
    if (config.isFluentdSchemasEnable()) {
        SchemaAndValue schemaAndValue = convert(topic, entry);
        return new SourceRecord(
                null,
                null,
                topic,
                null,
                Schema.STRING_SCHEMA,
                tag,
                schemaAndValue.schema(),
                schemaAndValue.value(),
                timestamp
        );
    } else {
        Object record;
        try {
            record = new ObjectMapper().readValue(entry.getRecord().toJson(), LinkedHashMap.class);
        } catch (IOException e) {
            record = entry.getRecord().toJson();
        }
        return new SourceRecord(
                null,
                null,
                topic,
                null,
                null,
                null,
                null,
                record,
                timestamp
        );
    }
}
 
Developer ID: fluent, Project: kafka-connect-fluentd, Lines of code: 34, Source file: MessagePackConverver.java

Example 8: testSyncWithHiveAvro

@Test
public void testSyncWithHiveAvro() throws Exception {
  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);

  hdfsWriter = new DataWriter(config, context, avroData);
  hdfsWriter.syncWithHive();

  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field: schema.fields()) {
    expectedColumnNames.add(field.name());
  }

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column: table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);

  List<String> expectedPartitions = new ArrayList<>();
  String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION);
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short)-1);

  assertEquals(expectedPartitions, partitions);

  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 50, Source file: HiveIntegrationAvroTest.java

Example 9: testHiveIntegrationAvro

@Test
public void testHiveIntegrationAvro() throws Exception {
  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);

  DataWriter hdfsWriter = new DataWriter(config, context, avroData);
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);

    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field: schema.fields()) {
    expectedColumnNames.add(field.name());
  }

  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column: table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);

  List<String> expectedPartitions = new ArrayList<>();
  String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION);
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short)-1);

  assertEquals(expectedPartitions, partitions);
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 45, Source file: HiveIntegrationAvroTest.java

Example 10: testHiveIntegrationTopicWithDotsAvro

@Test
public void testHiveIntegrationTopicWithDotsAvro() throws Exception {
  assignment.add(TOPIC_WITH_DOTS_PARTITION);

  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);

  DataWriter hdfsWriter = new DataWriter(config, context, avroData);
  hdfsWriter.recover(TOPIC_WITH_DOTS_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
       new SinkRecord(TOPIC_WITH_DOTS, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);

    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC_WITH_DOTS);
  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field: schema.fields()) {
    expectedColumnNames.add(field.name());
  }

  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column: table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);

  List<String> expectedPartitions = new ArrayList<>();
  String directory = TOPIC_WITH_DOTS + "/" + "partition=" + String.valueOf(PARTITION);
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC_WITH_DOTS, (short)-1);

  assertEquals(expectedPartitions, partitions);
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 47, Source file: HiveIntegrationAvroTest.java

Example 11: testHiveIntegrationFieldPartitionerAvro

@Test
public void testHiveIntegrationFieldPartitionerAvro() throws Exception {
  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  props.put(HdfsSinkConnectorConfig.PARTITIONER_CLASS_CONFIG, FieldPartitioner.class.getName());
  props.put(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG, "int");

  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
  DataWriter hdfsWriter = new DataWriter(config, context, avroData);

  String key = "key";
  Schema schema = createSchema();

  Struct[] records = createRecords(schema);
  ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
  long offset = 0;
  for (Struct record : records) {
    for (long count = 0; count < 3; count++) {
      SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record,
                                             offset + count);
      sinkRecords.add(sinkRecord);
    }
    offset = offset + 3;
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);

  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field: schema.fields()) {
    expectedColumnNames.add(field.name());
  }

  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column: table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);


  String partitionFieldName = config.getString(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG);
  String directory1 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(16);
  String directory2 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(17);
  String directory3 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(18);

  List<String> expectedPartitions = new ArrayList<>();
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory1));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory2));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory3));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short)-1);

  assertEquals(expectedPartitions, partitions);

  ArrayList<String[]> expectedResult = new ArrayList<>();
  for (int i = 16; i <= 18; ++i) {
    String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2"};
    for (int j = 0; j < 3; ++j) {
      expectedResult.add(part);
    }
  }
  String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC);
  String[] rows = result.split("\n");
  assertEquals(9, rows.length);
  for (int i = 0; i < rows.length; ++i) {
    String[] parts = HiveTestUtils.parseOutput(rows[i]);
    for (int j = 0; j < expectedResult.get(i).length; ++j) {
      assertEquals(expectedResult.get(i)[j], parts[j]);
    }
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 74, Source file: HiveIntegrationAvroTest.java

Example 12: testWriteRecord

@Test
public void testWriteRecord() throws Exception {
  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  Partitioner partitioner = hdfsWriter.getPartitioner();
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);

    sinkRecords.add(sinkRecord);
  }
  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  String encodedPartition = "partition=" + String.valueOf(PARTITION);
  String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition);

  // Last file (offset 6) doesn't satisfy size requirement and gets discarded on close
  long[] validOffsets = {-1, 2, 5};
  for (int i = 1; i < validOffsets.length; i++) {
    long startOffset = validOffsets[i - 1] + 1;
    long endOffset = validOffsets[i];
    Path path =
        new Path(FileUtils
                     .committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffset,
                                        endOffset, extension, ZERO_PAD_FMT));
    Collection<Object> records = schemaFileReader.readData(conf, path);
    long size = endOffset - startOffset + 1;
    assertEquals(size, records.size());
    for (Object avroRecord : records) {
      assertEquals(avroData.fromConnectData(schema, record), avroRecord);
    }
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 41, Source file: DataWriterAvroTest.java

Example 13: testFlushPartialFile

@Test
public void testFlushPartialFile() throws Exception {
  String ROTATE_INTERVAL_MS_CONFIG = "1000";
  // wait for 2 * ROTATE_INTERVAL_MS_CONFIG
  long WAIT_TIME = Long.valueOf(ROTATE_INTERVAL_MS_CONFIG) * 2;

  String FLUSH_SIZE_CONFIG = "10";
  // send 1.5 * FLUSH_SIZE_CONFIG records
  long NUMBER_OF_RECORD = Long.valueOf(FLUSH_SIZE_CONFIG) + Long.valueOf(FLUSH_SIZE_CONFIG) / 2;

  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.FLUSH_SIZE_CONFIG, FLUSH_SIZE_CONFIG);
  props.put(HdfsSinkConnectorConfig.ROTATE_INTERVAL_MS_CONFIG, ROTATE_INTERVAL_MS_CONFIG);
  HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props);
  assignment = new HashSet<>();
  assignment.add(TOPIC_PARTITION);

  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < NUMBER_OF_RECORD; offset++) {
    SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }
  hdfsWriter.write(sinkRecords);

  // wait for rotation to happen
  long start = System.currentTimeMillis();
  long end = start + WAIT_TIME;
  while(System.currentTimeMillis() < end) {
    List<SinkRecord> messageBatch = new ArrayList<>();
    hdfsWriter.write(messageBatch);
  }

  Map<TopicPartition, Long> committedOffsets = hdfsWriter.getCommittedOffsets();
  assertTrue(committedOffsets.containsKey(TOPIC_PARTITION));
  long previousOffset = committedOffsets.get(TOPIC_PARTITION);
  assertEquals(NUMBER_OF_RECORD, previousOffset);

  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 47, Source file: DataWriterAvroTest.java

Example 14: testCommitFailure

@Test
public void testCommitFailure() throws Exception {
  Map<String, String> props = createProps();
  HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  MemoryStorage storage = (MemoryStorage) hdfsWriter.getStorage();
  storage.setFailure(MemoryStorage.Failure.appendFailure);

  hdfsWriter.write(sinkRecords);
  assertEquals(context.timeout(), (long) connectorConfig.getLong(HdfsSinkConnectorConfig.RETRY_BACKOFF_CONFIG));

  Map<String, List<Object>> data = Data.getData();

  String logFile = FileUtils.logFileName(url, logsDir, TOPIC_PARTITION);
  List<Object> content = data.get(logFile);
  assertEquals(null, content);

  hdfsWriter.write(new ArrayList<SinkRecord>());
  content = data.get(logFile);
  assertEquals(null, content);

  Thread.sleep(context.timeout());
  hdfsWriter.write(new ArrayList<SinkRecord>());
  content = data.get(logFile);
  assertEquals(6, content.size());

  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 41, Source file: FailureRecoveryTest.java

Example 15: testSyncWithHiveParquet

@Test
public void testSyncWithHiveParquet() throws Exception {
  Map<String, String> props = createProps();
  HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props);

  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  hdfsWriter.recover(TOPIC_PARTITION);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);

  hdfsWriter = new DataWriter(config, context, avroData);
  hdfsWriter.syncWithHive();

  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field: schema.fields()) {
    expectedColumnNames.add(field.name());
  }

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column: table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);

  List<String> expectedPartitions = new ArrayList<>();
  String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION);
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short)-1);

  assertEquals(expectedPartitions, partitions);

  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 53, Source file: HiveIntegrationParquetTest.java


Note: The org.apache.kafka.connect.data.Schema.STRING_SCHEMA property examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the license of the corresponding project before distributing or using the code; do not reproduce this article without permission.