

Java HdfsSinkConnectorConfig.getString Method Code Examples

This article collects typical usage examples of the Java method io.confluent.connect.hdfs.HdfsSinkConnectorConfig.getString. If you are looking for concrete examples of how to call HdfsSinkConnectorConfig.getString, the curated snippets below should help. You can also browse further usage examples of the enclosing class, io.confluent.connect.hdfs.HdfsSinkConnectorConfig.


Seven code examples of HdfsSinkConnectorConfig.getString are shown below, sorted by popularity.
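Before looking at the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: build an HdfsSinkConnectorConfig from a property map and read string-typed settings by their config keys via getString. The property values (local HDFS URL, topics directory, flush size) are illustrative assumptions only, and which keys are mandatory may vary between connector versions.

import java.util.HashMap;
import java.util.Map;

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig;

public class GetStringSketch {
  public static void main(String[] args) {
    // Illustrative values only; a real connector receives these from the Kafka Connect framework.
    Map<String, String> props = new HashMap<>();
    props.put(HdfsSinkConnectorConfig.HDFS_URL_CONFIG, "hdfs://localhost:9000");
    props.put(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG, "topics");
    props.put(HdfsSinkConnectorConfig.FLUSH_SIZE_CONFIG, "3");

    HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);

    // getString returns the configured value, or the key's declared default if it was not set.
    String url = config.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
    String topicsDir = config.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
    System.out.println(url + "/" + topicsDir);
  }
}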

Example 1: HiveMetaStore

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the package/class required by this method
public HiveMetaStore(Configuration conf, HdfsSinkConnectorConfig connectorConfig) throws HiveMetaStoreException {
  HiveConf hiveConf = new HiveConf(conf, HiveConf.class);
  String hiveConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_CONF_DIR_CONFIG);
  String hiveMetaStoreURIs = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_METASTORE_URIS_CONFIG);
  if (hiveMetaStoreURIs.isEmpty()) {
    log.warn("hive.metastore.uris empty, an embedded Hive metastore will be "
             + "created in the directory the connector is started. "
             + "You need to start Hive in that specific directory to query the data.");
  }
  if (!hiveConfDir.equals("")) {
    String hiveSitePath = hiveConfDir + "/hive-site.xml";
    File hiveSite = new File(hiveSitePath);
    if (!hiveSite.exists()) {
      log.warn("hive-site.xml does not exist in provided Hive configuration directory {}.", hiveConf);
    }
    hiveConf.addResource(new Path(hiveSitePath));
  }
  hiveConf.set("hive.metastore.uris", hiveMetaStoreURIs);
  try {
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
  } catch (IOException | MetaException e) {
    throw new HiveMetaStoreException(e);
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 25, Source: HiveMetaStore.java

Example 2: HiveExec

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the package/class required by this method
/**
 * HiveExec constructor
 * @param config HDFS Connector configuration
 */
public HiveExec(HdfsSinkConnectorConfig config) {
  hiveConf = new HiveConf();
  String hiveConfDir = config.getString(HdfsSinkConnectorConfig.HIVE_CONF_DIR_CONFIG);
  hiveConf.addResource(new Path(hiveConfDir, "hive-site.xml"));
  SessionState.start(new CliSessionState(hiveConf));
  cliDriver = new CliDriver();
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 12, Source: HiveExec.java

Example 3: HiveUtil

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the package/class required by this method
public HiveUtil(HdfsSinkConnectorConfig connectorConfig, AvroData avroData, HiveMetaStore hiveMetaStore) {
  this.url = connectorConfig.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
  this.topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
  this.avroData = avroData;
  this.hiveMetaStore = hiveMetaStore;
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 7, Source: HiveUtil.java

Example 4: testeAppend

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the package/class required by this method
@Test
public void testeAppend() throws Exception {
  Map<String, String> props = createProps();
  HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props);

  String topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
  String topic = "topic";
  int partition = 0;
  TopicPartition topicPart = new TopicPartition(topic, partition);

  Path file = new Path(FileUtils.logFileName(url, topicsDir, topicPart));

  WALFile.Writer writer = WALFile.createWriter(conf, WALFile.Writer.file(file));

  WALEntry key1 = new WALEntry("key1");
  WALEntry val1 = new WALEntry("val1");

  WALEntry key2 = new WALEntry("key2");
  WALEntry val2 = new WALEntry("val2");

  writer.append(key1, val1);
  writer.append(key2, val2);
  writer.close();

  verify2Values(file);

  writer = WALFile.createWriter(conf, WALFile.Writer.file(file), WALFile.Writer.appendIfExists(true));

  WALEntry key3 = new WALEntry("key3");
  WALEntry val3 = new WALEntry("val3");

  WALEntry key4 = new WALEntry("key4");
  WALEntry val4 = new WALEntry("val4");

  writer.append(key3, val3);
  writer.append(key4, val4);
  writer.hsync();
  writer.close();

  verifyAll4Values(file);

  fs.deleteOnExit(file);
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 44, Source: WALFileTest.java

Example 5: testHiveIntegrationFieldPartitionerAvro

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the package/class required by this method
@Test
public void testHiveIntegrationFieldPartitionerAvro() throws Exception {
  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  props.put(HdfsSinkConnectorConfig.PARTITIONER_CLASS_CONFIG, FieldPartitioner.class.getName());
  props.put(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG, "int");

  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
  DataWriter hdfsWriter = new DataWriter(config, context, avroData);

  String key = "key";
  Schema schema = createSchema();

  Struct[] records = createRecords(schema);
  ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
  long offset = 0;
  for (Struct record : records) {
    for (long count = 0; count < 3; count++) {
      SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record,
                                             offset + count);
      sinkRecords.add(sinkRecord);
    }
    offset = offset + 3;
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);

  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field: schema.fields()) {
    expectedColumnNames.add(field.name());
  }

  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column: table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);


  String partitionFieldName = config.getString(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG);
  String directory1 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(16);
  String directory2 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(17);
  String directory3 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(18);

  List<String> expectedPartitions = new ArrayList<>();
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory1));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory2));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory3));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short)-1);

  assertEquals(expectedPartitions, partitions);

  ArrayList<String[]> expectedResult = new ArrayList<>();
  for (int i = 16; i <= 18; ++i) {
    String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2"};
    for (int j = 0; j < 3; ++j) {
      expectedResult.add(part);
    }
  }
  String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC);
  String[] rows = result.split("\n");
  assertEquals(9, rows.length);
  for (int i = 0; i < rows.length; ++i) {
    String[] parts = HiveTestUtils.parseOutput(rows[i]);
    for (int j = 0; j < expectedResult.get(i).length; ++j) {
      assertEquals(expectedResult.get(i)[j], parts[j]);
    }
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 75, Source: HiveIntegrationAvroTest.java

Example 6: testHiveIntegrationFieldPartitionerParquet

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the package/class required by this method
@Test
public void testHiveIntegrationFieldPartitionerParquet() throws Exception {
  Map<String, String> props = createProps();
  props.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "true");
  props.put(HdfsSinkConnectorConfig.PARTITIONER_CLASS_CONFIG, FieldPartitioner.class.getName());
  props.put(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG, "int");

  HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);
  DataWriter hdfsWriter = new DataWriter(config, context, avroData);

  String key = "key";
  Schema schema = createSchema();

  Struct[] records = createRecords(schema);
  ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
  long offset = 0;
  for (Struct record : records) {
    for (long count = 0; count < 3; count++) {
      SinkRecord sinkRecord = new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record,
                                             offset + count);
      sinkRecords.add(sinkRecord);
    }
    offset = offset + 3;
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);

  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field: schema.fields()) {
    expectedColumnNames.add(field.name());
  }

  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column: table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }
  assertEquals(expectedColumnNames, actualColumnNames);


  String partitionFieldName = config.getString(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG);
  String directory1 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(16);
  String directory2 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(17);
  String directory3 = TOPIC + "/" + partitionFieldName + "=" + String.valueOf(18);

  List<String> expectedPartitions = new ArrayList<>();
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory1));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory2));
  expectedPartitions.add(FileUtils.directoryName(url, topicsDir, directory3));

  List<String> partitions = hiveMetaStore.listPartitions(hiveDatabase, TOPIC, (short)-1);

  assertEquals(expectedPartitions, partitions);

  ArrayList<String[]> expectedResult = new ArrayList<>();
  for (int i = 16; i <= 18; ++i) {
    String[] part = {"true", String.valueOf(i), "12", "12.2", "12.2"};
    for (int j = 0; j < 3; ++j) {
      expectedResult.add(part);
    }
  }
  String result = HiveTestUtils.runHive(hiveExec, "SELECT * FROM " + TOPIC);
  String[] rows = result.split("\n");
  assertEquals(9, rows.length);
  for (int i = 0; i < rows.length; ++i) {
    String[] parts = HiveTestUtils.parseOutput(rows[i]);
    for (int j = 0; j < expectedResult.get(i).length; ++j) {
      assertEquals(expectedResult.get(i)[j], parts[j]);
    }
  }
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 75, Source: HiveIntegrationParquetTest.java

Example 7: HiveUtil

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the package/class required by this method
public HiveUtil(HdfsSinkConnectorConfig connectorConfig, AvroData avroData, HiveMetaStore hiveMetaStore) {
  this.url = connectorConfig.getString(S3SinkConnectorConfig.S3_URL_CONFIG);
  this.topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
  this.avroData = avroData;
  this.hiveMetaStore = hiveMetaStore;
}
 
Developer ID: qubole, Project: streamx, Lines of code: 7, Source: HiveUtil.java
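Note that Example 7 comes from the streamx fork (qubole/streamx) rather than kafka-connect-hdfs: there the base URL is read via S3SinkConnectorConfig.S3_URL_CONFIG instead of HdfsSinkConnectorConfig.HDFS_URL_CONFIG, while the rest of the HiveUtil constructor matches Example 3.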


Note: The io.confluent.connect.hdfs.HdfsSinkConnectorConfig.getString examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and the source code copyright belongs to the original authors; consult each project's License before distributing or using it, and do not reproduce this article without permission.