

Java HdfsSinkConnectorConfig Class Code Examples

This article collects typical usages of the Java class io.confluent.connect.hdfs.HdfsSinkConnectorConfig. If you are wondering what the HdfsSinkConnectorConfig class does, how to use it, or what real examples look like, the curated class code examples below should help.


The HdfsSinkConnectorConfig class belongs to the io.confluent.connect.hdfs package. The following shows 15 code examples of the class, sorted by popularity by default.
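Before the examples, here is a minimal sketch of how an HdfsSinkConnectorConfig is typically built from connector properties and then queried: the pattern every example below relies on. This is only a sketch; it assumes the Map-based constructor and treats hdfs.url and flush.size as the required settings of the connector versions these examples come from, so adjust the keys to your version.

import java.util.HashMap;
import java.util.Map;

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig;

public class HdfsSinkConnectorConfigSketch {
  public static void main(String[] args) {
    // Minimal connector properties; hdfs.url and flush.size are assumed to be
    // the only required settings in the connector versions used below.
    Map<String, String> props = new HashMap<>();
    props.put("hdfs.url", "hdfs://localhost:9000");
    props.put("flush.size", "3");

    HdfsSinkConnectorConfig config = new HdfsSinkConnectorConfig(props);

    // The examples read individual settings through typed getters, e.g. the
    // Hive metastore URIs (empty string by default).
    String metastoreUris = config.getString(HdfsSinkConnectorConfig.HIVE_METASTORE_URIS_CONFIG);
    System.out.println("hive.metastore.uris = " + metastoreUris);
  }
}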

Example 1: HiveMetaStore

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
public HiveMetaStore(Configuration conf, HdfsSinkConnectorConfig connectorConfig) throws HiveMetaStoreException {
  HiveConf hiveConf = new HiveConf(conf, HiveConf.class);
  String hiveConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_CONF_DIR_CONFIG);
  String hiveMetaStoreURIs = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_METASTORE_URIS_CONFIG);
  if (hiveMetaStoreURIs.isEmpty()) {
    log.warn("hive.metastore.uris empty, an embedded Hive metastore will be "
             + "created in the directory the connector is started. "
             + "You need to start Hive in that specific directory to query the data.");
  }
  if (!hiveConfDir.equals("")) {
    String hiveSitePath = hiveConfDir + "/hive-site.xml";
    File hiveSite = new File(hiveSitePath);
    if (!hiveSite.exists()) {
      log.warn("hive-site.xml does not exist in provided Hive configuration directory {}.", hiveConf);
    }
    hiveConf.addResource(new Path(hiveSitePath));
  }
  hiveConf.set("hive.metastore.uris", hiveMetaStoreURIs);
  try {
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
  } catch (IOException | MetaException e) {
    throw new HiveMetaStoreException(e);
  }
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 25, Source: HiveMetaStore.java

Example 2: configure

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Override
public void configure(Map<String, Object> config) {
  String localeString = (String) config.get(HdfsSinkConnectorConfig.LOCALE_CONFIG);
  if (localeString.equals("")) {
    throw new ConfigException(HdfsSinkConnectorConfig.LOCALE_CONFIG,
                              localeString, "Locale cannot be empty.");
  }
  String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG);
  if (timeZoneString.equals("")) {
    throw new ConfigException(HdfsSinkConnectorConfig.TIMEZONE_CONFIG,
                              timeZoneString, "Timezone cannot be empty.");
  }
  String hiveIntString = (String) config.get(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
  boolean hiveIntegration = hiveIntString != null && hiveIntString.toLowerCase().equals("true");
  Locale locale = new Locale(localeString);
  DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
  init(partitionDurationMs, pathFormat, locale, timeZone, hiveIntegration);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 19, Source: HourlyPartitioner.java

Example 3: setUp

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Before
public void setUp() throws Exception {
  super.setUp();

  @SuppressWarnings("unchecked")
  Format format = ((Class<Format>) Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.FORMAT_CLASS_CONFIG))).newInstance();
  writerProvider = format.getRecordWriterProvider();
  schemaFileReader = format.getSchemaFileReader(avroData);
  extension = writerProvider.getExtension();
  @SuppressWarnings("unchecked")
  Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
          .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
  storage = StorageFactory.createStorage(storageClass, conf, url);
  createTopicDir(url, topicsDir, TOPIC);
  createLogsDir(url, logsDir);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 17, Source: TopicPartitionWriterTest.java

Example 4: wal

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Override
public WAL wal(String topicsDir, TopicPartition topicPart) {
  try {
    Class<? extends WAL> walClass = (Class<? extends WAL>) Class
        .forName(config.getString(S3SinkConnectorConfig.WAL_CLASS_CONFIG));
    if (walClass.equals(DummyWAL.class)) {
      return new DummyWAL();
    }
    else {
      Constructor<? extends WAL> ctor = walClass.getConstructor(String.class, TopicPartition.class, Storage.class, HdfsSinkConnectorConfig.class);
      return ctor.newInstance(topicsDir, topicPart, this, config);
    }
  } catch (ClassNotFoundException | NoSuchMethodException | InvocationTargetException | MethodInvocationException | InstantiationException | IllegalAccessException e) {
    throw new ConnectException(e);
  }
}
 
Author: qubole, Project: streamx, Lines: 17, Source: S3Storage.java

Example 5: setUp

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Before
public void setUp() throws Exception {
  super.setUp();

  @SuppressWarnings("unchecked")
  Format format = ((Class<Format>) Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.FORMAT_CLASS_CONFIG))).newInstance();
  writerProvider = format.getRecordWriterProvider();
  schemaFileReader = format.getSchemaFileReader(avroData);
  extension = writerProvider.getExtension();
  @SuppressWarnings("unchecked")
  Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
          .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
  storage = StorageFactory.createStorage(storageClass, connectorConfig, conf, url);
  createTopicDir(url, topicsDir, TOPIC);
  createLogsDir(url, logsDir);
}
 
Author: qubole, Project: streamx, Lines: 17, Source: TopicPartitionWriterTest.java

Example 6: configure

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Override
public void configure(Map<String, Object> config) {
  long partitionDurationMs = (long) config.get(HdfsSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG);
  if (partitionDurationMs < 0) {
    throw new ConfigException(HdfsSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG,
                              partitionDurationMs, "Partition duration needs to be positive.");
  }

  String pathFormat = (String) config.get(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG);
  if (pathFormat.equals("")) {
    throw new ConfigException(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG,
                              pathFormat, "Path format cannot be empty.");
  }

  String localeString = (String) config.get(HdfsSinkConnectorConfig.LOCALE_CONFIG);
  if (localeString.equals("")) {
    throw new ConfigException(HdfsSinkConnectorConfig.LOCALE_CONFIG,
                              localeString, "Locale cannot be empty.");
  }
  String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG);
  if (timeZoneString.equals("")) {
    throw new ConfigException(HdfsSinkConnectorConfig.TIMEZONE_CONFIG,
                              timeZoneString, "Timezone cannot be empty.");
  }

  String hiveIntString = (String) config.get(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
  boolean hiveIntegration = hiveIntString != null && hiveIntString.toLowerCase().equals("true");

  Locale locale = new Locale(localeString);
  DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
  init(partitionDurationMs, pathFormat, locale, timeZone, hiveIntegration);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 33, Source: TimeBasedPartitioner.java

Example 7: addToPartitionFields

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
private void addToPartitionFields(String pathFormat, boolean hiveIntegration) {
  if (hiveIntegration && !verifyDateTimeFormat(pathFormat)) {
    throw new ConfigException(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG, pathFormat,
                              "Path format doesn't meet the requirements for Hive integration, "
                              + "which require prefixing each DateTime component with its name.");
  }
  for (String field: pathFormat.split("/")) {
    String[] parts = field.split("=");
    FieldSchema fieldSchema = new FieldSchema(parts[0].replace("'", ""), TypeInfoFactory.stringTypeInfo.toString(), "");
    partitionFields.add(fieldSchema);
  }
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 13, Source: TimeBasedPartitioner.java

Example 8: setUp

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Before
public void setUp() throws Exception {
  super.setUp();
  hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
  hiveMetaStore = new HiveMetaStore(conf, connectorConfig);
  hiveExec = new HiveExec(connectorConfig);
  cleanHive();
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 9, Source: HiveTestBase.java

Example 9: HiveExec

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
/**
 * HiveExec constructor
 * @param config HDFS Connector configuration
 */
public HiveExec(HdfsSinkConnectorConfig config) {
  hiveConf = new HiveConf();
  String hiveConfDir = config.getString(HdfsSinkConnectorConfig.HIVE_CONF_DIR_CONFIG);
  hiveConf.addResource(new Path(hiveConfDir, "hive-site.xml"));
  SessionState.start(new CliSessionState(hiveConf));
  cliDriver = new CliDriver();
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 12, Source: HiveExec.java

Example 10: testWriteRecordDefaultWithPadding

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Test
public void testWriteRecordDefaultWithPadding() throws Exception {
  Partitioner partitioner = new DefaultPartitioner();
  partitioner.configure(Collections.<String, Object>emptyMap());
  connectorProps.put(HdfsSinkConnectorConfig.FILENAME_OFFSET_ZERO_PAD_WIDTH_CONFIG, "2");
  configureConnector();
  TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(
      TOPIC_PARTITION, storage, writerProvider, partitioner,  connectorConfig, context, avroData);

  String key = "key";
  Schema schema = createSchema();
  Struct[] records = createRecords(schema);

  Collection<SinkRecord> sinkRecords = createSinkRecords(records, key, schema);

  for (SinkRecord record : sinkRecords) {
    topicPartitionWriter.buffer(record);
  }

  topicPartitionWriter.recover();
  topicPartitionWriter.write();
  topicPartitionWriter.close();

  Set<Path> expectedFiles = new HashSet<>();
  expectedFiles.add(new Path(url + "/" + topicsDir + "/" + TOPIC + "/partition=" + PARTITION +
                             "/" + TOPIC + "+" + PARTITION + "+00+02" + extension));
  expectedFiles.add(new Path(url + "/" + topicsDir + "/" + TOPIC + "/partition=" + PARTITION +
                             "/" + TOPIC + "+" + PARTITION + "+03+05" + extension));
  expectedFiles.add(new Path(url + "/" + topicsDir + "/" + TOPIC + "/partition=" + PARTITION +
                             "/" + TOPIC + "+" + PARTITION + "+06+08" + extension));
  verify(expectedFiles, records, schema);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 33, Source: TopicPartitionWriterTest.java

Example 11: testWriteRecordFieldPartitioner

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Test
public void testWriteRecordFieldPartitioner() throws Exception {
  Map<String, Object> config = createConfig();
  Partitioner partitioner = new FieldPartitioner();
  partitioner.configure(config);

  String partitionField = (String) config.get(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG);

  TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(
      TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, context, avroData);

  String key = "key";
  Schema schema = createSchema();
  Struct[] records = createRecords(schema);

  Collection<SinkRecord> sinkRecords = createSinkRecords(records, key, schema);

  for (SinkRecord record : sinkRecords) {
    topicPartitionWriter.buffer(record);
  }

  topicPartitionWriter.recover();
  topicPartitionWriter.write();
  topicPartitionWriter.close();


  String directory1 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(16));
  String directory2 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(17));
  String directory3 = partitioner.generatePartitionedPath(TOPIC, partitionField + "=" + String.valueOf(18));

  Set<Path> expectedFiles = new HashSet<>();
  expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory1, TOPIC_PARTITION, 0, 2, extension, ZERO_PAD_FMT)));
  expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory2, TOPIC_PARTITION, 3, 5, extension, ZERO_PAD_FMT)));
  expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory3, TOPIC_PARTITION, 6, 8, extension, ZERO_PAD_FMT)));

  verify(expectedFiles, records, schema);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 38, Source: TopicPartitionWriterTest.java

Example 12: testWriteRecordTimeBasedPartition

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Test
public void testWriteRecordTimeBasedPartition() throws Exception {
  Map<String, Object> config = createConfig();
  Partitioner partitioner = new TimeBasedPartitioner();
  partitioner.configure(config);

  TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(
      TOPIC_PARTITION, storage, writerProvider, partitioner, connectorConfig, context, avroData);

  String key = "key";
  Schema schema = createSchema();
  Struct[] records = createRecords(schema);

  Collection<SinkRecord> sinkRecords = createSinkRecords(records, key, schema);

  for (SinkRecord record : sinkRecords) {
    topicPartitionWriter.buffer(record);
  }

  topicPartitionWriter.recover();
  topicPartitionWriter.write();
  topicPartitionWriter.close();


  long partitionDurationMs = (Long) config.get(HdfsSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG);
  String pathFormat = (String) config.get(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG);
  String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG);
  long timestamp = System.currentTimeMillis();

  String encodedPartition = TimeUtils.encodeTimestamp(partitionDurationMs, pathFormat, timeZoneString, timestamp);

  String directory = partitioner.generatePartitionedPath(TOPIC, encodedPartition);

  Set<Path> expectedFiles = new HashSet<>();
  expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 0, 2, extension, ZERO_PAD_FMT)));
  expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 3, 5, extension, ZERO_PAD_FMT)));
  expectedFiles.add(new Path(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, 6, 8, extension, ZERO_PAD_FMT)));

  verify(expectedFiles, records, schema);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 41, Source: TopicPartitionWriterTest.java

Example 13: createConfig

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
private Map<String, Object> createConfig() {
  Map<String, Object> config = new HashMap<>();
  config.put(HdfsSinkConnectorConfig.PARTITION_FIELD_NAME_CONFIG, "int");
  config.put(HdfsSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG, TimeUnit.HOURS.toMillis(1));
  config.put(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG, "'year'=YYYY/'month'=MM/'day'=dd/'hour'=HH/");
  config.put(HdfsSinkConnectorConfig.LOCALE_CONFIG, "en");
  config.put(HdfsSinkConnectorConfig.TIMEZONE_CONFIG, "America/Los_Angeles");
  return config;
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 10, Source: TopicPartitionWriterTest.java

Example 14: testDailyPartitioner

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Test
public void testDailyPartitioner() throws Exception {
  Map<String, Object> config = createConfig();

  DailyPartitioner partitioner = new DailyPartitioner();
  partitioner.configure(config);

  String pathFormat = partitioner.getPathFormat();
  String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG);
  long timestamp = new DateTime(2014, 2, 1, 3, 0, 0, 0, DateTimeZone.forID(timeZoneString)).getMillis();
  String encodedPartition = TimeUtils.encodeTimestamp(partitionDurationMs, pathFormat, timeZoneString, timestamp);
  String path = partitioner.generatePartitionedPath("topic", encodedPartition);
  assertEquals("topic/year=2014/month=02/day=01/", path);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 15, Source: DailyPartitionerTest.java

Example 15: testHourlyPartitioner

import io.confluent.connect.hdfs.HdfsSinkConnectorConfig; // import the required package/class
@Test
public void testHourlyPartitioner() throws Exception {
  Map<String, Object> config = createConfig();

  HourlyPartitioner partitioner = new HourlyPartitioner();
  partitioner.configure(config);

  String pathFormat = partitioner.getPathFormat();
  String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG);
  long timestamp = new DateTime(2015, 2, 1, 3, 0, 0, 0, DateTimeZone.forID(timeZoneString)).getMillis();
  String encodedPartition = TimeUtils.encodeTimestamp(partitionDurationMs, pathFormat,
                                                      timeZoneString, timestamp);
  String path = partitioner.generatePartitionedPath("topic", encodedPartition);
  assertEquals("topic/year=2015/month=02/day=01/hour=03/", path);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 16, Source: HourlyPartitionerTest.java


Note: The io.confluent.connect.hdfs.HdfsSinkConnectorConfig class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.