

Java Storage Class Code Examples

This article collects typical usage examples of the Java class io.confluent.connect.hdfs.storage.Storage. If you have been wondering what the Storage class does, how to use it, or what working code looks like, the curated examples below should help.


The Storage class belongs to the io.confluent.connect.hdfs.storage package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
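
For orientation: the examples below exercise only a small surface of the Storage abstraction. The sketch that follows is a hypothetical reconstruction of that surface, inferred solely from the calls made in this article (exists, listStatus, wal, conf, url, close); the actual interface in the project may differ.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.kafka.common.TopicPartition;

// Hypothetical sketch, not the real source: method names come from the
// call sites in the examples below; exact signatures may differ.
public interface Storage {
  boolean exists(String path) throws IOException;           // Examples 1, 3
  FileStatus[] listStatus(String path) throws IOException;  // Examples 1, 2
  WAL wal(String logsDir, TopicPartition topicPart);        // Examples 5, 14
  Configuration conf();                                     // Example 9
  String url();                                             // Example 9
  void close() throws IOException;                          // Example 14
}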

Example 1: traverseImpl

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
private static ArrayList<FileStatus> traverseImpl(Storage storage, Path path, PathFilter filter)
    throws IOException {
  if (!storage.exists(path.toString())) {
    return new ArrayList<>();
  }
  ArrayList<FileStatus> result = new ArrayList<>();
  FileStatus[] statuses = storage.listStatus(path.toString());
  for (FileStatus status : statuses) {
    if (status.isDirectory()) {
      result.addAll(traverseImpl(storage, status.getPath(), filter));
    } else {
      if (filter.accept(status.getPath())) {
        result.add(status);
      }
    }
  }
  return result;
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 19, Source: FileUtils.java
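
As a usage note, Example 12 below exposes this recursion through the public traverse wrapper. A hypothetical call site might look like the following; the path, the ".avro" extension, and the pre-initialized storage variable are illustrative assumptions, not taken from the article:

// Collect every file ending in .avro under a topic directory (hypothetical).
PathFilter avroFilter = path -> path.getName().endsWith(".avro");
FileStatus[] matched = FileUtils.traverse(storage, new Path("/topics/mytopic"), avroFilter);
for (FileStatus status : matched) {
  System.out.println(status.getPath());
}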

Example 2: getDirectoriesImpl

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
private static ArrayList<FileStatus> getDirectoriesImpl(Storage storage, Path path)
    throws IOException {
  FileStatus[] statuses = storage.listStatus(path.toString());
  ArrayList<FileStatus> result = new ArrayList<>();
  for (FileStatus status : statuses) {
    if (status.isDirectory()) {
      int count = 0;
      FileStatus[] fileStatuses = storage.listStatus(status.getPath().toString());
      for (FileStatus fileStatus : fileStatuses) {
        if (fileStatus.isDirectory()) {
          result.addAll(getDirectoriesImpl(storage, fileStatus.getPath()));
        } else {
          count++;
        }
      }
      // A directory qualifies only when all of its children are regular
      // files (an empty directory also qualifies, since 0 == 0).
      if (count == fileStatuses.length) {
        result.add(status);
      }
    }
  }
  return result;
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 23, Source: FileUtils.java

Example 3: testTruncate

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
@Test
public void testTruncate() throws Exception {
  Storage storage = new HdfsStorage(conf, url);
  TopicPartition tp = new TopicPartition("mytopic", 123);
  FSWAL wal = new FSWAL("/logs", tp, storage);
  wal.append("a", "b");
  assertTrue("WAL file should exist after append",
          storage.exists("/logs/mytopic/123/log"));
  wal.truncate();
  assertFalse("WAL file should not exist after truncate",
          storage.exists("/logs/mytopic/123/log"));
  assertTrue("Rotated WAL file should exist after truncate",
          storage.exists("/logs/mytopic/123/log.1"));
  wal.append("c", "d");
  assertTrue("WAL file should be recreated after truncate + append",
          storage.exists("/logs/mytopic/123/log"));
  assertTrue("Rotated WAL file should exist after truncate + append",
          storage.exists("/logs/mytopic/123/log.1"));
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 20, Source: FSWALTest.java

Example 4: setUp

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
@Before
public void setUp() throws Exception {
  super.setUp();

  @SuppressWarnings("unchecked")
  Format format = ((Class<Format>) Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.FORMAT_CLASS_CONFIG))).newInstance();
  writerProvider = format.getRecordWriterProvider();
  schemaFileReader = format.getSchemaFileReader(avroData);
  extension = writerProvider.getExtension();
  @SuppressWarnings("unchecked")
  Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
          .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
  storage = StorageFactory.createStorage(storageClass, conf, url);
  createTopicDir(url, topicsDir, TOPIC);
  createLogsDir(url, logsDir);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 17, Source: TopicPartitionWriterTest.java

Example 5: createWALs

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
private void createWALs(Map<TopicPartition, List<String>> tempfiles,
                        Map<TopicPartition, List<String>> committedFiles) throws Exception {
  @SuppressWarnings("unchecked")
  Class<? extends Storage> storageClass = (Class<? extends Storage>)
      Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
  Storage storage = StorageFactory.createStorage(storageClass, conf, url);

  for (TopicPartition tp: tempfiles.keySet()) {
    WAL wal = storage.wal(logsDir, tp);
    List<String> tempList = tempfiles.get(tp);
    List<String> committedList = committedFiles.get(tp);
    wal.append(WAL.beginMarker, "");
    for (int i = 0; i < tempList.size(); ++i) {
      wal.append(tempList.get(i), committedList.get(i));
    }
    wal.append(WAL.endMarker, "");
    wal.close();
  }
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 20, Source: HdfsSinkTaskTest.java

Example 6: wal

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
@Override
public WAL wal(String topicsDir, TopicPartition topicPart) {
  try {
    Class<? extends WAL> walClass = (Class<? extends WAL>) Class
        .forName(config.getString(S3SinkConnectorConfig.WAL_CLASS_CONFIG));
    if (walClass.equals(DummyWAL.class)) {
      return new DummyWAL();
    }
    else {
      Constructor<? extends WAL> ctor = walClass.getConstructor(String.class, TopicPartition.class, Storage.class, HdfsSinkConnectorConfig.class);
      return ctor.newInstance(topicsDir, topicPart, this, config);
    }
  } catch (ClassNotFoundException | NoSuchMethodException | InvocationTargetException | MethodInvocationException | InstantiationException | IllegalAccessException e) {
    throw new ConnectException(e);
  }
}
 
Author: qubole, Project: streamx, Lines: 17, Source: S3Storage.java
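
For reference, the DummyWAL branch above bypasses the reflection path entirely. A minimal no-op sketch of such a class is shown below; it assumes the WAL interface declares exactly the methods used elsewhere in this article (acquireLease, append, apply, truncate, close), which may not match the real interface:

// Hypothetical no-op WAL: performs no logging, so nothing is replayed on recovery.
public class DummyWAL implements WAL {
  @Override public void acquireLease() {}   // no lease to take
  @Override public void append(String tempFile, String committedFile) {}
  @Override public void apply() {}          // nothing to replay
  @Override public void truncate() {}
  @Override public void close() {}
}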

Example 7: setUp

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
@Before
public void setUp() throws Exception {
  super.setUp();

  @SuppressWarnings("unchecked")
  Format format = ((Class<Format>) Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.FORMAT_CLASS_CONFIG))).newInstance();
  writerProvider = format.getRecordWriterProvider();
  schemaFileReader = format.getSchemaFileReader(avroData);
  extension = writerProvider.getExtension();
  @SuppressWarnings("unchecked")
  Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
          .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
  storage = StorageFactory.createStorage(storageClass, connectorConfig, conf, url);
  createTopicDir(url, topicsDir, TOPIC);
  createLogsDir(url, logsDir);
}
 
Author: qubole, Project: streamx, Lines: 17, Source: TopicPartitionWriterTest.java

Example 8: createWALs

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
private void createWALs(Map<TopicPartition, List<String>> tempfiles,
                        Map<TopicPartition, List<String>> committedFiles) throws Exception {
  @SuppressWarnings("unchecked")
  Class<? extends Storage> storageClass = (Class<? extends Storage>)
      Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
  Storage storage = StorageFactory.createStorage(storageClass, connectorConfig, conf, url);

  for (TopicPartition tp: tempfiles.keySet()) {
    WAL wal = storage.wal(logsDir, tp);
    List<String> tempList = tempfiles.get(tp);
    List<String> committedList = committedFiles.get(tp);
    wal.append(WAL.beginMarker, "");
    for (int i = 0; i < tempList.size(); ++i) {
      wal.append(tempList.get(i), committedList.get(i));
    }
    wal.append(WAL.endMarker, "");
    wal.close();
  }
}
 
Author: qubole, Project: streamx, Lines: 20, Source: HdfsSinkTaskTest.java

Example 9: FSWAL

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
public FSWAL(String logsDir, TopicPartition topicPart, Storage storage)
    throws ConnectException {
  this.storage = storage;
  this.conf = storage.conf();
  String url = storage.url();
  logFile = FileUtils.logFileName(url, logsDir, topicPart);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 8, Source: FSWAL.java
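
Read together with Examples 3 and 14, a typical FSWAL lifecycle can be stitched together as in the hypothetical fragment below (conf, url, and the file names are placeholders borrowed from the surrounding tests):

// Hypothetical lifecycle, assembled from calls shown in Examples 3, 5, and 14.
Storage storage = new HdfsStorage(conf, url);
WAL wal = new FSWAL("/logs", new TopicPartition("mytopic", 0), storage);
wal.acquireLease();                          // fence out concurrent writers (Example 14)
wal.append(WAL.beginMarker, "");
wal.append("/tmp/f.avro", "/topics/f.avro"); // temp file -> committed file mapping
wal.append(WAL.endMarker, "");
wal.apply();                                 // replay the log on recovery (Example 14)
wal.truncate();                              // rotate the log to log.1 (Example 3)
wal.close();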

Example 10: TopicPartitionWriter

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
public TopicPartitionWriter(
    TopicPartition tp,
    Storage storage,
    RecordWriterProvider writerProvider,
    Partitioner partitioner,
    HdfsSinkConnectorConfig connectorConfig,
    SinkTaskContext context,
    AvroData avroData) {
  this(tp, storage, writerProvider, partitioner, connectorConfig, context, avroData, null, null, null, null, null);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 11, Source: TopicPartitionWriter.java

Example 11: getStorage

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
public Storage getStorage() {
  return storage;
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 4, Source: DataWriter.java

Example 12: traverse

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
public static FileStatus[] traverse(Storage storage, Path path, PathFilter filter)
    throws IOException {
  ArrayList<FileStatus> result = traverseImpl(storage, path, filter);
  return result.toArray(new FileStatus[result.size()]);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 6, Source: FileUtils.java

Example 13: getDirectories

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
public static FileStatus[] getDirectories(Storage storage, Path path) throws IOException {
  ArrayList<FileStatus> result = getDirectoriesImpl(storage, path);
  return result.toArray(new FileStatus[result.size()]);
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 5, Source: FileUtils.java

Example 14: testWALMultiClient

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
@Test
public void testWALMultiClient() throws Exception {
  fs.delete(new Path(FileUtils.directoryName(url, topicsDir, TOPIC_PARTITION)), true);

  @SuppressWarnings("unchecked")
  Class<? extends Storage> storageClass = (Class<? extends Storage>)
      Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
  Storage storage = StorageFactory.createStorage(storageClass, conf, url);

  final WAL wal1 = storage.wal(topicsDir, TOPIC_PARTITION);
  final WAL wal2 = storage.wal(topicsDir, TOPIC_PARTITION);

  String directory = TOPIC + "/" + String.valueOf(PARTITION);
  final String tempfile = FileUtils.tempFileName(url, topicsDir, directory, extension);
  final String committedFile = FileUtils.committedFileName(url, topicsDir, directory,
                                                           TOPIC_PARTITION, 0, 10, extension,
                                                           ZERO_PAD_FMT);

  fs.createNewFile(new Path(tempfile));

  wal1.acquireLease();
  wal1.append(WAL.beginMarker, "");
  wal1.append(tempfile, committedFile);
  wal1.append(WAL.endMarker, "");

  Thread thread = new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        // hold the lease for a while before closing
        Thread.sleep(3000);
        closed = true;
        wal1.close();
      } catch (ConnectException | InterruptedException e) {
        // Ignored
      }
    }
  });
  thread.start();

  wal2.acquireLease();
  assertTrue(closed);
  wal2.apply();
  wal2.close();

  assertTrue(fs.exists(new Path(committedFile)));
  assertFalse(fs.exists(new Path(tempfile)));
  storage.close();
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 50, Source: WALTest.java

Example 15: testRecovery

import io.confluent.connect.hdfs.storage.Storage; // import the required package/class
@Test
public void testRecovery() throws Exception {
  fs.delete(new Path(FileUtils.directoryName(url, topicsDir, TOPIC_PARTITION)), true);

  @SuppressWarnings("unchecked")
  Class<? extends Storage> storageClass = (Class<? extends Storage>)
      Class.forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
  Storage storage = StorageFactory.createStorage(storageClass, conf, url);

  WAL wal = storage.wal(logsDir, TOPIC_PARTITION);

  wal.append(WAL.beginMarker, "");
  Set<String> committedFiles = new HashSet<>();

  String directory = TOPIC + "/" + "partition=" + String.valueOf(PARTITION);

  for (int i = 0; i < 5; ++i) {
    long startOffset = i * 10;
    long endOffset = (i + 1) * 10 - 1;
    String tempfile = FileUtils.tempFileName(url, topicsDir, directory, extension);
    fs.createNewFile(new Path(tempfile));
    String committedFile = FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION, startOffset,
                                                       endOffset, extension, ZERO_PAD_FMT);
    committedFiles.add(committedFile);
    wal.append(tempfile, committedFile);
  }
  wal.append(WAL.endMarker, "");
  wal.close();

  DataWriter hdfsWriter = new DataWriter(connectorConfig, context, avroData);
  hdfsWriter.recover(TOPIC_PARTITION);
  Map<TopicPartition, Long> offsets = context.offsets();
  assertTrue(offsets.containsKey(TOPIC_PARTITION));
  assertEquals(50L, (long) offsets.get(TOPIC_PARTITION));

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  // Need enough records to trigger file rotation
  ArrayList<SinkRecord> sinkRecords = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    sinkRecords.add(
        new SinkRecord(TOPIC, PARTITION, Schema.STRING_SCHEMA, key, schema, record, 50 + i));
  }

  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();

  committedFiles.add(FileUtils.committedFileName(url, topicsDir, directory, TOPIC_PARTITION,
                                                 50, 52, extension, ZERO_PAD_FMT));
  FileStatus[] statuses = fs.listStatus(new Path(FileUtils.directoryName(url, topicsDir, directory)),
                    new TopicPartitionCommittedFileFilter(TOPIC_PARTITION));
  assertEquals(committedFiles.size(), statuses.length);
  for (FileStatus status : statuses) {
    assertTrue(committedFiles.contains(status.getPath().toString()));
  }
}
 
Author: jiangxiluning, Project: kafka-connect-hdfs, Lines: 59, Source: DataWriterAvroTest.java


Note: The io.confluent.connect.hdfs.storage.Storage class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.