

Java FileSystem.getConf Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.getConf. If you are wondering how FileSystem.getConf is used, what it does, or what it looks like in practice, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.fs.FileSystem.


Nine code examples of the FileSystem.getConf method are shown below, sorted by popularity by default.
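
Before the examples, a quick orientation: FileSystem.getConf() returns the Hadoop Configuration that a FileSystem instance was initialized with, which makes it easy to construct other Hadoop objects with the same settings. The following minimal sketch is not from any of the projects below; the path and the printed key are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetConfSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolve the FileSystem backing a path ("/tmp/example" is illustrative).
    FileSystem fs = new Path("/tmp/example").getFileSystem(conf);

    // getConf() hands back the Configuration the FileSystem was initialized
    // with, so downstream components can reuse exactly the same settings.
    Configuration fsConf = fs.getConf();
    System.out.println("fs.defaultFS = " + fsConf.get("fs.defaultFS"));
  }
}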

Example 1: SequenceFileReader

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public SequenceFileReader(FileSystem fs, Path filePath, Map<String, Object> config) throws IOException {
    super(fs, filePath, new SeqToStruct(), config);

    // Build the reader and the key/value instances from the FileSystem's own
    // Configuration; the buffer size is also looked up in that Configuration.
    this.reader = new SequenceFile.Reader(fs.getConf(),
            SequenceFile.Reader.file(filePath),
            SequenceFile.Reader.bufferSize(fs.getConf().getInt(FILE_READER_BUFFER_SIZE, DEFAULT_BUFFER_SIZE)));
    this.key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), fs.getConf());
    this.value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), fs.getConf());
    this.schema = SchemaBuilder.struct()
            .field(keyFieldName, getSchema(this.key))
            .field(valueFieldName, getSchema(this.value))
            .build();
    this.offset = new SeqOffset(0);
    this.recordIndex = this.hasNextIndex = -1;
    this.hasNext = false;
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 17, Source: SequenceFileReader.java
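
Example 1 shows only the constructor. For context, here is a hedged, self-contained sketch of the read loop such a reader wraps, built from stock Hadoop APIs only; the file path is an assumption, and SeqFileDump is a made-up class name.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class SeqFileDump {
  public static void main(String[] args) throws IOException {
    Path file = new Path("/tmp/data.seq"); // illustrative path
    FileSystem fs = file.getFileSystem(new Configuration());

    try (SequenceFile.Reader reader = new SequenceFile.Reader(fs.getConf(),
        SequenceFile.Reader.file(file))) {
      // Instantiate key/value holders from the classes recorded in the file
      // header, reusing the FileSystem's Configuration as Example 1 does.
      Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), fs.getConf());
      Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), fs.getConf());
      while (reader.next(key, value)) {
        System.out.println(key + "\t" + value);
      }
    }
  }
}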

Example 2: testFileContextDoesntDnsResolveLogicalURI

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Same test as above, but for FileContext.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();
  
  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestDFSClientFailover.java

Example 3: testMiniClusterStore

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testMiniClusterStore() throws EventDeliveryException, IOException {
  // setup a minicluster
  MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new Configuration())
      .build();

  FileSystem dfs = cluster.getFileSystem();
  Configuration conf = dfs.getConf();

  URI hdfsUri = URI.create(
      "dataset:" + conf.get("fs.defaultFS") + "/tmp/repo" + DATASET_NAME);
  try {
    // create a repository and dataset in HDFS
    Datasets.create(hdfsUri, DESCRIPTOR);

    // update the config to use the HDFS repository
    config.put(DatasetSinkConstants.CONFIG_KITE_DATASET_URI, hdfsUri.toString());

    DatasetSink sink = sink(in, config);

    // run the sink
    sink.start();
    sink.process();
    sink.stop();

    Assert.assertEquals(
        Sets.newHashSet(expected),
        read(Datasets.load(hdfsUri)));
    Assert.assertEquals("Should have committed", 0, remaining(in));

  } finally {
    if (Datasets.exists(hdfsUri)) {
      Datasets.delete(hdfsUri);
    }
    cluster.shutdown();
  }
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 39, Source: TestDatasetSink.java

Example 4: DFSAdminCommand

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/** Constructor */
public DFSAdminCommand(FileSystem fs) {
  super(fs.getConf());
  if (!(fs instanceof DistributedFileSystem)) {
    throw new IllegalArgumentException("FileSystem " + fs.getUri() + 
        " is not an HDFS file system");
  }
  this.dfs = (DistributedFileSystem)fs;
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: DFSAdmin.java

Example 5: testNonDefaultFS

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testNonDefaultFS() throws IOException {
  FileSystem fs = cluster.getFileSystem();
  // fs.getConf() returns the FileSystem's own Configuration object; setting
  // the default FS below points the trash test at the mini-cluster.
  Configuration conf = fs.getConf();
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
  TestTrash.trashNonDefaultFS(conf);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestHDFSTrash.java

Example 6: run

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public int run(String[] args, PrintStream stream) throws Exception {
  out = stream;
  List<String> paths = parseArgs(args);
  if (paths.size() != 1) {
    errorln(USAGE);
    return E_USAGE;
  }
  println("Hadoop %s", getVersion());
  println("Compiled by %s on %s", getUser(), getDate());
  println("Compiled with protoc %s", getProtocVersion());
  println("From source with checksum %s", getSrcChecksum());


  Configuration conf = getConf();
  Path path = new Path(paths.get(0));
  FileSystem fs = path.getFileSystem(conf);

  println("Filesystem for %s is %s", path, fs);

  // examine the FS
  Configuration fsConf = fs.getConf();
  for (int i = 0; i < props.length; i++) {
    showProp(fsConf, (String) props[i][0], (Boolean) props[i][1]);
  }

  Path root = fs.makeQualified(new Path("/"));
  try (DurationInfo d = new DurationInfo(LOG,
      "Listing  %s", root)) {
    println("%s has %d entries", root, fs.listStatus(root).length);
  }

  String dirName = "dir-" + UUID.randomUUID();
  Path dir = new Path(root, dirName);
  try (DurationInfo d = new DurationInfo(LOG,
      "Creating a directory %s", dir)) {
    fs.mkdirs(dir);
  }
  try {
    Path file = new Path(dir, "file");
    try (DurationInfo d = new DurationInfo(LOG,
        "Creating a file %s", file)) {
      FSDataOutputStream data = fs.create(file, true);
      data.writeUTF(HELLO);
      data.close();
    }
    try (DurationInfo d = new DurationInfo(LOG,
        "Listing  %s", dir)) {
      fs.listFiles(dir, false);
    }

    try (DurationInfo d = new DurationInfo(LOG,
        "Reading a file %s", file)) {
      FSDataInputStream in = fs.open(file);
      String utf = in.readUTF();
      in.close();
      if (!HELLO.equals(utf)) {
        throw new IOException("Expected " + file + " to contain the text "
            + HELLO + " -but it has the text \"" + utf + "\"");
      }
    }
    try (DurationInfo d = new DurationInfo(LOG,
        "Deleting file %s", file)) {
      fs.delete(file, true);
    }
  } finally {
    try (DurationInfo d = new DurationInfo(LOG,
        "Deleting directory %s", dir)) {
      try {
        fs.delete(dir, true);
      } catch (Exception e) {
        LOG.warn("When deleting {}: ", dir, e);
      }
    }
  }
  return SUCCESS;
}
 
Developer: steveloughran, Project: cloudup, Lines: 82, Source: S3ADiag.java

Example 7: BlockMapBuilder

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public BlockMapBuilder(FileSystem fs, Collection<DrillbitEndpoint> endpoints) {
  this.fs = fs;
  this.codecFactory = new CompressionCodecFactory(fs.getConf());
  this.endPointMap = buildEndpointMap(endpoints);
}
 
Developer: skhalifa, Project: QDrill, Lines: 6, Source: BlockMapBuilder.java
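
This example (and Example 9 below) passes fs.getConf() to CompressionCodecFactory because the factory's codec list (the io.compression.codecs setting) is read from that Configuration. A hedged usage sketch, with an assumed file name and class name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

public class CodecLookup {
  public static void main(String[] args) throws Exception {
    Path file = new Path("/tmp/part-00000.gz"); // illustrative path
    FileSystem fs = file.getFileSystem(new Configuration());

    // The factory reads its codec list (io.compression.codecs) from the given
    // Configuration, so passing fs.getConf() keeps codec resolution consistent
    // with the FileSystem's own settings.
    CompressionCodecFactory factory = new CompressionCodecFactory(fs.getConf());
    CompressionCodec codec = factory.getCodec(file); // matched by file extension
    System.out.println(codec == null ? "no codec" : codec.getClass().getName());
  }
}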

Example 8: changeBlockLen

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
    throws IOException, InterruptedException, TimeoutException {
  final Path fileName = new Path("/file1");
  final short REPLICATION_FACTOR = (short)1;
  final FileSystem fs = cluster.getFileSystem();
  final int fileLen = fs.getConf().getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
  DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);

  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);

  // Change the length of a replica
  for (int i=0; i<cluster.getDataNodes().size(); i++) {
    if (DFSTestUtil.changeReplicaLength(cluster, block, i, lenDelta)) {
      break;
    }
  }

  // increase the file's replication factor
  fs.setReplication(fileName, (short)(REPLICATION_FACTOR+1));

  // block replication triggers corrupt block detection
  DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", 
      cluster.getNameNodePort()), fs.getConf());
  LocatedBlocks blocks = dfsClient.getNamenode().getBlockLocations(
      fileName.toString(), 0, fileLen);
  if (lenDelta < 0) { // replica truncated
    while (!blocks.get(0).isCorrupt() ||
        REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(
          fileName.toString(), 0, fileLen);
    }
  } else { // no corruption detected; block replicated
    while (REPLICATION_FACTOR + 1 != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(
          fileName.toString(), 0, fileLen);
    }
  }
  fs.delete(fileName, true);
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: TestReplication.java

Example 9: BlockMapBuilder

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public BlockMapBuilder(FileSystem fs, Collection<NodeEndpoint> endpoints) {
  this.fs = fs;
  this.codecFactory = new CompressionCodecFactory(fs.getConf());
  this.endPointMap = buildEndpointMap(endpoints);
}
 
Developer: dremio, Project: dremio-oss, Lines: 6, Source: BlockMapBuilder.java


Note: The org.apache.hadoop.fs.FileSystem.getConf examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with those authors; for distribution and use, please refer to each project's license. Do not reproduce without permission.