

Java FileSystem.newInstance Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.newInstance. If you are wondering what FileSystem.newInstance does or how to use it in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following shows 10 code examples of the FileSystem.newInstance method, sorted by popularity by default.
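Before the examples, a minimal sketch (the file:/// URI is illustrative) of the key contrast: FileSystem.get may return a shared, cached instance, while FileSystem.newInstance always creates a fresh one that the caller owns and must close itself.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class NewInstanceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // get() may return a cached instance shared with other callers;
    // closing it would affect everyone holding the same object.
    FileSystem shared = FileSystem.get(URI.create("file:///"), conf);
    // newInstance() always constructs a fresh FileSystem, safe to close
    // independently, e.g. via try-with-resources.
    try (FileSystem own = FileSystem.newInstance(URI.create("file:///"), conf)) {
      System.out.println(own.getUri() + " / shared: " + shared.getUri());
    }
  }
}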

Example 1: testFsUniqueness

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testFsUniqueness() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  // multiple invocations of FileSystem.get return the same object.
  FileSystem fs1 = FileSystem.get(conf);
  FileSystem fs2 = FileSystem.get(conf);
  assertTrue(fs1 == fs2);

  // multiple invocations of FileSystem.newInstance return different objects
  fs1 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  fs2 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  assertTrue(fs1 != fs2 && !fs1.equals(fs2));
  fs1.close();
  fs2.close();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 17, Source: TestFileSystemCaching.java

Example 2: testDisableCache

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Test that the socket cache can be disabled by setting the capacity to
 * 0. Regression test for HDFS-3365.
 * @throws Exception 
 */
@Test
public void testDisableCache() throws Exception {
  HdfsConfiguration confWithoutCache = new HdfsConfiguration();
  // Configure a new instance with no peer caching, ensure that it doesn't
  // cache anything
  confWithoutCache.setInt(
      DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
  BlockReaderTestUtil util = new BlockReaderTestUtil(1, confWithoutCache);
  final Path testFile = new Path("/testConnCache.dat");
  util.writeFile(testFile, FILE_SIZE / 1024);
  FileSystem fsWithoutCache = FileSystem.newInstance(util.getConf());
  try {
    DFSTestUtil.readFile(fsWithoutCache, testFile);
    assertEquals(0, ((DistributedFileSystem)fsWithoutCache).
        dfs.getClientContext().getPeerCache().size());
  } finally {
    fsWithoutCache.close();
    util.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestDisableConnCache.java
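The same configuration key can be applied outside a test to turn off the client's peer cache. A sketch under the assumption of a reachable cluster (the hdfs://namenode:8020/ endpoint is illustrative):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class NoPeerCacheClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A capacity of 0 disables the client-side socket (peer) cache,
    // the same knob the regression test above exercises.
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
    try (FileSystem fs = FileSystem.newInstance(URI.create("hdfs://namenode:8020/"), conf)) {
      System.out.println(fs.getUri());
    }
  }
}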

Example 3: configFs

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void configFs(Map<String, Object> customConfigs) throws IOException {
    for (String uri : this.conf.getFsUris()) {
        Configuration fsConfig = new Configuration();
        customConfigs.entrySet().stream()
                .filter(entry -> entry.getKey().startsWith(FsSourceTaskConfig.POLICY_PREFIX_FS))
                .forEach(entry -> fsConfig.set(entry.getKey().replace(FsSourceTaskConfig.POLICY_PREFIX_FS, ""),
                        (String) entry.getValue()));

        Path workingDir = new Path(convert(uri));
        FileSystem fs = FileSystem.newInstance(workingDir.toUri(), fsConfig);
        fs.setWorkingDirectory(workingDir);
        this.fileSystems.add(fs);
    }
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 15, Source: AbstractPolicy.java

Example 4: initFs

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@BeforeClass
public static void initFs() throws IOException {
    clusterConfig = new Configuration();
    hdfsDir = Files.createTempDirectory("test-");
    clusterConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.toAbsolutePath().toString());
    cluster = new MiniDFSCluster.Builder(clusterConfig).build();
    fsUri = URI.create("hdfs://localhost:" + cluster.getNameNodePort() + "/");
    fs = FileSystem.newInstance(fsUri, new Configuration());
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 10, Source: HdfsPolicyTestBase.java

Example 5: initFs

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@BeforeClass
public static void initFs() throws IOException {
    clusterConfig = new Configuration();
    hdfsDir = Files.createTempDirectory("test-");
    clusterConfig.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.toAbsolutePath().toString());
    cluster = new MiniDFSCluster.Builder(clusterConfig).build();
    fsUri = URI.create("hdfs://localhost:" + cluster.getNameNodePort() + "/");
    fs = FileSystem.newInstance(fsUri, clusterConfig);
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 10, Source: HdfsFsSourceTaskTestBase.java

Example 6: testLeaseAfterRenameAndRecreate

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Test that we can open up a file for write, move it to another location,
 * and then create a new file in the previous location, without causing any
 * lease conflicts.  This is possible because we now use unique inode IDs
 * to identify files to the NameNode.
 */
@Test
public void testLeaseAfterRenameAndRecreate() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final Path path1 = new Path("/test-file");
    final String contents1 = "contents1";
    final Path path2 = new Path("/test-file-new-location");
    final String contents2 = "contents2";

    // open a file to get a lease
    FileSystem fs = cluster.getFileSystem();
    FSDataOutputStream out1 = fs.create(path1);
    out1.writeBytes(contents1);
    Assert.assertTrue(hasLease(cluster, path1));
    Assert.assertEquals(1, leaseCount(cluster));

    DistributedFileSystem fs2 = (DistributedFileSystem)
        FileSystem.newInstance(fs.getUri(), fs.getConf());
    fs2.rename(path1, path2);

    FSDataOutputStream out2 = fs2.create(path1);
    out2.writeBytes(contents2);
    out2.close();

    // The first file should still be open and valid
    Assert.assertTrue(hasLease(cluster, path2));
    out1.close();

    // Contents should be as expected
    DistributedFileSystem fs3 = (DistributedFileSystem)
        FileSystem.newInstance(fs.getUri(), fs.getConf());
    Assert.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
    Assert.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 44, Source: TestLease.java
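Note the role newInstance plays in this test: it gives the test a second, independent DFSClient (with its own client name), so the rename and the re-create of path1 come from a different lease holder than the still-open out1 stream.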

Example 7: initFs

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@BeforeClass
public static void initFs() throws IOException {
    localDir = Files.createTempDirectory("test-");
    fsUri = localDir.toUri();
    fs = FileSystem.newInstance(fsUri, new Configuration());
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 7, Source: LocalPolicyTestBase.java

Example 8: HdfsStorage

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public HdfsStorage(Configuration conf, String url) throws IOException {
  fs = FileSystem.newInstance(URI.create(url), conf);
  this.conf = conf;
  this.url = url;
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 6, Source: HdfsStorage.java

Example 9: testBlockRecoveryWithLessMetafile

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Block Recovery when the meta file not having crcs for all chunks in block
 * file
 */
@Test
public void testBlockRecoveryWithLessMetafile() throws Exception {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
      UserGroupInformation.getCurrentUser().getShortUserName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Path file = new Path("/testRecoveryFile");
  DistributedFileSystem dfs = cluster.getFileSystem();
  FSDataOutputStream out = dfs.create(file);
  int count = 0;
  while (count < 2 * 1024 * 1024) {
    out.writeBytes("Data");
    count += 4;
  }
  out.hsync();
  // abort the original stream
  ((DFSOutputStream) out.getWrappedStream()).abort();

  LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
      file.toString(), 0, count);
  ExtendedBlock block = locations.get(0).getBlock();
  DataNode dn = cluster.getDataNodes().get(0);
  BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
  File metafile = new File(localPathInfo.getMetaPath());
  assertTrue(metafile.exists());

  // reduce the block meta file size
  RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
  raf.setLength(metafile.length() - 20);
  raf.close();

  // restart DN to make replica to RWR
  DataNodeProperties dnProp = cluster.stopDataNode(0);
  cluster.restartDataNode(dnProp, true);

  // try to recover the lease
  DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem
      .newInstance(cluster.getConfiguration(0));
  count = 0;
  while (++count < 10 && !newdfs.recoverLease(file)) {
    Thread.sleep(1000);
  }
  assertTrue("File should be closed", newdfs.recoverLease(file));

}
 
Developer: naver, Project: hadoop, Lines: 50, Source: TestLeaseRecovery.java

Example 10: getNewFileSystemInstance

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Get another FileSystem instance that is different from FileSystem.get(conf).
 * This simulating different threads working on different FileSystem instances.
 */
public FileSystem getNewFileSystemInstance(int nnIndex) throws IOException {
  return FileSystem.newInstance(getURI(nnIndex), nameNodes[nnIndex].conf);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: MiniDFSCluster.java
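As the Javadoc above suggests, one natural use is giving each thread its own FileSystem instance. A minimal sketch in that spirit (the file:/// URI is illustrative):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class PerThreadFs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI uri = URI.create("file:///"); // illustrative URI
    Runnable work = () -> {
      // Each thread gets its own, uncached instance, so closing it
      // cannot disturb the instances held by other threads.
      try (FileSystem fs = FileSystem.newInstance(uri, conf)) {
        System.out.println(Thread.currentThread().getName() + " -> " + fs.getUri());
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    };
    Thread t1 = new Thread(work);
    Thread t2 = new Thread(work);
    t1.start(); t2.start();
    t1.join(); t2.join();
  }
}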


Note: The org.apache.hadoop.fs.FileSystem.newInstance method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by the community; copyright of the source code belongs to the original authors, and any use or redistribution should follow the corresponding projects' licenses. Please do not reproduce without permission.