

Java FileSystem.getWorkingDirectory Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.getWorkingDirectory. If you are wondering how to use FileSystem.getWorkingDirectory, what it is for, or what calling it looks like in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following presents 11 code examples of FileSystem.getWorkingDirectory, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
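Before the examples, a quick orientation: getWorkingDirectory() returns the file system's current working directory as a Path, the base against which relative paths are resolved, and setWorkingDirectory(Path) changes it. The following is a minimal, self-contained sketch; the URI hdfs://namenode:8020 and the directory /tmp/demo are illustrative assumptions, not values taken from the examples below.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirectoryDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The URI below is a placeholder for your cluster address.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf)) {
      // Read the current working directory.
      Path before = fs.getWorkingDirectory();
      System.out.println("working dir: " + before);

      // Change it, then resolve a relative path against the new value.
      fs.setWorkingDirectory(new Path("/tmp/demo"));
      Path resolved = fs.makeQualified(new Path("data.txt"));
      System.out.println("resolved: " + resolved); // hdfs://namenode:8020/tmp/demo/data.txt
    }
  }
}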

Example 1: testWorkingdirectory

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void testWorkingdirectory() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path workingDir = fs.getWorkingDirectory();
  fs.close();

  fs = getHttpFSFileSystem();
  if (isLocalFS()) {
    fs.setWorkingDirectory(workingDir);
  }
  Path httpFSWorkingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(httpFSWorkingDir.toUri().getPath(),
                      workingDir.toUri().getPath());

  fs = getHttpFSFileSystem();
  fs.setWorkingDirectory(new Path("/tmp"));
  workingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath());
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: BaseTestHttpFSWith.java

Example 2: getWorkingDirectory

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Gets the working directory of the file system.
 *
 * @param fileSystemInfo
 *            file system information
 * @return the file system's working directory
 */
public static Path getWorkingDirectory(FileSystemInfo fileSystemInfo) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	try {
		return fs.getWorkingDirectory();
	} finally {
		closeFileSystem(fs);
	}
}
 
Developer: zhangjunfang, Project: alluxio, Lines: 16, Source: HdfsAndAlluxioUtils_update.java
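A note on this helper's design: closing the FileSystem in the finally block is safe only because Path is a plain value object, so the returned working directory remains usable after the close; any subsequent filesystem operation, however, needs a fresh FileSystem instance. A hypothetical call site follows; how a FileSystemInfo is constructed is project-specific and not shown in the excerpt above.

// Hypothetical caller; FileSystemInfo comes from elsewhere in this project.
public static void printWorkingDirectory(FileSystemInfo fileSystemInfo) {
  Path wd = HdfsAndAlluxioUtils_update.getWorkingDirectory(fileSystemInfo);
  System.out.println("working directory: " + wd);
}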

Example 3: testHostsIncludeForDeadCount

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testHostsIncludeForDeadCount() throws Exception {
  Configuration conf = getConf();

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  StringBuilder includeHosts = new StringBuilder();
  includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
      .append("\n");
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    assertTrue(ns.getNumDeadDataNodes() == 2);
    assertTrue(ns.getNumLiveDataNodes() == 0);

    // Testing using MBeans
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestHostsFiles.java
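Examples 3, 7, 8, 9, and 10 all share one setup pattern: resolve a scratch directory against the local file system's working directory, write include/exclude host files into it, and point the NameNode configuration at those files. Below is a condensed sketch of just that pattern; the directory name build/test/data/temp comes from the examples, while the trailing demo segment and the host string are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;

static Configuration configureHostsFiles() throws IOException {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  // Relative paths resolve against the working directory, so the scratch
  // area lands under whatever directory the test JVM was started from.
  Path dir = new Path(localFs.getWorkingDirectory(), "build/test/data/temp/demo");
  localFs.mkdirs(dir);
  Path includeFile = new Path(dir, "include");
  Path excludeFile = new Path(dir, "exclude");
  DFSTestUtil.writeFile(localFs, includeFile, "host1:1234\n"); // illustrative host
  DFSTestUtil.writeFile(localFs, excludeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  return conf;
}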

Example 4: testWorkingDirectory

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Tests get/set working directory in DFS.
 */
@Test
public void testWorkingDirectory() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = 
      fileSys.makeQualified(
          new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
              + "/" + getUserName(fileSys))); 
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);

  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestLocalDFS.java

Example 5: getAbsoluteTestRootPath

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
public Path getAbsoluteTestRootPath(FileSystem fSys) {
  Path testRootPath = new Path(TEST_ROOT_DIR);
  if (testRootPath.isAbsolute()) {
    return testRootPath;
  } else {
    return new Path(fSys.getWorkingDirectory(), TEST_ROOT_DIR);
  }
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: TestNativeAzureFileSystemOperationsMocked.java

Example 6: testEmptyLogDir

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test (timeout=180000)
public void testEmptyLogDir() throws Exception {
  LOG.info("testEmptyLogDir");
  slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER);
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path emptyLogDirPath = new Path(fs.getWorkingDirectory(),
      UUID.randomUUID().toString());
  fs.mkdirs(emptyLogDirPath);
  slm.splitLogDistributed(emptyLogDirPath);
  assertFalse(fs.exists(emptyLogDirPath));
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: TestSplitLogManager.java

Example 7: testLastContactTime

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@SuppressWarnings({ "unchecked" })
@Test
public void testLastContactTime() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    FSNamesystem fsn = cluster.getNameNode().namesystem;

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
      "Hadoop:service=NameNode,name=NameNodeInfo");

    // Define include file to generate deadNodes metrics
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir,
      "build/test/data/temp/TestNameNodeMXBean");
    Path includeFile = new Path(dir, "include");
    assertTrue(localFileSys.mkdirs(dir));
    StringBuilder includeHosts = new StringBuilder();
    for(DataNode dn : cluster.getDataNodes()) {
      includeHosts.append(dn.getDisplayName()).append("\n");
    }
    DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
    fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);

    cluster.stopDataNode(0);
    while (fsn.getBlockManager().getDatanodeManager().getNumLiveDataNodes()
      != 2 ) {
      Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
    }

    // get attribute deadnodeinfo
    String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
      "DeadNodes"));
    assertEquals(fsn.getDeadNodes(), deadnodeinfo);
    Map<String, Map<String, Object>> deadNodes =
      (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
    assertTrue(deadNodes.size() > 0);
    for (Map<String, Object> deadNode : deadNodes.values()) {
      assertTrue(deadNode.containsKey("lastContact"));
      assertTrue(deadNode.containsKey("decommissioned"));
      assertTrue(deadNode.containsKey("xferaddr"));
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 59, Source: TestNameNodeMXBean.java

Example 8: testHostsExcludeInUI

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    String names = name + "\n" + "localhost:42\n";
    LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 57, Source: TestHostsFiles.java

Example 9: testNodeDecomissionRespectsRackPolicy

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 48, Source: TestBlocksWithNotEnoughRacks.java

Example 10: testNodeDecomissionWithOverreplicationRespectsRackPolicy

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() 
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 5;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());

  // All hosts are on two racks, only one host on /rack2
  String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Lower the replication factor so the blocks are over replicated
    REPLICATION_FACTOR = 2;
    fs.setReplication(filePath, REPLICATION_FACTOR);

    // Decommission one of the hosts with the block that is not on
    // the lone host on rack2 (if we decomission that host it would
    // be impossible to respect the rack policy).
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    for (String top : locs[0].getTopologyPaths()) {
      if (!top.startsWith("/rack2")) {
        String name = top.substring("/rack1".length()+1);
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        break;
      }
    }

    // Check the block still has sufficient # replicas across racks,
    // ie we didn't remove the replica on the host on /rack1.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 58, Source: TestBlocksWithNotEnoughRacks.java

Example 11: getBaseTestDirOnTestFS

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * @return where to write test data on the test filesystem; defaults to the
 * working directory of the test filesystem
 * @see #setupDataTestDirOnTestFS()
 * @see #getTestFileSystem()
 */
private Path getBaseTestDirOnTestFS() throws IOException {
  FileSystem fs = getTestFileSystem();
  return new Path(fs.getWorkingDirectory(), "test-data");
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: HBaseTestingUtility.java


Note: The org.apache.hadoop.fs.FileSystem.getWorkingDirectory examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's license before distributing or using the code, and do not reproduce this compilation without permission.