This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.getWorkingDirectory: what the method does, how to call it, and where it is useful. The curated code examples below should help answer those questions; you can also look further into other usage of the containing class, org.apache.hadoop.fs.FileSystem.
The following presents 11 code examples of the FileSystem.getWorkingDirectory method, sorted by popularity by default.
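Before the excerpts, here is a minimal self-contained sketch of how getWorkingDirectory and setWorkingDirectory interact. It runs against the local file system so no cluster is needed; the class name WorkingDirectoryDemo is illustrative and not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirectoryDemo {
  public static void main(String[] args) throws Exception {
    // Local file system, so the sketch runs without an HDFS cluster.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    try {
      // The working directory is the base against which relative Paths resolve.
      System.out.println("initial working dir: " + fs.getWorkingDirectory());
      fs.setWorkingDirectory(new Path("/tmp"));
      System.out.println("after setWorkingDirectory: " + fs.getWorkingDirectory());
    } finally {
      fs.close();
    }
  }
}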
Example 1: testWorkingdirectory
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private void testWorkingdirectory() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path workingDir = fs.getWorkingDirectory();
  fs.close();

  fs = getHttpFSFileSystem();
  if (isLocalFS()) {
    fs.setWorkingDirectory(workingDir);
  }
  Path httpFSWorkingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(httpFSWorkingDir.toUri().getPath(),
                      workingDir.toUri().getPath());

  fs = getHttpFSFileSystem();
  fs.setWorkingDirectory(new Path("/tmp"));
  workingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(workingDir.toUri().getPath(),
                      new Path("/tmp").toUri().getPath());
}
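This excerpt appears to come from the HttpFS test suite: it checks that the working directory reported through the HttpFS gateway matches the one reported by the proxied file system, and that setting it to /tmp round-trips. The helpers getProxiedFSConf, getHttpFSFileSystem, and isLocalFS are methods of the surrounding test class, not of the Hadoop FileSystem API.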
Example 2: getWorkingDirectory
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Returns the working directory of the given file system.
 *
 * @param fileSystemInfo
 *          file system information
 * @return the file system's working directory
 */
public static Path getWorkingDirectory(FileSystemInfo fileSystemInfo) {
  FileSystem fs = getFileSystem(fileSystemInfo);
  try {
    return fs.getWorkingDirectory();
  } finally {
    closeFileSystem(fs);
  }
}
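Note the try/finally: the FileSystem handle is closed even though a Path is returned; a Path is only a name, so it remains usable after the file system is closed. A hypothetical call site might look like the following, where FileSystemInfo and a preconfigured fileSystemInfo instance are project-specific assumptions rather than Hadoop API:

// Hypothetical usage; fileSystemInfo is assumed to be configured elsewhere.
Path wd = getWorkingDirectory(fileSystemInfo);
System.out.println("working directory: " + wd);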
Example 3: testHostsIncludeForDeadCount
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testHostsIncludeForDeadCount() throws Exception {
  Configuration conf = getConf();

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  StringBuilder includeHosts = new StringBuilder();
  includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
      .append("\n");
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    assertTrue(ns.getNumDeadDataNodes() == 2);
    assertTrue(ns.getNumLiveDataNodes() == 0);

    // Testing using MBeans
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
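The include file above lists two host:port entries, but the cluster is started with numDataNodes(0), so neither host ever registers a DataNode; the test then asserts that both are counted as dead, first via FSNamesystem and again via the FSNamesystemState MBean. (The intermediate string variable nodes is unused in this excerpt.)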
Example 4: testWorkingDirectory
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Tests get/set working directory in DFS.
 */
@Test
public void testWorkingDirectory() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);

    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));

    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
        file1.toString()));

    // test home directory
    Path home = fileSys.makeQualified(
        new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
            + "/" + getUserName(fileSys)));
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
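The resolution rule this test relies on can be checked in isolation. A minimal sketch, using the same imports as the opening example and assuming the local file system:

FileSystem fs = FileSystem.getLocal(new Configuration());
fs.setWorkingDirectory(new Path("/tmp"));
// A relative Path is qualified against the current working directory.
Path qualified = fs.makeQualified(new Path("somewhat/random.txt"));
// qualified should now be something like file:/tmp/somewhat/random.txt
fs.close();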
Example 5: getAbsoluteTestRootPath
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Override
public Path getAbsoluteTestRootPath(FileSystem fSys) {
  Path testRootPath = new Path(TEST_ROOT_DIR);
  if (testRootPath.isAbsolute()) {
    return testRootPath;
  } else {
    return new Path(fSys.getWorkingDirectory(), TEST_ROOT_DIR);
  }
}
Example 6: testEmptyLogDir
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test (timeout=180000)
public void testEmptyLogDir() throws Exception {
  LOG.info("testEmptyLogDir");
  slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER);
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path emptyLogDirPath = new Path(fs.getWorkingDirectory(),
      UUID.randomUUID().toString());
  fs.mkdirs(emptyLogDirPath);
  slm.splitLogDistributed(emptyLogDirPath);
  assertFalse(fs.exists(emptyLogDirPath));
}
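Anchoring the scratch directory at fs.getWorkingDirectory() and appending a random UUID gives each run a unique, collision-free directory; the final assertion then verifies that splitLogDistributed removed the empty log directory it was handed.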
Example 7: testLastContactTime
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@SuppressWarnings({ "unchecked" })
@Test
public void testLastContactTime() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNameNode().namesystem;
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");

    // Define include file to generate deadNodes metrics
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir,
        "build/test/data/temp/TestNameNodeMXBean");
    Path includeFile = new Path(dir, "include");
    assertTrue(localFileSys.mkdirs(dir));
    StringBuilder includeHosts = new StringBuilder();
    for (DataNode dn : cluster.getDataNodes()) {
      includeHosts.append(dn.getDisplayName()).append("\n");
    }
    DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
    fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);

    cluster.stopDataNode(0);
    while (fsn.getBlockManager().getDatanodeManager().getNumLiveDataNodes()
        != 2) {
      Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
    }

    // get attribute deadnodeinfo
    String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
        "DeadNodes"));
    assertEquals(fsn.getDeadNodes(), deadnodeinfo);
    Map<String, Map<String, Object>> deadNodes =
        (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
    assertTrue(deadNodes.size() > 0);
    for (Map<String, Object> deadNode : deadNodes.values()) {
      assertTrue(deadNode.containsKey("lastContact"));
      assertTrue(deadNode.containsKey("decommissioned"));
      assertTrue(deadNode.containsKey("xferaddr"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
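Here getWorkingDirectory only anchors the temporary include file under the local build tree; the substance of the test is that a stopped DataNode shows up in the NameNodeInfo MBean's DeadNodes JSON with lastContact, decommissioned, and xferaddr fields.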
Example 8: testHostsExcludeInUI
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    String names = name + "\n" + "localhost:42\n";
    LOG.info("adding '" + names + "' to exclude file " +
        excludeFile.toUri().getPath());
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    cluster.shutdown();
  }
}
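Example 8's decommission recipe (write the datanode's name to the exclude file, call refreshNodes, then wait for decommission to finish) recurs in the next two examples; in all three, getWorkingDirectory merely places the temporary include and exclude files in a writable location.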
Example 9: testNodeDecomissionRespectsRackPolicy
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
Example 10: testNodeDecomissionWithOverreplicationRespectsRackPolicy
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy()
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 5;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());

  // All hosts are on two racks, only one host on /rack2
  String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Lower the replication factor so the blocks are over replicated
    REPLICATION_FACTOR = 2;
    fs.setReplication(filePath, REPLICATION_FACTOR);

    // Decommission one of the hosts with the block that is not on
    // the lone host on rack2 (if we decommission that host it would
    // be impossible to respect the rack policy).
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    for (String top : locs[0].getTopologyPaths()) {
      if (!top.startsWith("/rack2")) {
        String name = top.substring("/rack1".length() + 1);
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        break;
      }
    }

    // Check the block still has sufficient # replicas across racks,
    // ie we didn't remove the replica on the host on /rack1.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
Example 11: getBaseTestDirOnTestFS
import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * @return where to write test data on the test file system; defaults to
 *         "test-data" under the test file system's working directory
 * @see #setupDataTestDirOnTestFS()
 * @see #getTestFileSystem()
 */
private Path getBaseTestDirOnTestFS() throws IOException {
  FileSystem fs = getTestFileSystem();
  return new Path(fs.getWorkingDirectory(), "test-data");
}
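On HDFS the working directory defaults to the current user's home directory, typically /user/<username>, so by default this places test data under a per-user path on the test file system.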