This article collects typical usage examples of the Java method org.apache.hadoop.fs.FsShell.setConf. If you are wondering what FsShell.setConf does, how to call it, or what real-world uses look like, the curated code samples below should help. You can also browse further usage examples of its enclosing class, org.apache.hadoop.fs.FsShell.
The following section presents 15 code examples of the FsShell.setConf method, sorted by popularity by default.
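All of the examples below follow the same basic pattern: construct an FsShell, attach a Hadoop Configuration with setConf, then drive the shell through run() (or ToolRunner). Here is a minimal, self-contained sketch of that pattern; the class name FsShellSetConfDemo and the "-ls /" arguments are only illustrative and not taken from any of the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class FsShellSetConfDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // a default Hadoop Configuration; in the tests below this is usually the cluster's conf
    FsShell shell = new FsShell();
    shell.setConf(conf);                       // the shell resolves FileSystems from this Configuration
    try {
      // "-ls /" is just an illustrative command; any FsShell command line works here
      int exitCode = shell.run(new String[] {"-ls", "/"});
      System.out.println("FsShell exit code: " + exitCode);
    } finally {
      shell.close();                           // close the shell when done, as Example 5 below also does
    }
  }
}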
Example 1: testDeleteSnapshotCommandWithIllegalArguments
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  String[] argv1 = {"-deleteSnapshot", "/tmp"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
Example 2: testRenameSnapshotCommandWithIllegalArguments
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
Example 3: testDeleteSnapshotCommandWithIllegalArguments
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  try {
    System.setOut(psOut);
    System.setErr(psOut);
    FsShell shell = new FsShell();
    shell.setConf(conf);
    String[] argv1 = { "-deleteSnapshot", "/tmp" };
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv1[0] + ": Incorrect number of arguments."));
    out.reset();
    String[] argv2 = { "-deleteSnapshot", "/tmp", "s1", "s2" };
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv2[0] + ": Incorrect number of arguments."));
    psOut.close();
    out.close();
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
  }
}
Example 4: testRenameSnapshotCommandWithIllegalArguments
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  try {
    System.setOut(psOut);
    System.setErr(psOut);
    FsShell shell = new FsShell();
    shell.setConf(conf);
    String[] argv1 = { "-renameSnapshot", "/tmp", "s1" };
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv1[0] + ": Incorrect number of arguments."));
    out.reset();
    String[] argv2 = { "-renameSnapshot", "/tmp", "s1", "s2", "s3" };
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv2[0] + ": Incorrect number of arguments."));
    psOut.close();
    out.close();
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
  }
}
Example 5: main
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public static void main(final String[] argv) throws Exception {
  FsShell shell = newShellInstance();
  Configuration conf = new Configuration(false);
  conf.setQuietMode(false);
  shell.setConf(conf);
  int res;
  try {
    res = ToolRunner.run(shell, argv);
  } finally {
    shell.close();
  }
  System.exit(res);
}
Example 6: testCount
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public void testCount() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  try {
    String root = createTree(dfs, "count");
    // Verify the counts
    runCount(root, 2, 4, conf);
    runCount(root + "2", 2, 1, conf);
    runCount(root + "2/f1", 0, 1, conf);
    runCount(root + "2/sub", 1, 0, conf);
    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath.makeQualified(localfs);
    localfs.mkdirs(localpath);
    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, conf);
    assertEquals(0, new Count(new String[]{root, localstr}, 0, conf).runAll());
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
Example 7: testChmod
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
/**
 * Test chmod.
 */
void testChmod(Configuration conf, FileSystem fs, String chmodDir)
    throws IOException {
  FsShell shell = new FsShell();
  shell.setConf(conf);
  try {
    //first make dir
    Path dir = new Path(chmodDir);
    fs.delete(dir, true);
    fs.mkdirs(dir);
    runCmd(shell, "-chmod", "u+rwx,g=rw,o-rwx", chmodDir);
    assertEquals("rwxrw----",
        fs.getFileStatus(dir).getPermission().toString());
    //create an empty file
    Path file = new Path(chmodDir, "file");
    TestDFSShell.writeFile(fs, file);
    //test octal mode
    runCmd(shell, "-chmod", "644", file.toString());
    assertEquals("rw-r--r--",
        fs.getFileStatus(file).getPermission().toString());
    //test recursive
    runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
    assertEquals("rwxrwxrwx",
        fs.getFileStatus(dir).getPermission().toString());
    assertEquals("rw-rw-rw-",
        fs.getFileStatus(file).getPermission().toString());
    fs.delete(dir, true);
  } finally {
    try {
      fs.close();
      shell.close();
    } catch (IOException ignored) {}
  }
}
Example 8: testTouchFederation
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public void testTouchFederation() throws IOException, ParseException {
  Configuration conf = new Configuration();
  int numNamenodes = 2;
  int numDatanodes = 2;
  cluster = new MiniDFSCluster(conf, numDatanodes, true, null, numNamenodes);
  cluster.waitActive();
  // fs1, fs2 are on non-default nameservices
  FileSystem fs1 = cluster.getFileSystem(0);
  FileSystem fs2 = cluster.getFileSystem(1);
  // fs3 is on the default nameservice
  FileSystem fs3 = FileSystem.get(conf);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  try {
    Path file1 = new Path(fs1.getUri() + "/tmp/federateFile1.txt");
    Path file2 = new Path(fs2.getUri() + "/tmp/federateFile2.txt");
    Path file3 = new Path("/tmp/federateFile3.txt");
    runCmd(shell, "-touch", "" + file1, "" + file2, "" + file3);
    assertTrue("Touch didn't create a file!", fs1.exists(file1));
    assertTrue("Touch didn't create a file!", fs2.exists(file2));
    assertTrue("Touch didn't create a file!", fs3.exists(file3));
  } finally {
    try {
      fs1.close();
      fs2.close();
      fs3.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
Example 9: testCount
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test (timeout = 30000)
public void testCount() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  try {
    String root = createTree(dfs, "count");
    // Verify the counts
    runCount(root, 2, 4, shell);
    runCount(root + "2", 2, 1, shell);
    runCount(root + "2/f1", 0, 1, shell);
    runCount(root + "2/sub", 1, 0, shell);
    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath.makeQualified(localfs.getUri(),
        localfs.getWorkingDirectory());
    localfs.mkdirs(localpath);
    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, shell);
    assertEquals(0, runCmd(shell, "-count", root, localstr));
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
Example 10: testCount
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testCount() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  try {
    String root = createTree(dfs, "count");
    // Verify the counts
    runCount(root, 2, 4, shell);
    runCount(root + "2", 2, 1, shell);
    runCount(root + "2/f1", 0, 1, shell);
    runCount(root + "2/sub", 1, 0, shell);
    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath
        .makeQualified(localfs.getUri(), localfs.getWorkingDirectory());
    localfs.mkdirs(localpath);
    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, shell);
    assertEquals(0, runCmd(shell, "-count", root, localstr));
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
Example 11: trashEmptier
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
protected void trashEmptier(FileSystem fs, Configuration conf) throws Exception {
  // Trash with 12 second deletes and 6 seconds checkpoints
  conf.set("fs.trash.interval", "0.2"); // 12 seconds
  conf.set("fs.trash.checkpoint.interval", "0.1"); // 6 seconds
  Trash trash = new Trash(conf);
  // clean up trash can
  fs.delete(trash.getCurrentTrashDir().getParent(), true);
  // Start Emptier in background
  Runnable emptier = trash.getEmptier();
  Thread emptierThread = new Thread(emptier);
  emptierThread.start();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  shell.init();
  // First create a new directory with mkdirs
  Path myPath = new Path(TEST_DIR, "test/mkdirs");
  mkdir(fs, myPath);
  int fileIndex = 0;
  Set<String> checkpoints = new HashSet<String>();
  while (true) {
    // Create a file with a new name
    Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++);
    writeFile(fs, myFile);
    // Delete the file to trash
    assertTrue(rmUsingShell(shell, myFile) == 0);
    Path trashDir = shell.getCurrentTrashDir();
    FileStatus files[] = fs.listStatus(trashDir.getParent());
    // Scan files in .Trash and add them to set of checkpoints
    for (FileStatus file : files) {
      String fileName = file.getPath().getName();
      checkpoints.add(fileName);
    }
    // If checkpoints has 5 objects it is Current + 4 checkpoint directories
    if (checkpoints.size() == 5) {
      // The actual contents should be smaller since the last checkpoint
      // should've been deleted and Current might not have been recreated yet
      assertTrue(5 > files.length);
      break;
    }
    Thread.sleep(5000);
  }
  emptierThread.interrupt();
  emptierThread.join();
}
Example 12: testCopyToLocalWithStartingOffset
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public void testCopyToLocalWithStartingOffset() throws Exception {
  Configuration conf = new Configuration();
  cluster = new MiniDFSCluster(conf, 2, true, null);
  FileSystem fs = cluster.getFileSystem();
  FileSystem localFs = FileSystem.getLocal(conf);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  String good = "good content";
  try {
    Path directory = new Path("/dir");
    Path srcFile = new Path("/dir/file");
    Path destFile = new Path(TEST_ROOT_DIR, "file");
    assertTrue(fs.mkdirs(directory));
    assertTrue(fs.exists(directory));
    for (int offset : new int[]{0, 1}) {
      // clear files
      fs.delete(srcFile, true);
      localFs.delete(destFile, true);
      writeFileContents(fs, srcFile, good, offset);
      String[] args = {"-copyToLocal",
          "-start", Integer.toString(offset),
          srcFile.toUri().getPath(),
          TEST_ROOT_DIR};
      assertEquals(0, shell.run(args));
      assertTrue(localFs.exists(destFile));
      assertEquals("We should get " + good, good,
          readFileContents(localFs, destFile));
      if (offset > 0) {
        show("Test normal read");
        localFs.delete(destFile, true);
        args = new String[]{"-copyToLocal",
            srcFile.toUri().getPath(),
            TEST_ROOT_DIR};
        assertEquals(0, shell.run(args));
        assertTrue(localFs.exists(destFile));
        assertNotSame("We should not get " + good, good,
            readFileContents(localFs, destFile));
        show("Test negative offset read");
        localFs.delete(destFile, true);
        args = new String[]{"-copyToLocal",
            "-start",
            Long.toString(offset - fs.getFileStatus(srcFile).getLen()),
            srcFile.toUri().getPath(),
            TEST_ROOT_DIR};
        assertEquals(0, shell.run(args));
        assertTrue(localFs.exists(destFile));
        assertEquals("We should get " + good, good,
            readFileContents(localFs, destFile));
      }
    }
  } finally {
    try {
      fs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
Example 13: testFilePermissions
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public void testFilePermissions() throws IOException {
  Configuration conf = new Configuration();
  //test chmod on local fs
  FileSystem fs = FileSystem.getLocal(conf);
  testChmod(conf, fs,
      (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
  conf.set("dfs.permissions", "true");
  //test chmod on DFS
  cluster = new MiniDFSCluster(conf, 2, true, null);
  try {
    fs = cluster.getFileSystem();
    testChmod(conf, fs, "/tmp/chmodTest");
    // test chown and chgrp on DFS:
    FsShell shell = new FsShell();
    shell.setConf(conf);
    fs = cluster.getFileSystem();
    /*
     * For dfs, I am the super user and I can change owner of any file to
     * anything. "-R" option is already tested by chmod test above.
     */
    String file = "/tmp/chownTest";
    Path path = new Path(file);
    Path parent = new Path("/tmp");
    Path root = new Path("/");
    TestDFSShell.writeFile(fs, path);
    FileStatus oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
    confirmOwner(null, "herbivores", oldStatus, fs, parent, path);
    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chgrp", "mammals", file);
    confirmOwner(null, "mammals", oldStatus, fs, path);
    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "-R", ":reptiles", "/");
    confirmOwner(null, "reptiles", oldStatus, fs, root, parent, path);
    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
    confirmOwner("python", "reptiles", oldStatus, fs, path);
    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
    confirmOwner("hadoop", "toys", oldStatus, fs, root, parent, path);
    // Test different characters in names
    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "hdfs.user", file);
    confirmOwner("hdfs.user", null, oldStatus, fs, path);
    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
    confirmOwner("_Hdfs.User-10", "_hadoop.users--", oldStatus, fs, path);
    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "hdfs/[email protected]:asf-projects", file);
    confirmOwner("hdfs/[email protected]", "asf-projects", oldStatus, fs, path);
    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chgrp", "[email protected]/100", file);
    confirmOwner(null, "[email protected]/100", oldStatus, fs, path);
  } finally {
    cluster.shutdown();
  }
}
Example 14: testChmod
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
/**
 * Test chmod.
 */
void testChmod(Configuration conf, FileSystem fs, String chmodDir)
    throws IOException {
  FsShell shell = new FsShell();
  shell.setConf(conf);
  try {
    //first make dir
    Path dir = new Path(chmodDir);
    fs.delete(dir, true);
    fs.mkdirs(dir);
    confirmPermissionChange(/* Setting */ "u+rwx,g=rw,o-rwx",
        /* Should give */ "rwxrw----", fs, shell, dir);
    //create an empty file
    Path file = new Path(chmodDir, "file");
    TestDFSShell.writeFile(fs, file);
    //test octal mode
    confirmPermissionChange("644", "rw-r--r--", fs, shell, file);
    //test recursive
    runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
    assertEquals("rwxrwxrwx",
        fs.getFileStatus(dir).getPermission().toString());
    assertEquals("rw-rw-rw-",
        fs.getFileStatus(file).getPermission().toString());
    // Skip "sticky bit" tests on Windows.
    //
    if (!Path.WINDOWS) {
      // test sticky bit on directories
      Path dir2 = new Path(dir, "stickybit");
      fs.mkdirs(dir2);
      LOG.info("Testing sticky bit on: " + dir2);
      LOG.info("Sticky bit directory initial mode: " +
          fs.getFileStatus(dir2).getPermission());
      confirmPermissionChange("u=rwx,g=rx,o=rx", "rwxr-xr-x", fs, shell, dir2);
      confirmPermissionChange("+t", "rwxr-xr-t", fs, shell, dir2);
      confirmPermissionChange("-t", "rwxr-xr-x", fs, shell, dir2);
      confirmPermissionChange("=t", "--------T", fs, shell, dir2);
      confirmPermissionChange("0000", "---------", fs, shell, dir2);
      confirmPermissionChange("1666", "rw-rw-rwT", fs, shell, dir2);
      confirmPermissionChange("777", "rwxrwxrwt", fs, shell, dir2);
      fs.delete(dir2, true);
    } else {
      LOG.info("Skipped sticky bit tests on Windows");
    }
    fs.delete(dir, true);
  } finally {
    try {
      fs.close();
      shell.close();
    } catch (IOException ignored) {}
  }
}
Example 15: testFilePermissions
import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test (timeout = 30000)
public void testFilePermissions() throws IOException {
  Configuration conf = new HdfsConfiguration();
  //test chmod on local fs
  FileSystem fs = FileSystem.getLocal(conf);
  testChmod(conf, fs,
      (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, "true");
  //test chmod on DFS
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  fs = cluster.getFileSystem();
  testChmod(conf, fs, "/tmp/chmodTest");
  // test chown and chgrp on DFS:
  FsShell shell = new FsShell();
  shell.setConf(conf);
  fs = cluster.getFileSystem();
  /* For dfs, I am the super user and I can change owner of any file to
   * anything. "-R" option is already tested by chmod test above.
   */
  String file = "/tmp/chownTest";
  Path path = new Path(file);
  Path parent = new Path("/tmp");
  Path root = new Path("/");
  TestDFSShell.writeFile(fs, path);
  runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
  confirmOwner(null, "herbivores", fs, parent, path);
  runCmd(shell, "-chgrp", "mammals", file);
  confirmOwner(null, "mammals", fs, path);
  runCmd(shell, "-chown", "-R", ":reptiles", "/");
  confirmOwner(null, "reptiles", fs, root, parent, path);
  runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
  confirmOwner("python", "reptiles", fs, path);
  runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
  confirmOwner("hadoop", "toys", fs, root, parent, path);
  // Test different characters in names
  runCmd(shell, "-chown", "hdfs.user", file);
  confirmOwner("hdfs.user", null, fs, path);
  runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
  confirmOwner("_Hdfs.User-10", "_hadoop.users--", fs, path);
  runCmd(shell, "-chown", "hdfs/[email protected]:asf-projects", file);
  confirmOwner("hdfs/[email protected]", "asf-projects", fs, path);
  runCmd(shell, "-chgrp", "[email protected]/100", file);
  confirmOwner(null, "[email protected]/100", fs, path);
  cluster.shutdown();
}