

Java FsShell.setConf Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FsShell.setConf. If you have been wondering what FsShell.setConf does, how to use it, or where to find examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FsShell.


Fifteen code examples of FsShell.setConf are shown below, sorted by popularity by default.
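Before the individual examples, here is a minimal sketch of the pattern they all share: construct an FsShell, attach a Hadoop Configuration via setConf, then execute a command through run. The class name and the -ls invocation are illustrative assumptions, not taken from any example below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class FsShellSetConfSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // default Hadoop configuration
    FsShell shell = new FsShell();
    shell.setConf(conf);                      // attach the configuration before running commands
    int exitCode = shell.run(new String[] {"-ls", "/"}); // run an FsShell command
    shell.close();
    System.exit(exitCode);
  }
}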

Example 1: testDeleteSnapshotCommandWithIllegalArguments

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
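  // Redirect stdout and stderr into the buffer so the command's
  // error message can be asserted on below.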
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  String[] argv1 = {"-deleteSnapshot", "/tmp"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  
  String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: TestSnapshotDeletion.java

Example 2: testRenameSnapshotCommandWithIllegalArguments

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  
  String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: TestSnapshotRename.java

Example 3: testDeleteSnapshotCommandWithIllegalArguments

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  try {
    System.setOut(psOut);
    System.setErr(psOut);
    FsShell shell = new FsShell();
    shell.setConf(conf);

    String[] argv1 = { "-deleteSnapshot", "/tmp" };
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv1[0] + ": Incorrect number of arguments."));
    out.reset();

    String[] argv2 = { "-deleteSnapshot", "/tmp", "s1", "s2" };
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv2[0] + ": Incorrect number of arguments."));
    psOut.close();
    out.close();
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 32, Source: TestSnapshotDeletion.java

Example 4: testRenameSnapshotCommandWithIllegalArguments

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  try {
    System.setOut(psOut);
    System.setErr(psOut);
    FsShell shell = new FsShell();
    shell.setConf(conf);

    String[] argv1 = { "-renameSnapshot", "/tmp", "s1" };
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv1[0] + ": Incorrect number of arguments."));
    out.reset();

    String[] argv2 = { "-renameSnapshot", "/tmp", "s1", "s2", "s3" };
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv2[0] + ": Incorrect number of arguments."));
    psOut.close();
    out.close();
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 32, Source: TestSnapshotRename.java

Example 5: main

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public static void main(final String[] argv) throws Exception {
    FsShell shell = newShellInstance();
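    // Configuration(false) skips loading the default resources
    // (core-default.xml, core-site.xml); setQuietMode(false) re-enables
    // logging while configuration resources are loaded.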
    Configuration conf = new Configuration(false);
    conf.setQuietMode(false);
    shell.setConf(conf);
    int res;
    try {
        res = ToolRunner.run(shell, argv);
    } finally {
        shell.close();
    }
    System.exit(res);
}
 
Developer ID: joyent, Project: hadoop-manta, Lines: 14, Source: FsShellMantaWrapper.java

Example 6: testCount

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public void testCount() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    String root = createTree(dfs, "count");

    // Verify the counts
    runCount(root, 2, 4, conf);
    runCount(root + "2", 2, 1, conf);
    runCount(root + "2/f1", 0, 1, conf);
    runCount(root + "2/sub", 1, 0, conf);

    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath.makeQualified(localfs);
    localfs.mkdirs(localpath);

    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, conf);
    assertEquals(0, new Count(new String[]{root, localstr}, 0, conf).runAll());
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 34, Source: TestDFSShell.java

Example 7: testChmod

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
/**
 * Test chmod.
 */
void testChmod(Configuration conf, FileSystem fs, String chmodDir)
  throws IOException {
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    //first make dir
    Path dir = new Path(chmodDir);
    fs.delete(dir, true);
    fs.mkdirs(dir);

    runCmd(shell, "-chmod", "u+rwx,g=rw,o-rwx", chmodDir);
    assertEquals("rwxrw----",
                 fs.getFileStatus(dir).getPermission().toString());

    //create an empty file
    Path file = new Path(chmodDir, "file");
    TestDFSShell.writeFile(fs, file);

    //test octal mode
    runCmd(shell, "-chmod", "644", file.toString());
    assertEquals("rw-r--r--",
                 fs.getFileStatus(file).getPermission().toString());

    //test recursive
    runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
    assertEquals("rwxrwxrwx",
                 fs.getFileStatus(dir).getPermission().toString());
    assertEquals("rw-rw-rw-",
                 fs.getFileStatus(file).getPermission().toString());

    fs.delete(dir, true);
  } finally {
    try {
      fs.close();
      shell.close();
    } catch (IOException ignored) {}
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 43, Source: TestDFSShell.java

Example 8: testTouchFederation

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public void testTouchFederation() throws IOException, ParseException {
  Configuration conf = new Configuration();
  int numNamenodes = 2;
  int numDatanodes = 2;
  cluster = new MiniDFSCluster(conf, numDatanodes, true, null, numNamenodes);
  cluster.waitActive();

  // f1, f2 are non-default nameservice
  FileSystem fs1 = cluster.getFileSystem(0);
  FileSystem fs2 = cluster.getFileSystem(1);
  // f3 is the default nameservice
  FileSystem fs3 = FileSystem.get(conf);
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    Path file1 = new Path(fs1.getUri() + "/tmp/federateFile1.txt");
    Path file2 = new Path(fs2.getUri() + "/tmp/federateFile2.txt");
    Path file3 = new Path("/tmp/federateFile3.txt");
    runCmd(shell, "-touch", "" + file1, "" + file2, "" + file3);
    assertTrue("Touch didn't create a file!", fs1.exists(file1));
    assertTrue("Touch didn't create a file!", fs2.exists(file2));
    assertTrue("Touch didn't create a file!", fs3.exists(file3));
  } finally {
    try {
      fs1.close();
      fs2.close();
      fs3.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 34, Source: TestDFSShell.java

Example 9: testCount

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test (timeout = 30000)
public void testCount() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    String root = createTree(dfs, "count");

    // Verify the counts
    runCount(root, 2, 4, shell);
    runCount(root + "2", 2, 1, shell);
    runCount(root + "2/f1", 0, 1, shell);
    runCount(root + "2/sub", 1, 0, shell);

    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath.makeQualified(localfs.getUri(),
        localfs.getWorkingDirectory());
    localfs.mkdirs(localpath);
    
    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, shell);
    assertEquals(0, runCmd(shell, "-count", root, localstr));
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 36, Source: TestDFSShell.java

Example 10: testCount

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testCount() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    String root = createTree(dfs, "count");

    // Verify the counts
    runCount(root, 2, 4, shell);
    runCount(root + "2", 2, 1, shell);
    runCount(root + "2/f1", 0, 1, shell);
    runCount(root + "2/sub", 1, 0, shell);

    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath
        .makeQualified(localfs.getUri(), localfs.getWorkingDirectory());
    localfs.mkdirs(localpath);
    
    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, shell);
    assertEquals(0, runCmd(shell, "-count", root, localstr));
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines: 37, Source: TestDFSShell.java

Example 11: trashEmptier

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
protected void trashEmptier(FileSystem fs, Configuration conf) throws Exception {
  // Trash with 12 second deletes and 6 seconds checkpoints
  conf.set("fs.trash.interval", "0.2"); // 12 seconds
  conf.set("fs.trash.checkpoint.interval", "0.1"); // 6 seconds
  Trash trash = new Trash(conf);
  // clean up trash can
  fs.delete(trash.getCurrentTrashDir().getParent(), true);

  // Start Emptier in background
  Runnable emptier = trash.getEmptier();
  Thread emptierThread = new Thread(emptier);
  emptierThread.start();

  FsShell shell = new FsShell();
  shell.setConf(conf);
  shell.init();
  // First create a new directory with mkdirs
  Path myPath = new Path(TEST_DIR, "test/mkdirs");
  mkdir(fs, myPath);
  int fileIndex = 0;
  Set<String> checkpoints = new HashSet<String>();
  while (true)  {
    // Create a file with a new name
    Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++);
    writeFile(fs, myFile);

    // Delete the file to trash
    assertTrue(rmUsingShell(shell, myFile) == 0);

    Path trashDir = shell.getCurrentTrashDir();
    FileStatus files[] = fs.listStatus(trashDir.getParent());
    // Scan files in .Trash and add them to set of checkpoints
    for (FileStatus file : files) {
      String fileName = file.getPath().getName();
      checkpoints.add(fileName);
    }
    // If checkpoints has 5 objects it is Current + 4 checkpoint directories
    if (checkpoints.size() == 5) {
      // The actual contents should be smaller since the last checkpoint
      // should've been deleted and Current might not have been recreated yet
      assertTrue(5 > files.length);
      break;
    }
    Thread.sleep(5000);
  }
  emptierThread.interrupt();
  emptierThread.join();
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 49, Source: TestTrash.java

Example 12: testCopyToLocalWithStartingOffset

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public void testCopyToLocalWithStartingOffset() throws Exception {
  Configuration conf = new Configuration();
  cluster = new MiniDFSCluster(conf, 2, true, null);
  FileSystem fs = cluster.getFileSystem();
  FileSystem localFs = FileSystem.getLocal(conf);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  String good = "good content";
  try {
    Path directory = new Path("/dir");
    Path srcFile = new Path("/dir/file");
    Path destFile = new Path(TEST_ROOT_DIR, "file");
    assertTrue(fs.mkdirs(directory));
    assertTrue(fs.exists(directory));
    
    for (int offset : new int[]{0, 1}) {
      // clear files
      fs.delete(srcFile, true);
      localFs.delete(destFile, true);
      writeFileContents(fs, srcFile, good, offset);
      String[] args = {"-copyToLocal",
          "-start", Integer.toString(offset),
          srcFile.toUri().getPath(),
          TEST_ROOT_DIR};
      assertEquals(0, shell.run(args));
      assertTrue(localFs.exists(destFile));
      assertEquals("We should get " + good, good,
          readFileContents(localFs, destFile));
      if (offset > 0) {
        show("Test normal read");
        localFs.delete(destFile, true);
        args = new String[]{"-copyToLocal",
            srcFile.toUri().getPath(),
            TEST_ROOT_DIR};
        assertEquals(0, shell.run(args));
        assertTrue(localFs.exists(destFile));
        assertNotSame("We should not get " + good, good,
            readFileContents(localFs, destFile));
        show("Test negative offset read");
        localFs.delete(destFile, true);
        args = new String[]{"-copyToLocal",
            "-start",
            Long.toString(offset - fs.getFileStatus(srcFile).getLen()),
            srcFile.toUri().getPath(),
            TEST_ROOT_DIR};
        assertEquals(0, shell.run(args));
        assertTrue(localFs.exists(destFile));
        assertEquals("We should get " + good, good,
            readFileContents(localFs, destFile));
      } 
    }
  } finally {
    try {
      fs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 60, Source: TestDFSShell.java

Example 13: testFilePermissions

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
public void testFilePermissions() throws IOException {
  Configuration conf = new Configuration();

  //test chmod on local fs
  FileSystem fs = FileSystem.getLocal(conf);
  testChmod(conf, fs,
            (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());

  conf.set("dfs.permissions", "true");

  //test chmod on DFS
  cluster = new MiniDFSCluster(conf, 2, true, null);
  
  try {
    fs = cluster.getFileSystem();
    testChmod(conf, fs, "/tmp/chmodTest");

    // test chown and chgrp on DFS:

    FsShell shell = new FsShell();
    shell.setConf(conf);
    fs = cluster.getFileSystem();

    /*
     * For dfs, I am the super user and I can change owner of any file to
     * anything. "-R" option is already tested by chmod test above.
     */

    String file = "/tmp/chownTest";
    Path path = new Path(file);
    Path parent = new Path("/tmp");
    Path root = new Path("/");
    TestDFSShell.writeFile(fs, path);

    FileStatus oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
    confirmOwner(null, "herbivores", oldStatus, fs, parent, path);

    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chgrp", "mammals", file);
    confirmOwner(null, "mammals", oldStatus, fs, path);

    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "-R", ":reptiles", "/");
    confirmOwner(null, "reptiles", oldStatus, fs, root, parent, path);

    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
    confirmOwner("python", "reptiles", oldStatus, fs, path);

    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
    confirmOwner("hadoop", "toys", oldStatus, fs, root, parent, path);

    // Test different characters in names
    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "hdfs.user", file);
    confirmOwner("hdfs.user", null, oldStatus, fs, path);

    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
    confirmOwner("_Hdfs.User-10", "_hadoop.users--", oldStatus, fs, path);

    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chown", "hdfs/[email protected]:asf-projects", file);
    confirmOwner("hdfs/[email protected]", "asf-projects", oldStatus, fs, path);

    oldStatus = fs.getFileStatus(path);
    runCmd(shell, "-chgrp", "[email protected]/100", file);
    confirmOwner(null, "[email protected]/100", oldStatus, fs, path);

  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 76, Source: TestDFSShell.java

Example 14: testChmod

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
/**
 * Test chmod.
 */
void testChmod(Configuration conf, FileSystem fs, String chmodDir) 
                                                  throws IOException {
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  try {
    //first make dir
    Path dir = new Path(chmodDir);
    fs.delete(dir, true);
    fs.mkdirs(dir);

    confirmPermissionChange(/* Setting */ "u+rwx,g=rw,o-rwx",
                           /* Should give */ "rwxrw----", fs, shell, dir);

    //create an empty file
    Path file = new Path(chmodDir, "file");
    TestDFSShell.writeFile(fs, file);

    //test octal mode
    confirmPermissionChange("644", "rw-r--r--", fs, shell, file);

    //test recursive
    runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
    assertEquals("rwxrwxrwx",
        fs.getFileStatus(dir).getPermission().toString());
    assertEquals("rw-rw-rw-",
        fs.getFileStatus(file).getPermission().toString());

    // Skip "sticky bit" tests on Windows.
    //
    if (!Path.WINDOWS) {
      // test sticky bit on directories
      Path dir2 = new Path(dir, "stickybit");
      fs.mkdirs(dir2);
      LOG.info("Testing sticky bit on: " + dir2);
      LOG.info("Sticky bit directory initial mode: " +
          fs.getFileStatus(dir2).getPermission());

      confirmPermissionChange("u=rwx,g=rx,o=rx", "rwxr-xr-x", fs, shell, dir2);

      confirmPermissionChange("+t", "rwxr-xr-t", fs, shell, dir2);

      confirmPermissionChange("-t", "rwxr-xr-x", fs, shell, dir2);

      confirmPermissionChange("=t", "--------T", fs, shell, dir2);

      confirmPermissionChange("0000", "---------", fs, shell, dir2);

      confirmPermissionChange("1666", "rw-rw-rwT", fs, shell, dir2);

      confirmPermissionChange("777", "rwxrwxrwt", fs, shell, dir2);

      fs.delete(dir2, true);
    } else {
      LOG.info("Skipped sticky bit tests on Windows");
    }

    fs.delete(dir, true);

  } finally {
    try {
      fs.close();
      shell.close();
    } catch (IOException ignored) {}
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 70, Source: TestDFSShell.java

Example 15: testFilePermissions

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test (timeout = 30000)
public void testFilePermissions() throws IOException {
  Configuration conf = new HdfsConfiguration();
  
  //test chmod on local fs
  FileSystem fs = FileSystem.getLocal(conf);
  testChmod(conf, fs, 
            (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
  
  conf.set(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, "true");
  
  //test chmod on DFS
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  fs = cluster.getFileSystem();
  testChmod(conf, fs, "/tmp/chmodTest");
  
  // test chown and chgrp on DFS:
  
  FsShell shell = new FsShell();
  shell.setConf(conf);
  fs = cluster.getFileSystem();
  
  /* For dfs, I am the super user and I can change owner of any file to
   * anything. "-R" option is already tested by chmod test above.
   */
  
  String file = "/tmp/chownTest";
  Path path = new Path(file);
  Path parent = new Path("/tmp");
  Path root = new Path("/");
  TestDFSShell.writeFile(fs, path);
  
  runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
  confirmOwner(null, "herbivores", fs, parent, path);
  
  runCmd(shell, "-chgrp", "mammals", file);
  confirmOwner(null, "mammals", fs, path);
  
  runCmd(shell, "-chown", "-R", ":reptiles", "/");
  confirmOwner(null, "reptiles", fs, root, parent, path);
  
  runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
  confirmOwner("python", "reptiles", fs, path);

  runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
  confirmOwner("hadoop", "toys", fs, root, parent, path);
  
  // Test different characters in names

  runCmd(shell, "-chown", "hdfs.user", file);
  confirmOwner("hdfs.user", null, fs, path);
  
  runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
  confirmOwner("_Hdfs.User-10", "_hadoop.users--", fs, path);
  
  runCmd(shell, "-chown", "hdfs/[email protected]:asf-projects", file);
  confirmOwner("hdfs/[email protected]", "asf-projects", fs, path);
  
  runCmd(shell, "-chgrp", "[email protected]/100", file);
  confirmOwner(null, "[email protected]/100", fs, path);
  
  cluster.shutdown();
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 64, Source: TestDFSShell.java


Note: The org.apache.hadoop.fs.FsShell.setConf method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow the license of the corresponding project. Please do not reproduce this article without permission.