

Java FsShell.run Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FsShell.run. If you are wondering what FsShell.run does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.FsShell.


The following presents 15 code examples of the FsShell.run method, sorted by popularity by default.
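
Before the examples, here is a minimal sketch of a programmatic FsShell.run invocation (written for this article, not taken from the projects below; it assumes a default Hadoop Configuration is available on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class FsShellRunDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // ToolRunner parses generic options (-D, -fs, ...) and then invokes
    // FsShell.run with the remaining command-line arguments.
    int exitCode = ToolRunner.run(new FsShell(conf), new String[] { "-ls", "/" });
    System.exit(exitCode); // 0 on success, non-zero on failure
  }
}

Most of the examples below call shell.run(args) directly instead; ToolRunner merely adds generic-option handling on top of the same method.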

Example 1: testDeleteSnapshotCommandWithIllegalArguments

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  String[] argv1 = {"-deleteSnapshot", "/tmp"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  
  String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source file: TestSnapshotDeletion.java

Example 2: testRenameSnapshotCommandWithIllegalArguments

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  
  String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source file: TestSnapshotRename.java

Example 3: execCmd

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
static String execCmd(FsShell shell, final String[] args) throws Exception {
  ByteArrayOutputStream baout = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(baout, true);
  PrintStream old = System.out;
  System.setOut(out);
  int ret = shell.run(args);
  out.close();
  System.setOut(old);
  return String.valueOf(ret);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 11, Source file: TestFsShellPermission.java

Example 4: execCmd

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
static String execCmd(FsShell shell, String... args) throws Exception {
  ByteArrayOutputStream baout = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(baout, true);
  PrintStream old = System.out;
  System.setOut(out);
  shell.run(args);
  out.close();
  System.setOut(old);
  return baout.toString();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 11, Source file: TestCopyFiles.java
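
Examples 3 and 4 define helpers with the same name but different return values: the first returns the command's exit code as a string, the second returns the text the command printed to stdout. A hedged usage sketch, assuming each helper is called from within its respective test class:

FsShell shell = new FsShell(new Configuration());
// Example 3 variant: "0" indicates the command succeeded
String exitCode = execCmd(shell, new String[] { "-mkdir", "/tmp/demo" });
// Example 4 variant: returns what the command printed, e.g. a recursive listing
String listing = execCmd(shell, "-lsr", "/tmp/demo");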

Example 5: writeInputData

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
/**
 * Write random bytes at the path <inputDir> if needed.
 * @see org.apache.hadoop.mapred.gridmix.GenerateData
 * @return exit status
 */
@SuppressWarnings("deprecation")
protected int writeInputData(long genbytes, Path inputDir)
    throws IOException, InterruptedException {
  if (genbytes > 0) {
    final Configuration conf = getConf();

    if (inputDir.getFileSystem(conf).exists(inputDir)) {
      LOG.error("Gridmix input data directory " + inputDir
                + " already exists when -generate option is used.\n");
      return STARTUP_FAILED_ERROR;
    }

    // configure the compression ratio if needed
    CompressionEmulationUtil.setupDataGeneratorConfig(conf);
  
    final GenerateData genData = new GenerateData(conf, inputDir, genbytes);
    LOG.info("Generating " + StringUtils.humanReadableInt(genbytes) +
             " of test data...");
    launchGridmixJob(genData);
  
    FsShell shell = new FsShell(conf);
    try {
      LOG.info("Changing the permissions for inputPath " + inputDir.toString());
      shell.run(new String[] {"-chmod","-R","777", inputDir.toString()});
    } catch (Exception e) {
      LOG.error("Couldnt change the file permissions " , e);
      throw new IOException(e);
    }

    LOG.info("Input data generation successful.");
  }

  return 0;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 40, Source file: Gridmix.java
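
Note that writeInputData wraps shell.run in a try/catch but ignores the integer it returns. A stricter variant (a sketch, not the actual Gridmix code) would also check the exit code:

int exitCode = shell.run(new String[] { "-chmod", "-R", "777", inputDir.toString() });
if (exitCode != 0) {
  throw new IOException("chmod -R 777 on " + inputDir + " failed with exit code " + exitCode);
}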

Example 6: set94FSLayout

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
/**
 * Lays out 0.94 file system layout using {@link TestNamespaceUpgrade} apis.
 * @param testdir
 * @throws IOException
 * @throws Exception
 */
private static void set94FSLayout(Path testdir) throws IOException, Exception {
  File untar = TestNamespaceUpgrade.untar(new File(testdir.toString()));
  if (!fs.exists(hbaseRootDir.getParent())) {
    // mkdir at first
    fs.mkdirs(hbaseRootDir.getParent());
  }
  FsShell shell = new FsShell(TEST_UTIL.getConfiguration());
  shell.run(new String[] { "-put", untar.toURI().toString(), hbaseRootDir.toString() });
  // See whats in minihdfs.
  shell.run(new String[] { "-lsr", "/" });
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source file: TestUpgradeTo96.java

Example 7: execCmd

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
static String execCmd(FsShell shell, final String[] args) throws Exception {
  ByteArrayOutputStream baout = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(baout, true);
  PrintStream old = System.out;
  int ret;
  try {
    System.setOut(out);
    ret = shell.run(args);
    out.close();
  } finally {
    System.setOut(old);
  }
  return String.valueOf(ret);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 15, Source file: TestFsShellPermission.java
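
This is the same helper as Example 3, with one improvement worth copying: System.out is restored in a finally block, so an exception thrown by shell.run cannot leave the JVM's stdout permanently redirected for later tests. Examples 8 and 9 below apply the same pattern to the snapshot tests from Examples 1 and 2.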

Example 8: testDeleteSnapshotCommandWithIllegalArguments

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  try {
    System.setOut(psOut);
    System.setErr(psOut);
    FsShell shell = new FsShell();
    shell.setConf(conf);

    String[] argv1 = { "-deleteSnapshot", "/tmp" };
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv1[0] + ": Incorrect number of arguments."));
    out.reset();

    String[] argv2 = { "-deleteSnapshot", "/tmp", "s1", "s2" };
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv2[0] + ": Incorrect number of arguments."));
    psOut.close();
    out.close();
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 32, Source file: TestSnapshotDeletion.java

Example 9: testRenameSnapshotCommandWithIllegalArguments

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  try {
    System.setOut(psOut);
    System.setErr(psOut);
    FsShell shell = new FsShell();
    shell.setConf(conf);

    String[] argv1 = { "-renameSnapshot", "/tmp", "s1" };
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv1[0] + ": Incorrect number of arguments."));
    out.reset();

    String[] argv2 = { "-renameSnapshot", "/tmp", "s1", "s2", "s3" };
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString()
        .contains(argv2[0] + ": Incorrect number of arguments."));
    psOut.close();
    out.close();
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 32, Source file: TestSnapshotRename.java

Example 10: writeInputData

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
/**
 * Write random bytes at the path <inputDir> if needed.
 * @see org.apache.hadoop.mapred.gridmix.GenerateData
 * @return exit status
 */
protected int writeInputData(long genbytes, Path inputDir)
    throws IOException, InterruptedException {
  if (genbytes > 0) {
    final Configuration conf = getConf();

    if (inputDir.getFileSystem(conf).exists(inputDir)) {
      LOG.error("Gridmix input data directory " + inputDir
                + " already exists when -generate option is used.\n");
      return STARTUP_FAILED_ERROR;
    }

    // configure the compression ratio if needed
    CompressionEmulationUtil.setupDataGeneratorConfig(conf);
  
    final GenerateData genData = new GenerateData(conf, inputDir, genbytes);
    LOG.info("Generating " + StringUtils.humanReadableInt(genbytes) +
             " of test data...");
    launchGridmixJob(genData);
  
    FsShell shell = new FsShell(conf);
    try {
      LOG.info("Changing the permissions for inputPath " + inputDir.toString());
      shell.run(new String[] {"-chmod","-R","777", inputDir.toString()});
    } catch (Exception e) {
      LOG.error("Couldnt change the file permissions " , e);
      throw new IOException(e);
    }

    LOG.info("Input data generation successful.");
  }

  return 0;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 39, Source file: Gridmix.java

Example 11: cmdUsingShell

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
protected int cmdUsingShell(String cmd, FsShell shell, Path myFile) {
  // Delete the file to trash
  String[] args = new String[2];
  args[0] = cmd;
  args[1] = myFile.toString();
  try {
    return shell.run(args);
  } catch (Exception e) {
    System.err.println("Exception raised from Trash.run " +
                       e.getLocalizedMessage());
  }
  return -1;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 14, Source file: TestTrash.java
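
A hedged usage sketch (shell comes from the surrounding TestTrash fixture; the path is illustrative, and trash is assumed to be enabled in the configuration):

Path myFile = new Path("/user/test/file-to-trash.txt");
// returns the FsShell exit code: 0 if the file was moved to trash, -1 on error
int ret = cmdUsingShell("-rm", shell, myFile);
assertEquals(0, ret);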

Example 12: importFilesToChildTable

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
/**
 * Imports the files that hold the table data into the child instance.
 * @param childTableName the name of the child table to import.
 * @throws Exception
 */
public void importFilesToChildTable(final String childTableName) throws Exception {
    final Configuration childConfig = MergeToolMapper.getChildConfig(conf);
    final AccumuloRdfConfiguration childAccumuloRdfConfiguration = new AccumuloRdfConfiguration(childConfig);
    childAccumuloRdfConfiguration.setTablePrefix(childTablePrefix);
    final Connector childConnector = AccumuloRyaUtils.setupConnector(childAccumuloRdfConfiguration);
    final TableOperations childTableOperations = childConnector.tableOperations();

    final Path localWorkDir = getPath(localCopyFileImportDir, childTableName);
    final Path hdfsBaseWorkDir = getPath(baseImportDir, childTableName);

    final FileSystem fs = FileSystem.get(conf);
    if (fs.exists(hdfsBaseWorkDir)) {
        fs.delete(hdfsBaseWorkDir, true);
    }

    log.info("Importing from the local directory: " + localWorkDir);
    log.info("Importing to the HDFS directory: " + hdfsBaseWorkDir);
    copyLocalToHdfs(localWorkDir, hdfsBaseWorkDir);

    final Path files = getPath(hdfsBaseWorkDir.toString(), "files");
    final Path failures = getPath(hdfsBaseWorkDir.toString(), "failures");

    // With HDFS permissions on, we need to make sure the Accumulo user can read/move the files
    final FsShell hdfs = new FsShell(conf);
    if (!fs.isDirectory(hdfsBaseWorkDir)) {
        throw new IllegalArgumentException("Configured working directory is not a valid directory" + hdfsBaseWorkDir.toString());
    }
    hdfs.run(new String[] {"-chmod", "777", hdfsBaseWorkDir.toString()});
    if (fs.exists(failures)) {
        fs.delete(failures, true);
    }
    fs.mkdirs(failures);

    childTableOperations.importDirectory(childTableName, files.toString(), failures.toString(), false);
}
 
Developer ID: apache, Project: incubator-rya, Lines of code: 41, Source file: CopyTool.java

Example 13: exec

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Override
public int exec(List<String> args, Reader in, Writer out) throws InterruptedException, IOException {
    FsShell fsShell = new FsShellWithIO(configuration, in, out);
    try {
        return fsShell.run(args.subList(1, args.size()).toArray(new String[args.size() - 1]));
    } catch (Exception e) {
        LOGGER.error("Failed to execute hdfs shell with args: " + args, e);
        return 1;
    }
}
 
Developer ID: intropro, Project: prairie, Lines of code: 11, Source file: HdfsShell.java

Example 14: run

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();

    options.addOption(OPTION_INPUT_PATH);
    options.addOption(OPTION_HTABLE_NAME);
    options.addOption(OPTION_CUBE_NAME);
    parseOptions(options, args);

    String tableName = getOptionValue(OPTION_HTABLE_NAME);
    // e.g
    // /tmp/kylin-3f150b00-3332-41ca-9d3d-652f67f044d7/test_kylin_cube_with_slr_ready_2_segments/hfile/
    // end with "/"
    String input = getOptionValue(OPTION_INPUT_PATH);

    Configuration conf = HBaseConnection.getCurrentHBaseConfiguration();
    FsShell shell = new FsShell(conf);

    int exitCode = -1;
    int retryCount = 10;
    while (exitCode != 0 && retryCount >= 1) {
        exitCode = shell.run(new String[] { "-chmod", "-R", "777", input });
        retryCount--;
        Thread.sleep(5000);
    }

    if (exitCode != 0) {
        logger.error("Failed to change the file permissions: " + input);
        throw new IOException("Failed to change the file permissions: " + input);
    }

    String[] newArgs = new String[2];
    newArgs[0] = input;
    newArgs[1] = tableName;

    logger.debug("Start to run LoadIncrementalHFiles");
    int ret = ToolRunner.run(new LoadIncrementalHFiles(conf), newArgs);
    logger.debug("End to run LoadIncrementalHFiles");
    return ret;
}
 
Developer ID: apache, Project: kylin, Lines of code: 41, Source file: BulkLoadJob.java
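
One quirk worth noting: the retry loop above sleeps for five seconds after every attempt, including a successful one. A variant that only backs off between failed attempts (a sketch, not the actual Kylin code) could look like:

int exitCode = -1;
for (int retry = 0; retry < 10 && exitCode != 0; retry++) {
    exitCode = shell.run(new String[] { "-chmod", "-R", "777", input });
    if (exitCode != 0) {
        Thread.sleep(5000); // back off before the next attempt
    }
}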

Example 15: testDelete

import org.apache.hadoop.fs.FsShell; // import the package/class this method depends on
/** test -delete */
public void testDelete() throws Exception {
  final Configuration conf = new Configuration();
  conf.setInt("fs.trash.interval", 60);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final URI nnURI = FileSystem.getDefaultUri(conf);
    final String nnUri = nnURI.toString();
    final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);

    final DistCpV1 distcp = new DistCpV1(conf);
    final FsShell shell = new FsShell(conf);  

    final String srcrootdir = "/src_root";
    final String dstrootdir = "/dst_root";

    {
      //create source files
      createFiles(nnURI, srcrootdir);
      String srcresults = execCmd(shell, "-lsr", srcrootdir);
      srcresults = removePrefix(srcresults, srcrootdir);
      System.out.println("srcresults=" +  srcresults);

      //create some files in dst
      createFiles(nnURI, dstrootdir);
      System.out.println("dstrootdir=" +  dstrootdir);
      shell.run(new String[]{"-lsr", dstrootdir});

      //run distcp
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log",
                       nnUri+srcrootdir, nnUri+dstrootdir});

      //make sure src and dst contains the same files
      String dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("first dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);

      //create additional file in dst
      create(fs, new Path(dstrootdir, "foo"));
      create(fs, new Path(dstrootdir, "foobar"));

      //run distcp again
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log2",
                       nnUri+srcrootdir, nnUri+dstrootdir});
      
      //make sure src and dst contains the same files
      dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("second dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);
      // verify that files removed in -delete were moved to the trash
      // regrettably, this test will break if Trash changes incompatibly
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foo")));
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foobar")));

      //cleanup
      deldir(fs, dstrootdir);
      deldir(fs, srcrootdir);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 70, Source file: TestCopyFiles.java


Note: The org.apache.hadoop.fs.FsShell.run method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. When redistributing or using the code, please follow the corresponding project's license. Do not reproduce this article without permission.