

Java Shell.ShellCommandExecutor Code Examples

This article collects typical usage examples of Java Shell.ShellCommandExecutor from org.apache.hadoop.util.Shell. Strictly speaking, ShellCommandExecutor is a nested class of Shell rather than a method; the examples below all call its constructor. If you are wondering what Shell.ShellCommandExecutor does, how to use it, or where to find working examples, the curated snippets below should help. You can also explore other usage examples of org.apache.hadoop.util.Shell.


Eight Shell.ShellCommandExecutor code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
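Before the collected examples, here is a minimal, self-contained sketch of the typical lifecycle: construct the executor with an argv-style command array (plus an optional working directory and environment), call execute(), then read the exit code and captured stdout. The ls -l command and /tmp directory are illustrative placeholders only, not taken from the examples below.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.util.Shell;

public class ShellCommandExecutorDemo {
  public static void main(String[] args) throws IOException {
    // The command is an argv-style array; no shell word-splitting is applied.
    Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(
        new String[] { "ls", "-l" },   // command and its arguments
        new File("/tmp"));             // working directory (optional overload)

    shexc.execute();                   // runs the command; throws IOException,
                                       // including on a non-zero exit status

    System.out.println("exit code: " + shexc.getExitCode());
    System.out.println("stdout:\n" + shexc.getOutput());
  }
}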

Example 1: testShellScriptBuilderNonZeroExitCode

import org.apache.hadoop.util.Shell; //import the package/class this method depends on
/**
 * Test that the script exits with a non-zero exit code when the command fails.
 * @throws IOException
 */
@Test (timeout = 10000)
public void testShellScriptBuilderNonZeroExitCode() throws IOException {
  ShellScriptBuilder builder = ShellScriptBuilder.create();
  builder.command(Arrays.asList(new String[] {"unknownCommand"}));
  File shellFile = Shell.appendScriptExtension(tmpDir, "testShellScriptBuilderError");
  PrintStream writer = new PrintStream(new FileOutputStream(shellFile));
  builder.write(writer);
  writer.close();
  try {
    FileUtil.setExecutable(shellFile, true);

    Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(
        new String[]{shellFile.getAbsolutePath()}, tmpDir);
    try {
      shexc.execute();
      fail("builder shell command was expected to throw");
    }
    catch(IOException e) {
      // expected
      System.out.println("Received an expected exception: " + e.getMessage());
    }
  }
  finally {
    FileUtil.fullyDelete(shellFile);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 31, Source file: TestContainerLaunch.java
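A note on the catch block above: when the launched command exits with a non-zero status, execute() throws Shell.ExitCodeException, a subclass of IOException that carries the exit status and the captured stderr, which is why catching IOException is enough here. Examples 5 and 6 below catch it specifically; a minimal sketch of that pattern:

import java.io.IOException;
import org.apache.hadoop.util.Shell;

class ExitCodeDemo {
  static void runAndReport(Shell.ShellCommandExecutor shexc) throws IOException {
    try {
      shexc.execute();
    } catch (Shell.ExitCodeException e) {
      // The exception message is the stderr captured by the executor.
      System.err.println("failed with exit code " + e.getExitCode()
          + ": " + e.getMessage());
      throw e;
    }
  }
}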

Example 2: shellExec

import org.apache.hadoop.util.Shell; //import the package/class this method depends on
private Shell.ShellCommandExecutor shellExec(String command) {
  try {

    Shell.ShellCommandExecutor shExec = new Shell.ShellCommandExecutor(
        command.split("\\s+"),
        new File(workDir.toUri().getPath()),
        System.getenv());
    shExec.execute();
    return shExec;
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source file: TestDockerContainerExecutor.java
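For context, a helper like this would presumably be invoked from the surrounding Docker-executor test roughly as follows; the docker info command is an illustrative placeholder, not taken from the original test:

// Hypothetical call site for the shellExec helper above.
Shell.ShellCommandExecutor shExec = shellExec("docker info");
System.out.println("exit code: " + shExec.getExitCode());
System.out.println(shExec.getOutput());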

Example 3: createNewJob

import org.apache.hadoop.util.Shell; //import the package/class this method depends on
public static int createNewJob(String name, String metainfo,
    Configuration conf, int memory, int cpus) throws IOException {
  
  String slurmWorkDir = getHPCWorkDir(conf);
  if (metainfo == null)
    metainfo = "";
  String sbatchCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_SLURM_SBATCH,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_SLURM_SBATCH);
  try {
    String[] command = { sbatchCmd, "--job-name=" + name,
        "--comment=" + metainfo, "--workdir=" + slurmWorkDir,
        "--mem=" + memory, "--cpus-per-task=" + cpus };

    // This creates a parent job slot with a simple script.
    // The idea is to have a script that does not take up any computational
    // resources but keeps running until we ask it to stop: it sends a STOP
    // signal to itself and goes into an endless wait. The actual YARN task
    // will later be added as a job step to this parent job.
    String scancelCmd = conf.get(
        HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_SLURM_SCANCEL,
        HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_SLURM_SCANCEL);
    String script = "#!" + System.getenv("SHELL") + "\n" + scancelCmd
        + " -b -s STOP $SLURM_JOB_ID";
    Shell.ShellCommandExecutor shell = new Shell.ShellCommandExecutor(command);
    StdInputThread exec = new StdInputThread(shell, script);
    exec.start();
    shell.execute();
    exec.checkException();
    String result = shell.getOutput();

    Matcher matcher = Pattern.compile("(\\d+)").matcher(result);
    if (!matcher.find())
      throw new NumberFormatException("Invalid output for: " + result);
    return Integer.parseInt(matcher.group(1));
  } catch (Throwable t) {
    LOG.error("Failed to allocate a container.", t);
    throw new Error("Failed to allocate a container.", t);
  }
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 41, Source file: HPCCommandExecutor.java
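StdInputThread, used here and again in examples 4, 7, and 8, is a helper from the scheduling-connector-for-hadoop project that feeds the generated script to the child process's stdin while execute() runs; its real implementation is not shown on this page. Below is only a plausible sketch of such a helper, assuming it polls Shell.getProcess() (a real accessor on Hadoop's Shell class) until the process has been forked and then writes the script to its output stream. The class name and polling strategy are assumptions, not the project's actual code.

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.util.Shell;

class StdInputThreadSketch extends Thread {
  private final Shell.ShellCommandExecutor shell;
  private final String input;
  private volatile IOException error;

  StdInputThreadSketch(Shell.ShellCommandExecutor shell, String input) {
    this.shell = shell;
    this.input = input;
  }

  @Override
  public void run() {
    try {
      // execute() forks the process on the caller's thread, so wait until
      // the Process object becomes visible before writing to its stdin.
      Process p;
      while ((p = shell.getProcess()) == null) {
        Thread.sleep(10);
      }
      try (OutputStream stdin = p.getOutputStream()) {
        stdin.write(input.getBytes(StandardCharsets.UTF_8));
        stdin.flush();
      }
    } catch (IOException e) {
      error = e;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  // Lets the caller surface any write failure after execute() returns.
  void checkException() throws IOException {
    if (error != null) throw error;
  }
}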

Example 4: main

import org.apache.hadoop.util.Shell; //import the package/class this method depends on
public static void main(String[] args) throws InterruptedException,
    IOException {
  if (args != null && args.length >= 6) {
    System.out.println("PBS Job ID : " + args[0] + ", Host Name :" + args[1]
        + ", Port :" + args[2]);
  } else {
    // args[3..5] are also required below, so demand all six arguments.
    System.err.println("Invalid number of arguments : "
        + (args == null ? 0 : args.length));
    System.exit(-1);
  }

  // args[0] looks like "<jobId>.<server>"; keep only the numeric job id.
  ClientSocket clientSocket = new ClientSocket(
      Integer.parseInt(args[0].substring(0, args[0].indexOf("."))),
      args[1], Integer.parseInt(args[2]), Integer.parseInt(args[3]),
      Integer.parseInt(args[4]), Integer.parseInt(args[5]));
  clientSocket.initialize();
  while (!clientSocket.isCmdReady()) {
    Thread.sleep(100);
  }
  String cmdString = clientSocket.getCmdString();
  if ("####SHUTDOWN####".equals(cmdString)) {
    System.out
        .println("Received shutdown command from master, Shutting down.");
    System.exit(0);
  }
  Shell.ShellCommandExecutor shell = new Shell.ShellCommandExecutor(
      new String[] { "sh" });
  StdInputThread exec = new StdInputThread(shell, cmdString);
  exec.start();
  shell.execute();
  exec.checkException();
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 31, Source file: ContainerLauncher.java

Example 5: testInvalidEnvSyntaxDiagnostics

import org.apache.hadoop.util.Shell; //import the package/class this method depends on
@Test (timeout = 20000)
public void testInvalidEnvSyntaxDiagnostics() throws IOException  {

  File shellFile = null;
  try {
    shellFile = Shell.appendScriptExtension(tmpDir, "hello");
    Map<Path, List<String>> resources =
        new HashMap<Path, List<String>>();
    FileOutputStream fos = new FileOutputStream(shellFile);
    FileUtil.setExecutable(shellFile, true);

    Map<String, String> env = new HashMap<String, String>();
    // invalid env
    env.put(
        "APPLICATION_WORKFLOW_CONTEXT", "{\"workflowId\":\"609f91c5cd83\"," +
        "\"workflowName\":\"\n\ninsert table " +
        "\npartition (cd_education_status)\nselect cd_demo_sk, cd_gender, " );
    List<String> commands = new ArrayList<String>();
    new DefaultContainerExecutor().writeLaunchEnv(fos, env, resources, commands);
    fos.flush();
    fos.close();

    // Force LANG=C so the expected error message is emitted in English.
    Map<String, String> cmdEnv = new HashMap<String, String>();
    cmdEnv.put("LANG", "C");
    Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(
        new String[] { shellFile.getAbsolutePath() }, tmpDir, cmdEnv);
    String diagnostics = null;
    try {
      shexc.execute();
      Assert.fail("Should catch exception");
    } catch(ExitCodeException e){
      diagnostics = e.getMessage();
    }
    Assert.assertTrue(diagnostics.contains(Shell.WINDOWS ?
        "is not recognized as an internal or external command" :
        "command not found"));
    Assert.assertTrue(shexc.getExitCode() != 0);
  }
  finally {
    // cleanup
    if (shellFile != null
        && shellFile.exists()) {
      shellFile.delete();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 49, Source file: TestContainerLaunch.java

Example 6: testContainerLaunchStdoutAndStderrDiagnostics

import org.apache.hadoop.util.Shell; //import the package/class this method depends on
@Test (timeout = 20000)
public void testContainerLaunchStdoutAndStderrDiagnostics() throws IOException {

  File shellFile = null;
  try {
    shellFile = Shell.appendScriptExtension(tmpDir, "hello");
    // echo "hello" to stdout, echo "error" to stderr, and exit with code 2
    String command = Shell.WINDOWS ?
        "@echo \"hello\" & @echo \"error\" 1>&2 & exit /b 2" :
        "echo \"hello\"; echo \"error\" 1>&2; exit 2;";
    PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile));
    FileUtil.setExecutable(shellFile, true);
    writer.println(command);
    writer.close();
    Map<Path, List<String>> resources =
        new HashMap<Path, List<String>>();
    FileOutputStream fos = new FileOutputStream(shellFile, true);

    Map<String, String> env = new HashMap<String, String>();
    List<String> commands = new ArrayList<String>();
    commands.add(command);
    ContainerExecutor exec = new DefaultContainerExecutor();
    exec.writeLaunchEnv(fos, env, resources, commands);
    fos.flush();
    fos.close();

    Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(
        new String[] { shellFile.getAbsolutePath() }, tmpDir);
    String diagnostics = null;
    try {
      shexc.execute();
      Assert.fail("Should catch exception");
    } catch(ExitCodeException e){
      diagnostics = e.getMessage();
    }
    // test stderr
    Assert.assertTrue(diagnostics.contains("error"));
    // test stdout
    Assert.assertTrue(shexc.getOutput().contains("hello"));
    Assert.assertTrue(shexc.getExitCode() == 2);
  }
  finally {
    // cleanup
    if (shellFile != null
        && shellFile.exists()) {
      shellFile.delete();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 50, Source file: TestContainerLaunch.java

Example 7: startLogAggregation

import org.apache.hadoop.util.Shell; //import the package/class this method depends on
@SuppressWarnings("deprecation")
public static void startLogAggregation(ApplicationAttemptId appAttemptId,
    Configuration conf) {
  String slurmWorkDir = getHPCWorkDir(conf);
  String sbatchCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_SLURM_SBATCH,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_SLURM_SBATCH);
  String srunCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_SLURM_SRUN,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_SLURM_SRUN);
 
  Map<String, String> launchEnv = new HashMap<String, String>();
  
  // Hadoop jars
  for (String c : conf.getStrings(
      YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
    Apps.addToEnvironment(launchEnv, Environment.CLASSPATH.name(), c.trim());
  }
  Map<String, String> environment = setUpEnvironment(launchEnv,
      getHPCLogDir(conf));
  
  try {
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    String logCmd = System.getenv("JAVA_HOME") + "/bin/java "
        + "org.apache.hadoop.yarn.hpc.log.HPCLogAggregateHandler "
        + appAttemptId.getApplicationId().toString() + " " + user;
    String[] command = { sbatchCmd, "--job-name=HPCLogAggregateHandler",
        "--share", "--workdir=" + slurmWorkDir };

    String script = "#!" + System.getenv("SHELL") + "\n"
        + "nodes=$(sinfo -h -o %D --state=idle,alloc,mixed,future,completing)\n"
        + srunCmd + " --share -N$nodes " + logCmd;
    Shell.ShellCommandExecutor shell = new Shell.ShellCommandExecutor(
        command, new File("."), environment);
    StdInputThread exec = new StdInputThread(shell, script);
    exec.start();
    shell.execute();
    exec.checkException();
    shell.getOutput();
  } catch (Throwable t) {
    throw new YarnRuntimeException("Failed to aggregate logs.", t);
  }
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 45, Source file: HPCCommandExecutor.java

Example 8: submitAndGetPBSJobId

import org.apache.hadoop.util.Shell; //import the package/class this method depends on
@SuppressWarnings("deprecation")
public static int submitAndGetPBSJobId(Configuration conf, int priority,
    int memory, int cpuCores, String hostName, int port) {
  String qsubCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_PBS_QSUB,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_PBS_QSUB);
  try {
    Map<String, String> launchEnv = new HashMap<String, String>();

    // Hadoop jars
    for (String c : conf.getStrings(
        YarnConfiguration.YARN_APPLICATION_CLASSPATH,
        YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
      Apps.addToEnvironment(launchEnv, Environment.CLASSPATH.name(), c.trim());
    }

    Map<String, String> environment = setUpEnvironment(launchEnv);
    
    // Specify memory and/or cpu limits for qsub. The checks must be chained
    // with else-if, otherwise a later branch would silently overwrite the
    // combined "select" form when both limits are set.
    String[] command = { qsubCmd };
    if (cpuCores > 0 && memory > 0) {
      command = new String[] { qsubCmd, "-l",
          "select=1:mem=" + memory + "mb:ncpus=" + cpuCores };
    } else if (memory > 0) {
      command = new String[] { qsubCmd, "-l", "mem=" + memory + "mb" };
    } else if (cpuCores > 0) {
      command = new String[] { qsubCmd, "-l", "ncpus=" + cpuCores };
    }
    
    StringBuilder scriptBuilder = new StringBuilder();
    scriptBuilder.append("#!").append(System.getenv("SHELL")).append("\n");
    for (Entry<String, String> envPair : environment.entrySet()) {
      scriptBuilder.append("export ").append(envPair.getKey()).append("=")
          .append(envPair.getValue()).append("\n");
    }
    scriptBuilder.append(System.getenv("JAVA_HOME")).append(
        "/bin/java org.apache.hadoop.yarn.hpc.pbs.util.ContainerLauncher $PBS_JOBID "
            + hostName + " " + port + " " + priority + " " + memory + " "
            + cpuCores);
    LOG.debug("Script for creating PBS Job : \n" + scriptBuilder.toString());
    Shell.ShellCommandExecutor shell = new Shell.ShellCommandExecutor(
        command, new File("."), environment);
    StdInputThread exec = new StdInputThread(shell, scriptBuilder.toString());
    exec.start();
    shell.execute();
    exec.checkException();
    String result = shell.getOutput();
    LOG.debug("Created PBS Job with ID : " + result);
    Matcher matcher = Pattern.compile("(\\d+)").matcher(result);
    if (!matcher.find())
      throw new NumberFormatException("Invalid output for: " + result);
    int jobId = Integer.parseInt(matcher.group(1));
    return jobId;
  } catch (Throwable t) {
    LOG.error("Failed to allocate a container.", t);
    throw new Error("Failed to allocate a container.", t);
  }
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 61, Source file: PBSCommandExecutor.java


Note: The org.apache.hadoop.util.Shell.ShellCommandExecutor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.