

Java Shell.execCommand Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.Shell.execCommand. If you are wondering what Shell.execCommand does, how to use it, or where to find usage examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.util.Shell.


The following presents 15 code examples of Shell.execCommand, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
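
Before diving into the examples, here is a minimal, self-contained sketch of the method's basic contract, assuming a Unix-like environment (the ExecCommandSketch class name, the echo/printenv/false commands, and the 5-second timeout are illustrative choices, not taken from the examples below): Shell.execCommand runs an external command, returns its standard output as a String, and throws Shell.ExitCodeException, a subclass of IOException, when the command exits with a non-zero status.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;

public class ExecCommandSketch {
  public static void main(String[] args) throws IOException {
    // Simplest form: varargs command, stdout is returned as a String.
    String out = Shell.execCommand("echo", "hello");
    System.out.println(out.trim());

    // Form with an environment map and a timeout in milliseconds (0 means no timeout).
    Map<String, String> env = new HashMap<String, String>();
    env.put("GREETING", "hi");
    String out2 = Shell.execCommand(env, new String[] {"printenv", "GREETING"}, 5000L);
    System.out.println(out2.trim());

    // A failing command surfaces as ExitCodeException, which carries the exit status.
    try {
      Shell.execCommand("false");
    } catch (ExitCodeException e) {
      System.out.println("exit code: " + e.getExitCode());
    }
  }
}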

Example 1: stashOriginalFilePermissions

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
@Override
protected void stashOriginalFilePermissions() throws IOException {
  // save off permissions in case we need to
  // rewrite the keystore in flush()
  if (!Shell.WINDOWS) {
    Path path = Paths.get(file.getCanonicalPath());
    permissions = Files.getPosixFilePermissions(path);
  } else {
    // On Windows, the JDK does not support the POSIX file permission APIs.
    // Instead, we can do a winutils call and translate.
    String[] cmd = Shell.getGetPermissionCommand();
    String[] args = new String[cmd.length + 1];
    System.arraycopy(cmd, 0, args, 0, cmd.length);
    args[cmd.length] = file.getCanonicalPath();
    String out = Shell.execCommand(args);
    StringTokenizer t = new StringTokenizer(out, Shell.TOKEN_SEPARATOR_REGEX);
    // The winutils output consists of 10 characters because of the leading
    // directory indicator, i.e. "drwx------".  The JDK parsing method expects
    // a 9-character string, so remove the leading character.
    String permString = t.nextToken().substring(1);
    permissions = PosixFilePermissions.fromString(permString);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 24, Source file: LocalJavaKeyStoreProvider.java

Example 2: test0

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * The second dir exists & is RW
 * @throws Exception
 */
@Test (timeout = 30000)
public void test0() throws Exception {
  if (isWindows) return;
  String dir0 = buildBufferDir(ROOT, 0);
  String dir1 = buildBufferDir(ROOT, 1);
  try {
    conf.set(CONTEXT, dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();
    validateTempDirCreation(dir1);
    validateTempDirCreation(dir1);
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: TestLocalDirAllocator.java

Example 3: _checkDirs

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
private void _checkDirs(boolean isDir, String perm, boolean success)
    throws Throwable {
  File localDir = File.createTempFile("test", "tmp");
  if (isDir) {
    localDir.delete();
    localDir.mkdir();
  }
  Shell.execCommand(Shell.getSetPermissionCommand(perm, false,
                                                  localDir.getAbsolutePath()));
  try {
    DiskChecker.checkDir(localDir);
    assertTrue("checkDir success", success);
  } catch (DiskErrorException e) {
    e.printStackTrace();
    assertFalse("checkDir success", success);
  }
  localDir.delete();
  System.out.println("checkDir success: " + success);

}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 21, Source file: TestDiskChecker.java

Example 4: getClusterMetrics

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
@Override
public YarnClusterMetrics getClusterMetrics() throws IOException {
  int noOfNodes = 0;
  String pbsNodesCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_PBS_PBSNODES,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_PBS_PBSNODES);
  String result = Shell.execCommand(pbsNodesCmd, "-a");
  String[] resultLines = result.split("\\n");
  for (String line : resultLines) {
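    // pbsnodes -a prints each node name at column 0 followed by indented
    // attribute lines, so every line that does not match the leading-whitespace
    // pattern below is counted as a node.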
    Matcher matcher = Pattern.compile("(\\s+.+)").matcher(line);
    if (!matcher.find()) {
      noOfNodes++;
    }
  }
  return YarnClusterMetrics.newInstance(noOfNodes);
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 17, Source file: PBSApplicationClient.java

Example 5: isWindowsSymlinkedDirectory

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
/**
 * Used on Windows to determine if the specified file is a symlink that
 * targets a directory.  On most platforms, these checks can be done using
 * commons-io.  On Windows, the commons-io implementation is unreliable and
 * always returns false.  Instead, this method checks the output of the dir
 * command.  After migrating to Java 7, this method can be removed in favor
 * of the new method java.nio.file.Files.isSymbolicLink, which is expected to
 * work cross-platform.
 * 
 * @param file File to check
 * @return boolean true if the file is a symlink that targets a directory
 * @throws IOException thrown for any I/O error
 */
private static boolean isWindowsSymlinkedDirectory(File file)
    throws IOException {
  String dirOut = Shell.execCommand("cmd", "/c", "dir",
    file.getAbsoluteFile().getParent());
  StringReader sr = new StringReader(dirOut);
  BufferedReader br = new BufferedReader(sr);
  try {
    // Walk the dir listing; a <SYMLINKD> entry naming this file indicates a
    // symlinked directory.
    String line = br.readLine();
    while (line != null) {
      if (line.contains(file.getName()) && line.contains("<SYMLINKD>")) {
        return true;
      }
      line = br.readLine();
    }
    return false;
  } finally {
    IOUtils.closeStream(br);
    IOUtils.closeStream(sr);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 34, Source file: TestMRJobs.java

Example 6: getStorageAccountKey

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
@Override
public String getStorageAccountKey(String accountName, Configuration conf)
    throws KeyProviderException {
  String envelope = super.getStorageAccountKey(accountName, conf);

  final String command = conf.get(KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT);
  if (command == null) {
    throw new KeyProviderException(
        "Script path is not specified via fs.azure.shellkeyprovider.script");
  }

  String[] cmd = command.split(" ");
  String[] cmdWithEnvelope = Arrays.copyOf(cmd, cmd.length + 1);
  cmdWithEnvelope[cmdWithEnvelope.length - 1] = envelope;

  String decryptedKey = null;
  try {
    decryptedKey = Shell.execCommand(cmdWithEnvelope);
  } catch (IOException ex) {
    throw new KeyProviderException(ex);
  }

  // trim any whitespace
  return decryptedKey.trim();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: ShellDecryptionKeyProvider.java

Example 7: testNoSideEffects

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
/** Test that no side-effect files are left over. After creating a temp
 * file, remove both the temp file and its parent. Verify that
 * no files or directories are left over as can happen when File objects
 * are mistakenly created from fully qualified path strings.
 * @throws IOException
 */
@Test (timeout = 30000)
public void testNoSideEffects() throws IOException {
  assumeTrue(!isWindows);
  String dir = buildBufferDir(ROOT, 0);
  try {
    conf.set(CONTEXT, dir);
    File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
    assertTrue(result.delete());
    assertTrue(result.getParentFile().delete());
    assertFalse(new File(dir).exists());
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: TestLocalDirAllocator.java

Example 8: execShellGetUserForNetgroup

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
/**
 * Calls shell to get users for a netgroup by calling getent
 * netgroup; this is a low-level function that just returns the raw
 * output string, which the caller is expected to parse.
 *
 * @param netgroup get users for this netgroup
 * @return string of users for a given netgroup in getent netgroups format
 */
protected String execShellGetUserForNetgroup(final String netgroup)
    throws IOException {
  String result = "";
  try {
    // shell command does not expect '@' at the beginning of the group name
    result = Shell.execCommand(
      Shell.getUsersForNetgroupCommand(netgroup.substring(1)));
  } catch (ExitCodeException e) {
    // if we didn't get the group - just return empty list;
    LOG.warn("error getting users for netgroup " + netgroup, e);
  }
  return result;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 22, Source file: ShellBasedUnixGroupsNetgroupMapping.java

Example 9: testLocalPathForWriteDirCreation

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * The second dir exists & is RW
 * getLocalPathForWrite with checkAccess set to false should create a parent
 * directory. With checkAccess true, the directory should not be created.
 * @throws Exception
 */
@Test (timeout = 30000)
public void testLocalPathForWriteDirCreation() throws IOException {
  String dir0 = buildBufferDir(ROOT, 0);
  String dir1 = buildBufferDir(ROOT, 1);
  try {
    conf.set(CONTEXT, dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();
    Path p1 =
      dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
    assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());

    Path p2 =
      dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
          false);
    try {
      localFs.getFileStatus(p2.getParent());
    } catch (Exception e) {
      assertEquals(e.getClass(), FileNotFoundException.class);
    }
  } finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
                                                    BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source file: TestLocalDirAllocator.java

Example 10: setJobState

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
public static void setJobState(int jobid, String state, Configuration conf)
    throws IOException {
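  // Record the application state in the SLURM job's Comment field via "scontrol update".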
  String scontrolCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_SLURM_SCONTROL,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_SLURM_SCONTROL);
  Shell
      .execCommand(scontrolCmd, "update", "job=" + jobid, "Comment=" + state);
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 9, Source file: HPCCommandExecutor.java

Example 11: finishApplicationMaster

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
@Override
public ApplicationMasterFinishResponse finishApplicationMaster(
    ApplicationMasterFinishRequest request) throws IOException {
  // Close all the PBS jobs
  Collection<ContainerResponse> responses = ContainerResponses.getResponses();
  for (ContainerResponse response : responses) {
    // response.writeShutDownCommand();
    response.close();
  }
  socketWrapper.close();

  // Kill all the remaining PBS jobs
  if (ContainerResponses.createdPbsJobIds.size() > 0) {
    String qdelCmd = conf.get(
        HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_PBS_QDEL,
        HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_PBS_QDEL);
    LOG.info("Killing PBS Jobs " + ContainerResponses.createdPbsJobIds
        + " during shutdown");
    String[] cmd = new String[ContainerResponses.createdPbsJobIds.size() + 3];
    int i = 0;
    cmd[i++] = qdelCmd;
    cmd[i++] = "-W";
    cmd[i++] = "force";
    for (Integer jobId : ContainerResponses.createdPbsJobIds) {
      cmd[i++] = String.valueOf(jobId);
    }

    try {
      Shell.execCommand(null, cmd, 0L);
    } catch (Throwable e) {
      // Ignore exceptions while deleting PBS jobs during shutdown
      LOG.info("Error while killing PBS Jobs. " + e.getMessage());
    }
  }
  return new ApplicationMasterFinishResponse(true);
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 36, Source file: PBSApplicationMaster.java

Example 12: getClusterMetrics

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
@Override
public YarnClusterMetrics getClusterMetrics() throws IOException {
  String sinfoCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_SLURM_SINFO,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_SLURM_SINFO);
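  // "sinfo -h -o %D" suppresses the header (-h) and prints only the node count (%D),
  // so the trimmed output is expected to parse as a single integer.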
  String result = Shell.execCommand(sinfoCmd, "-h", "-o %D");
  int parseInt;
  try {
    parseInt = Integer.parseInt(result.trim());
  } catch (Throwable e) {
    throw new IOException("Failed to get cluster metrics", e);
  }
  return YarnClusterMetrics.newInstance(parseInt);
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 15, Source file: SlurmApplicationClient.java

Example 13: forceKillApplication

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
@Override
public boolean forceKillApplication(ApplicationId applicationId)
    throws IOException {
  int jobid = applicationId.getId();
  String scancelCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_SLURM_SCANCEL,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_SLURM_SCANCEL);
  Shell.execCommand(scancelCmd, String.valueOf(jobid));
  return true;
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 11, Source file: SlurmApplicationClient.java

Example 14: submitApplication

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
@Override
public void submitApplication(ApplicationSubmissionContext context)
    throws IOException {
  int waitingTime = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_MAX_WAIT_MS);
  int noOfTimes = conf.getInt(
      HPCConfiguration.YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_CLIENT_RS_RETRIES_MAX);
  ApplicationId applicationId = context.getApplicationId();

  String applicationName = context.getApplicationName();
  SocketWrapper socket = SocketCache.getSocket(applicationId.getId());
  if (socket.waitForReady(waitingTime * noOfTimes)) {
    PBSCommandExecutor.launchContainer(
        context.getAMContainerSpec(),
        ContainerId.newContainerId(
            ApplicationAttemptId.newInstance(applicationId, 1), 1l)
            .toString(), applicationName, conf, applicationId.getId(), true,
        socket.getContainerHostName());
  }

  // Set the Job Name
  int jobid = applicationId.getId();
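  // PBS restricts job-name length (historically around 15 characters), so the
  // name is shortened before it is applied with "qalter -N".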
  String pbsJobName = applicationName.replaceAll("\\s", "");
  if (pbsJobName.length() > 13) {
    pbsJobName = pbsJobName.substring(0, 12);
  }

  String qalterCmd = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_COMMAND_PBS_QALTER,
      HPCConfiguration.DEFAULT_YARN_APPLICATION_HPC_COMMAND_PBS_QALTER);
  Shell
      .execCommand(qalterCmd, String.valueOf(jobid), "-N", "Y#" + pbsJobName);
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines of code: 36, Source file: PBSApplicationClient.java

Example 15: getGroups

import org.apache.hadoop.util.Shell; // import the package/class this method depends on
static List<String> getGroups() throws IOException {
  List<String> a = new ArrayList<String>();
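  // Shell.getGroupsCommand() runs the platform's "groups" command; the output is
  // a whitespace-separated list of group names, tokenized below.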
  String s = Shell.execCommand(Shell.getGroupsCommand());
  for(StringTokenizer t = new StringTokenizer(s); t.hasMoreTokens(); ) {
    a.add(t.nextToken());
  }
  return a;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 9, Source file: FileContextPermissionBase.java


Note: The org.apache.hadoop.util.Shell.execCommand method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.