

Java FileUtil.canExecute Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileUtil.canExecute. If you are wondering what FileUtil.canExecute does, how to call it, or what real uses look like, the curated method examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.FileUtil.


The following six code examples of the FileUtil.canExecute method are shown below, ordered by popularity by default.
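Before the examples, here is a minimal self-contained sketch of the pattern they all share: test a file with FileUtil.canExecute and, if needed, grant execute permission with FileUtil.setExecutable. The script path is hypothetical and used purely for illustration.

import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class CanExecuteDemo {
  public static void main(String[] args) {
    // Hypothetical path; any local file will do for the demo.
    File script = new File("/tmp/my-script.sh");

    // FileUtil.canExecute is Hadoop's platform-independent
    // alternative to File#canExecute.
    if (!FileUtil.canExecute(script)) {
      // Grant owner execute permission, mirroring the examples below.
      FileUtil.setExecutable(script, true);
    }
    System.out.println("Executable: " + FileUtil.canExecute(script));
  }
}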

Example 1: checkAccessByFileMethods

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Checks that the current running process can read, write, and execute the
 * given directory by using methods of the File object.
 * 
 * @param dir File to check
 * @throws DiskErrorException if dir is not readable, not writable, or not
 *   executable
 */
private static void checkAccessByFileMethods(File dir)
    throws DiskErrorException {
  if (!FileUtil.canRead(dir)) {
    throw new DiskErrorException("Directory is not readable: "
                                 + dir.toString());
  }

  if (!FileUtil.canWrite(dir)) {
    throw new DiskErrorException("Directory is not writable: "
                                 + dir.toString());
  }

  if (!FileUtil.canExecute(dir)) {
    throw new DiskErrorException("Directory is not executable: "
                                 + dir.toString());
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 26, Source: DiskChecker.java
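Since checkAccessByFileMethods is private, callers reach it indirectly. A hedged usage sketch, assuming the public DiskChecker.checkDir(File) entry point from hadoop-common:

import java.io.File;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public class DiskCheckDemo {
  public static void main(String[] args) {
    try {
      // checkDir creates the directory if needed and verifies it is
      // readable, writable, and executable (via the method above).
      DiskChecker.checkDir(new File("/tmp"));
      System.out.println("/tmp passed the disk check");
    } catch (DiskErrorException e) {
      System.err.println("Disk check failed: " + e.getMessage());
    }
  }
}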

Example 2: setupMockExecutor

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
private void setupMockExecutor(String executorPath, Configuration conf)
    throws IOException {
  // we'll always use the tmpMockExecutor, since
  // PrivilegedOperationExecutor can only be initialized once.

  Files.copy(Paths.get(executorPath), Paths.get(tmpMockExecutor),
      REPLACE_EXISTING);

  File executor = new File(tmpMockExecutor);

  if (!FileUtil.canExecute(executor)) {
    FileUtil.setExecutable(executor, true);
  }
  String executorAbsolutePath = executor.getAbsolutePath();
  conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,
      executorAbsolutePath);
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestLinuxContainerExecutorWithMocks.java

Example 3: getConfigurationWithMockContainerExecutor

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
private Configuration getConfigurationWithMockContainerExecutor() {
  File f = new File("./src/test/resources/mock-container-executor");
  if (!FileUtil.canExecute(f)) {
    FileUtil.setExecutable(f, true);
  }
  String executorPath = f.getAbsolutePath();
  conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath);
  return conf;
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: TestDockerContainerRuntime.java

Example 4: setup

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
@Before
public void setup() {
  assumeTrue(Shell.LINUX);
  File f = new File("./src/test/resources/mock-container-executor");
  if (!FileUtil.canExecute(f)) {
    FileUtil.setExecutable(f, true);
  }
  String executorPath = f.getAbsolutePath();
  Configuration conf = new Configuration();
  yarnImage = "yarnImage";
  long time = System.currentTimeMillis();
  conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath);
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, "/tmp/nm-local-dir" + time);
  conf.set(YarnConfiguration.NM_LOG_DIRS, "/tmp/userlogs" + time);
  conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
  conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME, DOCKER_LAUNCH_COMMAND);
  dockerContainerExecutor = new DockerContainerExecutor();
  dirsHandler = new LocalDirsHandlerService();
  dirsHandler.init(conf);
  dockerContainerExecutor.setConf(conf);
  lfs = null;
  try {
    lfs = FileContext.getLocalFSFileContext();
    workDir = new Path("/tmp/temp-"+ System.currentTimeMillis());
    lfs.mkdir(workDir, FsPermission.getDirDefault(), true);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }

}
 
Developer ID: naver, Project: hadoop, Lines: 31, Source: TestDockerContainerExecutorWithMocks.java

Example 5: Application

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf, 
            RecordReader<FloatWritable, NullWritable> recordReader, 
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put(Submitter.PORT, 
          Integer.toString(serverSocket.getLocalPort()));
  
  //Add token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as shared secret key between this application and
  // child pipes process
  byte[] password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
 
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get(Submitter.INTERPRETOR);
  if (interpretor != null) {
    cmd.add(interpretor);
  }
  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!FileUtil.canExecute(new File(executable))) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  // we are starting map/reduce task of the pipes job. this is not a cleanup
  // attempt. 
  TaskAttemptID taskid = 
    TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
                                   false);
  
  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader, 
      digestExpected);
  K2 outputKey = (K2)
    ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2) 
    ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, 
                                outputKey, outputValue, conf);
  
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 81, Source: Application.java

Example 6: shouldRun

import org.apache.hadoop.fs.FileUtil; // import the package/class this method depends on
/**
 * Method used to determine whether the node health monitoring service
 * should be started. Returns true if the following conditions are met:
 * 
 * <ol>
 * <li>Path to the node health check script is not empty</li>
 * <li>The node health check script file exists</li>
 * <li>The node health check script file is executable</li>
 * </ol>
 * 
 * @return true if the node health monitoring service can be started.
 */
public static boolean shouldRun(String healthScript) {
  if (healthScript == null || healthScript.trim().isEmpty()) {
    return false;
  }
  File f = new File(healthScript);
  return f.exists() && FileUtil.canExecute(f);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 19, Source: NodeHealthScriptRunner.java
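A brief usage sketch for the guard above, assuming the hadoop-common location org.apache.hadoop.util.NodeHealthScriptRunner; the script path is hypothetical:

import org.apache.hadoop.util.NodeHealthScriptRunner;

public class HealthScriptCheck {
  public static void main(String[] args) {
    // Hypothetical path; point this at a real health-check script.
    String healthScript = "/etc/hadoop/health-check.sh";

    if (NodeHealthScriptRunner.shouldRun(healthScript)) {
      System.out.println("Node health monitoring can start");
    } else {
      System.out.println("Script is missing, empty, or not executable");
    }
  }
}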


Note: The org.apache.hadoop.fs.FileUtil.canExecute method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.