Java FileUtil.chmod Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileUtil.chmod. If you are wondering how FileUtil.chmod is used in Java, what a concrete call looks like, or where to find examples of it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileUtil.


The following presents 10 code examples of the FileUtil.chmod method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
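Before the examples, here is a minimal sketch of how FileUtil.chmod is typically invoked, assuming hadoop-common is on the classpath. The class name, scratch paths, and permission values below are illustrative assumptions, not taken from any of the examples; what the examples themselves confirm is that the method accepts a path string plus a chmod-style permission string (numeric such as "700" or symbolic such as "+w"), optionally a boolean for recursive application, and returns the exit code of the operation (0 on success). Like several of the tests below, it is aimed at POSIX-style file systems rather than Windows.

import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class FileUtilChmodSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical scratch directory and file, created only for this sketch.
    File dir = new File(System.getProperty("java.io.tmpdir"), "chmod-demo");
    File file = new File(dir, "demo.txt");
    dir.mkdirs();
    file.createNewFile();

    // Two-argument form: a numeric permission string applied to a single path.
    // A return value of 0 indicates success, mirroring the assertions below.
    int rc = FileUtil.chmod(file.getAbsolutePath(), "700");
    System.out.println("chmod 700 returned " + rc);

    // Symbolic permission strings are also accepted (e.g. "u+x", "-w", "+w").
    FileUtil.chmod(file.getAbsolutePath(), "u+x");

    // Three-argument form: the boolean applies the change recursively.
    FileUtil.chmod(dir.getAbsolutePath(), "755", true);
  }
}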

Example 1: getFileCommand

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
private File getFileCommand(String clazz) throws Exception {
  String classpath = System.getProperty("java.class.path");
  File fCommand = new File(workSpace + File.separator + "cache.sh");
  fCommand.deleteOnExit();
  if (!fCommand.getParentFile().exists()) {
    fCommand.getParentFile().mkdirs();
  }
  fCommand.createNewFile();
  OutputStream os = new FileOutputStream(fCommand);
  os.write("#!/bin/sh \n".getBytes());
  if (clazz == null) {
    os.write(("ls ").getBytes());
  } else {
    os.write(("java -cp " + classpath + " " + clazz).getBytes());
  }
  os.flush();
  os.close();
  FileUtil.chmod(fCommand.getAbsolutePath(), "700");
  return fCommand;
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source file: TestPipeApplication.java

Example 2: testFinalizeErrorReportedToNNStorage

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Test(expected=IllegalStateException.class)
public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException {
  File f = new File(TestEditLog.TEST_DIR + "/filejournaltestError");
  // abort after 10th roll
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
                                 10, new AbortSpec(10, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();

  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  String sdRootPath = sd.getRoot().getAbsolutePath();
  FileUtil.chmod(sdRootPath, "-w", true);
  try {
    jm.finalizeLogSegment(0, 1);
  } finally {
    FileUtil.chmod(sdRootPath, "+w", true);
    assertTrue(storage.getRemovedStorageDirs().contains(sd));
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 19, Source file: TestFileJournalManager.java

Example 3: testVolumeConfig

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Tests a given number of tolerated volumes and failed volumes.
 */
private void testVolumeConfig(int volumesTolerated, int volumesFailed,
    boolean expectedBPServiceState, boolean manageDfsDirs)
    throws IOException, InterruptedException {
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  final int dnIndex = 0;
  // Fail the current directory since invalid storage directory perms
  // get fixed up automatically on datanode startup.
  File[] dirs = {
      new File(cluster.getInstanceStorageDir(dnIndex, 0), "current"),
      new File(cluster.getInstanceStorageDir(dnIndex, 1), "current") };

  try {
    for (int i = 0; i < volumesFailed; i++) {
      prepareDirToFail(dirs[i]);
    }
    restartDatanodes(volumesTolerated, manageDfsDirs);
    assertEquals(expectedBPServiceState, cluster.getDataNodes().get(0)
        .isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
  } finally {
    for (File dir : dirs) {
      FileUtil.chmod(dir.toString(), "755");
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 28, Source file: TestDataNodeVolumeFailureToleration.java

Example 4: testFailedVolumeOnStartupIsCounted

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Test that a volume that is considered failed on startup is seen as
 *  a failed volume by the NN.
 */
@Test
public void testFailedVolumeOnStartupIsCounted() throws Exception {
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
  ).getDatanodeManager();
  long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
  File dir = new File(cluster.getInstanceStorageDir(0, 0), "current");

  try {
    prepareDirToFail(dir);
    restartDatanodes(1, false);
    // The cluster is up..
    assertEquals(true, cluster.getDataNodes().get(0)
        .isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
    // but there has been a single volume failure
    DFSTestUtil.waitForDatanodeStatus(dm, 1, 0, 1,
        origCapacity / 2, WAIT_FOR_HEARTBEATS);
  } finally {
    FileUtil.chmod(dir.toString(), "755");
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source file: TestDataNodeVolumeFailureToleration.java

Example 5: cleanTokenPasswordFile

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
private File[] cleanTokenPasswordFile() throws Exception {
  File[] result = new File[2];
  result[0] = new File("./jobTokenPassword");
  if (result[0].exists()) {
    FileUtil.chmod(result[0].getAbsolutePath(), "700");
    assertTrue(result[0].delete());
  }
  result[1] = new File("./.jobTokenPassword.crc");
  if (result[1].exists()) {
    FileUtil.chmod(result[1].getAbsolutePath(), "700");
    result[1].delete();
  }
  return result;
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source file: TestPipeApplication.java

Example 6: testToleratesSomeUnwritableVolumes

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Test
public void testToleratesSomeUnwritableVolumes() throws Throwable {
  FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
  String[] vols = new String[]{TEST_ROOT_DIR + "/0",
      TEST_ROOT_DIR + "/1"};
  
  assertTrue(new File(vols[0]).mkdirs());
  assertEquals(0, FileUtil.chmod(vols[0], "400")); // read only
  try {
    new MRAsyncDiskService(localFileSystem, vols);
  } finally {
    FileUtil.chmod(vols[0], "755"); // make writable again
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source file: TestMRAsyncDiskService.java

Example 7: setup

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Before
public void setup() throws Exception {
  files = FileContext.getLocalFSFileContext();
  Path workSpacePath = new Path(workSpace.getAbsolutePath());
  files.mkdir(workSpacePath, null, true);
  FileUtil.chmod(workSpace.getAbsolutePath(), "777");
  File localDir = new File(workSpace.getAbsoluteFile(), "localDir");
  files.mkdir(new Path(localDir.getAbsolutePath()), new FsPermission("777"),
    false);
  File logDir = new File(workSpace.getAbsoluteFile(), "logDir");
  files.mkdir(new Path(logDir.getAbsolutePath()), new FsPermission("777"),
    false);
  String exec_path = System.getProperty("container-executor.path");
  if (exec_path != null && !exec_path.isEmpty()) {
    conf = new Configuration(false);
    conf.setClass("fs.AbstractFileSystem.file.impl",
      org.apache.hadoop.fs.local.LocalFs.class,
      org.apache.hadoop.fs.AbstractFileSystem.class);

    appSubmitter = System.getProperty("application.submitter");
    if (appSubmitter == null || appSubmitter.isEmpty()) {
      appSubmitter = "nobody";
    }

    conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, appSubmitter);
    LOG.info("Setting " + YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH
        + "=" + exec_path);
    conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, exec_path);
    exec = new LinuxContainerExecutor();
    exec.setConf(conf);
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
    dirsHandler = new LocalDirsHandlerService();
    dirsHandler.init(conf);
    List<String> localDirs = dirsHandler.getLocalDirs();
    for (String dir : localDirs) {
      Path userDir = new Path(dir, ContainerLocalizer.USERCACHE);
      files.mkdir(userDir, new FsPermission("777"), false);
      // $local/filecache
      Path fileDir = new Path(dir, ContainerLocalizer.FILECACHE);
      files.mkdir(fileDir, new FsPermission("777"), false);
    }
  }

}
 
Developer ID: naver, Project: hadoop, Lines: 46, Source file: TestLinuxContainerExecutor.java

Example 8: Application

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf, 
            RecordReader<FloatWritable, NullWritable> recordReader, 
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put(Submitter.PORT, 
          Integer.toString(serverSocket.getLocalPort()));
  
  //Add token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as shared secret key between this application and
  // child pipes process
  byte[]  password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
 
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get(Submitter.INTERPRETOR);
  if (interpretor != null) {
    cmd.add(interpretor);
  }
  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!FileUtil.canExecute(new File(executable))) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  // we are starting map/reduce task of the pipes job. this is not a cleanup
  // attempt. 
  TaskAttemptID taskid = 
    TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
                                   false);
  
  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader, 
      digestExpected);
  K2 outputKey = (K2)
    ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2) 
    ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, 
                                outputKey, outputValue, conf);
  
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 81, Source file: Application.java

Example 9: testCheckpointWithSeparateDirsAfterNameFails

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }
    
    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 73, Source file: TestCheckpoint.java

Example 10: testValidVolumesAtStartup

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, ie the DN tolerates a failed-to-use scenario during
 * its start-up.
 */
@Test
public void testValidVolumesAtStartup() throws Exception {
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));

  // Make sure no DNs are running.
  cluster.shutdownDataNodes();

  // Bring up a datanode with two default data dirs, but with one bad one.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  // We use subdirectories 0 and 1 in order to have only a single
  // data dir's parent inject a failure.
  File tld = new File(MiniDFSCluster.getBaseDirectory(), "badData");
  File dataDir1 = new File(tld, "data1");
  File dataDir1Actual = new File(dataDir1, "1");
  dataDir1Actual.mkdirs();
  // Force an IOE to occur on one of the dfs.data.dir.
  File dataDir2 = new File(tld, "data2");
  prepareDirToFail(dataDir2);
  File dataDir2Actual = new File(dataDir2, "2");

  // Start one DN, with manually managed DN dir
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
  cluster.startDataNodes(conf, 1, false, null, null);
  cluster.waitActive();

  try {
    assertTrue("The DN should have started up fine.",
        cluster.isDataNodeUp());
    DataNode dn = cluster.getDataNodes().get(0);
    String si = DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
    assertTrue("The DN should have started with this directory",
        si.contains(dataDir1Actual.getPath()));
    assertFalse("The DN shouldn't have a bad directory.",
        si.contains(dataDir2Actual.getPath()));
  } finally {
    cluster.shutdownDataNodes();
    FileUtil.chmod(dataDir2.toString(), "755");
  }

}
 
Developer ID: naver, Project: hadoop, Lines: 48, Source file: TestDataNodeVolumeFailureToleration.java


Note: The org.apache.hadoop.fs.FileUtil.chmod method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.