

Java FileUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.FileUtil. If you are wondering what the FileUtil class does, how to use it, or what it looks like in real code, the curated examples below may help.


The FileUtil class belongs to the org.apache.hadoop.fs package. Fifteen code examples of the class are shown below, sorted by popularity by default.
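Before the individual examples, here is a minimal, self-contained sketch of the FileUtil calls that recur most often below (setExecutable, copy, and fullyDelete). The FileUtilDemo class and the /tmp paths are illustrative assumptions, not taken from any of the projects cited later.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class FileUtilDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem localFs = FileSystem.getLocal(conf);

    // Create a scratch directory with one file in it (illustrative paths).
    File scratch = new File("/tmp/fileutil-demo");
    File script = new File(scratch, "run.sh");
    scratch.mkdirs();
    script.createNewFile();

    // Mark the file executable (as in the shell timeout test below;
    // several examples guard against this not working on Windows).
    FileUtil.setExecutable(script, true);

    // Copy a local file onto a Hadoop FileSystem; 'false' means the
    // source is left in place rather than moved.
    FileUtil.copy(script, localFs, new Path("/tmp/fileutil-demo-copy"), false, conf);

    // Recursively delete a directory and everything under it; returns
    // false (rather than throwing) if something could not be deleted.
    if (!FileUtil.fullyDelete(scratch)) {
      throw new IOException("Unable to delete: " + scratch);
    }
  }
}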

Example 1: copyNameDirs

import org.apache.hadoop.fs.FileUtil; // import the required package/class
public static void copyNameDirs(Collection<URI> srcDirs, Collection<URI> dstDirs,
    Configuration dstConf) throws IOException {
  URI srcDir = Lists.newArrayList(srcDirs).get(0);
  FileSystem dstFS = FileSystem.getLocal(dstConf).getRaw();
  for (URI dstDir : dstDirs) {
    Preconditions.checkArgument(!dstDir.equals(srcDir),
        "src and dst are the same: " + dstDir);
    File dstDirF = new File(dstDir);
    if (dstDirF.exists()) {
      if (!FileUtil.fullyDelete(dstDirF)) {
        throw new IOException("Unable to delete: " + dstDirF);
      }
    }
    LOG.info("Copying namedir from primary node dir "
        + srcDir + " to " + dstDir);
    FileUtil.copy(
        new File(srcDir),
        dstFS, new Path(dstDir), false, dstConf);
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: MiniDFSCluster.java

Example 2: shoudBeValidMapReduceWithPartitionerEvaluation

import org.apache.hadoop.fs.FileUtil; // import the required package/class
@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceWithPartitionerEvaluation()
    throws IOException {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(2);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);

    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestTableMapReduceUtil.java

Example 3: testShellCommandTimeout

import org.apache.hadoop.fs.FileUtil; // import the required package/class
public void testShellCommandTimeout() throws Throwable {
  if(Shell.WINDOWS) {
    // setExecutable does not work on Windows
    return;
  }
  String rootDir = new File(System.getProperty(
      "test.build.data", "/tmp")).getAbsolutePath();
  File shellFile = new File(rootDir, "timeout.sh");
  String timeoutCommand = "sleep 4; echo \"hello\"";
  PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile));
  writer.println(timeoutCommand);
  writer.close();
  FileUtil.setExecutable(shellFile, true);
  Shell.ShellCommandExecutor shexc =
      new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},
                                     null, null, 100);
  try {
    shexc.execute();
  } catch (Exception e) {
    // An exception is thrown when the command times out; ignore it here.
  }
  shellFile.delete();
  assertTrue("Script did not time out", shexc.isTimedOut());
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestShell.java

Example 4: createStandaloneEditLog

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Return a standalone instance of FSEditLog that will log into the given
 * log directory. The returned instance is not yet opened.
 */
public static FSEditLog createStandaloneEditLog(File logDir)
    throws IOException {
  assertTrue(logDir.mkdirs() || logDir.exists());
  if (!FileUtil.fullyDeleteContents(logDir)) {
    throw new IOException("Unable to delete contents of " + logDir);
  }
  NNStorage storage = Mockito.mock(NNStorage.class);
  StorageDirectory sd 
    = FSImageTestUtil.mockStorageDirectory(logDir, NameNodeDirType.EDITS);
  List<StorageDirectory> sds = Lists.newArrayList(sd);
  Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
  Mockito.doReturn(sd).when(storage)
    .getStorageDirectory(Matchers.<URI>anyObject());

  FSEditLog editLog = new FSEditLog(new Configuration(), 
                       storage,
                       ImmutableList.of(logDir.toURI()));
  editLog.initJournalsForWrite();
  return editLog;
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: FSImageTestUtil.java

Example 5: tearDown

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Cleans the resources and closes the instance of datanode
 * @throws IOException if an error occurred
 */
@After
public void tearDown() throws IOException {
  if (dn != null) {
    try {
      dn.shutdown();
    } catch(Exception e) {
      LOG.error("Cannot close: ", e);
    } finally {
      File dir = new File(DATA_DIR);
      if (dir.exists())
        Assert.assertTrue(
            "Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestBlockRecovery.java

Example 6: bulkLoadStoreFile

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Bulk load: Add a specified store file to the specified family. If the
 * source file is on the same file-system as the destination store, it is
 * moved from the source location to the destination location; otherwise
 * it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath    {@link Path} to the file to import
 * @param seqNum     Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum) throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than "
        + "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return commitStoreFile(familyName, srcPath, seqNum, true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 31, Source: HRegionFileSystem.java

Example 7: copyRemoteFiles

import org.apache.hadoop.fs.FileUtil; // import the required package/class
private Path copyRemoteFiles(Path parentDir, Path originalPath,
    Configuration conf, short replication) throws IOException {
  // Check whether we even need to copy the files, i.e. whether the job
  // tracker is using the same file system. We only compare URI strings and
  // do no DNS lookups to decide whether the filesystems are the same.
  // This is not optimal, but it avoids name resolution.

  FileSystem remoteFs = null;
  remoteFs = originalPath.getFileSystem(conf);
  if (compareFs(remoteFs, jtFs)) {
    return originalPath;
  }
  // This might cause name collisions, in which case copy will throw an
  // exception. Build the new path from the original path's file name.
  Path newPath = new Path(parentDir, originalPath.getName());
  FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
  jtFs.setReplication(newPath, replication);
  return newPath;
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: JobResourceUploader.java

Example 8: symlink

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Utility method for creating a symlink and warning on errors.
 *
 * If link is null, does nothing.
 */
private void symlink(File workDir, String target, String link)
    throws IOException {
  if (link != null) {
    link = workDir.toString() + Path.SEPARATOR + link;
    File flink = new File(link);
    if (!flink.exists()) {
      LOG.info(String.format("Creating symlink: %s <- %s", target, link));
      if (0 != FileUtil.symLink(target, link)) {
        LOG.warn(String.format("Failed to create symlink: %s <- %s", target,
            link));
      } else {
        symlinksCreated.add(new File(link));
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: LocalDistributedCacheManager.java
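For reference, the FileUtil.symLink call used by the helper above can also be invoked directly. A minimal sketch, assuming illustrative /tmp paths and a POSIX platform:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class SymlinkDemo {
  public static void main(String[] args) throws IOException {
    File target = new File("/tmp/symlink-demo-target");  // illustrative path
    target.createNewFile();
    // FileUtil.symLink returns the shell exit code: 0 means success.
    int ret = FileUtil.symLink(target.getAbsolutePath(), "/tmp/symlink-demo-link");
    if (ret != 0) {
      System.err.println("Failed to create symlink, exit code " + ret);
    }
  }
}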

Example 9: testReadAndWrite

import org.apache.hadoop.fs.FileUtil; // import the required package/class
@Test(timeout=10000)
public void testReadAndWrite() throws Exception {
  File path = new File(TEST_BASE, "testReadAndWrite");
  path.mkdirs();
  SharedFileDescriptorFactory factory =
      SharedFileDescriptorFactory.create("woot_",
          new String[] { path.getAbsolutePath() });
  FileInputStream inStream =
      factory.createDescriptor("testReadAndWrite", 4096);
  FileOutputStream outStream = new FileOutputStream(inStream.getFD());
  outStream.write(101);
  inStream.getChannel().position(0);
  Assert.assertEquals(101, inStream.read());
  inStream.close();
  outStream.close();
  FileUtil.fullyDelete(path);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 18, Source: TestSharedFileDescriptorFactory.java

Example 10: testDirectoryFallbacks

import org.apache.hadoop.fs.FileUtil; // import the required package/class
@Test(timeout=60000)
public void testDirectoryFallbacks() throws Exception {
  File nonExistentPath = new File(TEST_BASE, "nonexistent");
  File permissionDeniedPath = new File("/");
  File goodPath = new File(TEST_BASE, "testDirectoryFallbacks");
  goodPath.mkdirs();
  try {
    SharedFileDescriptorFactory.create("shm_", 
        new String[] { nonExistentPath.getAbsolutePath(),
                        permissionDeniedPath.getAbsolutePath() });
    Assert.fail();
  } catch (IOException e) {
    // Expected: neither candidate directory is usable.
  }
  SharedFileDescriptorFactory factory =
      SharedFileDescriptorFactory.create("shm_", 
          new String[] { nonExistentPath.getAbsolutePath(),
                          permissionDeniedPath.getAbsolutePath(),
                          goodPath.getAbsolutePath() } );
  Assert.assertEquals(goodPath.getAbsolutePath(), factory.getPath());
  FileUtil.fullyDelete(goodPath);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 22, Source: TestSharedFileDescriptorFactory.java

Example 11: chmod

import org.apache.hadoop.fs.FileUtil; // import the required package/class
@Override
public void chmod(String path, int mode) throws IOException {
  File f = new File(path);
  FsPermission perm = FsPermission.createImmutable((short) mode);
  LinkedList<String> args = new LinkedList<String>();
  args.add("/usr/bin/setfacl");
  args.add("-m");
  args.add(
      "u::" + perm.getUserAction().SYMBOL +
      ",g::" + perm.getGroupAction().SYMBOL +
      ",o::" + perm.getOtherAction().SYMBOL);
  args.add(FileUtil.makeShellPath(f, true));
  org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));

  // Set default ACLs on directories so children can inherit them.
  if (f.isDirectory()) {
    args.add(1, "-d");
    org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));
  }
}
 
Developer: intel-hpdd, Project: lustre-connector-for-hadoop, Lines: 21, Source: LustreFsJavaImpl.java

Example 12: startBackupNode

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  
  // Start BackupNode
  String[] args = new String [] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestHDFSServerPorts.java

Example 13: testMapOnlyNoOutputInternal

import org.apache.hadoop.fs.FileUtil; // import the required package/class
private void testMapOnlyNoOutputInternal(int version) throws Exception {
  JobConf conf = new JobConf();
  //This is not set on purpose. FileOutputFormat.setOutputPath(conf, outDir);
  conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
  conf.setInt(org.apache.hadoop.mapreduce.lib.output.
      FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();    
  
  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);
  
  if(committer.needsTaskCommit(tContext)) {
    // do commit
    committer.commitTask(tContext);
  }
  committer.commitJob(jContext);

  // validate output
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestFileOutputCommitter.java

Example 14: hasSomeData

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * @return true if the storage directory should prompt the user prior
 * to formatting (i.e. if the directory appears to contain some data)
 * @throws IOException if the SD cannot be accessed due to an IO error
 */
@Override
public boolean hasSomeData() throws IOException {
  // It's all right for a dir not to exist, or to exist (properly
  // accessible) and be completely empty.
  if (!root.exists()) return false;
  
  if (!root.isDirectory()) {
    // a file where you expect a directory should not cause silent
    // formatting
    return true;
  }
  
  if (FileUtil.listFiles(root).length == 0) {
    // Empty dir can format without prompt.
    return false;
  }
  
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: Storage.java

Example 15: createCluster

import org.apache.hadoop.fs.FileUtil; // import the required package/class
private MiniDFSCluster createCluster() throws HDFSQuasiServiceException {
  MiniDFSCluster hdfsCluster = null;

  File baseDir = new File(getWorkingDir()).getAbsoluteFile();
  FileUtil.fullyDelete(baseDir);
  this.conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

  LOG.info("Using base dir " + baseDir.getAbsolutePath());

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(this.conf);
  builder.numDataNodes(getNumberOfDataNodes());
  try {
    hdfsCluster = builder.build();
  } catch (IOException e) {
    LOG.error("Error in creating mini DFS cluster ", e);
    throw new HDFSQuasiServiceException("Error in creating mini DFS cluster ", e);
  }
  ListIterator<DataNode> itr = hdfsCluster.getDataNodes().listIterator();
  LOG.info("NameNode: " + hdfsCluster.getNameNode().getNameNodeAddressHostPortString());
  while (itr.hasNext()) {
    DataNode dn = itr.next();
    LOG.info("DataNode: " + dn.getDisplayName());
  }
  return hdfsCluster;
}
 
Developer: ampool, Project: monarch, Lines: 26, Source: HDFSQuasiService.java


Note: The org.apache.hadoop.fs.FileUtil class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various programmers, and copyright of the source code remains with the original authors; see the corresponding project's License before distributing or using it. Do not reproduce without permission.