

Java FileUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.FileUtil. If you have been wondering what exactly FileUtil does, how to use it, or where to find examples of it in action, the hand-picked snippets below should help.


The FileUtil class belongs to the org.apache.hadoop.fs package. Fifteen FileUtil code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.

Example 1: copyNameDirs

import org.apache.hadoop.fs.FileUtil; // import the required package/class
public static void copyNameDirs(Collection<URI> srcDirs, Collection<URI> dstDirs,
    Configuration dstConf) throws IOException {
  URI srcDir = Lists.newArrayList(srcDirs).get(0);
  FileSystem dstFS = FileSystem.getLocal(dstConf).getRaw();
  for (URI dstDir : dstDirs) {
    Preconditions.checkArgument(!dstDir.equals(srcDir),
        "src and dst are the same: " + dstDir);
    File dstDirF = new File(dstDir);
    if (dstDirF.exists()) {
      if (!FileUtil.fullyDelete(dstDirF)) {
        throw new IOException("Unable to delete: " + dstDirF);
      }
    }
    LOG.info("Copying namedir from primary node dir "
        + srcDir + " to " + dstDir);
    FileUtil.copy(
        new File(srcDir),
        dstFS, new Path(dstDir), false, dstConf);
  }
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source: MiniDFSCluster.java
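
As a quick aside, the call above uses the FileUtil.copy(File, FileSystem, Path, boolean, Configuration) overload, which copies a local directory into a target FileSystem. Here is a minimal, self-contained sketch of that overload; the /tmp paths are hypothetical placeholders.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class CopyLocalDirExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Raw local file system, as in the example above.
    FileSystem localFs = FileSystem.getLocal(conf).getRaw();
    // Copy /tmp/src-dir to /tmp/dst-dir; 'false' keeps the source in place.
    FileUtil.copy(new File("/tmp/src-dir"), localFs,
        new Path("/tmp/dst-dir"), false, conf);
  }
}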

Example 2: shoudBeValidMapReduceWithPartitionerEvaluation

import org.apache.hadoop.fs.FileUtil; // import the required package/class
@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceWithPartitionerEvaluation()
    throws IOException {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(2);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);

    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 23 | Source: TestTableMapReduceUtil.java

Example 3: testShellCommandTimeout

import org.apache.hadoop.fs.FileUtil; // import the required package/class
public void testShellCommandTimeout() throws Throwable {
  if(Shell.WINDOWS) {
    // setExecutable does not work on Windows
    return;
  }
  String rootDir = new File(System.getProperty(
      "test.build.data", "/tmp")).getAbsolutePath();
  File shellFile = new File(rootDir, "timeout.sh");
  String timeoutCommand = "sleep 4; echo \"hello\"";
  PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile));
  writer.println(timeoutCommand);
  writer.close();
  FileUtil.setExecutable(shellFile, true);
  Shell.ShellCommandExecutor shexc =
      new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},
                                     null, null, 100);
  try {
    shexc.execute();
  } catch (Exception e) {
    // An exception is thrown when the command times out.
  }
  shellFile.delete();
  assertTrue("Script did not time out", shexc.isTimedOut());
}
 
Developer: naver | Project: hadoop | Lines: 25 | Source: TestShell.java
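
FileUtil.setExecutable(File, boolean), used above to make the script runnable, wraps the platform-specific permission logic (which is why the test skips Windows). A minimal sketch, with a hypothetical script path:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import org.apache.hadoop.fs.FileUtil;

public class SetExecutableExample {
  public static void main(String[] args) throws IOException {
    File script = new File("/tmp/hello.sh"); // hypothetical path
    try (PrintWriter writer = new PrintWriter(new FileOutputStream(script))) {
      writer.println("echo \"hello\"");
    }
    // Returns true if the executable bit was set successfully.
    boolean ok = FileUtil.setExecutable(script, true);
    System.out.println("executable bit set: " + ok);
  }
}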

Example 4: createStandaloneEditLog

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Return a standalone instance of FSEditLog that will log into the given
 * log directory. The returned instance is not yet opened.
 */
public static FSEditLog createStandaloneEditLog(File logDir)
    throws IOException {
  assertTrue(logDir.mkdirs() || logDir.exists());
  if (!FileUtil.fullyDeleteContents(logDir)) {
    throw new IOException("Unable to delete contents of " + logDir);
  }
  NNStorage storage = Mockito.mock(NNStorage.class);
  StorageDirectory sd 
    = FSImageTestUtil.mockStorageDirectory(logDir, NameNodeDirType.EDITS);
  List<StorageDirectory> sds = Lists.newArrayList(sd);
  Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
  Mockito.doReturn(sd).when(storage)
    .getStorageDirectory(Matchers.<URI>anyObject());

  FSEditLog editLog = new FSEditLog(new Configuration(), 
                       storage,
                       ImmutableList.of(logDir.toURI()));
  editLog.initJournalsForWrite();
  return editLog;
}
 
Developer: naver | Project: hadoop | Lines: 25 | Source: FSImageTestUtil.java
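
Note the difference between FileUtil.fullyDeleteContents, used above, and FileUtil.fullyDelete: the former empties a directory but keeps it, the latter removes the directory itself. A minimal sketch, assuming a hypothetical scratch directory:

import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class DeleteExample {
  public static void main(String[] args) {
    File dir = new File("/tmp/scratch"); // hypothetical scratch dir
    dir.mkdirs();
    // Removes everything inside 'dir' but leaves 'dir' itself in place.
    System.out.println("contents deleted: " + FileUtil.fullyDeleteContents(dir));
    // Removes 'dir' along with anything still inside it.
    System.out.println("dir deleted: " + FileUtil.fullyDelete(dir));
  }
}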

Example 5: tearDown

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Cleans the resources and closes the instance of datanode
 * @throws IOException if an error occurred
 */
@After
public void tearDown() throws IOException {
  if (dn != null) {
    try {
      dn.shutdown();
    } catch(Exception e) {
      LOG.error("Cannot close: ", e);
    } finally {
      File dir = new File(DATA_DIR);
      if (dir.exists())
        Assert.assertTrue(
            "Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source: TestBlockRecovery.java

Example 6: bulkLoadStoreFile

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Bulk load: Add a specified store file to the specified family. If the source file is on the
 * same file-system as the destination store, it is moved from the source location to the
 * destination location; otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath    {@link Path} to the file to import
 * @param seqNum     Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum) throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than "
        + "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return commitStoreFile(familyName, srcPath, seqNum, true);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 31 | Source: HRegionFileSystem.java
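
The cross-file-system branch above relies on the FileSystem-to-FileSystem overload of FileUtil.copy. A minimal sketch of that overload, using hypothetical file:// and hdfs:// paths (the HDFS URI assumes a reachable NameNode):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class CrossFsCopyExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path src = new Path("file:///tmp/local-file");        // hypothetical source
    Path dst = new Path("hdfs://namenode:8020/tmp/copy"); // hypothetical destination
    FileSystem srcFs = src.getFileSystem(conf);
    FileSystem dstFs = dst.getFileSystem(conf);
    // 'false' keeps the source; pass 'true' to emulate a move.
    FileUtil.copy(srcFs, src, dstFs, dst, false, conf);
  }
}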

Example 7: copyRemoteFiles

import org.apache.hadoop.fs.FileUtil; // import the required package/class
private Path copyRemoteFiles(Path parentDir, Path originalPath,
    Configuration conf, short replication) throws IOException {
  // Check whether we need to copy the files at all: is the JobTracker using
  // the same file system? We compare only URI strings (no DNS lookups) to
  // decide whether the file systems are the same. This is not optimal, but
  // it avoids name resolution.
  FileSystem remoteFs = originalPath.getFileSystem(conf);
  if (compareFs(remoteFs, jtFs)) {
    return originalPath;
  }
  // this might have name collisions. copy will throw an exception
  // parse the original path to create new path
  Path newPath = new Path(parentDir, originalPath.getName());
  FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
  jtFs.setReplication(newPath, replication);
  return newPath;
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source: JobResourceUploader.java

Example 8: symlink

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Utility method for creating a symlink and warning on errors.
 *
 * If link is null, does nothing.
 */
private void symlink(File workDir, String target, String link)
    throws IOException {
  if (link != null) {
    link = workDir.toString() + Path.SEPARATOR + link;
    File flink = new File(link);
    if (!flink.exists()) {
      LOG.info(String.format("Creating symlink: %s <- %s", target, link));
      if (0 != FileUtil.symLink(target, link)) {
        LOG.warn(String.format("Failed to create symlink: %s <- %s", target,
            link));
      } else {
        symlinksCreated.add(new File(link));
      }
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 22 | Source: LocalDistributedCacheManager.java
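
FileUtil.symLink returns the exit code of the underlying link operation (0 on success), which is why the example above checks the return value rather than catching an exception. A minimal sketch, with hypothetical paths:

import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class SymlinkExample {
  public static void main(String[] args) throws IOException {
    String target = "/tmp/real-file";  // hypothetical link target
    String link = "/tmp/link-to-file"; // hypothetical link location
    int rc = FileUtil.symLink(target, link);
    if (rc != 0) {
      System.err.println("symlink failed with exit code " + rc);
    }
  }
}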

Example 9: testReadAndWrite

import org.apache.hadoop.fs.FileUtil; // import the required package/class
@Test(timeout=10000)
public void testReadAndWrite() throws Exception {
  File path = new File(TEST_BASE, "testReadAndWrite");
  path.mkdirs();
  SharedFileDescriptorFactory factory =
      SharedFileDescriptorFactory.create("woot_",
          new String[] { path.getAbsolutePath() });
  FileInputStream inStream =
      factory.createDescriptor("testReadAndWrite", 4096);
  FileOutputStream outStream = new FileOutputStream(inStream.getFD());
  outStream.write(101);
  inStream.getChannel().position(0);
  Assert.assertEquals(101, inStream.read());
  inStream.close();
  outStream.close();
  FileUtil.fullyDelete(path);
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 18 | Source: TestSharedFileDescriptorFactory.java

Example 10: testDirectoryFallbacks

import org.apache.hadoop.fs.FileUtil; // import the required package/class
@Test(timeout=60000)
public void testDirectoryFallbacks() throws Exception {
  File nonExistentPath = new File(TEST_BASE, "nonexistent");
  File permissionDeniedPath = new File("/");
  File goodPath = new File(TEST_BASE, "testDirectoryFallbacks");
  goodPath.mkdirs();
  try {
    SharedFileDescriptorFactory.create("shm_", 
        new String[] { nonExistentPath.getAbsolutePath(),
                        permissionDeniedPath.getAbsolutePath() });
    Assert.fail();
  } catch (IOException e) {
    // Expected: none of the candidate directories is usable.
  }
  SharedFileDescriptorFactory factory =
      SharedFileDescriptorFactory.create("shm_", 
          new String[] { nonExistentPath.getAbsolutePath(),
                          permissionDeniedPath.getAbsolutePath(),
                          goodPath.getAbsolutePath() } );
  Assert.assertEquals(goodPath.getAbsolutePath(), factory.getPath());
  FileUtil.fullyDelete(goodPath);
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 22 | Source: TestSharedFileDescriptorFactory.java

Example 11: chmod

import org.apache.hadoop.fs.FileUtil; // import the required package/class
@Override
public void chmod(String path, int mode) throws IOException {
  File f = new File(path);
  FsPermission perm = FsPermission.createImmutable((short) mode);
  LinkedList<String> args = new LinkedList<String>();
  args.add("/usr/bin/setfacl");
  args.add("-m");
  args.add(
      "u::" + perm.getUserAction().SYMBOL +
      ",g::" + perm.getGroupAction().SYMBOL +
      ",o::" + perm.getOtherAction().SYMBOL);
  args.add(FileUtil.makeShellPath(f, true));
  org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));

  // Set default ACLs on directories so children can inherit them.
  if (f.isDirectory()) {
    args.add(1, "-d");
    org.apache.hadoop.fs.util.Shell.runPrivileged(args.toArray(new String[0]));
  }
}
 
Developer: intel-hpdd | Project: lustre-connector-for-hadoop | Lines: 21 | Source: LustreFsJavaImpl.java
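
FileUtil.makeShellPath(File, boolean), used above to build the setfacl argument, converts a File into the path string expected by shell commands; when the second argument is true, the path is canonicalized first. A minimal sketch, with a hypothetical path:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class ShellPathExample {
  public static void main(String[] args) throws IOException {
    File f = new File("/tmp/some-file"); // hypothetical path
    // 'true' canonicalizes the path before converting it.
    String shellPath = FileUtil.makeShellPath(f, true);
    System.out.println(shellPath);
  }
}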

Example 12: startBackupNode

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  
  // Start BackupNode
  String[] args = new String [] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
 
Developer: naver | Project: hadoop | Lines: 29 | Source: TestHDFSServerPorts.java

Example 13: testMapOnlyNoOutputInternal

import org.apache.hadoop.fs.FileUtil; // import the required package/class
private void testMapOnlyNoOutputInternal(int version) throws Exception {
  JobConf conf = new JobConf();
  // Intentionally not set: FileOutputFormat.setOutputPath(conf, outDir);
  conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
  conf.setInt(org.apache.hadoop.mapreduce.lib.output.
      FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter();    
  
  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);
  
  if(committer.needsTaskCommit(tContext)) {
    // do commit
    committer.commitTask(tContext);
  }
  committer.commitJob(jContext);

  // validate output
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer: naver | Project: hadoop | Lines: 24 | Source: TestFileOutputCommitter.java

Example 14: hasSomeData

import org.apache.hadoop.fs.FileUtil; // import the required package/class
/**
 * @return true if the storage directory should prompt the user prior
 * to formatting (i.e if the directory appears to contain some data)
 * @throws IOException if the SD cannot be accessed due to an IO error
 */
@Override
public boolean hasSomeData() throws IOException {
  // It's all right for a dir not to exist, or to exist (properly accessible)
  // and be completely empty.
  if (!root.exists()) return false;
  
  if (!root.isDirectory()) {
    // a file where you expect a directory should not cause silent
    // formatting
    return true;
  }
  
  if (FileUtil.listFiles(root).length == 0) {
    // Empty dir can format without prompt.
    return false;
  }
  
  return true;
}
 
Developer: naver | Project: hadoop | Lines: 25 | Source: Storage.java
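
Unlike File.listFiles(), FileUtil.listFiles throws an IOException instead of returning null when a listing fails, which is what keeps hasSomeData free of null checks. A minimal sketch, assuming a hypothetical directory:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class ListFilesExample {
  public static void main(String[] args) throws IOException {
    File dir = new File("/tmp"); // hypothetical directory
    // Throws IOException on failure instead of returning null.
    for (File f : FileUtil.listFiles(dir)) {
      System.out.println(f.getName());
    }
  }
}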

Example 15: createCluster

import org.apache.hadoop.fs.FileUtil; // import the required package/class
private MiniDFSCluster createCluster() throws HDFSQuasiServiceException {
  MiniDFSCluster hdfsCluster = null;

  File baseDir = new File(getWorkingDir()).getAbsoluteFile();
  FileUtil.fullyDelete(baseDir);
  this.conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

  LOG.info("Using base dir " + baseDir.getAbsolutePath());

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(this.conf);
  builder.numDataNodes(getNumberOfDataNodes());
  try {
    hdfsCluster = builder.build();
  } catch (IOException e) {
    LOG.error("Error in creating mini DFS cluster ", e);
    throw new HDFSQuasiServiceException("Error in creating mini DFS cluster ", e);
  }
  ListIterator<DataNode> itr = hdfsCluster.getDataNodes().listIterator();
  LOG.info("NameNode: " + hdfsCluster.getNameNode().getNameNodeAddressHostPortString());
  while (itr.hasNext()) {
    DataNode dn = itr.next();
    LOG.info("DataNode: " + dn.getDisplayName());
  }
  return hdfsCluster;
}
 
Developer: ampool | Project: monarch | Lines: 26 | Source: HDFSQuasiService.java


Note: The org.apache.hadoop.fs.FileUtil class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce without permission.