

Java FileUtil.copy Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.fs.FileUtil.copy. If you have been wondering what FileUtil.copy does, how to use it, or want to see real-world examples, the curated code samples below may help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.fs.FileUtil.


The 11 code examples of the FileUtil.copy method shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
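Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two FileUtil.copy overloads that appear most often in them: copying between two FileSystem instances with a deleteSource flag, and the variant that adds an explicit overwrite flag. The paths and the default Configuration are hypothetical placeholders; adjust them to your cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class FileUtilCopySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem localFs = FileSystem.getLocal(conf);   // source: local file system
    FileSystem dstFs = FileSystem.get(conf);          // destination: default FS (e.g. HDFS)
    Path src = new Path("/tmp/example.txt");          // hypothetical source path
    Path dst = new Path("/user/example/example.txt"); // hypothetical destination path

    // copy(srcFS, src, dstFS, dst, deleteSource, conf):
    // copies src to dst, keeping the source (deleteSource = false).
    boolean copied = FileUtil.copy(localFs, src, dstFs, dst, false, conf);

    // copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf):
    // same, but controls overwriting of an existing destination explicitly.
    copied = FileUtil.copy(localFs, src, dstFs, dst, false, true, conf);
    System.out.println("copied: " + copied);
  }
}

Note that Example 1 below uses the FileStatus-based overload, copy(srcFS, srcStatus, dstFS, dst, deleteSource, overwrite, conf), which reuses an already-fetched FileStatus instead of looking up the source file again.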

Example 1: copy

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
private Path copy(Path sCopy, Path dstdir) throws IOException {
  FileSystem sourceFs = sCopy.getFileSystem(conf);
  Path dCopy = new Path(dstdir, "tmp_"+sCopy.getName());
  FileStatus sStat = sourceFs.getFileStatus(sCopy);
  if (sStat.getModificationTime() != resource.getTimestamp()) {
    throw new IOException("Resource " + sCopy +
        " changed on src filesystem (expected " + resource.getTimestamp() +
        ", was " + sStat.getModificationTime() + ")");
  }
  if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
    if (!isPublic(sourceFs, sCopy, sStat, statCache)) {
      throw new IOException("Resource " + sCopy +
          " is not publicly accessible and as such cannot be part of the" +
          " public cache.");
    }
  }

  FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
      true, conf);
  return dCopy;
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: FSDownload.java

Example 2: moveToDoneNow

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
private void moveToDoneNow(Path fromPath, Path toPath) throws IOException {
  // check if path exists, in case of retries it may not exist
  if (stagingDirFS.exists(fromPath)) {
    LOG.info("Copying " + fromPath.toString() + " to " + toPath.toString());
    // TODO temporarily removing the existing dst
    if (doneDirFS.exists(toPath)) {
      doneDirFS.delete(toPath, true);
    }
    boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath,
        false, getConfig());

    if (copied) {
      LOG.info("Copied to done location: " + toPath);
    } else {
      LOG.info("copy failed");
    }
    doneDirFS.setPermission(toPath, new FsPermission(
        JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS));
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: JobHistoryEventHandler.java

Example 3: copyRemoteFiles

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
private Path copyRemoteFiles(Path parentDir, Path originalPath,
    Configuration conf, short replication) throws IOException {
  // Check whether we need to copy the files at all: is the JobTracker
  // using the same file system? We only compare URI strings (no DNS
  // lookups to verify the filesystems are really the same), which is
  // not optimal but avoids name resolution.

  FileSystem remoteFs = originalPath.getFileSystem(conf);
  if (compareFs(remoteFs, jtFs)) {
    return originalPath;
  }
  // This might cause name collisions, in which case copy() will throw an
  // exception. Derive the new path from the original path's file name.
  Path newPath = new Path(parentDir, originalPath.getName());
  FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
  jtFs.setReplication(newPath, replication);
  return newPath;
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: JobResourceUploader.java

Example 4: copyNameDirs

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
public static void copyNameDirs(Collection<URI> srcDirs, Collection<URI> dstDirs,
    Configuration dstConf) throws IOException {
  URI srcDir = Lists.newArrayList(srcDirs).get(0);
  FileSystem dstFS = FileSystem.getLocal(dstConf).getRaw();
  for (URI dstDir : dstDirs) {
    Preconditions.checkArgument(!dstDir.equals(srcDir),
        "src and dst are the same: " + dstDir);
    File dstDirF = new File(dstDir);
    if (dstDirF.exists()) {
      if (!FileUtil.fullyDelete(dstDirF)) {
        throw new IOException("Unable to delete: " + dstDirF);
      }
    }
    LOG.info("Copying namedir from primary node dir "
        + srcDir + " to " + dstDir);
    FileUtil.copy(
        new File(srcDir),
        dstFS, new Path(dstDir), false, dstConf);
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: MiniDFSCluster.java

Example 5: bulkLoadStoreFile

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Bulk load: Add a specified store file to the specified family. If the source file is on the
 * same file-system as the destination, it is moved from the source location to the destination
 * location; otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath    {@link Path} to the file to import
 * @param seqNum     Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException if the copy or move fails
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum) throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than "
        + "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return commitStoreFile(familyName, srcPath, seqNum, true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 31, Source: HRegionFileSystem.java

Example 6: testFindContainingJarWithPlus

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Test that findContainingJar works correctly even if the
 * path has a "+" sign or spaces in it
 */
@Test
public void testFindContainingJarWithPlus() throws Exception {
  new File(TEST_DIR_WITH_SPECIAL_CHARS).mkdirs();
  Configuration conf = new Configuration();

  FileSystem localfs = FileSystem.getLocal(conf);

  FileUtil.copy(localfs, new Path(JAR_RELATIVE_PATH),
                localfs, new Path(TEST_DIR_WITH_SPECIAL_CHARS, "test.jar"),
                false, true, conf);
  testJarAtPath(TEST_DIR_WITH_SPECIAL_CHARS + File.separator + "test.jar");
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestMRCJCJobConf.java

Example 7: copy

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
private Path copy(Path sCopy, Path dstdir) throws IOException {
  FileSystem sourceFs = sCopy.getFileSystem(conf);
  Path dCopy = new Path(dstdir, "tmp_" + sCopy.getName());
  FileStatus sStat = sourceFs.getFileStatus(sCopy);
  FileUtil.copy(sourceFs, sStat, FileSystem.getLocal(conf), dCopy, false,
      true, conf);
  return dCopy;
}
 
Developer: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines: 9, Source: FSDownload.java

Example 8: restoreUploadedFiles

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
public static void restoreUploadedFiles(FileSystem fs, Path backupDir, HomeFileConfig homeFileStore, BackupStats backupStats) throws IOException {
  // restore uploaded files
  final Path uploadsBackupDir = new Path(backupDir.toUri().getPath(), "uploads");
  FileSystem fs2 = homeFileStore.createFileSystem();
  fs2.delete(homeFileStore.getUploadsDir(), true);
  FileUtil.copy(fs, uploadsBackupDir, fs2, homeFileStore.getUploadsDir(), false, false, new Configuration());
  backupStats.files = fs.getContentSummary(backupDir).getFileCount();
}
 
Developer: dremio, Project: dremio-oss, Lines: 9, Source: BackupRestoreUtil.java

Example 9: prepareBulkLoad

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Override
public String prepareBulkLoad(final byte[] family, final String srcPath) throws IOException {
  Path p = new Path(srcPath);
  Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));
  if (srcFs == null) {
    srcFs = FileSystem.get(p.toUri(), conf);
  }

  if (!isFile(p)) {
    throw new IOException("Path does not reference a file: " + p);
  }

  // Check to see if the source and target filesystems are the same
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) {
    LOG.debug("Bulk-load file " + srcPath + " is on different filesystem than " +
        "the destination filesystem. Copying file over to destination staging dir.");
    FileUtil.copy(srcFs, p, fs, stageP, false, conf);
  } else {
    LOG.debug("Moving " + p + " to " + stageP);
    FileStatus origFileStatus = fs.getFileStatus(p);
    origPermissions.put(srcPath, origFileStatus.getPermission());
    if (!fs.rename(p, stageP)) {
      throw new IOException("Failed to move HFile: " + p + " to " + stageP);
    }
  }
  fs.setPermission(stageP, PERM_ALL_ACCESS);
  return stageP.toString();
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: SecureBulkLoadEndpoint.java

Example 10: uploadFile

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
/**
 * Uploads the file to the shared cache under a temporary name, and returns
 * the result.
 */
@VisibleForTesting
boolean uploadFile(Path sourcePath, Path tempPath) throws IOException {
  return FileUtil.copy(localFs, sourcePath, fs, tempPath, false, conf);
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: SharedCacheUploader.java

Example 11: testNewNamenodeTakesOverWriter

import org.apache.hadoop.fs.FileUtil; // import the package/class the method depends on
@Test (timeout = 30000)
public void testNewNamenodeTakesOverWriter() throws Exception {
  File nn1Dir = new File(
      MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1");
  File nn2Dir = new File(
      MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn2");
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nn1Dir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      mjc.getQuorumJournalURI("myjournal").toString());

  // Start the cluster once to generate the dfs dirs
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(0)
    .manageNameDfsDirs(false)
    .checkExitOnShutdown(false)
    .build();

  // Shutdown the cluster before making a copy of the namenode dir
  // to release all file locks, otherwise, the copy will fail on
  // some platforms.
  cluster.shutdown();

  try {
    // Start a second NN pointed to the same quorum.
    // We need to copy the image dir from the first NN -- or else
    // the new NN will just be rejected because of Namespace mismatch.
    FileUtil.fullyDelete(nn2Dir);
    FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(),
        new Path(nn2Dir.getAbsolutePath()), false, conf);

    // Start the cluster again
    cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .format(false)
      .manageNameDfsDirs(false)
      .checkExitOnShutdown(false)
      .build();

    cluster.getFileSystem().mkdirs(TEST_PATH);

    Configuration conf2 = new Configuration();
    conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nn2Dir.getAbsolutePath());
    conf2.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
        mjc.getQuorumJournalURI("myjournal").toString());
    MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
      .numDataNodes(0)
      .format(false)
      .manageNameDfsDirs(false)
      .build();
    
    // Check that the new cluster sees the edits made on the old cluster
    try {
      assertTrue(cluster2.getFileSystem().exists(TEST_PATH));
    } finally {
      cluster2.shutdown();
    }
    
    // Check that, if we try to write to the old NN
    // that it aborts.
    try {
      cluster.getFileSystem().mkdirs(new Path("/x"));
      fail("Did not abort trying to write to a fenced NN");
    } catch (RemoteException re) {
      GenericTestUtils.assertExceptionContains(
          "Could not sync enough journals to persistent storage", re);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 74, Source: TestNNWithQJM.java


Note: the org.apache.hadoop.fs.FileUtil.copy examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's License before redistributing or using the code, and do not repost without permission.