Java DatanodeUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.DatanodeUtil. If you are wondering what DatanodeUtil is for, how to use it, or where to find examples of it in real code, the curated class examples below may help.


The DatanodeUtil class belongs to the org.apache.hadoop.hdfs.server.datanode package. Fifteen code examples of the class are shown below, sorted by popularity by default.
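
Before diving into the examples, here is a minimal sketch of the DatanodeUtil helpers the snippets below rely on. It assumes hadoop-hdfs is on the classpath; the class name, storage path, block ID, and generation stamp are illustrative values, not taken from any of the listed projects.

import java.io.File;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;

public class DatanodeUtilSketch {
  public static void main(String[] args) {
    final long blockId = 1073741825L; // illustrative block ID
    final long genStamp = 1001L;      // illustrative generation stamp
    // Map a block ID to its subdirectory under a storage root (see Examples 3, 4, 7-10, 12-15).
    File blockDir = DatanodeUtil.idToBlockDir(new File("/data/current/finalized"), blockId);
    // Build the meta file name for a block name and generation stamp (see Examples 3 and 11).
    String metaName = DatanodeUtil.getMetaName("blk_" + blockId, genStamp);
    // Build the temporary name used while unlinking a block file (see Examples 2, 5, and 6).
    File unlinkTmp = DatanodeUtil.getUnlinkTmpFile(new File(blockDir, "blk_" + blockId));
    System.out.println(blockDir + " | " + metaName + " | " + unlinkTmp);
  }
}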

Example 1: isBPDirEmpty

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
boolean isBPDirEmpty(String bpid) throws IOException {
  File volumeCurrentDir = this.getCurrentDir();
  File bpDir = new File(volumeCurrentDir, bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
      finalizedDir)) {
    return false;
  }
  if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
    return false;
  }
  return true;
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: FsVolumeImpl.java

Example 2: createUnlinkTmpFile

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo, 
    boolean changeBlockFile, 
    boolean isRename) throws IOException {
  File src;
  if (changeBlockFile) {
    src = replicaInfo.getBlockFile();
  } else {
    src = replicaInfo.getMetaFile();
  }
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    src.renameTo(dst);
  } else {
    FileInputStream in = new FileInputStream(src);
    try {
      FileOutputStream out = new FileOutputStream(dst);
      try {
        IOUtils.copyBytes(in, out, 1);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 27, Source: TestDatanodeRestart.java

Example 3: verifyBlockDeletedFromDir

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {

  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File targetDir =
      DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
    File metaFile = new File(targetDir,
      DatanodeUtil.getMetaName(lb.getBlock().getBlockName(),
        lb.getBlock().getGenerationStamp()));
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
  }
  return true;
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: LazyPersistTestCase.java

Example 4: copyBlockFiles

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
/**
 * Copy the block and meta files for the given block to the given destination.
 * @return the new meta and block files.
 * @throws IOException
 */
static File[] copyBlockFiles(long blockId, long genStamp,
                             File srcMeta, File srcFile, File destRoot)
    throws IOException {
  final File destDir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File dstFile = new File(destDir, srcFile.getName());
  final File dstMeta = FsDatasetUtil.getMetaFile(dstFile, genStamp);
  computeChecksum(srcMeta, dstMeta, srcFile);

  try {
    Storage.nativeCopyFileUnbuffered(srcFile, dstFile, true);
  } catch (IOException e) {
    throw new IOException("Failed to copy " + srcFile + " to " + dstFile, e);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copied " + srcMeta + " to " + dstMeta +
        " and calculated checksum");
    LOG.debug("Copied " + srcFile + " to " + dstFile);
  }
  return new File[] {dstMeta, dstFile};
}
 
Developer ID: yncxcw, Project: FlexMap, Lines: 26, Source: FsDatasetImpl.java

Example 5: createUnlinkTmpFile

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
    boolean changeBlockFile, boolean isRename) throws IOException {
  File src;
  if (changeBlockFile) {
    src = replicaInfo.getBlockFile();
  } else {
    src = replicaInfo.getMetaFile();
  }
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    src.renameTo(dst);
  } else {
    FileInputStream in = new FileInputStream(src);
    try {
      FileOutputStream out = new FileOutputStream(dst);
      try {
        IOUtils.copyBytes(in, out, 1);
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines: 26, Source: TestDatanodeRestart.java

Example 6: getOrigFile

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
static File getOrigFile(File unlinkTmpFile) {
  final String name = unlinkTmpFile.getName();
  if (!name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX)) {
    throw new IllegalArgumentException("unlinkTmpFile=" + unlinkTmpFile
        + " does not end with " + DatanodeUtil.UNLINK_BLOCK_SUFFIX);
  }
  final int n = name.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length(); 
  return new File(unlinkTmpFile.getParentFile(), name.substring(0, n));
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: FsDatasetUtil.java
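
For context on what Example 6 undoes: DatanodeUtil.getUnlinkTmpFile appends UNLINK_BLOCK_SUFFIX to a file name, and getOrigFile strips it off again. Below is a minimal round-trip sketch; the block file path is illustrative, and since getOrigFile is package-private in FsDatasetUtil, the sketch inlines the same suffix-stripping logic shown above.

import java.io.File;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;

public class UnlinkTmpRoundTrip {
  public static void main(String[] args) {
    File blockFile = new File("/data/current/finalized/blk_1073741825"); // illustrative path
    File tmp = DatanodeUtil.getUnlinkTmpFile(blockFile);
    // Strip UNLINK_BLOCK_SUFFIX, mirroring FsDatasetUtil.getOrigFile in Example 6.
    String name = tmp.getName();
    int n = name.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length();
    File orig = new File(tmp.getParentFile(), name.substring(0, n));
    System.out.println(orig.equals(blockFile)); // prints true
  }
}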

Example 7: copyBlockFiles

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
/**
 * Copy the block and meta files for the given block to the given destination.
 * @return the new meta and block files.
 * @throws IOException
 */
static File[] copyBlockFiles(long blockId, long genStamp, File srcMeta,
    File srcFile, File destRoot, boolean calculateChecksum)
    throws IOException {
  final File destDir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File dstFile = new File(destDir, srcFile.getName());
  final File dstMeta = FsDatasetUtil.getMetaFile(dstFile, genStamp);
  return copyBlockFiles(srcMeta, srcFile, dstMeta, dstFile, calculateChecksum);
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: FsDatasetImpl.java

Example 8: copyReplicaWithNewBlockIdAndGS

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  FsVolumeReference v = volumes.getNextVolume(
      replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes());
  final File tmpDir = ((FsVolumeImpl) v.getVolume())
      .getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  return copyBlockFiles(replicaInfo.getMetaFile(), replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true);
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: FsDatasetImpl.java

Example 9: addBlock

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
File addBlock(Block b, File f) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  if (!blockDir.exists()) {
    if (!blockDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + blockDir);
    }
  }
  File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
  return blockFile;
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: BlockPoolSlice.java

Example 10: activateSavedReplica

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
/**
 * Move a persisted replica from lazypersist directory to a subdirectory
 * under finalized.
 */
File activateSavedReplica(Block b, File metaFile, File blockFile)
    throws IOException {
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  FileUtils.moveFile(blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  FileUtils.moveFile(metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);
  return targetBlockFile;
}
 
Developer ID: naver, Project: hadoop, Lines: 16, Source: BlockPoolSlice.java

Example 11: changeGenStampOfBlock

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
    long newGenStamp) throws IOException {
  File blockFile = getBlockFile(dnIndex, blk);
  File metaFile = FsDatasetUtil.findMetaFile(blockFile);
  return metaFile.renameTo(new File(DatanodeUtil.getMetaName(
      blockFile.getAbsolutePath(), newGenStamp)));
}
 
Developer ID: naver, Project: hadoop, Lines: 8, Source: MiniDFSCluster.java

Example 12: ensureLazyPersistBlocksAreSaved

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
/**
 * Make sure at least one non-transient volume has a saved copy of the replica.
 * An infinite loop is used to ensure the async lazy persist tasks are completely
 * done before verification. Callers of ensureLazyPersistBlocksAreSaved expect
 * either a successful pass or a timeout failure.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
    cluster.getDataNodes().get(0).getFSDataset().getVolumes();
  final Set<Long> persistedBlockIds = new HashSet<Long>();

  while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
    // Sleep for 1 second before each verification iteration
    Thread.sleep(1000);

    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (FsVolumeSpi v : volumes) {
        if (v.isTransientStorage()) {
          continue;
        }

        FsVolumeImpl volume = (FsVolumeImpl) v;
        File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();

        long blockId = lb.getBlock().getBlockId();
        File targetDir =
          DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
        File blockFile = new File(targetDir, lb.getBlock().getBlockName());
        if (blockFile.exists()) {
          // Found a persisted copy of this block; record its ID
          persistedBlockIds.add(blockId);
        }
      }
    }
  }

  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
}
 
Developer ID: naver, Project: hadoop, Lines: 42, Source: LazyPersistTestCase.java

Example 13: copyBlockFiles

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
/**
 * Copy the block and meta files for the given block to the given destination.
 * @return the new meta and block files.
 * @throws IOException
 */
static File[] copyBlockFiles(long blockId, long genStamp, File srcMeta,
    File srcFile, File destRoot, boolean calculateChecksum,
    int smallBufferSize, final Configuration conf) throws IOException {
  final File destDir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File dstFile = new File(destDir, srcFile.getName());
  final File dstMeta = FsDatasetUtil.getMetaFile(dstFile, genStamp);
  return copyBlockFiles(srcMeta, srcFile, dstMeta, dstFile, calculateChecksum,
      smallBufferSize, conf);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 15, Source: FsDatasetImpl.java

Example 14: copyReplicaWithNewBlockIdAndGS

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  FsVolumeImpl v = (FsVolumeImpl) replicaInfo.getVolume();
  final File tmpDir = v.getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  return copyBlockFiles(replicaInfo.getMetaFile(),
      replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true, smallBufferSize, conf);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 14, Source: FsDatasetImpl.java

Example 15: addFinalizedBlock

import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the required package/class
File addFinalizedBlock(Block b, File f) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  if (!blockDir.exists()) {
    if (!blockDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + blockDir);
    }
  }
  File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
  return blockFile;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 13, Source: BlockPoolSlice.java


Note: The org.apache.hadoop.hdfs.server.datanode.DatanodeUtil class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.