

Java BlockLocalPathInfo.getMetaPath Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo.getMetaPath. If you are wondering what BlockLocalPathInfo.getMetaPath does, how to call it, or want to see it used in real code, the examples selected below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo.


Five code examples of BlockLocalPathInfo.getMetaPath are shown below, sorted by popularity by default.
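
For orientation, here is a minimal sketch of the typical call pattern before the full examples. It assumes you already hold a DataNode reference (dn) and an ExtendedBlock (block), as in Example 1 below; it is an illustrative fragment under those assumptions, not a complete program. BlockLocalPathInfo is obtained from the DataNode, and getMetaPath/getBlockPath return the local filesystem paths of the block's metadata (checksum) file and data file.

import java.io.File;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;

// dn and block are assumed to come from the surrounding test or client code
BlockLocalPathInfo pathInfo = dn.getBlockLocalPathInfo(block, null); // null access token, as in Example 1
File metaFile = new File(pathInfo.getMetaPath());   // local path of the block's checksum (meta) file
File blockFile = new File(pathInfo.getBlockPath()); // local path of the block data file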

Example 1: testBlockRecoveryWithLessMetafile

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; // import the package/class this method depends on
/**
 * Block recovery when the meta file does not have CRCs for all chunks in the
 * block file.
 */
@Test
public void testBlockRecoveryWithLessMetafile() throws Exception {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
      UserGroupInformation.getCurrentUser().getShortUserName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Path file = new Path("/testRecoveryFile");
  DistributedFileSystem dfs = cluster.getFileSystem();
  FSDataOutputStream out = dfs.create(file);
  int count = 0;
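  // write roughly 2 MB of data, 4 bytes ("Data") per iteration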
  while (count < 2 * 1024 * 1024) {
    out.writeBytes("Data");
    count += 4;
  }
  out.hsync();
  // abort the original stream
  ((DFSOutputStream) out.getWrappedStream()).abort();

  LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
      file.toString(), 0, count);
  ExtendedBlock block = locations.get(0).getBlock();
  DataNode dn = cluster.getDataNodes().get(0);
  BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
  File metafile = new File(localPathInfo.getMetaPath());
  assertTrue(metafile.exists());

  // reduce the block meta file size
  RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
  raf.setLength(metafile.length() - 20);
  raf.close();

  // restart DN to make replica to RWR
  DataNodeProperties dnProp = cluster.stopDataNode(0);
  cluster.restartDataNode(dnProp, true);

  // try to recover the lease
  DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem
      .newInstance(cluster.getConfiguration(0));
  count = 0;
  while (++count < 10 && !newdfs.recoverLease(file)) {
    Thread.sleep(1000);
  }
  assertTrue("File should be closed", newdfs.recoverLease(file));

}
 
Developer ID: naver, Project: hadoop, Lines of code: 50, Source file: TestLeaseRecovery.java

Example 2: newBlockReader

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; // import the package/class this method depends on
/**
 * The only way this object can be instantiated.
 */
static BlockReaderLocalLegacy newBlockReader(DfsClientConf conf,
    UserGroupInformation userGroupInformation,
    Configuration configuration, String file, ExtendedBlock blk,
    Token<BlockTokenIdentifier> token, DatanodeInfo node,
    long startOffset, long length, StorageType storageType,
    Tracer tracer) throws IOException {
  final ShortCircuitConf scConf = conf.getShortCircuitConf();
  LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
      .getIpcPort());
  // check the cache first
  BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
  if (pathinfo == null) {
    if (userGroupInformation == null) {
      userGroupInformation = UserGroupInformation.getCurrentUser();
    }
    pathinfo = getBlockPathInfo(userGroupInformation, blk, node,
        configuration, conf.getSocketTimeout(), token,
        conf.isConnectToDnViaHostname(), storageType);
  }

  // check to see if the file exists. It may so happen that the
  // HDFS file has been deleted and this block-lookup is occurring
  // on behalf of a new HDFS file. This time, the block file could
  // be residing in a different portion of the fs.data.dir directory.
  // In this case, we remove this entry from the cache. The next
  // call to this method will re-populate the cache.
  FileInputStream dataIn = null;
  FileInputStream checksumIn = null;
  BlockReaderLocalLegacy localBlockReader = null;
  final boolean skipChecksumCheck = scConf.isSkipShortCircuitChecksums()
      || storageType.isTransient();
  try {
    // get a local file system
    File blkfile = new File(pathinfo.getBlockPath());
    dataIn = new FileInputStream(blkfile);

    LOG.debug("New BlockReaderLocalLegacy for file {} of size {} startOffset "
            + "{} length {} short circuit checksum {}",
        blkfile, blkfile.length(), startOffset, length, !skipChecksumCheck);

    if (!skipChecksumCheck) {
      // get the metadata file
      File metafile = new File(pathinfo.getMetaPath());
      checksumIn = new FileInputStream(metafile);

      final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
          new DataInputStream(checksumIn), blk);
      long firstChunkOffset = startOffset
          - (startOffset % checksum.getBytesPerChecksum());
      localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk,
          startOffset, checksum, true, dataIn, firstChunkOffset, checksumIn,
          tracer);
    } else {
      localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk,
          startOffset, dataIn, tracer);
    }
  } catch (IOException e) {
    // remove from cache
    localDatanodeInfo.removeBlockLocalPathInfo(blk);
    LOG.warn("BlockReaderLocalLegacy: Removing " + blk
        + " from cache because local file " + pathinfo.getBlockPath()
        + " could not be opened.");
    throw e;
  } finally {
    if (localBlockReader == null) {
      if (dataIn != null) {
        dataIn.close();
      }
      if (checksumIn != null) {
        checksumIn.close();
      }
    }
  }
  return localBlockReader;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 79, Source file: BlockReaderLocalLegacy.java

Example 3: newBlockReader

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; // import the package/class this method depends on
/**
 * The only way this object can be instantiated.
 */
static BlockReaderLocalLegacy newBlockReader(DFSClient.Conf conf,
    UserGroupInformation userGroupInformation,
    Configuration configuration, String file, ExtendedBlock blk,
    Token<BlockTokenIdentifier> token, DatanodeInfo node, 
    long startOffset, long length, StorageType storageType)
    throws IOException {
  LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
      .getIpcPort());
  // check the cache first
  BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
  if (pathinfo == null) {
    if (userGroupInformation == null) {
      userGroupInformation = UserGroupInformation.getCurrentUser();
    }
    pathinfo = getBlockPathInfo(userGroupInformation, blk, node,
        configuration, conf.socketTimeout, token,
        conf.connectToDnViaHostname, storageType);
  }

  // check to see if the file exists. It may so happen that the
  // HDFS file has been deleted and this block-lookup is occurring
  // on behalf of a new HDFS file. This time, the block file could
  // be residing in a different portion of the fs.data.dir directory.
  // In this case, we remove this entry from the cache. The next
  // call to this method will re-populate the cache.
  FileInputStream dataIn = null;
  FileInputStream checksumIn = null;
  BlockReaderLocalLegacy localBlockReader = null;
  boolean skipChecksumCheck = conf.skipShortCircuitChecksums ||
      storageType.isTransient();
  try {
    // get a local file system
    File blkfile = new File(pathinfo.getBlockPath());
    dataIn = new FileInputStream(blkfile);

    if (LOG.isDebugEnabled()) {
      LOG.debug("New BlockReaderLocalLegacy for file " + blkfile + " of size "
          + blkfile.length() + " startOffset " + startOffset + " length "
          + length + " short circuit checksum " + !skipChecksumCheck);
    }

    if (!skipChecksumCheck) {
      // get the metadata file
      File metafile = new File(pathinfo.getMetaPath());
      checksumIn = new FileInputStream(metafile);

      final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
          new DataInputStream(checksumIn), blk);
      long firstChunkOffset = startOffset
          - (startOffset % checksum.getBytesPerChecksum());
      localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token,
          startOffset, length, pathinfo, checksum, true, dataIn,
          firstChunkOffset, checksumIn);
    } else {
      localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token,
          startOffset, length, pathinfo, dataIn);
    }
  } catch (IOException e) {
    // remove from cache
    localDatanodeInfo.removeBlockLocalPathInfo(blk);
    DFSClient.LOG.warn("BlockReaderLocalLegacy: Removing " + blk
        + " from cache because local file " + pathinfo.getBlockPath()
        + " could not be opened.");
    throw e;
  } finally {
    if (localBlockReader == null) {
      if (dataIn != null) {
        dataIn.close();
      }
      if (checksumIn != null) {
        checksumIn.close();
      }
    }
  }
  return localBlockReader;
}
 
Developer ID: yncxcw, Project: big-c, Lines of code: 80, Source file: BlockReaderLocalLegacy.java

Example 4: newBlockReader

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; // import the package/class this method depends on
/**
 * The only way this object can be instantiated.
 */
static BlockReaderLocal newBlockReader(Configuration conf,
  String file, Block blk, Token<BlockTokenIdentifier> token, DatanodeInfo node, 
  int socketTimeout, long startOffset, long length) throws IOException {
  
  LocalDatanodeInfo localDatanodeInfo =  getLocalDatanodeInfo(node.getIpcPort());
  // check the cache first
  BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
  if (pathinfo == null) {
    pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token);
  }

  // check to see if the file exists. It may so happen that the
  // HDFS file has been deleted and this block-lookup is occurring
  // on behalf of a new HDFS file. This time, the block file could
  // be residing in a different portion of the fs.data.dir directory.
  // In this case, we remove this entry from the cache. The next
  // call to this method will re-populate the cache.
  FileInputStream dataIn = null;
  FileInputStream checksumIn = null;
  BlockReaderLocal localBlockReader = null;
  boolean skipChecksum = shortCircuitChecksum(conf);
  try {
    // get a local file system
    File blkfile = new File(pathinfo.getBlockPath());
    dataIn = new FileInputStream(blkfile);

    if (LOG.isDebugEnabled()) {
      LOG.debug("New BlockReaderLocal for file " + blkfile + " of size "
          + blkfile.length() + " startOffset " + startOffset + " length "
          + length + " short circuit checksum " + skipChecksum);
    }

    if (!skipChecksum) {
      // get the metadata file
      File metafile = new File(pathinfo.getMetaPath());
      checksumIn = new FileInputStream(metafile);

      // read and handle the common header here. For now just a version
      BlockMetadataHeader header = BlockMetadataHeader
          .readHeader(new DataInputStream(checksumIn));
      short version = header.getVersion();
      if (version != FSDataset.METADATA_VERSION) {
        LOG.warn("Wrong version (" + version + ") for metadata file for "
            + blk + " ignoring ...");
      }
      DataChecksum checksum = header.getChecksum();
      localBlockReader = new BlockReaderLocal(conf, file, blk, token, startOffset, length,
          pathinfo, checksum, true, dataIn, checksumIn);
    } else {
      localBlockReader = new BlockReaderLocal(conf, file, blk, token, startOffset, length,
          pathinfo, dataIn);
    }
  } catch (IOException e) {
    // remove from cache
    localDatanodeInfo.removeBlockLocalPathInfo(blk);
    DFSClient.LOG.warn("BlockReaderLocal: Removing " + blk +
        " from cache because local file " + pathinfo.getBlockPath() +
        " could not be opened.");
    throw e;
  } finally {
    if (localBlockReader == null) {
      if (dataIn != null) {
        dataIn.close();
      }
      if (checksumIn != null) {
        checksumIn.close();
      }
    }  
  }
  return localBlockReader;
}
 
Developer ID: Seagate, Project: hadoop-on-lustre, Lines of code: 75, Source file: BlockReaderLocal.java

Example 5: testBlockRecoveryWithLessMetafile

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; // import the package/class this method depends on
/**
 * Block recovery when the meta file does not have CRCs for all chunks in the
 * block file.
 */
@Test
public void testBlockRecoveryWithLessMetafile() throws Exception {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
      UserGroupInformation.getCurrentUser().getShortUserName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  Path file = new Path("/testRecoveryFile");
  DistributedFileSystem dfs = cluster.getFileSystem();
  FSDataOutputStream out = dfs.create(file);
  int count = 0;
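  // write roughly 2 MB of data, 4 bytes ("Data") per iteration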
  while (count < 2 * 1024 * 1024) {
    out.writeBytes("Data");
    count += 4;
  }
  out.hsync();
  // abort the original stream
  ((DFSOutputStream) out.getWrappedStream()).abort();

  LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
      file.toString(), 0, count);
  ExtendedBlock block = locations.get(0).getBlock();
  DataNode dn = cluster.getDataNodes().get(0);
  BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
  File metafile = new File(localPathInfo.getMetaPath());
  assertTrue(metafile.exists());

  // reduce the block meta file size
  RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
  raf.setLength(metafile.length() - 20);
  raf.close();

  // restart DN to make replica to RWR
  DataNodeProperties dnProp = cluster.stopDataNode(0);
  cluster.restartDataNode(dnProp, true);

  // try to recover the lease
  DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem
      .newInstance(cluster.getConfiguration(0));
  count = 0;
  while (++count < 10 && !newdfs.recoverLease(file)) {
    Thread.sleep(1000);
  }
  assertTrue("File should be closed", newdfs.recoverLease(file));

}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines of code: 51, Source file: TestLeaseRecovery.java


Note: The org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo.getMetaPath method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.