当前位置: 首页>>代码示例>>Java>>正文


Java DatanodeStorageInfo.toDatanodeInfos方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.toDatanodeInfos方法的典型用法代码示例。如果您正苦于以下问题:Java DatanodeStorageInfo.toDatanodeInfos方法的具体用法?Java DatanodeStorageInfo.toDatanodeInfos怎么用?Java DatanodeStorageInfo.toDatanodeInfos使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo的用法示例。


在下文中一共展示了DatanodeStorageInfo.toDatanodeInfos方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: BlockCommand

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; //导入方法依赖的package包/类
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist    blocks to be transferred 
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()]; 
  targets = new DatanodeInfo[blocks.length][];
  targetStorageTypes = new StorageType[blocks.length][];
  targetStorageIDs = new String[blocks.length][];

  for(int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:BlockCommand.java

示例2: BlockCommand

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; //导入方法依赖的package包/类
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist    blocks to be transferred 
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()]; 
  targets = new DatanodeInfo[blocks.length][];
  targetStorageIDs = new String[blocks.length][];

  for(int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:20,代码来源:BlockCommand.java

示例3: LocatedBlock

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; //导入方法依赖的package包/类
/**
 * Convenience constructor: expands the given storage descriptors into the
 * parallel node / storage-ID / storage-type arrays expected by the primary
 * constructor, with no cached locations ({@code EMPTY_LOCS}).
 *
 * @param b the block being located
 * @param storages storages holding replicas of the block
 * @param startOffset offset of this block within the file
 * @param corrupt whether the block is known to be corrupt
 */
public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages,
    long startOffset, boolean corrupt) {
  this(b, DatanodeStorageInfo.toDatanodeInfos(storages),
      DatanodeStorageInfo.toStorageIDs(storages),
      DatanodeStorageInfo.toStorageTypes(storages),
      startOffset, corrupt, EMPTY_LOCS); // no cached locations
}
 
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:LocatedBlock.java

示例4: BlockECRecoveryInfo

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; //导入方法依赖的package包/类
/**
 * Convenience constructor: expands the target storage descriptors into the
 * parallel target-node / storage-ID / storage-type arrays expected by the
 * primary constructor.
 *
 * @param block the block group to recover
 * @param sources datanodes holding the live internal blocks
 * @param targetDnStorageInfo storages that will receive the recovered blocks
 * @param liveBlockIndices indices of the surviving internal blocks
 * @param ecPolicy erasure coding policy of the block group
 */
public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources,
    DatanodeStorageInfo[] targetDnStorageInfo, byte[] liveBlockIndices,
    ErasureCodingPolicy ecPolicy) {
  this(block, sources, DatanodeStorageInfo
      .toDatanodeInfos(targetDnStorageInfo), DatanodeStorageInfo
      .toStorageIDs(targetDnStorageInfo), DatanodeStorageInfo
      .toStorageTypes(targetDnStorageInfo), liveBlockIndices, ecPolicy);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:9,代码来源:BlockECRecoveryCommand.java

示例5: testGetLocatedStripedBlocks

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; //导入方法依赖的package包/类
@Test
public void testGetLocatedStripedBlocks() throws Exception {
  final Path file = new Path("/file1");
  FSDataOutputStream stream = null;
  try {
    // Create the file and flush one checksum chunk of striped data so the
    // last block is still under construction when we inspect it.
    stream = dfs.create(file, (short) 1);
    writeAndFlushStripedOutputStream(
        (DFSStripedOutputStream) stream.getWrappedStream(),
        DFS_BYTES_PER_CHECKSUM_DEFAULT);

    // Read the expected locations and block indices directly from the
    // namesystem's view of the under-construction striped block.
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    INodeFile inode = dir.getINode4Write(file.toString()).asFile();
    BlockInfoStriped lastBlock = (BlockInfoStriped) inode.getLastBlock();
    DatanodeInfo[] expectedLocs = DatanodeStorageInfo.toDatanodeInfos(
        lastBlock.getUnderConstructionFeature().getExpectedStorageLocations());
    byte[] expectedIndices =
        lastBlock.getUnderConstructionFeature().getBlockIndices();

    // Fetch the same block through the client API and compare.
    LocatedBlocks located =
        dfs.getClient().getLocatedBlocks(file.toString(), 0L);
    Assert.assertEquals(1, located.locatedBlockCount());
    LocatedBlock block = located.get(0);

    Assert.assertTrue(block instanceof LocatedStripedBlock);
    DatanodeInfo[] actualLocs = block.getLocations();
    byte[] actualIndices = ((LocatedStripedBlock) block).getBlockIndices();
    Assert.assertEquals(GROUP_SIZE, actualLocs.length);
    Assert.assertEquals(GROUP_SIZE, actualIndices.length);
    Assert.assertArrayEquals(expectedIndices, actualIndices);
    Assert.assertArrayEquals(expectedLocs, actualLocs);
  } finally {
    IOUtils.cleanup(null, stream);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:34,代码来源:TestAddStripedBlocks.java


注:本文中的org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.toDatanodeInfos方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。