This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.getLocalDatanodeInfo. If you have been wondering what exactly DFSTestUtil.getLocalDatanodeInfo does, how to use it, or where to find examples of it, the curated code samples here may help. You can also learn more about the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.
The following shows 6 code examples of the DFSTestUtil.getLocalDatanodeInfo method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
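The examples below use two overloads of the method: a no-argument form that fabricates a DatanodeInfo for the local host, and a three-argument form that also sets the reported IP address, hostname, and administrative state. Here is a minimal sketch of both, assuming only what the examples themselves show (the enclosing class and the printed output are illustrative, not part of the Hadoop test suite):

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

public class LocalDatanodeInfoSketch {
  public static void main(String[] args) {
    // No-argument overload: a DatanodeInfo bound to the local host.
    DatanodeInfo plain = DFSTestUtil.getLocalDatanodeInfo();
    // Three-argument overload: control the IP, hostname, and admin state.
    DatanodeInfo decommissioned = DFSTestUtil.getLocalDatanodeInfo(
        "127.0.0.1", "h1", AdminStates.DECOMMISSIONED);
    System.out.println(plain + " / " + decommissioned);
  }
}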
Example 1: testConvertBlockRecoveryCommand
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testConvertBlockRecoveryCommand() {
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };

  List<RecoveringBlock> blks = ImmutableList.of(
      new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
      new RecoveringBlock(getExtendedBlock(2), dnInfo, 3));
  BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);

  // Convert to the protobuf form and check the block IDs directly.
  BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
  assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
  assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());

  // Convert back and verify the round trip preserved the command.
  BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
  List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
      cmd2.getRecoveringBlocks());
  assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
  assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
  assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
  assertEquals(cmd.toString(), cmd2.toString());
}
Example 2: createLocatedBlock
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
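For context, builder helpers like createLocatedBlock are usually consumed by a protobuf round-trip test in the same class, mirroring the pattern of Examples 1, 5, and 6. A hypothetical caller might look like the following sketch (the testConvertLocatedBlock name, the LocatedBlock overloads of PBHelper.convert, and the compare helper are assumptions, not shown in the excerpts above):

@Test
public void testConvertLocatedBlock() {
  LocatedBlock lb = createLocatedBlock();
  // Serialize to the protobuf form and back, then verify nothing was lost.
  LocatedBlockProto lbProto = PBHelper.convert(lb);
  LocatedBlock lb2 = PBHelper.convert(lbProto);
  compare(lb, lb2);
}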
Example 3: createLocatedBlockNoStorageMedia
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Example 4: initRecoveringBlocks
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
  Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
  DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
  // dn, block, and RECOVERY_ID are fields of the enclosing test class.
  DatanodeInfo[] locs = new DatanodeInfo[] {
      new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
      mockOtherDN };
  RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
  blocks.add(rBlock);
  return blocks;
}
Example 5: testConvertRecoveringBlock
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testConvertRecoveringBlock() {
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
  RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);

  // Round-trip through the protobuf form.
  RecoveringBlockProto bProto = PBHelper.convert(b);
  RecoveringBlock b1 = PBHelper.convert(bProto);
  assertEquals(b.getBlock(), b1.getBlock());

  DatanodeInfo[] dnInfo1 = b1.getLocations();
  assertEquals(dnInfo.length, dnInfo1.length);
  for (int i = 0; i < dnInfo.length; i++) {
    // Compare each location pairwise.
    compare(dnInfo[i], dnInfo1[i]);
  }
}
Example 6: testConvertBlockCommand
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[] { new Block(21), new Block(22) };
  DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
      new DatanodeInfo[2] };
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs = {{"s00"}, {"s10", "s11"}};
  StorageType[][] storageTypes = {{StorageType.DEFAULT},
      {StorageType.DEFAULT, StorageType.DEFAULT}};
  BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
      blocks, dnInfos, storageTypes, storageIDs);

  // Round-trip through the protobuf form.
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);

  // The action, every block, and every target list must survive unchanged.
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}