This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.getLocalDatanodeDescriptor. If you are wondering what DFSTestUtil.getLocalDatanodeDescriptor does, how to call it, or where to find examples of it in use, the curated code examples below should help. You can also look further into the containing class, org.apache.hadoop.hdfs.DFSTestUtil, for more
usage examples.
The following shows 4 code examples of the DFSTestUtil.getLocalDatanodeDescriptor method, ordered by popularity by default.
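Before the full examples, here is a minimal sketch of the basic call pattern, assuming only what the examples below already show and the Hadoop 2.x package layout for DatanodeDescriptor: getLocalDatanodeDescriptor() is a static helper in the HDFS test utilities that returns a DatanodeDescriptor for a local (loopback) datanode. The wrapper class and main method are invented for illustration and are not part of DFSTestUtil.
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

// Hypothetical demo class; only the getLocalDatanodeDescriptor() call itself
// is taken from the examples below.
public class LocalDatanodeDescriptorDemo {
  public static void main(String[] args) {
    // Obtain a DatanodeDescriptor bound to a local datanode, as the tests
    // below do when they need a fake datanode to attach blocks or storage to.
    DatanodeDescriptor dn = DFSTestUtil.getLocalDatanodeDescriptor();
    System.out.println("Local datanode descriptor: " + dn);
  }
}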
Example 1: testGetInvalidateBlocks
import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
/**
 * Test that getInvalidateBlocks observes the max limit.
 */
@Test
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;

  // Create a local datanode descriptor and queue MAX_BLOCKS blocks for invalidation.
  DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);

  // The first call returns at most MAX_LIMIT blocks; the second returns the remainder.
  Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.length, MAX_LIMIT);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.length, REMAINING_BLOCKS);
}
Example 2: testQueues
import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Test
public void testQueues() {
  // Use a local datanode descriptor as the (fake) reporting datanode.
  DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
  DatanodeStorage storage = new DatanodeStorage("STORAGE_ID");
  DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(fakeDN, storage);
  msgs.enqueueReportedBlock(storageInfo, block1Gs1, ReplicaState.FINALIZED);
  msgs.enqueueReportedBlock(storageInfo, block1Gs2, ReplicaState.FINALIZED);

  assertEquals(2, msgs.count());

  // Nothing queued yet for block 2
  assertNull(msgs.takeBlockQueue(block2Gs1));
  assertEquals(2, msgs.count());

  Queue<ReportedBlockInfo> q =
      msgs.takeBlockQueue(block1Gs2DifferentInstance);
  assertEquals(
      "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," +
      "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
      Joiner.on(",").join(q));
  assertEquals(0, msgs.count());

  // Should be null if we pull again
  assertNull(msgs.takeBlockQueue(block1Gs1));
  assertEquals(0, msgs.count());
}
Example 3: testUseDelHint
import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Test
public void testUseDelHint() {
  // Wrap a local datanode descriptor in a DatanodeStorageInfo to act as the deletion hint.
  DatanodeStorageInfo delHint = new DatanodeStorageInfo(
      DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("id"));
  List<DatanodeStorageInfo> moreThan1Racks = Arrays.asList(delHint);
  List<StorageType> excessTypes = new ArrayList<StorageType>();

  // The hint is honored when its storage type is among the excess types...
  excessTypes.add(StorageType.DEFAULT);
  Assert.assertTrue(BlockManager.useDelHint(true, delHint, null,
      moreThan1Racks, excessTypes));
  // ...and rejected when it is not.
  excessTypes.remove(0);
  excessTypes.add(StorageType.SSD);
  Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
      moreThan1Racks, excessTypes));
}
Example 4: testCorruptReplicaInfo
import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Test
public void testCorruptReplicaInfo() throws IOException,
                                            InterruptedException {
  CorruptReplicasMap crm = new CorruptReplicasMap();

  // Make sure initial values are returned correctly
  assertEquals("Number of corrupt blocks must initially be 0", 0, crm.size());
  assertNull("Param n cannot be less than 0", crm.getCorruptReplicaBlockIds(-1, null));
  assertNull("Param n cannot be greater than 100", crm.getCorruptReplicaBlockIds(101, null));
  long[] l = crm.getCorruptReplicaBlockIds(0, null);
  assertNotNull("n = 0 must return non-null", l);
  assertEquals("n = 0 must return an empty list", 0, l.length);

  // Create a list of block ids. A list is used to allow easy validation of the
  // output of getCorruptReplicaBlockIds.
  int NUM_BLOCK_IDS = 140;
  List<Long> block_ids = new LinkedList<Long>();
  for (int i = 0; i < NUM_BLOCK_IDS; i++) {
    block_ids.add((long) i);
  }

  // Two local datanode descriptors act as the datanodes reporting corrupt replicas.
  DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
  DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();

  addToCorruptReplicasMap(crm, getBlock(0), dn1);
  assertEquals("Number of corrupt blocks not returning correctly",
               1, crm.size());
  addToCorruptReplicasMap(crm, getBlock(1), dn1);
  assertEquals("Number of corrupt blocks not returning correctly",
               2, crm.size());
  // The same block reported by a second datanode does not change the block count.
  addToCorruptReplicasMap(crm, getBlock(1), dn2);
  assertEquals("Number of corrupt blocks not returning correctly",
               2, crm.size());

  crm.removeFromCorruptReplicasMap(getBlock(1));
  assertEquals("Number of corrupt blocks not returning correctly",
               1, crm.size());
  crm.removeFromCorruptReplicasMap(getBlock(0));
  assertEquals("Number of corrupt blocks not returning correctly",
               0, crm.size());

  for (Long block_id : block_ids) {
    addToCorruptReplicasMap(crm, getBlock(block_id), dn1);
  }
  assertEquals("Number of corrupt blocks not returning correctly",
               NUM_BLOCK_IDS, crm.size());

  assertTrue("First five block ids not returned correctly ",
             Arrays.equals(new long[]{0, 1, 2, 3, 4},
                           crm.getCorruptReplicaBlockIds(5, null)));

  LOG.info(crm.getCorruptReplicaBlockIds(10, 7L));
  LOG.info(block_ids.subList(7, 18));

  assertTrue("10 blocks after 7 not returned correctly ",
             Arrays.equals(new long[]{8, 9, 10, 11, 12, 13, 14, 15, 16, 17},
                           crm.getCorruptReplicaBlockIds(10, 7L)));
}