This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.createDatanodeStorageInfo. If you have been wondering what DFSTestUtil.createDatanodeStorageInfo does, how to use it, or what real-world calls look like, the hand-picked examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.
The following shows 5 code examples of DFSTestUtil.createDatanodeStorageInfo, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
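Judging from the call sites in the examples below, createDatanodeStorageInfo has (at least) a two-argument overload taking a storage ID and an IP address, and a four-argument overload that additionally takes a network location (rack path) and a hostname. The following is a minimal sketch of how the factory is typically invoked; the package names for DatanodeStorageInfo and DatanodeDescriptor are the Hadoop 2.x ones, and the exact signatures may differ in your version:

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

public class CreateDatanodeStorageInfoSketch {
  public static void main(String[] args) {
    // Two-argument overload: storage ID plus IP address.
    DatanodeStorageInfo s1 =
        DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");

    // Four-argument overload: storage ID, IP, rack path, and hostname.
    // The rack path matters for rack-aware placement tests such as
    // Example 3 below.
    DatanodeStorageInfo s2 = DFSTestUtil.createDatanodeStorageInfo(
        "DS-xxxx", "7.7.7.7", "/d2/r3", "host7");

    // Each storage is backed by a DatanodeDescriptor that tests can
    // manipulate directly (heartbeats, liveness, and so on).
    DatanodeDescriptor dn = s1.getDatanodeDescriptor();
    System.out.println(dn + " hosts storage " + s1);
  }
}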
Example 1: testReplaceStorage
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testReplaceStorage() throws Exception {
  // Create two dummy storages.
  final DatanodeStorageInfo storage1 =
      DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(
      storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfoContiguous((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS / 2].getStorageInfo(0), is(storage2));
}
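The is(false) assertion above is the point of the test: storage2.addBlock accepted the block, but because the block already lived on another storage of the same datanode, the result is a replacement rather than an addition, and the block silently migrates to storage2. Assuming the AddBlockResult enum of Hadoop 2.x on DatanodeStorageInfo (ADDED, REPLACED, ALREADY_EXIST — verify the constant names against your version), a caller continuing Example 1's variables could distinguish the outcomes like this:

// Sketch continuing Example 1's variables; the AddBlockResult constants
// are the Hadoop 2.x ones and should be checked against your version.
AddBlockResult result = storage2.addBlock(blockInfos[NUM_BLOCKS / 2]);
switch (result) {
  case ADDED:         // the block was new to this datanode
    break;
  case REPLACED:      // the block moved here from a sibling storage
    break;
  case ALREADY_EXIST: // the block was already on this exact storage
    break;
}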
Example 2: testAddStorage
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testAddStorage() throws Exception {
  BlockInfoContiguous blockInfo = new BlockInfoContiguous((short) 3);
  final DatanodeStorageInfo storage =
      DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
  boolean added = blockInfo.addStorage(storage);
  Assert.assertTrue(added);
  Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
}
Example 3: testChooseTarget6
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * In this test case there are enough nodes in total, but only one rack
 * is actually available.
 * @throws Exception
 */
@Test
public void testChooseTarget6() throws Exception {
  DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo(
      "DS-xxxx", "7.7.7.7", "/d2/r3", "host7");
  DatanodeDescriptor newDn = storage.getDatanodeDescriptor();
  Set<Node> excludedNodes;
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();

  excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodes[0]);
  excludedNodes.add(dataNodes[1]);
  excludedNodes.add(dataNodes[2]);
  excludedNodes.add(dataNodes[3]);

  DatanodeStorageInfo[] targets;
  // Only two nodes available in a rack. Try picking two nodes. Only one
  // should return.
  targets = chooseTarget(2, chosenNodes, excludedNodes);
  assertEquals(1, targets.length);

  // Make three nodes available in a rack.
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  bm.getDatanodeManager().getNetworkTopology().add(newDn);
  bm.getDatanodeManager().getHeartbeatManager().addDatanode(newDn);
  updateHeartbeatWithUsage(newDn,
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);

  // Try picking three nodes. Only two should return.
  excludedNodes.clear();
  excludedNodes.add(dataNodes[0]);
  excludedNodes.add(dataNodes[1]);
  excludedNodes.add(dataNodes[2]);
  excludedNodes.add(dataNodes[3]);
  chosenNodes.clear();
  try {
    targets = chooseTarget(3, chosenNodes, excludedNodes);
    assertEquals(2, targets.length);
  } finally {
    bm.getDatanodeManager().getNetworkTopology().remove(newDn);
  }
  resetHeartbeatForStorages();
}
Example 4: testInitializeBlockRecovery
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testInitializeBlockRecovery() throws Exception {
  DatanodeStorageInfo s1 =
      DFSTestUtil.createDatanodeStorageInfo("10.10.1.1", "s1");
  DatanodeDescriptor dd1 = s1.getDatanodeDescriptor();
  DatanodeStorageInfo s2 =
      DFSTestUtil.createDatanodeStorageInfo("10.10.1.2", "s2");
  DatanodeDescriptor dd2 = s2.getDatanodeDescriptor();
  DatanodeStorageInfo s3 =
      DFSTestUtil.createDatanodeStorageInfo("10.10.1.3", "s3");
  DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();
  dd1.isAlive = dd2.isAlive = dd3.isAlive = true;

  BlockInfoContiguousUnderConstruction blockInfo =
      new BlockInfoContiguousUnderConstruction(
          new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
          (short) 3,
          BlockUCState.UNDER_CONSTRUCTION,
          new DatanodeStorageInfo[] {s1, s2, s3});

  // Recovery attempt #1: dd2 has the most recent heartbeat, so it is
  // chosen as the primary node for the recovery.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
  blockInfo.initializeBlockRecovery(1);
  BlockInfoContiguousUnderConstruction[] blockInfoRecovery =
      dd2.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #2: dd2 has already been tried as primary, so the
  // most recently updated of the remaining nodes (dd1) is chosen.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(2);
  blockInfoRecovery = dd1.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #3: only dd3 has not been tried yet.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #4: every node has been tried, so the chosen flags
  // reset and the DN with the most recent heartbeat (dd3) is picked again.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);
}
Example 5: testBlockListMoveToHead
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testBlockListMoveToHead() throws Exception {
  LOG.info("BlockInfo moveToHead tests...");
  final int MAX_BLOCKS = 10;
  DatanodeStorageInfo dd =
      DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  ArrayList<BlockInfoContiguous> blockInfoList =
      new ArrayList<BlockInfoContiguous>();
  int headIndex;
  int curIndex;

  LOG.info("Building block list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
    blockInfoList.add(new BlockInfoContiguous(blockList.get(i), (short) 3));
    dd.addBlock(blockInfoList.get(i));
    // index of the datanode should be 0
    assertEquals("Find datanode should be 0", 0,
        blockInfoList.get(i).findStorageInfo(dd));
  }

  // list length should be equal to the number of blocks we inserted
  LOG.info("Checking list length...");
  assertEquals("Length should be MAX_BLOCKS", MAX_BLOCKS, dd.numBlocks());
  Iterator<BlockInfoContiguous> it = dd.getBlockIterator();
  int len = 0;
  while (it.hasNext()) {
    it.next();
    len++;
  }
  assertEquals("There should be MAX_BLOCKS blockInfo's", MAX_BLOCKS, len);

  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);

  LOG.info("Moving each block to the head of the list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    curIndex = blockInfoList.get(i).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(i), curIndex, headIndex);
    // the moved element must be at the head of the list
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(i), dd.getBlockListHeadForTesting());
  }

  // move head of the list to the head - this should not change the list
  LOG.info("Moving head to the head...");
  BlockInfoContiguous temp = dd.getBlockListHeadForTesting();
  curIndex = 0;
  headIndex = 0;
  dd.moveBlockToHead(temp, curIndex, headIndex);
  assertEquals(
      "Moving head to the head of the list should not change the list",
      temp, dd.getBlockListHeadForTesting());

  // check all elements of the list against the original blockInfoList
  LOG.info("Checking elements of the list...");
  temp = dd.getBlockListHeadForTesting();
  assertNotNull("Head should not be null", temp);
  int c = MAX_BLOCKS - 1;
  while (temp != null) {
    assertEquals("Expected element is not on the list",
        blockInfoList.get(c--), temp);
    temp = temp.getNext(0);
  }

  LOG.info("Moving random blocks to the head of the list...");
  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
  Random rand = new Random();
  for (int i = 0; i < MAX_BLOCKS; i++) {
    int j = rand.nextInt(MAX_BLOCKS);
    curIndex = blockInfoList.get(j).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(j), curIndex, headIndex);
    // the moved element must be at the head of the list
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(j), dd.getBlockListHeadForTesting());
  }
}