This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.toDatanodeDescriptor. If you are wondering what DFSTestUtil.toDatanodeDescriptor does, how to call it, or where to find usage examples, the curated method examples below may help; you can also read further about the enclosing class org.apache.hadoop.hdfs.DFSTestUtil.
The following shows 2 code examples of DFSTestUtil.toDatanodeDescriptor, sorted by popularity by default.
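As a quick orientation before the full examples (a minimal sketch, assuming only the DFSTestUtil calls that appear in the examples below): toDatanodeDescriptor takes the DatanodeStorageInfo[] produced by DFSTestUtil.createDatanodeStorageInfos and returns the DatanodeDescriptor that owns each storage, so a test can build simulated storages first and then work with the corresponding datanodes.

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

// One simulated storage per rack entry; toDatanodeDescriptor then
// extracts the datanode descriptor that owns each storage.
DatanodeStorageInfo[] storages =
    DFSTestUtil.createDatanodeStorageInfos(new String[] {"/rack1", "/rack2"});
DatanodeDescriptor[] dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);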
Example 1: setupCluster
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@BeforeClass
public static void setupCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/rack1",
      "/rack1",
      "/rack1",
      "/rack2",
      "/rack2",
      "/rack2"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);
  int blockSize = 1024;
  dnrList = new ArrayList<DatanodeRegistration>();
  dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();
  // Register DNs
  for (int i = 0; i < 6; i++) {
    DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
        new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
        VersionInfo.getVersion());
    dnrList.add(dnr);
    dnManager.registerDatanode(dnr);
    // Give each datanode enough free capacity to accept writes, then
    // report an initial heartbeat so it is considered live.
    dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * blockSize, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * blockSize, 0L);
    dataNodes[i].updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
        0L, 0L, 0, 0, null);
  }
}
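A setup like Example 1 is typically followed by asking the block placement policy for replication targets and asserting on the result. A hedged sketch of such a follow-up is shown here; the chooseTarget signature and TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY are assumptions drawn from HDFS test code of the same era, not part of the example above.

// Hypothetical usage of the cluster registered in setupCluster().
BlockPlacementPolicy policy =
    namenode.getNamesystem().getBlockManager().getBlockPlacementPolicy();
DatanodeStorageInfo[] targets = policy.chooseTarget("/testFile", 3,
    dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
    1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
// With stale-node avoidance and considerLoad enabled in the config
// above, stale or overloaded datanodes should be excluded from targets.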
Example 2: setupCluster
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@BeforeClass
public static void setupCluster() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/d1/r1",
      "/d1/r1",
      "/d1/r2",
      "/d1/r2",
      "/d2/r3",
      "/d2/r3"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
  // create an extra storage for dn5.
  DatanodeStorage extraStorage = new DatanodeStorage(
      storages[5].getStorageID() + "-extra", DatanodeStorage.State.NORMAL,
      StorageType.DEFAULT);
  /* DatanodeStorageInfo si = new DatanodeStorageInfo(
      storages[5].getDatanodeDescriptor(), extraStorage);
  */
  // Attach the extra storage to dn5's descriptor.
  BlockManagerTestUtil.updateStorage(storages[5].getDatanodeDescriptor(),
      extraStorage);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
    bm.getDatanodeManager().getHeartbeatManager().addDatanode(dataNodes[i]);
  }
  resetHeartbeatForStorages();
}
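Example 2 ends by calling resetHeartbeatForStorages(), whose body is not shown on this page. Assuming it mirrors the per-datanode heartbeat updates in Example 1, a hypothetical sketch could look like the following (BLOCK_SIZE is a placeholder constant here; Example 1 uses a local blockSize of 1024):

// Hypothetical reconstruction of the helper referenced above; the real
// test class may implement it differently.
private static void resetHeartbeatForStorages() {
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L);
    dataNodes[i].updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
        0L, 0L, 0, 0, null);
  }
}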