This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup.startDataNodes. If you are looking for how MiniDFSClusterWithNodeGroup.startDataNodes is used in practice, the curated examples below may help; see the enclosing class org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup for further context.
Two code examples of MiniDFSClusterWithNodeGroup.startDataNodes are shown below, ordered by popularity.
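Both examples follow the same pattern: register the node-group topology with the static setNodeGroups call before constructing the cluster, then pass each new datanode's rack, capacity, and node group when starting it. The following is a minimal sketch of that call shape only; the topology strings and the capacity value are illustrative placeholders, not taken from the examples below, and the code is assumed to run inside a test method declared "throws Exception":

    // Minimal sketch; topology strings and capacity are placeholders.
    Configuration conf = new HdfsConfiguration();
    MiniDFSClusterWithNodeGroup.setNodeGroups(new String[]{"/nodegroup0"});
    MiniDFSClusterWithNodeGroup cluster = new MiniDFSClusterWithNodeGroup(
        new MiniDFSCluster.Builder(conf)
            .numDataNodes(1)
            .racks(new String[]{"/rack0"})
            .simulatedCapacities(new long[]{5000L}));
    cluster.waitActive();
    // startDataNodes takes parallel arrays: one rack, one simulated
    // capacity, and one node group per new datanode.
    cluster.startDataNodes(conf, 1, true, null,
        new String[]{"/rack0"},        // rack of the new node
        new long[]{5000L},             // simulated capacity in bytes
        new String[]{"/nodegroup0"});  // node group of the new node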
Example 1: testBalancerWithNodeGroup
import org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup; // import the package/class the method depends on
/**
 * Create a cluster with even distribution, add a new empty node,
 * and then test node-group locality for the balancer policy.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  assertEquals(numOfDatanodes, nodeGroups.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 20% full
    long totalUsedSpace = totalCapacity * 2 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / (numOfDatanodes / 2),
        (short) (numOfDatanodes / 2), 0);

    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    // start up an empty node with the same capacity and on NODEGROUP2
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[]{newCapacity}, new String[]{newNodeGroup});

    totalCapacity += newCapacity;
    // run balancer and validate results
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
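Both tests lean on class-level fields and helpers that are not shown on this page: the CAPACITY/RACK*/NODEGROUP* constants, filePath, cluster, client, and createConf(). The following is a plausible reconstruction, modeled on Hadoop's TestBalancerWithNodeGroup test class, and offered only as an assumption about what those definitions look like; the concrete values and the file path are made up:

    // Assumed fixtures; names mirror Hadoop's TestBalancerWithNodeGroup,
    // but the concrete values are reconstructions, not taken from this page.
    private static final long CAPACITY = 5000L;
    private static final String RACK0 = "/rack0";
    private static final String RACK1 = "/rack1";
    private static final String NODEGROUP0 = "/nodegroup0";
    private static final String NODEGROUP1 = "/nodegroup1";
    private static final String NODEGROUP2 = "/nodegroup2";
    private static final Path filePath = new Path("/testFile");  // hypothetical path

    private MiniDFSCluster cluster;
    private ClientProtocol client;

    // Assumed helper: a configuration that plugs in the node-group-aware
    // topology and block placement policy, so the balancer sees node groups.
    private static Configuration createConf() {
      Configuration conf = new HdfsConfiguration();
      TestBalancer.initConf(conf);  // common balancer test settings
      conf.set("net.topology.impl",
          "org.apache.hadoop.net.NetworkTopologyWithNodeGroup");
      conf.set("dfs.block.replicator.classname",
          "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup");
      return conf;
    }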
Example 2: testBalancerWithRackLocality
import org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup; // import the package/class the method depends on
/**
 * Create a cluster with even distribution, add a new empty node,
 * and then test rack locality for the balancer policy.
 */
@Test(timeout=60000)
public void testBalancerWithRackLocality() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP1};

  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf,
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
        (short) numOfDatanodes, 0);

    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    // start up an empty node with the same capacity, on the same rack
    // (RACK1) but in a new node group (NODEGROUP2)
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[]{newCapacity}, new String[]{newNodeGroup});

    totalCapacity += newCapacity;
    // run balancer and validate results
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);

    // aggregate DFS used space per rack; with node-group topology the
    // network location is "/rack/nodegroup", so keep only the rack half
    DatanodeInfo[] datanodeReport =
        client.getDatanodeReport(DatanodeReportType.ALL);
    Map<String, Integer> rackToUsedCapacity = new HashMap<String, Integer>();
    for (DatanodeInfo datanode : datanodeReport) {
      String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
      int usedCapacity = (int) datanode.getDfsUsed();
      if (rackToUsedCapacity.get(rack) != null) {
        rackToUsedCapacity.put(rack, usedCapacity + rackToUsedCapacity.get(rack));
      } else {
        rackToUsedCapacity.put(rack, usedCapacity);
      }
    }
    assertEquals(2, rackToUsedCapacity.size());
    assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1));
  } finally {
    cluster.shutdown();
  }
}
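The rack aggregation at the end of Example 2 works because, under node-group topology, a datanode's network location has the two-level form /rack/nodegroup, and NetworkTopology.getFirstHalf keeps only the part before the last separator. A tiny illustration, with a made-up location string:

    // Illustration only; the location string is made up.
    String location = "/rack1/nodegroup2";
    String rack = NetworkTopology.getFirstHalf(location);  // yields "/rack1"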