This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getDataNodes. If you are wondering what MiniDFSCluster.getDataNodes does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read more about the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster.
The 13 code examples of MiniDFSCluster.getDataNodes shown below are ordered by popularity by default.
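Before the examples, here is a minimal sketch (not taken from the examples; the cluster size and printed fields are illustrative assumptions) of the pattern they all share: start a MiniDFSCluster, call getDataNodes() to obtain the in-process DataNode instances, and inspect them.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.Assert;
import org.junit.Test;

public class GetDataNodesSketch {

  @Test
  public void listDataNodes() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // A throwaway two-DataNode cluster running inside this JVM.
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      // getDataNodes() returns the live DataNode objects, which the examples
      // below use to reach metrics, datasets, and datanode identifiers.
      List<DataNode> dataNodes = cluster.getDataNodes();
      Assert.assertEquals(2, dataNodes.size());
      for (DataNode dn : dataNodes) {
        System.out.println(dn.getDisplayName() + " -> " + dn.getDatanodeId());
      }
    } finally {
      cluster.shutdown();
    }
  }
}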
Example 1: checkPendingCachedEmpty
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Check that the NameNode is not attempting to cache anything.
 */
private void checkPendingCachedEmpty(MiniDFSCluster cluster)
    throws Exception {
  cluster.getNamesystem().readLock();
  try {
    final DatanodeManager datanodeManager =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    for (DataNode dn : cluster.getDataNodes()) {
      DatanodeDescriptor descriptor =
          datanodeManager.getDatanode(dn.getDatanodeId());
      Assert.assertTrue("Pending cached list of " + descriptor +
          " is not empty, "
          + Arrays.toString(descriptor.getPendingCached().toArray()),
          descriptor.getPendingCached().isEmpty());
    }
  } finally {
    cluster.getNamesystem().readUnlock();
  }
}
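As an illustration only (this caller is not part of the original example), the helper above could be invoked from a test in the same class once an otherwise idle cluster is up; the two-DataNode setup is an assumption, and the imports match those already used by the example.

// Assumes this method lives in the same test class as checkPendingCachedEmpty.
@Test
public void testNoPendingCachingOnIdleCluster() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    cluster.waitActive();
    // No cache directives were added, so no DataNode should have anything
    // queued for caching.
    checkPendingCachedEmpty(cluster);
  } finally {
    cluster.shutdown();
  }
}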
Example 2: testDataNodeMetrics
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short) 1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    assertCounter("BytesWritten", LONG_FILE_LEN, rb);
    assertTrue("Expected non-zero number of incremental block reports",
        getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
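A note on the metric helpers: getMetrics, assertCounter, getLongCounter, and assertQuantileGauges are used unqualified in this and several later examples but are not defined in the snippets. In Hadoop's test code they typically come from the MetricsAsserts utility via static imports along the following lines (shown here as an assumption about the omitted import section):

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

// getMetrics(...) captures a snapshot of the named metrics source as an
// org.apache.hadoop.metrics2.MetricsRecordBuilder, which the assert* and
// get*Counter helpers then inspect.
import org.apache.hadoop.metrics2.MetricsRecordBuilder;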
Example 3: lowerKeyUpdateIntervalAndClearKeys
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
private static void lowerKeyUpdateIntervalAndClearKeys(MiniDFSCluster cluster) {
  lowerKeyUpdateIntervalAndClearKeys(cluster.getNamesystem(0));
  lowerKeyUpdateIntervalAndClearKeys(cluster.getNamesystem(1));
  for (DataNode dn : cluster.getDataNodes()) {
    dn.clearAllBlockSecretKeys();
  }
}
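Note that this helper assumes a cluster with two NameNodes (getNamesystem(0) and getNamesystem(1)) and relies on an FSNamesystem-level overload that the article does not show. As a rough, hedged sketch of the surrounding setup only (the HA topology, single DataNode, and omission of the FSNamesystem overload are all assumptions, not part of the original example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class BlockKeyResetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Two NameNodes in an HA pair, so getNamesystem(0) and getNamesystem(1)
    // are both valid indexes on the resulting cluster.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      // Drop the cached block access keys on every DataNode, mirroring the
      // loop in Example 3 (the FSNamesystem overload is not reproduced here).
      for (DataNode dn : cluster.getDataNodes()) {
        dn.clearAllBlockSecretKeys();
      }
    } finally {
      cluster.shutdown();
    }
  }
}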
Example 4: getTrueReplication
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
private int getTrueReplication(MiniDFSCluster cluster, ExtendedBlock block)
    throws IOException {
  int count = 0;
  for (DataNode dn : cluster.getDataNodes()) {
    if (DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
        block.getBlockPoolId(), block.getBlockId()) != null) {
      count++;
    }
  }
  return count;
}
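For context (this caller is illustrative and not part of the original example; it assumes it sits in the same test class as getTrueReplication, with the usual DFSTestUtil and JUnit imports available), a test might use the helper above to compare the physically stored replica count against the requested replication factor:

// Hypothetical caller of getTrueReplication from Example 4.
@Test
public void verifyStoredReplicas() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path file = new Path("/replCheck.txt");
    // One small block, replicated to all three DataNodes.
    DFSTestUtil.createFile(fs, file, 1024L, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, file, (short) 3);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
    // Every DataNode's dataset should hold a replica of the block.
    Assert.assertEquals(3, getTrueReplication(cluster, block));
  } finally {
    cluster.shutdown();
  }
}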
Example 5: testSendDataPacketMetrics
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testSendDataPacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Create and read a 1 byte file
    Path tmpfile = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, tmpfile,
        (long) 1, (short) 1, 1L);
    DFSTestUtil.readFile(fs, tmpfile);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    // Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
    // signaling the end of the block
    assertCounter("SendDataPacketTransferNanosNumOps", (long) 2, rb);
    assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long) 2, rb);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check that the sendPacket percentiles rolled to non-zero values
    String sec = interval + "s";
    assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
    assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
Example 6: testReceivePacketMetrics
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testReceivePacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path testFile = new Path("/testFlushNanosMetric.txt");
    FSDataOutputStream fout = fs.create(testFile);
    fout.write(new byte[1]);
    fout.hsync();
    fout.close();
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
    // Expect two flushes, 1 for the flush that occurs after writing,
    // 1 that occurs on closing the data and metadata files.
    assertCounter("FlushNanosNumOps", 2L, dnMetrics);
    // Expect two syncs, one from the hsync, one on close.
    assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the receivePacket percentiles that should be non-zero
    String sec = interval + "s";
    assertQuantileGauges("FlushNanos" + sec, dnMetrics);
    assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
Example 7: testDataNodeTimeSpend
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * This test ensures that writing increments TotalWriteTime and that
 * reading increments TotalReadTime.
 * @throws Exception
 */
@Test
public void testDataNodeTimeSpend() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    final long LONG_FILE_LEN = 1024 * 1024 * 10;
    long startWriteValue = getLongCounter("TotalWriteTime", rb);
    long startReadValue = getLongCounter("TotalReadTime", rb);
    for (int x = 0; x < 50; x++) {
      DFSTestUtil.createFile(fs, new Path("/time.txt." + x),
          LONG_FILE_LEN, (short) 1, Time.monotonicNow());
    }
    for (int x = 0; x < 50; x++) {
      String s = DFSTestUtil.readFile(fs, new Path("/time.txt." + x));
    }
    MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
    long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
    long endReadValue = getLongCounter("TotalReadTime", rbNew);
    assertTrue(endReadValue > startReadValue);
    assertTrue(endWriteValue > startWriteValue);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 8: testLastContactTime
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@SuppressWarnings({ "unchecked" })
@Test
public void testLastContactTime() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNameNode().namesystem;
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");
    // Define include file to generate deadNodes metrics
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir,
        "build/test/data/temp/TestNameNodeMXBean");
    Path includeFile = new Path(dir, "include");
    assertTrue(localFileSys.mkdirs(dir));
    StringBuilder includeHosts = new StringBuilder();
    for (DataNode dn : cluster.getDataNodes()) {
      includeHosts.append(dn.getDisplayName()).append("\n");
    }
    DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
    fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
    cluster.stopDataNode(0);
    while (fsn.getBlockManager().getDatanodeManager().getNumLiveDataNodes()
        != 2) {
      Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
    }
    // get attribute "DeadNodes"
    String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
        "DeadNodes"));
    assertEquals(fsn.getDeadNodes(), deadnodeinfo);
    Map<String, Map<String, Object>> deadNodes =
        (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
    assertTrue(deadNodes.size() > 0);
    for (Map<String, Object> deadNode : deadNodes.values()) {
      assertTrue(deadNode.containsKey("lastContact"));
      assertTrue(deadNode.containsKey("decommissioned"));
      assertTrue(deadNode.containsKey("xferaddr"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 9: testReplDueToNodeFailRespectsRackPolicy
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testReplDueToNodeFailRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  // Last datanode is on a different rack
  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  try {
    // Create a file with one block with a replication factor of 3
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Make the last datanode look like it failed to heartbeat by
    // calling removeDatanode and stopping it.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    int idx = datanodes.size() - 1;
    DataNode dataNode = datanodes.get(idx);
    DatanodeID dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(idx);
    dm.removeDatanode(dnId);
    // The block should still have sufficient # replicas, across racks.
    // The last node may not have contained a replica, but if it did
    // it should have been replicated within the same rack.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Fail the last datanode again, it's also on rack2 so there is
    // only 1 rack for all the replicas
    datanodes = cluster.getDataNodes();
    idx = datanodes.size() - 1;
    dataNode = datanodes.get(idx);
    dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(idx);
    dm.removeDatanode(dnId);
    // Make sure we have enough live replicas even though we are
    // short one rack and therefore need one replica
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
  } finally {
    cluster.shutdown();
  }
}
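Both this example and the next call a getConf() helper that the article omits. As a hedged sketch only (the exact keys and values used upstream may differ), such a helper typically shrinks the block size and tightens the heartbeat/replication check intervals so that replica counts converge within the test's wait window:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative only -- not the upstream implementation of getConf().
private static Configuration getConf() {
  Configuration conf = new HdfsConfiguration();
  // Small blocks keep the test files tiny.
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  // Heartbeat and re-replication checks every second so the NameNode reacts
  // quickly to the stopped/removed DataNodes in these tests.
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  return conf;
}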
Example 10: testReduceReplFactorDueToRejoinRespectsRackPolicy
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testReduceReplFactorDueToRejoinRespectsRackPolicy()
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  // Last datanode is on a different rack
  String racks[] = {"/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Make the last (cross rack) datanode look like it failed
    // to heartbeat by stopping it and calling removeDatanode.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(3, datanodes.size());
    DataNode dataNode = datanodes.get(2);
    DatanodeID dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(2);
    dm.removeDatanode(dnId);
    // The block gets re-replicated to another datanode so it has a
    // sufficient # replicas, but not across racks, so there should
    // be 1 rack, and 1 needed replica (even though there are 2 hosts
    // available and only 2 replicas required).
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
    // Start the "failed" datanode, which has a replica so the block is
    // now over-replicated and therefore a replica should be removed but
    // not on the restarted datanode as that would violate the rack policy.
    String rack2[] = {"/rack2"};
    cluster.startDataNodes(conf, 1, true, null, rack2);
    cluster.waitActive();
    // The block now has sufficient # replicas, across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
Example 11: testPendingAndInvalidate
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Test that the BlockManager correctly removes the corresponding pending
 * replication records when a file is deleted.
 *
 * @throws Exception
 */
@Test
public void testPendingAndInvalidate() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_REPLICATION_INTERVAL);
  CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
      DFS_REPLICATION_INTERVAL);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
      DATANODE_COUNT).build();
  cluster.waitActive();
  FSNamesystem namesystem = cluster.getNamesystem();
  BlockManager bm = namesystem.getBlockManager();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // 1. create a file
    Path filePath = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
    // 2. disable the heartbeats
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    // 3. mark a couple of blocks as corrupt
    LocatedBlock block = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
    cluster.getNamesystem().writeLock();
    try {
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
          "STORAGE_ID", "TEST");
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
          "STORAGE_ID", "TEST");
    } finally {
      cluster.getNamesystem().writeUnlock();
    }
    BlockManagerTestUtil.computeAllPendingWork(bm);
    BlockManagerTestUtil.updateState(bm);
    assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
    assertEquals(bm.pendingReplications.getNumReplicas(block.getBlock()
        .getLocalBlock()), 2);
    // 4. delete the file
    fs.delete(filePath, true);
    // retry at most 10 times, each time sleep for 1s. Note that 10s is much
    // less than the default pending record timeout (5~10min)
    int retries = 10;
    long pendingNum = bm.getPendingReplicationBlocksCount();
    while (pendingNum != 0 && retries-- > 0) {
      Thread.sleep(1000); // let NN do the deletion
      BlockManagerTestUtil.updateState(bm);
      pendingNum = bm.getPendingReplicationBlocksCount();
    }
    assertEquals(pendingNum, 0L);
  } finally {
    cluster.shutdown();
  }
}
Example 12: testDataNodeMXBean
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testDataNodeMXBean() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    List<DataNode> datanodes = cluster.getDataNodes();
    Assert.assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=DataNode,name=DataNodeInfo");
    // get attribute "ClusterId"
    String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
    Assert.assertEquals(datanode.getClusterId(), clusterId);
    // get attribute "Version"
    String version = (String) mbs.getAttribute(mxbeanName, "Version");
    Assert.assertEquals(datanode.getVersion(), version);
    // get attribute "RpcPort"
    String rpcPort = (String) mbs.getAttribute(mxbeanName, "RpcPort");
    Assert.assertEquals(datanode.getRpcPort(), rpcPort);
    // get attribute "HttpPort"
    String httpPort = (String) mbs.getAttribute(mxbeanName, "HttpPort");
    Assert.assertEquals(datanode.getHttpPort(), httpPort);
    // get attribute "NamenodeAddresses"
    String namenodeAddresses = (String) mbs.getAttribute(mxbeanName,
        "NamenodeAddresses");
    Assert.assertEquals(datanode.getNamenodeAddresses(), namenodeAddresses);
    // get attribute "VolumeInfo"
    String volumeInfo = (String) mbs.getAttribute(mxbeanName, "VolumeInfo");
    Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),
        replaceDigits(volumeInfo));
    // Ensure the mxbean's XceiverCount is the same as the DataNode's
    // live value.
    int xceiverCount = (Integer) mbs.getAttribute(mxbeanName,
        "XceiverCount");
    Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
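The replaceDigits helper used above is not shown in the article. A plausible stand-in (an assumption about its behavior: it masks numeric values so that the two VolumeInfo strings can be compared without failing on constantly changing byte counts) could look like this:

// Hypothetical reconstruction of the helper referenced in Example 12:
// masks runs of digits before comparing the two VolumeInfo strings.
private static String replaceDigits(final String s) {
  return s.replaceAll("[0-9]+", "_DIGITS_");
}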
Example 13: testRoundTripAckMetric
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Tests that round-trip acks in a datanode write pipeline are correctly
 * measured.
 */
@Test
public void testRoundTripAckMetric() throws Exception {
  final int datanodeCount = 2;
  final int interval = 1;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
      datanodeCount).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Open a file and get the head of the pipeline
    Path testFile = new Path("/testRoundTripAckMetric.txt");
    FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
    DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
    // Slow down the writes to catch the write pipeline
    dout.setChunksPerPacket(5);
    dout.setArtificialSlowdown(3000);
    fsout.write(new byte[10000]);
    DatanodeInfo[] pipeline = null;
    int count = 0;
    while (pipeline == null && count < 5) {
      pipeline = dout.getPipeline();
      System.out.println("Waiting for pipeline to be created.");
      Thread.sleep(1000);
      count++;
    }
    // Get the head node that should be receiving downstream acks
    DatanodeInfo headInfo = pipeline[0];
    DataNode headNode = null;
    for (DataNode datanode : cluster.getDataNodes()) {
      if (datanode.getDatanodeId().equals(headInfo)) {
        headNode = datanode;
        break;
      }
    }
    assertNotNull("Could not find the head of the datanode write pipeline",
        headNode);
    // Close the file and wait for the metrics to rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the ack was received
    MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics()
        .name());
    assertTrue("Expected non-zero number of acks",
        getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
    assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval
        + "s", dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}