本文整理汇总了Java中org.apache.hadoop.hdfs.tools.JMXGet类的典型用法代码示例。如果您正苦于以下问题:Java JMXGet类的具体用法?Java JMXGet怎么用?Java JMXGet使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
JMXGet类属于org.apache.hadoop.hdfs.tools包,在下文中一共展示了JMXGet类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: checkPrintAllValues
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Checks that {@link JMXGet#printAllValues()} emits the expected
 * "List of all the available keys:" header on stderr.
 *
 * @param jmx an initialized JMXGet instance to exercise
 * @return true if the captured stderr output contains the header line
 * @throws Exception on pipe setup/teardown failures
 */
private static boolean checkPrintAllValues(JMXGet jmx) throws Exception {
  int size = 0;
  byte[] bytes = null;
  String pattern = "List of all the available keys:";
  PipedOutputStream pipeOut = new PipedOutputStream();
  PipedInputStream pipeIn = new PipedInputStream(pipeOut);
  // Remember the real stderr so it can be restored; without this, every
  // later test in the JVM keeps writing into the (closed) pipe.
  PrintStream oldErr = System.err;
  // autoFlush=true so printed bytes are visible to pipeIn.available()
  // immediately; a default PrintStream may still be buffering them.
  System.setErr(new PrintStream(pipeOut, true));
  try {
    jmx.printAllValues();
    if ((size = pipeIn.available()) != 0) {
      bytes = new byte[size];
      pipeIn.read(bytes, 0, bytes.length);
    }
    pipeOut.close();
    pipeIn.close();
  } finally {
    // Always restore, even if printAllValues throws.
    System.setErr(oldErr);
  }
  return bytes != null ? new String(bytes).contains(pattern) : false;
}
示例2: testDataNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the DataNode: the BytesWritten metric must
 * match the file just written, and all DataNode MBeans must be
 * unregistered once the cluster shuts down.
 */
@Test
public void testDataNode() throws Exception {
  final int datanodeCount = 2;
  cluster = new MiniDFSCluster.Builder(config)
      .numDataNodes(datanodeCount).build();
  cluster.waitActive();
  writeFile(cluster.getFileSystem(), new Path("/test"), 2);

  final String serviceName = "DataNode";
  JMXGet jmxGet = new JMXGet();
  jmxGet.setService(serviceName);
  jmxGet.init();
  assertEquals(fileSize, Integer.parseInt(jmxGet.getValue("BytesWritten")));

  cluster.shutdown();
  // After shutdown no DataNode beans may remain registered.
  MBeanServerConnection connection = ManagementFactory.getPlatformMBeanServer();
  Set<ObjectName> remaining = connection.queryNames(
      new ObjectName("Hadoop:service=" + serviceName + ",*"), null);
  assertTrue("No beans should be registered for " + serviceName,
      remaining.isEmpty());
}
示例3: checkPrintAllValues
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Checks that {@link JMXGet#printAllValues()} emits the expected
 * "List of all the available keys:" header on stderr, restoring the real
 * stderr afterwards.
 *
 * @param jmx an initialized JMXGet instance to exercise
 * @return true if the captured stderr output contains the header line
 * @throws Exception on pipe setup/teardown failures
 */
private static boolean checkPrintAllValues(JMXGet jmx) throws Exception {
  int size = 0;
  byte[] bytes = null;
  String pattern = "List of all the available keys:";
  PipedOutputStream pipeOut = new PipedOutputStream();
  PipedInputStream pipeIn = new PipedInputStream(pipeOut);
  PrintStream oldErr = System.err;
  // Keep a handle on the capture stream; autoFlush=true so printed bytes
  // are visible to pipeIn.available() without an explicit flush (a default
  // PrintStream may still be buffering them when available() is polled).
  PrintStream capture = new PrintStream(pipeOut, true);
  System.setErr(capture);
  try {
    jmx.printAllValues();
    if ((size = pipeIn.available()) != 0) {
      bytes = new byte[size];
      pipeIn.read(bytes, 0, bytes.length);
    }
  } finally {
    // Restore stderr first, then release both ends of the pipe even when
    // printAllValues throws (closing the PrintStream also closes pipeOut).
    System.setErr(oldErr);
    capture.close();
    pipeIn.close();
  }
  return bytes != null ? new String(bytes).contains(pattern) : false;
}
示例4: testDataNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the DataNode: waits for the BytesWritten
 * metric to catch up with the file just written, then checks that the
 * DataNode MBeans disappear after the cluster shuts down.
 */
@Test
public void testDataNode() throws Exception {
  final int datanodeCount = 2;
  cluster = new MiniDFSCluster.Builder(config)
      .numDataNodes(datanodeCount).build();
  cluster.waitActive();
  DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/test"),
      fileSize, fileSize, blockSize, (short) 2, seed);

  final String serviceName = "DataNode";
  JMXGet jmxGet = new JMXGet();
  jmxGet.setService(serviceName);
  jmxGet.init();
  // Metrics update asynchronously; block until the write is reflected.
  DFSTestUtil.waitForMetric(jmxGet, "BytesWritten", fileSize);
  assertEquals(fileSize, Integer.parseInt(jmxGet.getValue("BytesWritten")));

  cluster.shutdown();
  // After shutdown no DataNode beans may remain registered.
  MBeanServerConnection connection = ManagementFactory.getPlatformMBeanServer();
  Set<ObjectName> remaining = connection.queryNames(
      new ObjectName("Hadoop:service=" + serviceName + ",*"), null);
  assertTrue("No beans should be registered for " + serviceName,
      remaining.isEmpty());
}
示例5: waitForMetric
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Blocks until the named JMX metric reaches the expected value.
 * Polls once per second with no effective timeout (Integer.MAX_VALUE ms),
 * so the caller's own test timeout is the only bound.
 *
 * @param jmx initialized JMXGet handle to query
 * @param metricName JMX attribute name read via jmx.getValue()
 * @param expectedValue value to wait for (compared as int)
 * @throws TimeoutException propagated from GenericTestUtils.waitFor
 * @throws InterruptedException if the polling wait is interrupted
 */
public static void waitForMetric(final JMXGet jmx, final String metricName, final int expectedValue)
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
// Re-read the metric on every poll and log progress for debugging.
final int currentValue = Integer.parseInt(jmx.getValue(metricName));
LOG.info("Waiting for " + metricName +
" to reach value " + expectedValue +
", current value = " + currentValue);
return currentValue == expectedValue;
} catch (Exception e) {
// JMX/parse failures are unexpected here; surface them unchecked so
// the waitFor loop aborts instead of silently retrying forever.
throw new UnhandledException("Test failed due to unexpected exception", e);
}
}
}, 1000, Integer.MAX_VALUE);
}
示例6: testNameNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the NameNode by cross-checking a few
 * attributes against values obtained from other sources.
 */
public void testNameNode() throws Exception {
  final int datanodeCount = 2;
  cluster = new MiniDFSCluster(0, config, datanodeCount, true, true, null,
      null, null);
  cluster.waitActive();
  writeFile(cluster.getFileSystem(), new Path("/test1"), 2);

  JMXGet jmxGet = new JMXGet();
  jmxGet.init();

  // Independent source for the corrupt-block count, read straight off the
  // NameNode's own metrics object.
  int corruptedBlocks = NameNode.getNameNodeMetrics().numBlocksCorrupted.get();
  assertEquals(Integer.parseInt(jmxGet.getValue("NumLiveDataNodes")), 2);
  assertEquals(Integer.parseInt(jmxGet.getValue("BlocksCorrupted")),
      corruptedBlocks);
  assertEquals(Long.parseLong(jmxGet.getValue("NumOpenConnectionsAvgTime")), 0);

  cluster.shutdown();
}
示例7: testDataNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the DataNode by reading one metric from
 * the "DataNode" service.
 */
public void testDataNode() throws Exception {
  final int datanodeCount = 2;
  cluster = new MiniDFSCluster(0, config, datanodeCount, true, true, null,
      null, null);
  cluster.waitActive();
  writeFile(cluster.getFileSystem(), new Path("/test"), 2);

  JMXGet jmxGet = new JMXGet();
  jmxGet.setService("DataNode");
  jmxGet.init();
  assertEquals(Integer.parseInt(jmxGet.getValue("bytes_written")), 0);

  cluster.shutdown();
}
示例8: testNameNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the NameNode (default service): live
 * datanode count, corrupt-block gauge, and open-connection count.
 */
@Test
public void testNameNode() throws Exception {
  final int datanodeCount = 2;
  cluster = new MiniDFSCluster.Builder(config)
      .numDataNodes(datanodeCount).build();
  cluster.waitActive();
  writeFile(cluster.getFileSystem(), new Path("/test1"), 2);

  JMXGet jmxGet = new JMXGet();
  jmxGet.init(); // with no service set, init lists NameNode mbeans only

  assertEquals(datanodeCount,
      Integer.parseInt(jmxGet.getValue("NumLiveDataNodes")));
  assertGauge("CorruptBlocks",
      Long.parseLong(jmxGet.getValue("CorruptBlocks")),
      getMetrics("FSNamesystem"));
  assertEquals(datanodeCount,
      Integer.parseInt(jmxGet.getValue("NumOpenConnections")));

  cluster.shutdown();
}
示例9: testDataNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the DataNode: the BytesWritten metric must
 * match the size of the file just written.
 */
@Test
public void testDataNode() throws Exception {
  final int datanodeCount = 2;
  cluster = new MiniDFSCluster.Builder(config)
      .numDataNodes(datanodeCount).build();
  cluster.waitActive();
  writeFile(cluster.getFileSystem(), new Path("/test"), 2);

  JMXGet jmxGet = new JMXGet();
  jmxGet.setService("DataNode");
  jmxGet.init();
  assertEquals(fileSize, Integer.parseInt(jmxGet.getValue("BytesWritten")));

  cluster.shutdown();
}
示例10: testDataNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the DataNode: the BytesWritten metric must
 * reach the size of the file just written.
 *
 * @throws Exception on any cluster/JMX failure
 */
@Test
public void testDataNode() throws Exception {
  int numDatanodes = 2;
  cluster =
      new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  writeFile(cluster.getFileSystem(), new Path("/test"), 2);

  JMXGet jmx = new JMXGet();
  jmx.setService("DataNode");
  jmx.init();

  // Metrics are published asynchronously. Poll within the original 15s
  // budget instead of sleeping blindly, so the test completes as soon as
  // the metric catches up rather than always burning 15 seconds.
  long deadline = System.currentTimeMillis() + 15000;
  while (Integer.parseInt(jmx.getValue("BytesWritten")) != fileSize
      && System.currentTimeMillis() < deadline) {
    Thread.sleep(200);
  }
  assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
  cluster.shutdown();
}
示例11: testNameNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the NameNode: live datanode count,
 * corrupt-block count (cross-checked against the NameNode's own metrics
 * object), and open-connection count.
 *
 * @throws Exception on any cluster/JMX failure
 */
public void testNameNode() throws Exception {
int numDatanodes = 2;
cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
cluster.waitActive();
writeFile(cluster.getFileSystem(), new Path("/test1"), 2);
JMXGet jmx = new JMXGet();
jmx.init();
// Independent source for the same value, read straight off the NameNode
// rather than through JMX.
int blocks_corrupted = NameNode.getNameNodeMetrics().
numBlocksCorrupted.get();
assertEquals(Integer.parseInt(
jmx.getValue("NumLiveDataNodes")), 2);
assertEquals(Integer.parseInt(
jmx.getValue("BlocksCorrupted")), blocks_corrupted);
assertEquals(Integer.parseInt(
jmx.getValue("NumOpenConnections")), 0);
cluster.shutdown();
}
示例12: testNameNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the NameNode (legacy MiniDFSCluster
 * constructor): live datanode count, corrupt-block count (cross-checked
 * against the NameNode's own metrics object), and open-connection count.
 *
 * @throws Exception on any cluster/JMX failure
 */
public void testNameNode() throws Exception {
int numDatanodes = 2;
cluster = new MiniDFSCluster(0, config, numDatanodes, true, true, null,
null, null);
cluster.waitActive();
writeFile(cluster.getFileSystem(), new Path("/test1"), 2);
JMXGet jmx = new JMXGet();
jmx.init();
// Independent source for the same value, read straight off the NameNode
// rather than through JMX.
int blocks_corrupted = NameNode.getNameNodeMetrics().
numBlocksCorrupted.get();
assertEquals(Integer.parseInt(
jmx.getValue("NumLiveDataNodes")), 2);
assertEquals(Integer.parseInt(
jmx.getValue("BlocksCorrupted")), blocks_corrupted);
assertEquals(Integer.parseInt(
jmx.getValue("NumOpenConnections")), 0);
cluster.shutdown();
}
示例13: testNameNode
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Verifies JMX connectivity to the NameNode: printAllValues output, a
 * handful of attributes cross-checked against other metric sources, and
 * that all NameNode MBeans are unregistered after the cluster shuts down.
 */
@Test
public void testNameNode() throws Exception {
  final int datanodeCount = 2;
  cluster = new MiniDFSCluster.Builder(config)
      .numDataNodes(datanodeCount).build();
  cluster.waitActive();
  writeFile(cluster.getFileSystem(), new Path("/test1"), 2);

  final String serviceName = "NameNode";
  JMXGet jmxGet = new JMXGet();
  jmxGet.setService(serviceName);
  jmxGet.init(); // default lists namenode mbeans only
  assertTrue("error printAllValues", checkPrintAllValues(jmxGet));

  assertEquals(datanodeCount,
      Integer.parseInt(jmxGet.getValue("NumLiveDataNodes")));
  assertGauge("CorruptBlocks",
      Long.parseLong(jmxGet.getValue("CorruptBlocks")),
      getMetrics("FSNamesystem"));
  assertEquals(datanodeCount,
      Integer.parseInt(jmxGet.getValue("NumOpenConnections")));

  cluster.shutdown();
  // After shutdown no NameNode beans may remain registered.
  MBeanServerConnection connection = ManagementFactory.getPlatformMBeanServer();
  Set<ObjectName> remaining = connection.queryNames(
      new ObjectName("Hadoop:service=" + serviceName + ",*"), null);
  assertTrue("No beans should be registered for " + serviceName,
      remaining.isEmpty());
}
示例14: checkOutput
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Runs the tool identified by {@code clazz} with the given args, capturing
 * whichever standard stream {@code out} designates through a pipe, and
 * asserts the captured output contains {@code pattern}.
 *
 * @param args command-line arguments passed to the tool's main/driver
 * @param pattern substring expected in the captured output
 * @param out System.out or System.err, selecting which stream to capture
 * @param clazz tool class deciding which expect* helper is invoked
 */
private void checkOutput(String[] args, String pattern, PrintStream out,
    Class<?> clazz) {
  ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
  // Remember the real streams so they can be restored; the original code
  // left the JVM pointing at a closed pipe for the rest of the test run.
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  try {
    PipedOutputStream pipeOut = new PipedOutputStream();
    PipedInputStream pipeIn = new PipedInputStream(pipeOut, PIPE_BUFFER_SIZE);
    PrintStream capture = new PrintStream(pipeOut);
    if (out == System.out) {
      System.setOut(capture);
    } else if (out == System.err) {
      System.setErr(capture);
    }
    if (clazz == DelegationTokenFetcher.class) {
      expectDelegationTokenFetcherExit(args);
    } else if (clazz == JMXGet.class) {
      expectJMXGetExit(args);
    } else if (clazz == DFSAdmin.class) {
      expectDfsAdminPrint(args);
    }
    // Close the PrintStream (not the raw pipe) so its buffered output is
    // flushed into the pipe before the copy drains it to EOF.
    capture.close();
    ByteStreams.copy(pipeIn, outBytes);
    pipeIn.close();
    assertTrue(new String(outBytes.toByteArray()).contains(pattern));
  } catch (Exception ex) {
    fail("checkOutput error " + ex);
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
  }
}
示例15: expectJMXGetExit
import org.apache.hadoop.hdfs.tools.JMXGet; //导入依赖的package包/类
/**
 * Runs JMXGet.main with the given args and asserts it attempts to exit
 * (surfaced as ExitException under ExitUtil's no-exit test mode), then
 * clears the recorded exit so later checks start clean.
 */
private static void expectJMXGetExit(String[] args) {
  try {
    JMXGet.main(args);
  } catch (ExitException expected) {
    ExitUtil.resetFirstExitException();
    return;
  } catch (Exception ex) {
    fail("expectJMXGetExit ex error " + ex);
    return;
  }
  // main returned normally: the expected exit never happened.
  fail("should call exit");
}