

Java MetricsAsserts Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.test.MetricsAsserts. If you are wondering how MetricsAsserts is used, what it is for, or what real-world code that uses it looks like, the curated class examples below should help.


The MetricsAsserts class belongs to the org.apache.hadoop.test package. The sections below present 12 code examples for the class, sorted by popularity by default.
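
Before the examples, here is a minimal sketch of the pattern nearly all of them share: snapshot a metrics source into a MetricsRecordBuilder via getMetrics, then assert on individual counters and gauges. The source name "NameNodeActivity" and the metric names and values are illustrative assumptions, not taken from the examples below.

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.MetricsAsserts;

public class MetricsAssertsSketch {
  static void checkNameNodeActivity() {
    // Snapshot the registered metrics source into a mock record builder.
    MetricsRecordBuilder rb = MetricsAsserts.getMetrics("NameNodeActivity");
    // Assert on exact values captured in that snapshot (names/values assumed).
    MetricsAsserts.assertCounter("CreateFileOps", 1L, rb);
    MetricsAsserts.assertGauge("FilesTotal", 2L, rb);
  }
}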

Example 1: waitForDnMetricValue

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
/**
 * Wait for the named gauge value from the metrics source to reach the
 * desired value.
 *
 * There's an initial delay then a spin cycle of sleep and poll. Because
 * all the tests use a shared FS instance, these tests are not independent;
 * that's why the initial sleep is in there.
 *
 * @param source metrics source
 * @param name gauge name
 * @param expected expected value
 * @return the last metrics record polled
 * @throws Exception if something went wrong.
 */
private MetricsRecordBuilder waitForDnMetricValue(String source,
                                                  String name,
                                                  long expected)
    throws Exception {
  MetricsRecordBuilder rb;
  long gauge;
  //initial wait.
  waitForDeletion();
  //lots of retries are allowed for slow systems; fast ones will still
  //exit early
  int retries = (DATANODE_COUNT + 1) * WAIT_GAUGE_VALUE_RETRIES;
  rb = getMetrics(source);
  gauge = MetricsAsserts.getLongGauge(name, rb);
  while (gauge != expected && (--retries > 0)) {
    Thread.sleep(DFS_REPLICATION_INTERVAL * 500);
    rb = getMetrics(source);
    gauge = MetricsAsserts.getLongGauge(name, rb);
  }
  //at this point the assertion is valid or the retry count ran out
  assertGauge(name, expected, rb);
  return rb;
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestNameNodeMetrics.java
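
A hedged usage sketch for the helper above; NS_METRICS and the gauge name are assumptions in the spirit of the surrounding test class, not code quoted from it:

// Block until the namesystem reports no blocks pending deletion,
// then reuse the final snapshot for further assertions.
MetricsRecordBuilder rb = waitForDnMetricValue(NS_METRICS, "PendingDeletionBlocks", 0L);
MetricsAsserts.assertGauge("PendingDeletionBlocks", 0L, rb);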

Example 2: testReadWriteOps

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
/**
 * Test NN ReadOps Count and WriteOps Count
 */
@Test
public void testReadWriteOps() throws Exception {
  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  long startWriteCounter = MetricsAsserts.getLongCounter("TransactionsNumOps",
      rb);
  Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");

  //Perform create file operation
  createFile(file1_Path, 1024 * 1024, (short) 2);

  // Perform read file operation on earlier created file
  readFile(fs, file1_Path);
  MetricsRecordBuilder rbNew = getMetrics(NN_METRICS);
  assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", rbNew) >
      startWriteCounter);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestNameNodeMetrics.java
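
TransactionsNumOps only shows that some edit-log transactions were written; to observe the read path separately, one could check a per-operation counter from the same source. A hedged sketch; the counter name "GetBlockLocations" is an assumption about the NameNode metrics source, not part of the example above:

MetricsRecordBuilder rbNew = getMetrics(NN_METRICS);
// readFile should have resolved block locations at least once (name assumed).
assertTrue(MetricsAsserts.getLongCounter("GetBlockLocations", rbNew) > 0);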

Example 3: waitForDnMetricValue

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
/**
 * Wait for the named gauge value from the metrics source to reach the
 * desired value.
 * <p/>
 * There's an initial delay then a spin cycle of sleep and poll. Because
 * all the tests use a shared FS instance, these tests are not independent;
 * that's why the initial sleep is in there.
 *
 * @param source
 *     metrics source
 * @param name
 *     gauge name
 * @param expected
 *     expected value
 * @return the last metrics record polled
 * @throws Exception
 *     if something went wrong.
 */
private MetricsRecordBuilder waitForDnMetricValue(String source, String name,
    long expected) throws Exception {
  MetricsRecordBuilder rb;
  long gauge;
  //initial wait.
  waitForDeletion();
  //lots of retries are allowed for slow systems; fast ones will still
  //exit early
  int retries = (DATANODE_COUNT + 1) * WAIT_GAUGE_VALUE_RETRIES;
  rb = getMetrics(source);
  gauge = MetricsAsserts.getLongGauge(name, rb);
  while (gauge != expected && (--retries > 0)) {
    Thread.sleep(DFS_REPLICATION_INTERVAL * 500);
    rb = getMetrics(source);
    gauge = MetricsAsserts.getLongGauge(name, rb);
  }
  //at this point the assertion is valid or the retry count ran out
  assertGauge(name, expected, rb);
  return rb;
}
 
Developer: hopshadoop, Project: hops, Lines: 39, Source: TestNameNodeMetrics.java

Example 4: testRpcMetrics

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
@Test
public void testRpcMetrics() throws Exception {
  Server server;
  TestRpcService proxy = null;

  final int interval = 1;
  conf.setBoolean(CommonConfigurationKeys.
      RPC_METRICS_QUANTILE_ENABLE, true);
  conf.set(CommonConfigurationKeys.
      RPC_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);

  server = setupTestServer(conf, 5);

  try {
    proxy = getClient(addr, conf);

    for (int i = 0; i < 1000; i++) {
      proxy.ping(null, newEmptyRequest());

      proxy.echo(null, newEchoRequest("" + i));
    }
    MetricsRecordBuilder rpcMetrics =
        getMetrics(server.getRpcMetrics().name());
    assertTrue("Expected non-zero rpc queue time",
        getLongCounter("RpcQueueTimeNumOps", rpcMetrics) > 0);
    assertTrue("Expected non-zero rpc processing time",
        getLongCounter("RpcProcessingTimeNumOps", rpcMetrics) > 0);
    MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s",
        rpcMetrics);
    MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s",
        rpcMetrics);
  } finally {
    stop(server, proxy);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 36, Source: TestRPC.java

Example 5: testMetricsInitializedOnRMInit

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
@Test
public void testMetricsInitializedOnRMInit() {
  YarnConfiguration conf = new YarnConfiguration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER,
    FifoScheduler.class, ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
  checkApps(metrics, 0, 0, 0, 0, 0, 0, true);
  MetricsAsserts.assertGauge("ReservedContainers", 0, metrics);
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: TestQueueMetrics.java

Example 6: testCapacityMetrics

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
/**
 * Test that capacity metrics are exported and pass
 * basic sanity tests.
 */
@Test (timeout = 1800)
public void testCapacityMetrics() throws Exception {
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  long capacityTotal = MetricsAsserts.getLongGauge("CapacityTotal", rb);
  assert(capacityTotal != 0);
  long capacityUsed = MetricsAsserts.getLongGauge("CapacityUsed", rb);
  long capacityRemaining =
      MetricsAsserts.getLongGauge("CapacityRemaining", rb);
  long capacityUsedNonDFS =
      MetricsAsserts.getLongGauge("CapacityUsedNonDFS", rb);
  assert(capacityUsed + capacityRemaining + capacityUsedNonDFS ==
      capacityTotal);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestNameNodeMetrics.java
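
One caveat: plain Java assert statements are no-ops unless the JVM runs with -ea, so the checks above may pass vacuously. A hedged rewrite of the same checks using JUnit assertions instead:

long capacityTotal = MetricsAsserts.getLongGauge("CapacityTotal", rb);
assertTrue("expected a non-zero total capacity", capacityTotal != 0);
// Used, remaining, and non-DFS space should account for the whole capacity.
assertEquals(capacityTotal,
    capacityUsed + capacityRemaining + capacityUsedNonDFS);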

Example 7: testTransactionAndCheckpointMetrics

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
/**
 * Test NN checkpoint and transaction-related metrics.
 */
@Test
public void testTransactionAndCheckpointMetrics() throws Exception {
  long lastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
      getMetrics(NS_METRICS));
  
  assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
  assertGauge("LastWrittenTransactionId", 1L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
  
  fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
  updateMetrics();
  
  assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
  assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", 2L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS));
  
  cluster.getNameNodeRpc().rollEditLog();
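  // Rolling the edit log closes the current segment and opens a new one,
  // writing two more transactions, so LastWrittenTransactionId jumps from
  // 2 to 4 below while TransactionsSinceLastLogRoll resets to 1.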
  updateMetrics();
  
  assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
  assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
  
  cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNodeRpc().saveNamespace();
  cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
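  // Saving the namespace writes a checkpoint, so LastCheckpointTime advances
  // and TransactionsSinceLastCheckpoint resets, as verified below.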
  updateMetrics();
  
  long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
      getMetrics(NS_METRICS));
  assertTrue(lastCkptTime < newLastCkptTime);
  assertGauge("LastWrittenTransactionId", 6L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: TestNameNodeMetrics.java

Example 8: testJournal

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
@Test(timeout=100000)
public void testJournal() throws Exception {
  MetricsRecordBuilder metrics = MetricsAsserts.getMetrics(
      journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 0L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);

  IPCLoggerChannel ch = new IPCLoggerChannel(
      conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L, 1, 1, "hello".getBytes(Charsets.UTF_8)).get();
  
  metrics = MetricsAsserts.getMetrics(
      journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 1L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);

  ch.setCommittedTxId(100L);
  ch.sendEdits(1L, 2, 1, "goodbye".getBytes(Charsets.UTF_8)).get();
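  // The channel now claims txid 100 is committed while only txns 1-2 have
  // been written to this journal, so the batch above counts as lagging and
  // CurrentLagTxns becomes 100 - 2 = 98.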

  metrics = MetricsAsserts.getMetrics(
      journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 2L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 1L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 98L, metrics);

}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestJournalNode.java

Example 9: testRpcMetrics

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
@Test
public void testRpcMetrics() throws Exception {
  Configuration configuration = new Configuration();
  final int interval = 1;
  configuration.setBoolean(CommonConfigurationKeys.
      RPC_METRICS_QUANTILE_ENABLE, true);
  configuration.set(CommonConfigurationKeys.
      RPC_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  final Server server = new RPC.Builder(configuration)
      .setProtocol(TestProtocol.class).setInstance(new TestImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .build();
  server.start();
  final TestProtocol proxy = RPC.getProxy(TestProtocol.class,
      TestProtocol.versionID, server.getListenerAddress(), configuration);
  try {
    for (int i = 0; i < 1000; i++) {
      proxy.ping();
      proxy.echo("" + i);
    }
    MetricsRecordBuilder rpcMetrics =
        getMetrics(server.getRpcMetrics().name());
    assertTrue("Expected non-zero rpc queue time",
        getLongCounter("RpcQueueTimeNumOps", rpcMetrics) > 0);
    assertTrue("Expected non-zero rpc processing time",
        getLongCounter("RpcProcessingTimeNumOps", rpcMetrics) > 0);
    MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s",
        rpcMetrics);
    MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s",
        rpcMetrics);
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: TestRPC.java

Example 10: testTransactionAndCheckpointMetrics

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
/**
 * Test NN checkpoint and transaction-related metrics.
 */
@Test
public void testTransactionAndCheckpointMetrics() throws Exception {
  long lastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
      getMetrics(NS_METRICS));
  
  assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
  assertGauge("LastWrittenTransactionId", 1L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
  
  fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
  
  assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
  assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", 2L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS));
  
  cluster.getNameNodeRpc().rollEditLog();
  
  assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
  assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
  
  cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNodeRpc().saveNamespace(0, 0);
  cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  
  long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
      getMetrics(NS_METRICS));
  assertTrue(lastCkptTime < newLastCkptTime);
  assertGauge("LastWrittenTransactionId", 6L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
  assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 39, Source: TestNameNodeMetrics.java

Example 11: testJournal

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
@Test(timeout=100000)
public void testJournal() throws Exception {
  MetricsRecordBuilder metrics = MetricsAsserts.getMetrics(
      journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 0L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
  MetricsAsserts.assertGauge("LastJournalTimestamp", 0L, metrics);

  long beginTimestamp = System.currentTimeMillis();
  IPCLoggerChannel ch = new IPCLoggerChannel(
      conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L, 1, 1, "hello".getBytes(Charsets.UTF_8)).get();
  
  metrics = MetricsAsserts.getMetrics(
      journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 1L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
  long lastJournalTimestamp = MetricsAsserts.getLongGauge(
      "LastJournalTimestamp", metrics);
  assertTrue(lastJournalTimestamp > beginTimestamp);
  beginTimestamp = lastJournalTimestamp;

  ch.setCommittedTxId(100L);
  ch.sendEdits(1L, 2, 1, "goodbye".getBytes(Charsets.UTF_8)).get();

  metrics = MetricsAsserts.getMetrics(
      journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 2L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 1L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 98L, metrics);
  lastJournalTimestamp = MetricsAsserts.getLongGauge(
      "LastJournalTimestamp", metrics);
  assertTrue(lastJournalTimestamp > beginTimestamp);

}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 41, Source: TestJournalNode.java

Example 12: testJournal

import org.apache.hadoop.test.MetricsAsserts; // import the required package/class
@Test(timeout=100000)
public void testJournal() throws Exception {
  MetricsRecordBuilder metrics = MetricsAsserts.getMetrics(
      journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 0L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);

  IPCLoggerChannel ch = new IPCLoggerChannel(
      conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1).get();
  ch.sendEdits(1L, 1, 1, "hello".getBytes(Charsets.UTF_8)).get();
  
  metrics = MetricsAsserts.getMetrics(
      journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 1L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);

  ch.setCommittedTxId(100L);
  ch.sendEdits(1L, 2, 1, "goodbye".getBytes(Charsets.UTF_8)).get();

  metrics = MetricsAsserts.getMetrics(
      journal.getMetricsForTests().getName());
  MetricsAsserts.assertCounter("BatchesWritten", 2L, metrics);
  MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 1L, metrics);
  MetricsAsserts.assertGauge("CurrentLagTxns", 98L, metrics);

}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 32, Source: TestJournalNode.java


Note: The org.apache.hadoop.test.MetricsAsserts class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright remains with the original authors; for distribution and use, consult the corresponding project's license. Do not reproduce without permission.