

Java MetricsSystemImpl Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.metrics2.impl.MetricsSystemImpl. If you are wondering what MetricsSystemImpl does, how to use it, or what real-world usage looks like, the curated examples below should help.


MetricsSystemImpl belongs to the org.apache.hadoop.metrics2.impl package. Fourteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you find useful; that feedback helps the site recommend better Java examples.
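Before the collected examples, here is a minimal, self-contained sketch of the MetricsSystemImpl lifecycle that recurs throughout the snippets below: construct with a prefix, start, register an annotated source, publish, then stop and shut down. The prefix, source class, and metric names are illustrative assumptions, not taken from any of the projects cited on this page.

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

public class MetricsSystemImplDemo {

  // Hypothetical metrics source; the annotation values are illustrative only.
  @Metrics(name = "DemoMetrics", context = "demo")
  static class DemoMetrics {
    @Metric("Number of processed requests")
    MutableCounterLong requests;
  }

  public static void main(String[] args) {
    // Create and start the metrics system under an illustrative prefix,
    // mirroring the pattern used in the test snippets below.
    MetricsSystemImpl ms = new MetricsSystemImpl("demo");
    ms.start();

    // Registering the source lets the system inject and sample its @Metric fields.
    DemoMetrics metrics = ms.register("DemoSource", "Demo metrics source", new DemoMetrics());
    metrics.requests.incr();

    ms.publishMetricsNow(); // force an immediate snapshot to any configured sinks
    ms.stop();
    ms.shutdown();
  }
}

In production code the process-wide DefaultMetricsSystem singleton is normally used instead of instantiating MetricsSystemImpl directly; the tests below create fresh instances so that each test runs against an isolated metrics system.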

Example 1: testShuffleMetrics

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
/**
 * Validate shuffle connection and input/output metrics.
 *
 * @throws Exception exception
 */
@Test (timeout = 10000)
public void testShuffleMetrics() throws Exception {
  MetricsSystem ms = new MetricsSystemImpl();
  ShuffleHandler sh = new ShuffleHandler(ms);
  ChannelFuture cf = make(stub(ChannelFuture.class).
      returning(true, false).from.isSuccess());

  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(1*MiB);
  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(2*MiB);

  checkShuffleMetrics(ms, 3*MiB, 0, 0, 2);

  sh.metrics.operationComplete(cf);
  sh.metrics.operationComplete(cf);

  checkShuffleMetrics(ms, 3*MiB, 1, 1, 0);
}
 
Developer: naver; Project: hadoop; Lines: 25; Source file: TestShuffleHandler.java

Example 2: testShuffleMetrics

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
@Test (timeout = 10000)
public void testShuffleMetrics() throws Exception {
  MetricsSystem ms = new MetricsSystemImpl();
  ShuffleHandler sh = new ShuffleHandler(ms);
  ChannelFuture cf = make(stub(ChannelFuture.class).
      returning(true, false).from.isSuccess());

  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(1*MiB);
  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(2*MiB);

  checkShuffleMetrics(ms, 3*MiB, 0, 0, 2);

  sh.metrics.operationComplete(cf);
  sh.metrics.operationComplete(cf);

  checkShuffleMetrics(ms, 3*MiB, 1, 1, 0);
}
 
Developer: ict-carch; Project: hadoop-plus; Lines: 20; Source file: TestShuffleHandler.java

Example 3: testShuffleMetrics

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
/**
 * Validate shuffle connection and input/output metrics.
 *
 * @throws Exception exception
 */
@Test (timeout = 10000)
public void testShuffleMetrics() throws Exception {
  MetricsSystem ms = new MetricsSystemImpl();
  ShuffleHandler sh = new ShuffleHandler(ms);
  ChannelFuture cf = mock(ChannelFuture.class);
  when(cf.isSuccess()).thenReturn(true, false);

  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(1*MiB);
  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(2*MiB);

  checkShuffleMetrics(ms, 3*MiB, 0, 0, 2);

  sh.metrics.operationComplete(cf);
  sh.metrics.operationComplete(cf);

  checkShuffleMetrics(ms, 3*MiB, 1, 1, 0);
}
 
Developer: apache; Project: tez; Lines: 25; Source file: TestShuffleHandler.java

Example 4: initMetricsSystem

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
/**
 * Set up the metrics system, start it, and return it.
 * @param path the base path for the sink
 * @param ignoreErrors whether the sink should ignore errors
 * @param allowAppend whether the sink is allowed to append to existing files
 * @param useSecureParams whether to set the principal and keytab properties
 * @return the org.apache.hadoop.metrics2.MetricsSystem
 */
protected MetricsSystem initMetricsSystem(String path, boolean ignoreErrors,
    boolean allowAppend, boolean useSecureParams) {
  // If the prefix is not lower case, the metrics system won't be able to
  // read any of the properties.
  String prefix = methodName.getMethodName().toLowerCase();

  ConfigBuilder builder = new ConfigBuilder().add("*.period", 10000)
      .add(prefix + ".sink.mysink0.class", MockSink.class.getName())
      .add(prefix + ".sink.mysink0.basepath", path)
      .add(prefix + ".sink.mysink0.source", "testsrc")
      .add(prefix + ".sink.mysink0.context", "test1")
      .add(prefix + ".sink.mysink0.ignore-error", ignoreErrors)
      .add(prefix + ".sink.mysink0.allow-append", allowAppend);

  if (useSecureParams) {
    builder.add(prefix + ".sink.mysink0.keytab-key", SINK_KEYTAB_FILE_KEY)
        .add(prefix + ".sink.mysink0.principal-key", SINK_PRINCIPAL_KEY);
  }

  builder.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-" + prefix));

  MetricsSystemImpl ms = new MetricsSystemImpl(prefix);

  ms.start();

  return ms;
}
 
Developer: nucypher; Project: hadoop-oss; Lines: 36; Source file: RollingFileSystemSinkTestBase.java
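For context, a subclass test typically calls the helper above, registers a source, and forces a flush before inspecting what the sink wrote. The sketch below is a hedged illustration of that flow; the test name, the path variable, and the DemoMetrics source (defined in the introductory sketch) are assumptions, not code from RollingFileSystemSinkTestBase.

// Hypothetical usage of initMetricsSystem(); names are illustrative only.
@Test
public void testWriteToRollingSink() throws Exception {
  String path = methodDir.getAbsolutePath();  // assumed temp directory for this test
  MetricsSystem ms = initMetricsSystem(path, false, false, false);
  try {
    DemoMetrics metrics = ms.register("testsrc", "test source", new DemoMetrics());
    metrics.requests.incr();
    ms.publishMetricsNow();  // flush the record to the MockSink under the base path
  } finally {
    ms.stop();
    ms.shutdown();
  }
}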

Example 5: testMetricsCache

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
@Test 
public void testMetricsCache() {
  MetricsSystem ms = new MetricsSystemImpl("cache");
  ms.start();
  
  try {
    String p1 = "root1";
    String leafQueueName = "root1.leaf";

    QueueMetrics p1Metrics =
        QueueMetrics.forQueue(ms, p1, null, true, conf);
    Queue parentQueue1 = make(stub(Queue.class).returning(p1Metrics).
        from.getMetrics());
    QueueMetrics metrics =
        QueueMetrics.forQueue(ms, leafQueueName, parentQueue1, true, conf);

    Assert.assertNotNull("QueueMetrics for A shoudn't be null", metrics);

    // Re-register to check for cache hit, shouldn't blow up metrics-system...
    // also, verify parent-metrics
    QueueMetrics alterMetrics =
        QueueMetrics.forQueue(ms, leafQueueName, parentQueue1, true, conf);

    Assert.assertNotNull("QueueMetrics for alterMetrics shoudn't be null", 
        alterMetrics);
  } finally {
    ms.shutdown();
  }
}
 
Developer: naver; Project: hadoop; Lines: 30; Source file: TestQueueMetrics.java

Example 6: disableLogs

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    GenericTestUtils.disableLog(LogFactory.getLog(n));
  }
  
  GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
  GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
  GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
  GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
  GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));
  
  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
 
Developer: naver; Project: hadoop; Lines: 28; Source file: SnapshotTestHelper.java

Example 7: fileSystemStarted

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
public static synchronized void fileSystemStarted() {
  if (numFileSystems == 0) {
    instance = new MetricsSystemImpl();
    instance.init("azure-file-system");
  }
  numFileSystems++;
}
 
Developer: naver; Project: hadoop; Lines: 8; Source file: AzureFileSystemMetricsSystem.java
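The helper above reference-counts file-system instances so the metrics system is created only once. A matching shutdown path would decrement the counter and tear the system down when it reaches zero; the sketch below is an assumption about how such a counterpart could look, not the actual method from AzureFileSystemMetricsSystem.

// Hypothetical counterpart to fileSystemStarted(); the real Hadoop Azure code may differ.
public static synchronized void fileSystemClosed() {
  if (instance != null) {
    instance.publishMetricsNow();
  }
  numFileSystems--;
  if (numFileSystems == 0 && instance != null) {
    instance.stop();
    instance.shutdown();
    instance = null;
  }
}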

Example 8: testContainerMetricsFinished

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
@Test
public void testContainerMetricsFinished() throws InterruptedException {
  MetricsSystemImpl system = new MetricsSystemImpl();
  system.init("test");
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  ApplicationId appId = ApplicationId.newInstance(1234, 3);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 4);
  ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1);
  ContainerMetrics metrics1 = ContainerMetrics.forContainer(system,
      containerId1, 1, 0);
  ContainerId containerId2 = ContainerId.newContainerId(appAttemptId, 2);
  ContainerMetrics metrics2 = ContainerMetrics.forContainer(system,
      containerId2, 1, 0);
  ContainerId containerId3 = ContainerId.newContainerId(appAttemptId, 3);
  ContainerMetrics metrics3 = ContainerMetrics.forContainer(system,
      containerId3, 1, 0);
  metrics1.finished();
  metrics2.finished();
  system.sampleMetrics();
  system.sampleMetrics();
  Thread.sleep(100);
  system.stop();
  // verify metrics1 is unregistered
  assertTrue(metrics1 != ContainerMetrics.forContainer(
      system, containerId1, 1, 0));
  // verify metrics2 is unregistered
  assertTrue(metrics2 != ContainerMetrics.forContainer(
      system, containerId2, 1, 0));
  // verify metrics3 is still registered
  assertTrue(metrics3 == ContainerMetrics.forContainer(
      system, containerId3, 1, 0));
  system.shutdown();
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 35; Source file: TestContainerMetrics.java

Example 9: disableLogs

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    setLevel2OFF(LogFactory.getLog(n));
  }
  
  setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
  setLevel2OFF(LogFactory.getLog(BlockManager.class));
  setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
  setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
  setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));
  
  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
 
Developer: ict-carch; Project: hadoop-plus; Lines: 28; Source file: SnapshotTestHelper.java

Example 10: disableLogs

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    setLevel2OFF(LogFactory.getLog(n));
  }
  
  setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
  setLevel2OFF(LogFactory.getLog(BlockManager.class));
  setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
  setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
  setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));
  
  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer2.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}
 
Developer: yncxcw; Project: FlexMap; Lines: 28; Source file: SnapshotTestHelper.java

Example 11: testContainerMetricsFinished

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
@Test
public void testContainerMetricsFinished() throws InterruptedException {
  MetricsSystemImpl system = new MetricsSystemImpl();
  system.init("test");
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  ApplicationId appId = ApplicationId.newInstance(1234, 3);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 4);
  ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1);
  ContainerMetrics metrics1 = ContainerMetrics.forContainer(system,
      containerId1, 1, 0);
  ContainerId containerId2 = ContainerId.newContainerId(appAttemptId, 2);
  ContainerMetrics metrics2 = ContainerMetrics.forContainer(system,
      containerId2, 1, 0);
  ContainerId containerId3 = ContainerId.newContainerId(appAttemptId, 3);
  ContainerMetrics metrics3 = ContainerMetrics.forContainer(system,
      containerId3, 1, 0);
  metrics1.finished();
  metrics2.finished();
  system.sampleMetrics();
  system.sampleMetrics();
  Thread.sleep(100);
  // verify metrics1 is unregistered
  assertTrue(metrics1 != ContainerMetrics.forContainer(
      system, containerId1, 1, 0));
  // verify metrics2 is unregistered
  assertTrue(metrics2 != ContainerMetrics.forContainer(
      system, containerId2, 1, 0));
  // verify metrics3 is still registered
  assertTrue(metrics3 == ContainerMetrics.forContainer(
      system, containerId3, 1, 0));
  // YARN-5190: move stop() to the end to verify registering containerId1 and
  // containerId2 won't get MetricsException thrown.
  system.stop();
  system.shutdown();
}
 
Developer: hopshadoop; Project: hops; Lines: 37; Source file: TestContainerMetrics.java

Example 12: testMetricsCache

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
@Test
public void testMetricsCache() {
  MetricsSystem ms = new MetricsSystemImpl("cache");
  ms.start();
  
  try {
    String p1 = "root1";
    String leafQueueName = "root1.leaf";
    
    QueueMetrics p1Metrics =
        QueueMetrics.forQueue(ms, p1, null, true, conf);
    Queue parentQueue1 = make(stub(Queue.class).returning(p1Metrics).
        from.getMetrics());
    QueueMetrics metrics =
        QueueMetrics.forQueue(ms, leafQueueName, parentQueue1, true, conf);
    
    Assert.assertNotNull("QueueMetrics for A shoudn't be null", metrics);
    
    // Re-register to check for cache hit, shouldn't blow up metrics-system...
    // also, verify parent-metrics
    QueueMetrics alterMetrics =
        QueueMetrics.forQueue(ms, leafQueueName, parentQueue1, true, conf);
    
    Assert.assertNotNull("QueueMetrics for alterMetrics shoudn't be null",
        alterMetrics);
  } finally {
    ms.shutdown();
  }
}
 
Developer: hopshadoop; Project: hops; Lines: 30; Source file: TestQueueMetrics.java

Example 13: setUp

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
@Before
public void setUp() {
  ms = new MetricsSystemImpl();
  QueueMetrics.clearQueueMetrics();
}
 
Developer: naver; Project: hadoop; Lines: 6; Source file: TestQueueMetrics.java

Example 14: testFileSink

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; // import the required package/class
@Test(timeout=6000) 
public void testFileSink() throws IOException {
  outFile = getTestTempFile("test-file-sink-", ".out");
  final String outPath = outFile.getAbsolutePath();  
  
  // NB: specify large period to avoid multiple metrics snapshotting: 
  new ConfigBuilder().add("*.period", 10000)
      .add("test.sink.mysink0.class", FileSink.class.getName())
      .add("test.sink.mysink0.filename", outPath)
      // NB: we filter by context to exclude "metricssystem" context metrics:
      .add("test.sink.mysink0.context", "test1")
      .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  MetricsSystemImpl ms = new MetricsSystemImpl("test");
  ms.start();

  final MyMetrics1 mm1 
    = new MyMetrics1().registerWith(ms);
  new MyMetrics2().registerWith(ms);

  mm1.testMetric1.incr();
  mm1.testMetric2.incr(2);

  ms.publishMetricsNow(); // publish the metrics
  ms.stop();
  ms.shutdown();

  InputStream is = null;
  ByteArrayOutputStream baos = null;
  String outFileContent = null;
  try {
    is = new FileInputStream(outFile);
    baos = new ByteArrayOutputStream((int)outFile.length());
    IOUtils.copyBytes(is, baos, 1024, true);
    outFileContent = new String(baos.toByteArray(), "UTF-8");
  } finally {
    IOUtils.cleanup(null, baos, is);
  }

  // Check the out file content. Should be something like the following:
  //1360244820087 test1.testRecord1: Context=test1, testTag1=testTagValue1, testTag2=testTagValue2, Hostname=myhost, testMetric1=1, testMetric2=2
  //1360244820089 test1.testRecord2: Context=test1, testTag22=testTagValue22, Hostname=myhost
  
  // Note that in the below expression we allow tags and metrics to go in arbitrary order.  
  Pattern expectedContentPattern = Pattern.compile(
      // line #1:
      "^\\d+\\s+test1.testRecord1:\\s+Context=test1,\\s+" +
      "(testTag1=testTagValue1,\\s+testTag2=testTagValue2|testTag2=testTagValue2,\\s+testTag1=testTagValue1)," +
      "\\s+Hostname=.*,\\s+(testMetric1=1,\\s+testMetric2=2|testMetric2=2,\\s+testMetric1=1)" +
      // line #2:
      "$[\\n\\r]*^\\d+\\s+test1.testRecord2:\\s+Context=test1," +
      "\\s+testTag22=testTagValue22,\\s+Hostname=.*$[\\n\\r]*", 
      Pattern.MULTILINE);
  assertTrue(expectedContentPattern.matcher(outFileContent).matches());
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 55; Source file: TestFileSink.java


Note: The org.apache.hadoop.metrics2.impl.MetricsSystemImpl examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not republish without permission.