

Java DFSOutputStream.getPipeline Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSOutputStream.getPipeline. If you are wondering what DFSOutputStream.getPipeline does, how to call it, or what its usage looks like in practice, the curated examples below should help. You can also browse further usage examples of org.apache.hadoop.hdfs.DFSOutputStream itself.


Two code examples of DFSOutputStream.getPipeline are shown below, sorted by popularity by default. Both are the same Hadoop test, TestDataNodeMetrics#testRoundTripAckMetric, taken from two different forks (naver/hadoop and hopshadoop/hops); they differ only in line wrapping.
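
A quick orientation before the full listings: both examples cast the stream returned by FileSystem.create() to DFSOutputStream via getWrappedStream() and then poll getPipeline() until the write pipeline exists. The sketch below isolates that pattern. It is illustrative only: the class name, path, replication factor, write size, and poll limit are invented for this article, it assumes fs.defaultFS points at a running HDFS cluster, and getPipeline() is a test-oriented accessor on an HDFS-internal class rather than a stable public API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class GetPipelineSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS in the loaded configuration points at a running HDFS cluster.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical path and replication factor, chosen only for illustration.
    Path path = new Path("/tmp/getPipelineSketch.txt");
    try (FSDataOutputStream out = fs.create(path, (short) 3)) {
      // getWrappedStream() yields the underlying DFSOutputStream only on HDFS;
      // on other file systems this cast throws ClassCastException.
      DFSOutputStream dout = (DFSOutputStream) out.getWrappedStream();
      out.write(new byte[64 * 1024]);
      out.hflush(); // push buffered data so a block write pipeline is established
      // getPipeline() returns null until the pipeline exists, so poll briefly.
      DatanodeInfo[] pipeline = null;
      for (int i = 0; i < 5 && pipeline == null; i++) {
        pipeline = dout.getPipeline();
        if (pipeline == null) {
          Thread.sleep(1000);
        }
      }
      if (pipeline == null) {
        System.out.println("Pipeline was not established in time.");
      } else {
        for (DatanodeInfo dn : pipeline) {
          System.out.println("Pipeline datanode: " + dn.getXferAddr());
        }
      }
    }
  }
}

Because getPipeline() returns null until the underlying streamer has actually set up a block pipeline, the two test examples below additionally slow the writer down (setChunksPerPacket, setArtificialSlowdown) so the pipeline stays observable long enough for the polling loop to catch it.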

Example 1: testRoundTripAckMetric

// The source site lists only the import for the method's declaring class; the
// full set of imports this test needs is reproduced here for completeness.
import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Test;
/**
 * Tests that round-trip acks in a datanode write pipeline are correctly 
 * measured. 
 */
@Test
public void testRoundTripAckMetric() throws Exception {
  final int datanodeCount = 2;
  final int interval = 1;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
      datanodeCount).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Open a file and get the head of the pipeline
    Path testFile = new Path("/testRoundTripAckMetric.txt");
    FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
    DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
    // Slow down the writes to catch the write pipeline
    dout.setChunksPerPacket(5);
    dout.setArtificialSlowdown(3000);
    fsout.write(new byte[10000]);
    DatanodeInfo[] pipeline = null;
    int count = 0;
    while (pipeline == null && count < 5) {
      pipeline = dout.getPipeline();
      System.out.println("Waiting for pipeline to be created.");
      Thread.sleep(1000);
      count++;
    }
    // Fail with a clear message if the pipeline never appeared, instead of an
    // NullPointerException on the array access below
    assertNotNull("Write pipeline was not created in time", pipeline);
    // Get the head node that should be receiving downstream acks
    DatanodeInfo headInfo = pipeline[0];
    DataNode headNode = null;
    for (DataNode datanode : cluster.getDataNodes()) {
      if (datanode.getDatanodeId().equals(headInfo)) {
        headNode = datanode;
        break;
      }
    }
    assertNotNull("Could not find the head of the datanode write pipeline", 
        headNode);
    // Wait for the metrics to roll over past the configured percentile
    // interval (note: the file is not explicitly closed in this test)
    Thread.sleep((interval + 1) * 1000);
    // Check the ack was received
    MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics()
        .name());
    assertTrue("Expected non-zero number of acks", 
        getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
    assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval
        + "s", dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 58, Source file: TestDataNodeMetrics.java

Example 2: testRoundTripAckMetric

import org.apache.hadoop.hdfs.DFSOutputStream; // import for the method's declaring class
// (the remaining imports match those listed in Example 1)
/**
 * Tests that round-trip acks in a datanode write pipeline are correctly
 * measured.
 */
@Test
public void testRoundTripAckMetric() throws Exception {
  final int datanodeCount = 2;
  final int interval = 1;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
      "" + interval);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(datanodeCount).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Open a file and get the head of the pipeline
    Path testFile = new Path("/testRoundTripAckMetric.txt");
    FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
    DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
    // Slow down the writes to catch the write pipeline
    dout.setChunksPerPacket(5);
    dout.setArtificialSlowdown(3000);
    fsout.write(new byte[10000]);
    DatanodeInfo[] pipeline = null;
    int count = 0;
    while (pipeline == null && count < 5) {
      pipeline = dout.getPipeline();
      System.out.println("Waiting for pipeline to be created.");
      Thread.sleep(1000);
      count++;
    }
    // Fail with a clear message if the pipeline never appeared, instead of an
    // NullPointerException on the array access below
    assertNotNull("Write pipeline was not created in time", pipeline);
    // Get the head node that should be receiving downstream acks
    DatanodeInfo headInfo = pipeline[0];
    DataNode headNode = null;
    for (DataNode datanode : cluster.getDataNodes()) {
      if (datanode.getDatanodeId().equals(headInfo)) {
        headNode = datanode;
        break;
      }
    }
    assertNotNull("Could not find the head of the datanode write pipeline",
        headNode);
    // Wait for the metrics to roll over past the configured percentile
    // interval (note: the file is not explicitly closed in this test)
    Thread.sleep((interval + 1) * 1000);
    // Check the ack was received
    MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics().name());
    assertTrue("Expected non-zero number of acks",
        getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
    assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s",
        dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 58, Source file: TestDataNodeMetrics.java


Note: the org.apache.hadoop.hdfs.DFSOutputStream.getPipeline examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors; consult the corresponding project's license before distributing or reusing it. Please do not reproduce this article without permission.