

Java FSDataOutputStream.getWrappedStream Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.getWrappedStream. If you are wondering what FSDataOutputStream.getWrappedStream does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.


The sections below present 6 code examples of the FSDataOutputStream.getWrappedStream method, ordered by popularity by default.
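Before the examples, a brief orientation: FSDataOutputStream is a decorator around the OutputStream produced by a concrete FileSystem implementation (HDFS, Swift, a mock, and so on), and getWrappedStream() hands back that underlying stream so callers can reach implementation-specific features. The sketch below illustrates the typical check-and-cast pattern; the class name WrappedStreamSketch and the demo path are placeholders chosen for illustration, not taken from the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;

import java.io.IOException;
import java.io.OutputStream;

public class WrappedStreamSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // FSDataOutputStream decorates whatever stream the FileSystem created.
    FSDataOutputStream out = fs.create(new Path("/tmp/wrapped-stream-demo"));
    try {
      // getWrappedStream() exposes the underlying stream; its concrete type
      // depends on the FileSystem implementation, so check before casting.
      OutputStream wrapped = out.getWrappedStream();
      System.out.println("Underlying stream type: " + wrapped.getClass().getName());
      if (wrapped instanceof DFSOutputStream) {
        // On HDFS the wrapped stream is a DFSOutputStream, which is exactly
        // what several of the examples below cast to.
        DFSOutputStream dfsOut = (DFSOutputStream) wrapped;
        System.out.println("Writing through " + dfsOut.getClass().getSimpleName());
      }
      out.write(new byte[]{1, 2, 3});
    } finally {
      out.close();
    }
  }
}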

Example 1: run

import org.apache.hadoop.fs.FSDataOutputStream; // import the class that this method depends on
@Override
public void run() {
  System.out.println("Workload starting ");
  for (int i = 0; i < numberOfFiles; i++) {
    Path filename = new Path(id + "." + i);
    try {
      System.out.println("Workload processing file " + filename);
      FSDataOutputStream stm = createFile(fs, filename, replication);
      DFSOutputStream dfstream = (DFSOutputStream) stm.getWrappedStream();
      dfstream.setArtificialSlowdown(1000);
      writeFile(stm, myseed);
      stm.close();
      checkFile(fs, filename, replication, numBlocks, fileSize, myseed);
    } catch (Throwable e) {
      System.out.println("Workload exception " + e);
      assertTrue(e.toString(), false);
    }

    // increment the stamp to indicate that another file is done.
    synchronized (this) {
      stamp++;
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: TestDatanodeDeath.java

Example 2: assertPartitionsWritten

import org.apache.hadoop.fs.FSDataOutputStream; // import the class that this method depends on
/**
 * Assert that the no. of partitions written matches expectations
 * @param action operation (for use in the assertions)
 * @param out output stream
 * @param expected expected no. of partitions
 */
protected void assertPartitionsWritten(String action, FSDataOutputStream out,
                                       long expected) {
  OutputStream nativeStream = out.getWrappedStream();
  int written = getPartitionsWritten(out);
  if (written != expected) {
    Assert.fail(action + ": " +
                TestSwiftFileSystemPartitionedUploads.WRONG_PARTITION_COUNT
                + " + expected: " + expected + " actual: " + written
                + " -- " + nativeStream);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: SwiftFileSystemBaseTest.java
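Example 2 relies on a getPartitionsWritten(out) helper that is not reproduced on this page. As a hedged sketch of how such a helper can be built on getWrappedStream() (compare the cast in Example 5 below), it might look like the following; the getPartitionsWritten() accessor on SwiftNativeOutputStream is an assumption made for illustration, not something shown in the snippet above.

import org.apache.hadoop.fs.FSDataOutputStream; // import the class that this method depends on
// Sketch only: unwrap the FSDataOutputStream and ask the Swift-specific stream
// how many partitions it has written. The accessor name on
// SwiftNativeOutputStream is assumed here.
protected int getPartitionsWritten(FSDataOutputStream out) {
  SwiftNativeOutputStream swiftStream =
      (SwiftNativeOutputStream) out.getWrappedStream();
  return swiftStream.getPartitionsWritten(); // assumed accessor
}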

Example 3: MockFsDataOutputStream

import org.apache.hadoop.fs.FSDataOutputStream; // import the class that this method depends on
public MockFsDataOutputStream(FSDataOutputStream wrapMe, boolean closeSucceed)
    throws IOException {
  super(wrapMe.getWrappedStream(), null);
  this.closeSucceed = closeSucceed;
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 6, Source file: MockFsDataOutputStream.java

Example 4: testRoundTripAckMetric

import org.apache.hadoop.fs.FSDataOutputStream; // import the class that this method depends on
/**
 * Tests that round-trip acks in a datanode write pipeline are correctly 
 * measured. 
 */
@Test
public void testRoundTripAckMetric() throws Exception {
  final int datanodeCount = 2;
  final int interval = 1;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
      datanodeCount).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Open a file and get the head of the pipeline
    Path testFile = new Path("/testRoundTripAckMetric.txt");
    FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
    DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
    // Slow down the writes to catch the write pipeline
    dout.setChunksPerPacket(5);
    dout.setArtificialSlowdown(3000);
    fsout.write(new byte[10000]);
    DatanodeInfo[] pipeline = null;
    int count = 0;
    while (pipeline == null && count < 5) {
      pipeline = dout.getPipeline();
      System.out.println("Waiting for pipeline to be created.");
      Thread.sleep(1000);
      count++;
    }
    // Get the head node that should be receiving downstream acks
    DatanodeInfo headInfo = pipeline[0];
    DataNode headNode = null;
    for (DataNode datanode : cluster.getDataNodes()) {
      if (datanode.getDatanodeId().equals(headInfo)) {
        headNode = datanode;
        break;
      }
    }
    assertNotNull("Could not find the head of the datanode write pipeline", 
        headNode);
    // Close the file and wait for the metrics to rollover
    Thread.sleep((interval + 1) * 1000);
    // Check the ack was received
    MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics()
        .name());
    assertTrue("Expected non-zero number of acks", 
        getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
    assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval
        + "s", dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 58, Source file: TestDataNodeMetrics.java

Example 5: getSwiftNativeOutputStream

import org.apache.hadoop.fs.FSDataOutputStream; // import the class that this method depends on
private static SwiftNativeOutputStream getSwiftNativeOutputStream(
  FSDataOutputStream outputStream) {
  OutputStream wrappedStream = outputStream.getWrappedStream();
  return (SwiftNativeOutputStream) wrappedStream;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 6, Source file: SwiftNativeFileSystem.java

Example 6: getOutputStream

import org.apache.hadoop.fs.FSDataOutputStream; // import the class that this method depends on
/**
 * Currently, we need to expose the writer's OutputStream to tests so that they can manipulate
 * the default behavior, such as setting the maxRecoveryErrorCount value (see
 * {@link TestWALReplay#testReplayEditsWrittenIntoWAL()}). This is done using reflection on the
 * underlying HDFS OutputStream.
 * NOTE: This could be removed once Hadoop1 support is removed.
 * @return null if underlying stream is not ready.
 */
@VisibleForTesting
OutputStream getOutputStream() {
  FSDataOutputStream fsdos = this.hdfs_out;
  if (fsdos == null) return null;
  return fsdos.getWrappedStream();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 15, Source file: FSHLog.java
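For context, the javadoc in Example 6 describes tests reaching into the stream returned by getOutputStream() via reflection. A hypothetical sketch of that pattern follows; the maxRecoveryErrorCount name comes from the javadoc above, but whether the concrete HDFS stream class exposes it as a field (rather than through a setter) is an assumption, and this is not the literal code from TestWALReplay.

import java.io.OutputStream;
import java.lang.reflect.Field;

// Hypothetical test helper: lower the recovery error count on the underlying
// HDFS stream so a test fails fast when the write pipeline breaks. The field
// name and its int type are assumed from the javadoc above.
public final class RecoveryErrorCountTweaker {
  private RecoveryErrorCountTweaker() {
  }

  public static void lowerRecoveryErrorCount(OutputStream wrapped) throws Exception {
    Field field = wrapped.getClass().getDeclaredField("maxRecoveryErrorCount");
    field.setAccessible(true);
    field.setInt(wrapped, 1); // give up after a single recovery error
  }
}

A test would then call RecoveryErrorCountTweaker.lowerRecoveryErrorCount(wal.getOutputStream()) before provoking a datanode failure, where getOutputStream() is the accessor shown in Example 6.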


Note: The org.apache.hadoop.fs.FSDataOutputStream.getWrappedStream method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.