当前位置: 首页>>代码示例>>Java>>正文


Java BoundedByteArrayOutputStream类代码示例

本文整理汇总了Java中org.apache.hadoop.io.BoundedByteArrayOutputStream的典型用法代码示例。如果您正苦于以下问题:Java BoundedByteArrayOutputStream类的具体用法?Java BoundedByteArrayOutputStream怎么用?Java BoundedByteArrayOutputStream使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


BoundedByteArrayOutputStream类属于org.apache.hadoop.io包,在下文中一共展示了BoundedByteArrayOutputStream类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: InMemoryMapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
/**
 * Builds a map output held entirely in memory, backed by a bounded byte
 * array of {@code size} bytes. When a compression codec is supplied, a
 * decompressor is reserved from the shared codec pool.
 *
 * NOTE(review): fields such as conf/merger/codec/byteStream/memory are
 * declared on the enclosing class, which is not visible in this excerpt.
 */
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long)size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  this.byteStream = new BoundedByteArrayOutputStream(size);
  this.memory = byteStream.getBuffer();
  // Only reserve a pooled decompressor when compression is actually in use.
  this.decompressor = (codec != null) ? CodecPool.getDecompressor(codec) : null;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:17,代码来源:InMemoryMapOutput.java

示例2: InMemoryLinkMapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
/**
 * In-memory map output for link data. Delegates common setup to the parent
 * constructor, then initializes the local buffer state.
 *
 * NOTE(review): the super(...) call appears to perform the same assignments
 * in the parent class; the re-assignments below look redundant but are kept
 * because the subclass may shadow these fields — confirm before removing.
 */
public InMemoryLinkMapOutput(JobConf conf, TaskAttemptID mapId,
		MergeManagerImpl<K, V> merger, int size, CompressionCodec codec,
		boolean primaryMapOutput) {
	super(conf, mapId, merger, size, codec, primaryMapOutput);
	this.conf = conf;
	this.merger = merger;
	this.codec = codec;
	this.byteStream = new BoundedByteArrayOutputStream(size);
	this.memory = byteStream.getBuffer();
	// Only reserve a pooled decompressor when compression is actually in use.
	this.decompressor = (codec != null) ? CodecPool.getDecompressor(codec) : null;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:17,代码来源:InMemoryLinkMapOutput.java

示例3: MapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
/**
 * Creates a MEMORY-typed map output backed by a bounded in-memory buffer.
 * All disk-related fields are explicitly cleared since nothing is spilled.
 */
MapOutput(InputAttemptIdentifier attemptIdentifier, MergeManager merger, int size, 
          boolean primaryMapOutput) {
  this.id = ID.incrementAndGet();
  this.attemptIdentifier = attemptIdentifier;
  this.merger = merger;
  this.size = size;
  this.primaryMapOutput = primaryMapOutput;

  // Memory-resident output: allocate the bounded buffer up front.
  this.type = Type.MEMORY;
  this.byteStream = new BoundedByteArrayOutputStream(size);
  this.memory = byteStream.getBuffer();

  // No on-disk representation, so the file-side fields stay unset.
  this.localFS = null;
  this.disk = null;
  this.outputPath = null;
  this.tmpOutputPath = null;
}
 
开发者ID:apache,项目名称:incubator-tez,代码行数:20,代码来源:MapOutput.java

示例4: MapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
/**
 * Creates a MEMORY-typed map output backed by a bounded in-memory buffer.
 * All disk-related fields are explicitly cleared since nothing is spilled.
 */
MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, int size, 
          boolean primaryMapOutput) {
  this.id = ID.incrementAndGet();
  this.mapId = mapId;
  this.merger = merger;
  this.size = size;
  this.primaryMapOutput = primaryMapOutput;

  // Memory-resident output: allocate the bounded buffer up front.
  this.type = Type.MEMORY;
  this.byteStream = new BoundedByteArrayOutputStream(size);
  this.memory = byteStream.getBuffer();

  // No on-disk representation, so the file-side fields stay unset.
  this.localFS = null;
  this.disk = null;
  this.outputPath = null;
  this.tmpOutputPath = null;
}
 
开发者ID:rekhajoshm,项目名称:mapreduce-fork,代码行数:20,代码来源:MapOutput.java

示例5: createInMemorySegments

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
/**
 * Builds {@code segmentCount} in-memory segments, each populated with
 * {@code keysPerSegment} random (IntWritable key, LongWritable value) pairs.
 *
 * @return the segments, in creation order
 * @throws IOException if writing a segment fails
 */
private List<TezMerger.Segment> createInMemorySegments(int segmentCount, int keysPerSegment)
    throws IOException {
  List<TezMerger.Segment> segments = Lists.newLinkedList();
  Random random = new Random();
  DataInputBuffer keyBuffer = new DataInputBuffer();
  DataInputBuffer valueBuffer = new DataInputBuffer();
  for (int segment = 0; segment < segmentCount; segment++) {
    // Each segment gets its own fixed-capacity backing buffer.
    BoundedByteArrayOutputStream backing = new BoundedByteArrayOutputStream(10000);
    InMemoryWriter writer = new InMemoryWriter(backing);
    int written = 0;
    while (written < keysPerSegment) {
      populateData(new IntWritable(random.nextInt()), new LongWritable(random.nextLong()),
          keyBuffer, valueBuffer);
      writer.append(keyBuffer, valueBuffer);
      written++;
    }
    writer.close();
    // Read back only the bytes actually written (up to the stream's limit).
    InMemoryReader reader =
        new InMemoryReader(merger, null, backing.getBuffer(), 0, backing.getLimit());
    segments.add(new TezMerger.Segment(reader, null));
  }
  return segments;
}
 
开发者ID:apache,项目名称:tez,代码行数:22,代码来源:TestTezMerger.java

示例6: fillOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
/**
 * Fills the output's backing stream up to its limit with an ascending
 * ramp of byte values (0, 1, 2, ...).
 *
 * @throws IOException if writing to the stream fails
 */
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int limit = stream.getLimit();
  int next = 0;
  while (next < limit) {
    stream.write(next);
    next++;
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:TestMergeManager.java

示例7: InMemoryMapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
/**
 * Builds a map output held entirely in memory, backed by a bounded byte
 * array of {@code size} bytes. When a compression codec is supplied, a
 * decompressor is reserved from the shared codec pool.
 *
 * NOTE(review): fields such as codec/byteStream/memory/decompressor are
 * declared on the enclosing class, which is not visible in this excerpt.
 */
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(conf, merger, mapId, (long)size, primaryMapOutput);
  this.codec = codec;
  this.byteStream = new BoundedByteArrayOutputStream(size);
  this.memory = byteStream.getBuffer();
  // Only reserve a pooled decompressor when compression is actually in use.
  this.decompressor = (codec != null) ? CodecPool.getDecompressor(codec) : null;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:15,代码来源:InMemoryMapOutput.java

示例8: testInMemoryWriter

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
@Test
//Exercises InMemoryWriter over every (RLE, repeat-keys) combination with no
//compression, verifying each time that the written data can be read back.
public void testInMemoryWriter() throws IOException {
  BoundedByteArrayOutputStream bout = new BoundedByteArrayOutputStream(1024 * 1024);
  List<KVPair> data = KVDataGen.generateTestData(true, 0);

  boolean firstPass = true;
  for (boolean rle : new boolean[] {false, true}) {
    for (boolean repeatKeys : new boolean[] {false, true}) {
      if (!firstPass) {
        bout.reset();  // reuse the same backing buffer between passes
      }
      firstPass = false;
      InMemoryWriter writer = new InMemoryWriter(bout);
      writeTestFileUsingDataBuffer(writer, rle, repeatKeys, data, null);
      readUsingInMemoryReader(bout.getBuffer(), data);
    }
  }
}
 
开发者ID:apache,项目名称:incubator-tez,代码行数:32,代码来源:TestIFile.java

示例9: testInMemoryWriter

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
@Test(timeout = 5000)
//Exercises InMemoryWriter over every (RLE, repeat-keys) combination with no
//compression, verifying each time that the written data can be read back.
public void testInMemoryWriter() throws IOException {
  BoundedByteArrayOutputStream bout = new BoundedByteArrayOutputStream(1024 * 1024);
  List<KVPair> data = KVDataGen.generateTestData(true, 10);

  boolean firstPass = true;
  for (boolean rle : new boolean[] {false, true}) {
    for (boolean repeatKeys : new boolean[] {false, true}) {
      if (!firstPass) {
        bout.reset();  // reuse the same backing buffer between passes
      }
      firstPass = false;
      // RLE mode is selected via the two-argument writer constructor.
      InMemoryWriter writer = rle ? new InMemoryWriter(bout, true) : new InMemoryWriter(bout);
      writeTestFileUsingDataBuffer(writer, repeatKeys, data);
      readUsingInMemoryReader(bout.getBuffer(), data);
    }
  }
}
 
开发者ID:apache,项目名称:tez,代码行数:32,代码来源:TestIFile.java

示例10: close

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
/**
 * Finishes the current key: validates its length, appends it to the block,
 * records it as the file's first key if none is set, and (for sorted TFiles
 * with prior records) checks that keys arrive in non-decreasing order.
 * Idempotent — a second call is a no-op.
 *
 * The errorCount increment/decrement brackets the body: if any step throws,
 * the count stays raised, signalling that the stream is in a broken state.
 *
 * @throws IOException if the key length does not match the caller-declared
 *         expected length, or if a key is added out of order to a sorted
 *         TFile.
 */
@Override
public void close() throws IOException {
  if (closed) {  // idiomatic boolean test; was `closed == true`
    return;
  }

  try {
    ++errorCount;  // assume failure; undone only if every step succeeds
    byte[] key = currentKeyBufferOS.getBuffer();
    int len = currentKeyBufferOS.size();
    /**
     * verify length.
     */
    if (expectedLength >= 0 && expectedLength != len) {
      throw new IOException("Incorrect key length: expected="
          + expectedLength + " actual=" + len);
    }

    // Append the key (vint length prefix, then the bytes) to the block.
    Utils.writeVInt(blkAppender, len);
    blkAppender.write(key, 0, len);
    if (tfileIndex.getFirstKey() == null) {
      tfileIndex.setFirstKey(key, 0, len);
    }

    // Sorted TFiles must receive keys in non-decreasing order.
    if (tfileMeta.isSorted() && tfileMeta.getRecordCount()>0) {
      byte[] lastKey = lastKeyBufferOS.getBuffer();
      int lastLen = lastKeyBufferOS.size();
      if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
          lastLen) < 0) {
        throw new IOException("Keys are not added in sorted order");
      }
    }

    // Swap buffers: the key just written becomes "last key", and its old
    // buffer is recycled for the next key.
    BoundedByteArrayOutputStream tmp = currentKeyBufferOS;
    currentKeyBufferOS = lastKeyBufferOS;
    lastKeyBufferOS = tmp;
    --errorCount;  // success: balance the earlier increment
  } finally {
    closed = true;
    state = State.END_KEY;
  }
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:43,代码来源:TFile.java

示例11: InMemoryWriter

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
public InMemoryWriter(BoundedByteArrayOutputStream arrayStream) {
  super(null);
  this.out = 
    new DataOutputStream(new IFileOutputStream(arrayStream));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:6,代码来源:InMemoryWriter.java

示例12: getArrayStream

import org.apache.hadoop.io.BoundedByteArrayOutputStream; //导入依赖的package包/类
/**
 * Returns the bounded in-memory stream backing this map output.
 *
 * @return the backing {@link BoundedByteArrayOutputStream}
 */
public BoundedByteArrayOutputStream getArrayStream() {
  return byteStream;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:4,代码来源:InMemoryMapOutput.java


注:本文中的org.apache.hadoop.io.BoundedByteArrayOutputStream类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。