This article collects typical usage examples of the Java class org.apache.hadoop.io.BoundedByteArrayOutputStream. If you are wondering what BoundedByteArrayOutputStream is for and how to use it, the curated examples below should help.

BoundedByteArrayOutputStream belongs to the org.apache.hadoop.io package. Twelve code examples of the class are shown below, ordered by popularity by default.
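Before the examples, here is a minimal sketch of the class's basic contract, written against the methods the examples below rely on (a capacity-taking constructor, write, size, getLimit, getBuffer, reset). Treat it as an illustration rather than a full reference: the stream is backed by a fixed array allocated up front and refuses writes past its limit.

import java.io.IOException;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;

public class BoundedStreamSketch {
  public static void main(String[] args) throws IOException {
    // The backing array is allocated once, at the requested capacity.
    BoundedByteArrayOutputStream out = new BoundedByteArrayOutputStream(16);
    out.write(new byte[] { 1, 2, 3 });
    System.out.println(out.size());      // bytes written so far: 3
    System.out.println(out.getLimit());  // write limit: 16
    byte[] backing = out.getBuffer();    // direct access to the backing array
    // Writing past the limit throws an EOFException; reset() rewinds the
    // stream so the same backing array can be reused.
    out.reset();
  }
}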
Example 1: InMemoryMapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long) size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  // Reserve the in-memory buffer up front and keep a reference to its
  // backing array, which will hold the map output fetched during shuffle.
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Example 2: InMemoryLinkMapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

public InMemoryLinkMapOutput(JobConf conf, TaskAttemptID mapId,
                             MergeManagerImpl<K, V> merger, int size, CompressionCodec codec,
                             boolean primaryMapOutput) {
  super(conf, mapId, merger, size, codec, primaryMapOutput);
  // super(mapId, (long) size, primaryMapOutput);
  // Note: the assignments below repeat work the superclass constructor
  // (Example 1) already performs.
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Example 3: MapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

MapOutput(InputAttemptIdentifier attemptIdentifier, MergeManager merger, int size,
          boolean primaryMapOutput) {
  this.id = ID.incrementAndGet();
  this.attemptIdentifier = attemptIdentifier;
  this.merger = merger;
  // An in-memory output: reserve the bounded buffer and leave all
  // disk-related fields unset.
  type = Type.MEMORY;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  this.size = size;
  localFS = null;
  disk = null;
  outputPath = null;
  tmpOutputPath = null;
  this.primaryMapOutput = primaryMapOutput;
}
Example 4: MapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, int size,
          boolean primaryMapOutput) {
  this.id = ID.incrementAndGet();
  this.mapId = mapId;
  this.merger = merger;
  // Same pattern as Example 3, keyed by a MapReduce TaskAttemptID.
  type = Type.MEMORY;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  this.size = size;
  localFS = null;
  disk = null;
  outputPath = null;
  tmpOutputPath = null;
  this.primaryMapOutput = primaryMapOutput;
}
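Examples 3 and 4 are the Tez and MapReduce variants of the same pattern: a MEMORY-type MapOutput reserves its entire buffer at construction time via BoundedByteArrayOutputStream, so memory usage is accounted for before any bytes arrive, while the disk-related fields stay null.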
Example 5: createInMemorySegments

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

private List<TezMerger.Segment> createInMemorySegments(int segmentCount, int keysPerSegment)
    throws IOException {
  List<TezMerger.Segment> segmentList = Lists.newLinkedList();
  Random rnd = new Random();
  DataInputBuffer key = new DataInputBuffer();
  DataInputBuffer value = new DataInputBuffer();
  for (int i = 0; i < segmentCount; i++) {
    // Write random key/value pairs into an in-memory segment...
    BoundedByteArrayOutputStream stream = new BoundedByteArrayOutputStream(10000);
    InMemoryWriter writer = new InMemoryWriter(stream);
    for (int j = 0; j < keysPerSegment; j++) {
      populateData(new IntWritable(rnd.nextInt()), new LongWritable(rnd.nextLong()), key, value);
      writer.append(key, value);
    }
    writer.close();
    // ...then wrap the written bytes in a reader-backed segment.
    InMemoryReader reader = new InMemoryReader(merger, null, stream.getBuffer(), 0, stream.getLimit());
    segmentList.add(new TezMerger.Segment(reader, null));
  }
  return segmentList;
}
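The populateData helper is referenced but not shown in this example. A plausible sketch, under the assumption that it simply serializes the two Writables and rewinds the DataInputBuffers over the resulting bytes:

// Hypothetical reconstruction of the helper used above: serialize the
// Writable key/value into scratch buffers and point the DataInputBuffers
// at the serialized bytes.
private void populateData(IntWritable intKey, LongWritable longVal,
    DataInputBuffer key, DataInputBuffer value) throws IOException {
  DataOutputBuffer dataKey = new DataOutputBuffer();
  DataOutputBuffer dataValue = new DataOutputBuffer();
  intKey.write(dataKey);
  longVal.write(dataValue);
  key.reset(dataKey.getData(), dataKey.getLength());
  value.reset(dataValue.getData(), dataValue.getLength());
}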
Example 6: fillOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  // Fill the reserved buffer up to its limit with a byte pattern.
  for (int i = 0; i < count; ++i) {
    stream.write(i);
  }
}
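Assuming nothing has been written to the stream yet, getLimit() is the capacity it was constructed with, so this loop fills the reserved memory exactly to capacity; one further write would throw an EOFException. (Each write(i) stores only the low-order byte of i.)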
Example 7: InMemoryMapOutput

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  // A variant of Example 1 whose superclass takes conf and merger itself.
  super(conf, merger, mapId, (long) size, primaryMapOutput);
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Example 8: testInMemoryWriter

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

@Test
// Test InMemoryWriter
public void testInMemoryWriter() throws IOException {
  InMemoryWriter writer = null;
  BoundedByteArrayOutputStream bout = new BoundedByteArrayOutputStream(1024 * 1024);
  List<KVPair> data = KVDataGen.generateTestData(true, 0);

  // No RLE, no repeat keys, no compression
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, false, false, data, null);
  readUsingInMemoryReader(bout.getBuffer(), data);

  // No RLE, repeat keys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, false, true, data, null);
  readUsingInMemoryReader(bout.getBuffer(), data);

  // RLE, no repeat keys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, true, false, data, null);
  readUsingInMemoryReader(bout.getBuffer(), data);

  // RLE, repeat keys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, true, true, data, null);
  readUsingInMemoryReader(bout.getBuffer(), data);
}
Example 9: testInMemoryWriter

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

@Test(timeout = 5000)
// Test InMemoryWriter
public void testInMemoryWriter() throws IOException {
  InMemoryWriter writer = null;
  BoundedByteArrayOutputStream bout = new BoundedByteArrayOutputStream(1024 * 1024);
  List<KVPair> data = KVDataGen.generateTestData(true, 10);

  // No RLE, no repeat keys, no compression
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, false, data);
  readUsingInMemoryReader(bout.getBuffer(), data);

  // No RLE, repeat keys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, true, data);
  readUsingInMemoryReader(bout.getBuffer(), data);

  // RLE, no repeat keys, no compression (RLE enabled via the constructor)
  bout.reset();
  writer = new InMemoryWriter(bout, true);
  writeTestFileUsingDataBuffer(writer, false, data);
  readUsingInMemoryReader(bout.getBuffer(), data);

  // RLE, repeat keys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout, true);
  writeTestFileUsingDataBuffer(writer, true, data);
  readUsingInMemoryReader(bout.getBuffer(), data);
}
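In both tests, bout.reset() rewinds the bounded stream between sub-cases, so the same 1 MB backing array is reused for all four writer configurations instead of being reallocated.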
Example 10: close

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

@Override
public void close() throws IOException {
  if (closed) {
    return;
  }
  try {
    ++errorCount;
    byte[] key = currentKeyBufferOS.getBuffer();
    int len = currentKeyBufferOS.size();
    // Verify the key length matches what the caller declared.
    if (expectedLength >= 0 && expectedLength != len) {
      throw new IOException("Incorrect key length: expected="
          + expectedLength + " actual=" + len);
    }
    Utils.writeVInt(blkAppender, len);
    blkAppender.write(key, 0, len);
    if (tfileIndex.getFirstKey() == null) {
      tfileIndex.setFirstKey(key, 0, len);
    }
    if (tfileMeta.isSorted() && tfileMeta.getRecordCount() > 0) {
      byte[] lastKey = lastKeyBufferOS.getBuffer();
      int lastLen = lastKeyBufferOS.size();
      if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
          lastLen) < 0) {
        throw new IOException("Keys are not added in sorted order");
      }
    }
    // Swap the current and last key buffers instead of copying bytes.
    BoundedByteArrayOutputStream tmp = currentKeyBufferOS;
    currentKeyBufferOS = lastKeyBufferOS;
    lastKeyBufferOS = tmp;
    --errorCount;
  } finally {
    closed = true;
    state = State.END_KEY;
  }
}
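The swap at the end is why two BoundedByteArrayOutputStream instances exist here: the key just written becomes the "last key" for the next sorted-order check simply by exchanging buffer references, with no byte copying.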
Example 11: InMemoryWriter

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

public InMemoryWriter(BoundedByteArrayOutputStream arrayStream) {
  super(null);
  // Layer the IFile checksumming stream over the bounded in-memory buffer.
  this.out =
      new DataOutputStream(new IFileOutputStream(arrayStream));
}
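IFileOutputStream appends a trailing checksum when closed, so the bytes accumulated in arrayStream's buffer carry the same integrity check as an on-disk IFile and can be handed straight to an InMemoryReader, as Examples 5, 8, and 9 do.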
Example 12: getArrayStream

import org.apache.hadoop.io.BoundedByteArrayOutputStream; // import the required package/class

public BoundedByteArrayOutputStream getArrayStream() {
  return byteStream;
}
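This accessor exposes the internal bounded stream; Example 6 uses it to fill an InMemoryMapOutput's reserved memory directly in tests.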