This article collects typical usage examples of the Java method org.apache.hadoop.hbase.codec.Codec.Encoder. If you have been asking yourself what exactly Codec.Encoder does, how it is used, or where to find worked examples, the hand-picked samples below should help. You can also explore the enclosing class, org.apache.hadoop.hbase.codec.Codec, for further context.
The following presents 14 code examples of Codec.Encoder, ordered by popularity by default.
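Before diving into the examples, here is the general shape of the API: a Codec hands out a Codec.Encoder over an OutputStream and a Codec.Decoder over an InputStream; cells go out through write() and flush(), and come back through advance() and current(). The following minimal, self-contained sketch of that round trip is illustrative only; the choice of KeyValueCodec and the byte-array streams are this article's assumption, not taken from any example below.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.KeyValueCodec;
import org.apache.hadoop.hbase.util.Bytes;

public class CodecRoundTrip {
  public static void main(String[] args) throws IOException {
    Codec codec = new KeyValueCodec();
    // Encode: write cells through the encoder, then flush to finish the block.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Codec.Encoder encoder = codec.getEncoder(baos);
    encoder.write(new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v")));
    encoder.flush();
    // Decode: advance() returns false once the stream is exhausted.
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    Codec.Decoder decoder = codec.getDecoder(bais);
    while (decoder.advance()) {
      Cell cell = decoder.current();
      System.out.println(cell);
    }
  }
}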
Example 1: doCodec
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
static void doCodec(final Codec codec, final Cell[] cells, final int cycles, final int count,
    final int initialBufferSize)
throws IOException {
  byte[] bytes = null;
  Cell[] cellsDecoded = null;
  // Run the encode pass for several cycles so runEncoderTest measures a warmed-up code path;
  // each cycle encodes the same cells into a fresh stream.
  for (int i = 0; i < cycles; i++) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream(initialBufferSize);
    Codec.Encoder encoder = codec.getEncoder(baos);
    bytes = runEncoderTest(i, initialBufferSize, baos, encoder, cells);
  }
  // Decode the bytes produced above for the same number of cycles.
  for (int i = 0; i < cycles; i++) {
    ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
    Codec.Decoder decoder = codec.getDecoder(bais);
    cellsDecoded = CodecPerformance.runDecoderTest(i, count, decoder);
  }
  // Round-trip check: the decoded cells must match the originals.
  verifyCells(cells, cellsDecoded);
}
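A note on this harness: doCodec comes from the CodecPerformance micro-benchmark (whose runEncoderTest and runDecoderTest helpers it calls), so it repeats the encode and decode passes for cycles iterations to get stable timings, and it verifies only the cells decoded in the final cycle.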
Example 2: testEmptyWorks
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testEmptyWorks() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
MessageCodec cmc = new MessageCodec();
Codec.Encoder encoder = cmc.getEncoder(dos);
encoder.flush();
dos.close();
long offset = cos.getCount();
assertEquals(0, offset);
CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = cmc.getDecoder(dis);
assertFalse(decoder.advance());
dis.close();
assertEquals(0, cis.getCount());
}
Example 3: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testOne() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
MessageCodec cmc = new MessageCodec();
Codec.Encoder encoder = cmc.getEncoder(dos);
final KeyValue kv =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
encoder.write(kv);
encoder.flush();
dos.close();
long offset = cos.getCount();
CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = cmc.getDecoder(dis);
assertTrue(decoder.advance()); // First read should pull in the KV
assertFalse(decoder.advance()); // Second read should trip over the end-of-stream marker and return false
dis.close();
assertEquals(offset, cis.getCount());
}
Example 4: testEmptyWorks
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testEmptyWorks() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
KeyValueCodec kvc = new KeyValueCodec();
Codec.Encoder encoder = kvc.getEncoder(dos);
encoder.flush();
dos.close();
long offset = cos.getCount();
assertEquals(0, offset);
CountingInputStream cis =
new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = kvc.getDecoder(dis);
assertFalse(decoder.advance());
dis.close();
assertEquals(0, cis.getCount());
}
Example 5: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testOne() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
KeyValueCodec kvc = new KeyValueCodec();
Codec.Encoder encoder = kvc.getEncoder(dos);
final KeyValue kv =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
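    // KeyValueCodec frames each cell as a 4-byte length prefix (Bytes.SIZEOF_INT)
    // followed by the raw KeyValue bytes, hence the expected length computed below.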
final long length = kv.getLength() + Bytes.SIZEOF_INT;
encoder.write(kv);
encoder.flush();
dos.close();
long offset = cos.getCount();
assertEquals(length, offset);
CountingInputStream cis =
new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = kvc.getDecoder(dis);
assertTrue(decoder.advance()); // First read should pull in the KV
// Second read should trip over the end-of-stream marker and return false
assertFalse(decoder.advance());
dis.close();
assertEquals(length, cis.getCount());
}
Example 6: testEmptyWorks
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testEmptyWorks() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
Codec codec = new CellCodec();
Codec.Encoder encoder = codec.getEncoder(dos);
encoder.flush();
dos.close();
long offset = cos.getCount();
assertEquals(0, offset);
CountingInputStream cis =
new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = codec.getDecoder(dis);
assertFalse(decoder.advance());
dis.close();
assertEquals(0, cis.getCount());
}
Example 7: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testOne() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
Codec codec = new CellCodec();
Codec.Encoder encoder = codec.getEncoder(dos);
final KeyValue kv =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
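    // Give the cell a non-default mvcc/memstore version before encoding, presumably to
    // exercise how CellCodec handles that field on the round trip.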
kv.setMvccVersion(Long.MAX_VALUE);
encoder.write(kv);
encoder.flush();
dos.close();
long offset = cos.getCount();
CountingInputStream cis =
new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = codec.getDecoder(dis);
assertTrue(decoder.advance()); // First read should pull in the KV
// Second read should trip over the end-of-stream marker and return false
assertFalse(decoder.advance());
dis.close();
assertEquals(offset, cis.getCount());
}
Example 8: encodeCellsTo
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec,
CompressionCodec compressor) throws IOException {
Compressor poolCompressor = null;
try {
if (compressor != null) {
if (compressor instanceof Configurable) {
((Configurable) compressor).setConf(this.conf);
}
poolCompressor = CodecPool.getCompressor(compressor);
os = compressor.createOutputStream(os, poolCompressor);
}
Codec.Encoder encoder = codec.getEncoder(os);
while (cellScanner.advance()) {
encoder.write(cellScanner.current());
}
encoder.flush();
} catch (BufferOverflowException | IndexOutOfBoundsException e) {
throw new DoNotRetryIOException(e);
} finally {
os.close();
if (poolCompressor != null) {
CodecPool.returnCompressor(poolCompressor);
}
}
}
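Two details of encodeCellsTo are worth noting: the output stream is closed and the pooled Compressor returned in the finally block even on failure, and encoding errors surfacing as BufferOverflowException or IndexOutOfBoundsException are rethrown as DoNotRetryIOException so the RPC layer will not retry a request it cannot encode.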
Example 9: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testOne() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
Codec codec = new CellCodec();
Codec.Encoder encoder = codec.getEncoder(dos);
final KeyValue kv =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
encoder.write(kv);
encoder.flush();
dos.close();
long offset = cos.getCount();
CountingInputStream cis =
new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = codec.getDecoder(dis);
assertTrue(decoder.advance()); // First read should pull in the KV
// Second read should trip over the end-of-stream marker and return false
assertFalse(decoder.advance());
dis.close();
assertEquals(offset, cis.getCount());
}
Example 10: testThree
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testThree() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
MessageCodec cmc = new MessageCodec();
Codec.Encoder encoder = cmc.getEncoder(dos);
final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1"));
final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2"));
final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3"));
encoder.write(kv1);
encoder.write(kv2);
encoder.write(kv3);
encoder.flush();
dos.close();
long offset = cos.getCount();
CountingInputStream cis = new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = cmc.getDecoder(dis);
assertTrue(decoder.advance());
Cell c = decoder.current();
assertTrue(CellComparator.equals(c, kv1));
assertTrue(decoder.advance());
c = decoder.current();
assertTrue(CellComparator.equals(c, kv2));
assertTrue(decoder.advance());
c = decoder.current();
assertTrue(CellComparator.equals(c, kv3));
assertFalse(decoder.advance());
dis.close();
assertEquals(offset, cis.getCount());
}
Example 11: testThree
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testThree() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
KeyValueCodec kvc = new KeyValueCodec();
Codec.Encoder encoder = kvc.getEncoder(dos);
final KeyValue kv1 =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1"));
final KeyValue kv2 =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2"));
final KeyValue kv3 =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3"));
final long length = kv1.getLength() + Bytes.SIZEOF_INT;
encoder.write(kv1);
encoder.write(kv2);
encoder.write(kv3);
encoder.flush();
dos.close();
long offset = cos.getCount();
assertEquals(length * 3, offset);
CountingInputStream cis =
new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = kvc.getDecoder(dis);
assertTrue(decoder.advance());
KeyValue kv = (KeyValue)decoder.current();
assertTrue(kv1.equals(kv));
assertTrue(decoder.advance());
kv = (KeyValue)decoder.current();
assertTrue(kv2.equals(kv));
assertTrue(decoder.advance());
kv = (KeyValue)decoder.current();
assertTrue(kv3.equals(kv));
assertFalse(decoder.advance());
dis.close();
assertEquals((length * 3), cis.getCount());
}
Example 12: testThree
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
@Test
public void testThree() throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CountingOutputStream cos = new CountingOutputStream(baos);
DataOutputStream dos = new DataOutputStream(cos);
Codec codec = new CellCodec();
Codec.Encoder encoder = codec.getEncoder(dos);
final KeyValue kv1 =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1"));
final KeyValue kv2 =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2"));
final KeyValue kv3 =
new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3"));
encoder.write(kv1);
encoder.write(kv2);
encoder.write(kv3);
encoder.flush();
dos.close();
long offset = cos.getCount();
CountingInputStream cis =
new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
DataInputStream dis = new DataInputStream(cis);
Codec.Decoder decoder = codec.getDecoder(dis);
assertTrue(decoder.advance());
Cell c = decoder.current();
assertTrue(CellComparator.equals(c, kv1));
assertTrue(decoder.advance());
c = decoder.current();
assertTrue(CellComparator.equals(c, kv2));
assertTrue(decoder.advance());
c = decoder.current();
assertTrue(CellComparator.equals(c, kv3));
assertFalse(decoder.advance());
dis.close();
assertEquals(offset, cis.getCount());
}
Example 13: buildCellBlock
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
/**
 * Puts CellScanner Cells into a cell block using the passed-in <code>codec</code> and/or
 * <code>compressor</code>.
 * @param codec codec used to encode each cell
 * @param compressor compression to apply after encoding; may be null for none
 * @param cellScanner source of the cells to encode
 * @return Null or a byte buffer filled with a cellblock of the passed-in Cells encoded using the
 * passed-in <code>codec</code> and/or <code>compressor</code>; the returned buffer has been
 * flipped and is ready for reading. Use limit to find total size.
 * @throws IOException
 */
@SuppressWarnings("resource")
public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner)
throws IOException {
if (cellScanner == null) return null;
if (codec == null) throw new CellScannerButNoCodecException();
int bufferSize = this.cellBlockBuildingInitialBufferSize;
if (cellScanner instanceof HeapSize) {
long longSize = ((HeapSize)cellScanner).heapSize();
// Just make sure we don't have a size bigger than an int.
if (longSize > Integer.MAX_VALUE) {
throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE);
}
bufferSize = ClassSize.align((int)longSize);
} // TODO: Else, get estimate on size of buffer rather than have the buffer resize.
// See TestIPCUtil main for experiment where we spin through the Cells getting estimate of
// total size before creating the buffer. It costs some small percentage. If we are usually
// within the estimated buffer size, then the cost is not worth it. If we are often well
// outside the guesstimated buffer size, the processing can be done in half the time if we
// go w/ the estimated size rather than let the buffer resize.
ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize);
OutputStream os = baos;
Compressor poolCompressor = null;
try {
if (compressor != null) {
if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf);
poolCompressor = CodecPool.getCompressor(compressor);
os = compressor.createOutputStream(os, poolCompressor);
}
Codec.Encoder encoder = codec.getEncoder(os);
int count = 0;
while (cellScanner.advance()) {
encoder.write(cellScanner.current());
count++;
}
encoder.flush();
// If no cells, don't mess around. Just return null (could be a bunch of existence checking
// gets or something -- stuff that does not return a cell).
if (count == 0) return null;
} finally {
os.close();
if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor);
}
if (LOG.isTraceEnabled()) {
if (bufferSize < baos.size()) {
LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + baos.size() +
"; up hbase.ipc.cellblock.building.initial.buffersize?");
}
}
return baos.getByteBuffer();
}
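For orientation, a call to buildCellBlock might look like the sketch below. The ipcUtil instance and the cells list are hypothetical stand-ins for this article; CellUtil.createCellScanner, KeyValueCodec, and the buildCellBlock signature itself come from the example above.

// Hypothetical call site: 'ipcUtil' is assumed to be an instance of the class above.
List<Cell> cells = Arrays.<Cell>asList(
    new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
ByteBuffer cellBlock = ipcUtil.buildCellBlock(
    new KeyValueCodec(),               // codec is required; null raises CellScannerButNoCodecException
    null,                              // no compressor: write the cell block uncompressed
    CellUtil.createCellScanner(cells));
// The returned buffer is already flipped; cellBlock.limit() gives the total block size.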
Example 14: buildCellBlock
import org.apache.hadoop.hbase.codec.Codec; // import the package/class the method depends on
/**
 * Puts CellScanner Cells into a cell block using the passed-in <code>codec</code> and/or
 * <code>compressor</code>.
 * @param codec codec used to encode each cell
 * @param compressor compression to apply after encoding; may be null for none
 * @param cellScanner source of the cells to encode
 * @return Null or a byte buffer filled with a cellblock of the passed-in Cells encoded using the
 * passed-in <code>codec</code> and/or <code>compressor</code>; the returned buffer has been
 * flipped and is ready for reading. Use limit to find total size.
 * @throws IOException
 */
@SuppressWarnings("resource")
ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner)
throws IOException {
if (cellScanner == null) return null;
if (codec == null) throw new CellScannerButNoCodecException();
int bufferSize = this.cellBlockBuildingInitialBufferSize;
if (cellScanner instanceof HeapSize) {
long longSize = ((HeapSize)cellScanner).heapSize();
// Just make sure we don't have a size bigger than an int.
if (longSize > Integer.MAX_VALUE) {
throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE);
}
bufferSize = ClassSize.align((int)longSize);
} // TODO: Else, get estimate on size of buffer rather than have the buffer resize.
// See TestIPCUtil main for experiment where we spin through the Cells getting estimate of
// total size before creating the buffer. It costs some small percentage. If we are usually
// within the estimated buffer size, then the cost is not worth it. If we are often well
// outside the guesstimated buffer size, the processing can be done in half the time if we
// go w/ the estimated size rather than let the buffer resize.
ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize);
OutputStream os = baos;
Compressor poolCompressor = null;
try {
if (compressor != null) {
if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf);
poolCompressor = CodecPool.getCompressor(compressor);
os = compressor.createOutputStream(os, poolCompressor);
}
Codec.Encoder encoder = codec.getEncoder(os);
int count = 0;
while (cellScanner.advance()) {
encoder.write(cellScanner.current());
count++;
}
encoder.flush();
// If no cells, don't mess around. Just return null (could be a bunch of existence checking
// gets or something -- stuff that does not return a cell).
if (count == 0) return null;
} finally {
os.close();
if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor);
}
if (LOG.isTraceEnabled()) {
if (bufferSize < baos.size()) {
LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + baos.size() +
"; up hbase.ipc.cellblock.building.initial.buffersize?");
}
}
return baos.getByteBuffer();
}