This article collects typical usage examples of the Java class org.apache.hadoop.hbase.codec.Codec. If you are unsure what the Codec class does or how to use it, the curated examples below should help.
The Codec class belongs to the org.apache.hadoop.hbase.codec package. A total of 15 code examples of the class are shown below, ordered by popularity.
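Before the examples, here is a minimal, self-contained round-trip sketch of the Codec API: getEncoder(OutputStream) returns a Codec.Encoder you write Cells to, and getDecoder(InputStream) returns a Codec.Decoder that doubles as a CellScanner. KeyValueCodec and the class name CodecRoundTrip are only illustrative choices; any Codec implementation would do.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.KeyValueCodec;
import org.apache.hadoop.hbase.util.Bytes;

public class CodecRoundTrip {
  public static void main(String[] args) throws Exception {
    Codec codec = new KeyValueCodec(); // any Codec implementation works here

    // Encode one cell into an in-memory buffer.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Codec.Encoder encoder = codec.getEncoder(out);
    encoder.write(new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v")));
    encoder.flush();

    // Decode it back; the Decoder is a CellScanner: advance(), then current().
    Codec.Decoder decoder =
        codec.getDecoder(new ByteArrayInputStream(out.toByteArray()));
    while (decoder.advance()) {
      Cell cell = decoder.current();
      System.out.println(Bytes.toString(cell.getValueArray(),
          cell.getValueOffset(), cell.getValueLength()));
    }
  }
}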
Example 1: doCodec
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
static void doCodec(final Codec codec, final Cell[] cells, final int cycles, final int count,
    final int initialBufferSize) throws IOException {
  byte[] bytes = null;
  Cell[] cellsDecoded = null;
  // Encode pass: run the encoder <cycles> times, keeping the bytes of the last run.
  for (int i = 0; i < cycles; i++) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream(initialBufferSize);
    Codec.Encoder encoder = codec.getEncoder(baos);
    bytes = runEncoderTest(i, initialBufferSize, baos, encoder, cells);
  }
  // Decode pass: decode those bytes the same number of times.
  for (int i = 0; i < cycles; i++) {
    ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
    Codec.Decoder decoder = codec.getDecoder(bais);
    cellsDecoded = CodecPerformance.runDecoderTest(i, count, decoder);
  }
  // Check that the round trip preserved the cells.
  verifyCells(cells, cellsDecoded);
}
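A hedged sketch of how doCodec might be driven: runEncoderTest, runDecoderTest, and verifyCells belong to the surrounding CodecPerformance harness, and the makeCells helper below is assumed purely for illustration, not part of the original code.
// Hypothetical driver (makeCells is an assumed helper that builds a Cell[]).
Cell[] cells = makeCells(10000);
doCodec(new KeyValueCodec(), cells, /* cycles= */ 100,
    /* count= */ cells.length, /* initialBufferSize= */ 64 * 1024);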
Example 2: timerTests
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
private static void timerTests(final IPCUtil util, final int count, final int size,
    final Codec codec, final CompressionCodec compressor) throws IOException {
  final int cycles = 1000;
  StopWatch timer = new StopWatch();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(util, timer, count, size, codec, compressor, false);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false +
      ", count=" + count + ", size=" + size + ", took=" + timer.getTime() + "ms");
  timer.reset();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(util, timer, count, size, codec, compressor, true);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true +
      ", count=" + count + ", size=" + size + ", took=" + timer.getTime() + "ms");
}
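The harness times the same workload twice, first with sized=false and then with sized=true, so the two log lines can be compared directly. Note that both passes share one JVM, so the first pass also acts as a warm-up, which can slightly flatter the second measurement.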
Example 3: getConnection
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
/**
 * Get a connection from the pool, or create a new one and add it to the
 * pool. Connections to a given host/port are reused.
 */
protected Connection getConnection(User ticket, Call call, InetSocketAddress addr,
    final Codec codec, final CompressionCodec compressor) throws IOException {
  if (!running.get()) throw new StoppedRpcClientException();
  Connection connection;
  ConnectionId remoteId =
      new ConnectionId(ticket, call.md.getService().getName(), addr);
  synchronized (connections) {
    connection = connections.get(remoteId);
    if (connection == null) {
      connection = createConnection(remoteId, this.codec, this.compressor);
      connections.put(remoteId, connection);
    }
  }
  return connection;
}
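The pool is keyed by ConnectionId (user, service name, and remote address), and the get-or-create sequence runs inside synchronized (connections), so concurrent callers targeting the same remote share a single Connection instead of racing to create duplicates.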
Example 4: testEmptyWorks
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testEmptyWorks() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  MessageCodec cmc = new MessageCodec();
  Codec.Encoder encoder = cmc.getEncoder(dos);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(0, offset); // No cells written, so no bytes written.
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = cmc.getDecoder(dis);
  assertFalse(decoder.advance()); // And nothing to read back.
  dis.close();
  assertEquals(0, cis.getCount());
}
Example 5: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  MessageCodec cmc = new MessageCodec();
  Codec.Encoder encoder = cmc.getEncoder(dos);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = cmc.getDecoder(dis);
  assertTrue(decoder.advance());  // First read should pull in the KV.
  assertFalse(decoder.advance()); // Second read should trip over the end-of-stream marker and return false.
  dis.close();
  assertEquals(offset, cis.getCount());
}
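Unlike the KeyValueCodec test in Example 8, this test does not assert a concrete byte length; it only checks that the decoder consumed exactly as many bytes as the encoder produced, which keeps the test independent of MessageCodec's internal framing.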
Example 6: getConnection
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
protected Connection getConnection(User ticket, Call call, InetSocketAddress addr,
    int rpcTimeout, final Codec codec, final CompressionCodec compressor)
    throws IOException, InterruptedException {
  if (!running.get()) throw new StoppedRpcClientException();
  Connection connection;
  ConnectionId remoteId =
      new ConnectionId(ticket, call.md.getService().getName(), addr, rpcTimeout);
  synchronized (connections) {
    connection = connections.get(remoteId);
    if (connection == null) {
      connection = createConnection(remoteId, this.codec, this.compressor);
      connections.put(remoteId, connection);
    }
  }
  connection.addCall(call);
  // We deliberately do not invoke the method below inside the
  // "synchronized (connections)" block above: if the server happens to be slow,
  // establishing the connection would take longer, and that would slow the
  // entire system down.
  // Moreover, if the connection is currently being created, many threads will
  // wait here, since setupIOstreams is synchronized. If the connection fails
  // with a timeout, they will all fail simultaneously. This is checked in
  // setupIOstreams.
  connection.setupIOstreams();
  return connection;
}
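This variant differs from Example 3 in two ways: rpcTimeout becomes part of the ConnectionId key, so connections with different timeouts are pooled separately, and the call is registered and the I/O streams are set up after the lock is released, for the reasons spelled out in the inline comments.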
Example 7: testEmptyWorks
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testEmptyWorks() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  KeyValueCodec kvc = new KeyValueCodec();
  Codec.Encoder encoder = kvc.getEncoder(dos);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(0, offset); // No cells written, so no bytes written.
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = kvc.getDecoder(dis);
  assertFalse(decoder.advance()); // And nothing to read back.
  dis.close();
  assertEquals(0, cis.getCount());
}
Example 8: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  KeyValueCodec kvc = new KeyValueCodec();
  Codec.Encoder encoder = kvc.getEncoder(dos);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  final long length = kv.getLength() + Bytes.SIZEOF_INT;
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(length, offset);
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = kvc.getDecoder(dis);
  assertTrue(decoder.advance());  // First read should pull in the KV.
  // Second read should trip over the end-of-stream marker and return false.
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(length, cis.getCount());
}
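The length assertion documents KeyValueCodec's wire format implicitly: the encoded size is kv.getLength() plus Bytes.SIZEOF_INT, i.e. the raw KeyValue bytes preceded by a 4-byte length prefix.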
Example 9: testEmptyWorks
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testEmptyWorks() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(dos);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  assertEquals(0, offset); // No cells written, so no bytes written.
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = codec.getDecoder(dis);
  assertFalse(decoder.advance()); // And nothing to read back.
  dis.close();
  assertEquals(0, cis.getCount());
}
Example 10: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(dos);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  kv.setMvccVersion(Long.MAX_VALUE);
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = codec.getDecoder(dis);
  assertTrue(decoder.advance());  // First read should pull in the KV.
  // Second read should trip over the end-of-stream marker and return false.
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(offset, cis.getCount());
}
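This is the same round trip as Example 15, except the test first pushes the cell's mvcc version to Long.MAX_VALUE before encoding; the assertions still only compare byte counts, not decoded field values.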
Example 11: buildCellBlock
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
private boolean buildCellBlock(final Codec codec, final CompressionCodec compressor,
    final CellScanner cellScanner, OutputStreamSupplier supplier) throws IOException {
  if (cellScanner == null) {
    return false;
  }
  if (codec == null) {
    throw new CellScannerButNoCodecException();
  }
  int bufferSize = cellBlockBuildingInitialBufferSize;
  encodeCellsTo(supplier.get(bufferSize), cellScanner, codec, compressor);
  if (LOG.isTraceEnabled() && bufferSize < supplier.size()) {
    LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + supplier.size()
        + "; up hbase.ipc.cellblock.building.initial.buffersize?");
  }
  return true;
}
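When trace logging is enabled, the method reports any growth beyond the initial buffer and points at the hbase.ipc.cellblock.building.initial.buffersize setting; raising it lets typical cell blocks be built without intermediate buffer reallocation.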
Example 12: encodeCellsTo
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec,
    CompressionCodec compressor) throws IOException {
  Compressor poolCompressor = null;
  try {
    if (compressor != null) {
      if (compressor instanceof Configurable) {
        ((Configurable) compressor).setConf(this.conf);
      }
      poolCompressor = CodecPool.getCompressor(compressor);
      os = compressor.createOutputStream(os, poolCompressor);
    }
    Codec.Encoder encoder = codec.getEncoder(os);
    while (cellScanner.advance()) {
      encoder.write(cellScanner.current());
    }
    encoder.flush();
  } catch (BufferOverflowException | IndexOutOfBoundsException e) {
    throw new DoNotRetryIOException(e);
  } finally {
    os.close();
    if (poolCompressor != null) {
      CodecPool.returnCompressor(poolCompressor);
    }
  }
}
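Two details are worth noting: the Compressor is borrowed from Hadoop's CodecPool and handed back in the finally block even if encoding fails, and BufferOverflowException / IndexOutOfBoundsException are rethrown as DoNotRetryIOException so the RPC layer does not retry a payload that cannot fit.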
Example 13: buildCellBlockStream
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
/**
 * Puts CellScanner Cells into a cell block using the passed-in <code>codec</code> and/or
 * <code>compressor</code>.
 * @param codec to use for encoding
 * @param compressor to use for encoding
 * @param cellScanner to encode
 * @param pool Pool of ByteBuffers to make use of.
 * @return Null, or a byte buffer filled with a cell block of the passed-in Cells encoded
 *         using the passed-in <code>codec</code> and/or <code>compressor</code>; the returned
 *         buffer has been flipped and is ready for reading. Use limit to find the total size.
 *         If <code>pool</code> was not null, then this returned ByteBuffer came from there
 *         and should be returned to the pool when done.
 * @throws IOException if encoding the cells fails
 */
public ByteBufferListOutputStream buildCellBlockStream(Codec codec, CompressionCodec compressor,
    CellScanner cellScanner, ByteBufferPool pool) throws IOException {
  if (cellScanner == null) {
    return null;
  }
  if (codec == null) {
    throw new CellScannerButNoCodecException();
  }
  assert pool != null;
  ByteBufferListOutputStream bbos = new ByteBufferListOutputStream(pool);
  encodeCellsTo(bbos, cellScanner, codec, compressor);
  if (bbos.size() == 0) {
    bbos.releaseResources();
    return null;
  }
  return bbos;
}
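Compared with buildCellBlock in Example 11, this variant writes into pooled ByteBuffers via ByteBufferListOutputStream rather than a caller-supplied stream; per the javadoc, a non-null result borrows buffers from the pool and must be returned when done, while an empty block releases its resources and yields null.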
Example 14: timerTests
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
private static void timerTests(final CellBlockBuilder builder, final int count, final int size,
    final Codec codec, final CompressionCodec compressor) throws IOException {
  final int cycles = 1000;
  StopWatch timer = new StopWatch();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(builder, timer, count, size, codec, compressor, false);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false + ", count="
      + count + ", size=" + size + ", took=" + timer.getTime() + "ms");
  timer.reset();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(builder, timer, count, size, codec, compressor, true);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true + ", count="
      + count + ", size=" + size + ", took=" + timer.getTime() + "ms");
}
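This is the same timing harness as Example 2, ported from IPCUtil to CellBlockBuilder; the measurement structure and log format are unchanged.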
Example 15: testOne
import org.apache.hadoop.hbase.codec.Codec; // import the required package/class
@Test
public void testOne() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  CountingOutputStream cos = new CountingOutputStream(baos);
  DataOutputStream dos = new DataOutputStream(cos);
  Codec codec = new CellCodec();
  Codec.Encoder encoder = codec.getEncoder(dos);
  final KeyValue kv =
      new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  encoder.write(kv);
  encoder.flush();
  dos.close();
  long offset = cos.getCount();
  CountingInputStream cis =
      new CountingInputStream(new ByteArrayInputStream(baos.toByteArray()));
  DataInputStream dis = new DataInputStream(cis);
  Codec.Decoder decoder = codec.getDecoder(dis);
  assertTrue(decoder.advance());  // First read should pull in the KV.
  // Second read should trip over the end-of-stream marker and return false.
  assertFalse(decoder.advance());
  dis.close();
  assertEquals(offset, cis.getCount());
}
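Structurally this mirrors the KeyValueCodec test in Example 8 (and Example 10 without the mvcc tweak), but because CellCodec writes out the individual elements of a Cell rather than one length-prefixed blob, the test checks only that the encoded and decoded byte counts agree instead of asserting a fixed length.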