This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.ByteBufferOutputStream: what the class is for, how it is used, and working code that exercises it.
ByteBufferOutputStream lives in the org.apache.hadoop.hbase.io package. Six code examples of the class are shown below, ordered roughly by popularity.
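For orientation before the examples, here is a minimal self-contained sketch of the core API exactly as the examples below use it: an int-capacity constructor, ordinary OutputStream writes, size(), and a getByteBuffer() that returns a flipped buffer ready for reading. (That the initial capacity is only a hint and the buffer grows on demand is inferred from the examples themselves, not from a spec.)

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.io.ByteBufferOutputStream;

public class ByteBufferOutputStreamDemo {
  public static void main(String[] args) throws Exception {
    // The initial capacity is only a hint; the stream grows its backing buffer as needed.
    try (ByteBufferOutputStream bbos = new ByteBufferOutputStream(16)) {
      bbos.write("hello, hbase".getBytes(StandardCharsets.UTF_8));
      System.out.println("bytes written: " + bbos.size());
      ByteBuffer bb = bbos.getByteBuffer(); // flipped: position=0, limit=size
      System.out.println("limit: " + bb.limit());
    }
  }
}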
Example 1: doRawSaslReply
import org.apache.hadoop.hbase.io.ByteBufferOutputStream; // import the dependent package/class
/**
* No protobuf encoding of raw sasl messages
*/
protected final void doRawSaslReply(SaslStatus status, Writable rv,
String errorClass, String error) throws IOException {
BufferChain bc;
// In testing, SASL messages are usually in the ballpark of 100-200 bytes;
// that's why the initial capacity is 256.
try (ByteBufferOutputStream saslResponse = new ByteBufferOutputStream(256);
DataOutputStream out = new DataOutputStream(saslResponse)) {
out.writeInt(status.state); // write status
if (status == SaslStatus.SUCCESS) {
rv.write(out);
} else {
WritableUtils.writeString(out, errorClass);
WritableUtils.writeString(out, error);
}
bc = new BufferChain(saslResponse.getByteBuffer());
}
doRespond(() -> bc);
}
Example 2: responseConnectionHeader
import org.apache.hadoop.hbase.io.ByteBufferOutputStream; // import the dependent package/class
/**
* Send the response for the connection header
*/
private void responseConnectionHeader(RPCProtos.ConnectionHeaderResponse.Builder chrBuilder)
throws FatalConnectionException {
// Respond with the connection header if Crypto AES is enabled
if (!chrBuilder.hasCryptoCipherMeta()) return;
try {
byte[] connectionHeaderResBytes = chrBuilder.build().toByteArray();
// Encrypt the Crypto AES cipher metadata with the SASL server and send it to the client
byte[] unwrapped = new byte[connectionHeaderResBytes.length + 4];
Bytes.putBytes(unwrapped, 0, Bytes.toBytes(connectionHeaderResBytes.length), 0, 4);
Bytes.putBytes(unwrapped, 4, connectionHeaderResBytes, 0, connectionHeaderResBytes.length);
byte[] wrapped = saslServer.wrap(unwrapped, 0, unwrapped.length);
BufferChain bc;
try (ByteBufferOutputStream response = new ByteBufferOutputStream(wrapped.length + 4);
DataOutputStream out = new DataOutputStream(response)) {
out.writeInt(wrapped.length);
out.write(wrapped);
bc = new BufferChain(response.getByteBuffer());
}
doRespond(() -> bc);
} catch (IOException ex) {
throw new UnsupportedCryptoException(ex.getMessage(), ex);
}
}
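The framing step above can be read in isolation: Bytes.toBytes(int) produces the 4-byte big-endian length, and Bytes.putBytes copies it, then the payload, into a single array before SASL wrapping. A minimal sketch of just the framing, with the SASL wrap left out:

import org.apache.hadoop.hbase.util.Bytes;

public class LengthPrefixDemo {
  // Prefix a payload with its 4-byte big-endian length, as in the example above.
  static byte[] frame(byte[] payload) {
    byte[] framed = new byte[payload.length + 4];
    Bytes.putBytes(framed, 0, Bytes.toBytes(payload.length), 0, 4);
    Bytes.putBytes(framed, 4, payload, 0, payload.length);
    return framed;
  }

  public static void main(String[] args) {
    byte[] framed = frame(new byte[] {1, 2, 3});
    System.out.println(framed.length); // 7: 4-byte length prefix + 3-byte payload
  }
}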
Example 3: testVarInt
import org.apache.hadoop.hbase.io.ByteBufferOutputStream; // import the dependent package/class
@Test
public void testVarInt() throws Exception {
//byte[] thingy = new byte[50];
long[] lengths = {1, 20, 200, 1024, 2048, 4000, 10000, 50000,
100000, 1024 * 1024,
((long) Integer.MAX_VALUE) * 100,
-1, -200, -5000};
for (long value : lengths) {
// do the test:
ByteBufferOutputStream bbos = new ByteBufferOutputStream(12);
CodedOutputStream cos = CodedOutputStream.newInstance(bbos);
long newvalue = (value << 4) | 8;
//cos.writeRawVarint64(newvalue);
cos.writeSInt64NoTag(newvalue);
cos.flush();
ByteBuffer bb = bbos.getByteBuffer();
System.out.println("value: " + value + ", length: " + bb.remaining());
ByteBufferInputStream bbis = new ByteBufferInputStream(bb);
CodedInputStream cis = CodedInputStream.newInstance(bbis);
long outval = cis.readSInt64();
long actual = outval >> 4;
long tag = outval & 0x0F;
System.out.println(" transformed we are: " + outval + " actual: " + actual + " tag: " + tag);
}
}
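What this test exercises is protobuf's zigzag varint encoding: writeSInt64NoTag stores small magnitudes, positive or negative, in few bytes and large ones in up to ten. A hedged companion sketch that reports the encoded sizes without building a buffer (computeSInt64SizeNoTag is a CodedOutputStream static in the protobuf versions I know; treat its availability on your classpath as an assumption):

import com.google.protobuf.CodedOutputStream;

public class VarIntSizeDemo {
  public static void main(String[] args) {
    long[] values = {1, 200, 100000, ((long) Integer.MAX_VALUE) * 100, -1, -5000};
    for (long v : values) {
      // Same (value << 4) | 8 transformation as the test above.
      long tagged = (v << 4) | 8;
      System.out.println(v + " -> "
          + CodedOutputStream.computeSInt64SizeNoTag(tagged) + " bytes");
    }
  }
}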
Example 4: buildCellBlock
import org.apache.hadoop.hbase.io.ByteBufferOutputStream; // import the dependent package/class
/**
* Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
* <code>compressor</code>.
* @param codec
* @param compressor
* @param cellScanner
* @return Null, or a byte buffer holding a cellblock of the passed-in Cells encoded using the
* passed-in <code>codec</code> and/or <code>compressor</code>; the returned buffer has been
* flipped and is ready for reading. Use limit to find the total size.
* @throws IOException
*/
@SuppressWarnings("resource")
public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner)
throws IOException {
if (cellScanner == null) return null;
if (codec == null) throw new CellScannerButNoCodecException();
int bufferSize = this.cellBlockBuildingInitialBufferSize;
if (cellScanner instanceof HeapSize) {
long longSize = ((HeapSize)cellScanner).heapSize();
// Just make sure we don't have a size bigger than an int.
if (longSize > Integer.MAX_VALUE) {
throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE);
}
bufferSize = ClassSize.align((int)longSize);
} // TODO: Else, get estimate on size of buffer rather than have the buffer resize.
// See TestIPCUtil main for experiment where we spin through the Cells getting estimate of
// total size before creating the buffer. It costs some small percentage. If we are usually
// within the estimated buffer size, then the cost is not worth it. If we are often well
// outside the guesstimated buffer size, the processing can be done in half the time if we
// go w/ the estimated size rather than let the buffer resize.
ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize);
OutputStream os = baos;
Compressor poolCompressor = null;
try {
if (compressor != null) {
if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf);
poolCompressor = CodecPool.getCompressor(compressor);
os = compressor.createOutputStream(os, poolCompressor);
}
Codec.Encoder encoder = codec.getEncoder(os);
int count = 0;
while (cellScanner.advance()) {
encoder.write(cellScanner.current());
count++;
}
encoder.flush();
// If no cells, don't mess around. Just return null (could be a bunch of existence checking
// gets or something -- stuff that does not return a cell).
if (count == 0) return null;
} finally {
os.close();
if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor);
}
if (LOG.isTraceEnabled()) {
if (bufferSize < baos.size()) {
LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + baos.size() +
"; up hbase.ipc.cellblock.building.initial.buffersize?");
}
}
return baos.getByteBuffer();
}
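A hedged sketch of a possible call site for buildCellBlock. KeyValueCodec and CellUtil.createCellScanner are real HBase client APIs; the host class of buildCellBlock (IPCUtil below) and the surrounding wiring are assumptions for illustration:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.codec.KeyValueCodec;
import org.apache.hadoop.hbase.ipc.IPCUtil; // assumed host class of buildCellBlock

public class CellBlockDemo {
  static ByteBuffer encodeCells(IPCUtil util, List<Cell> cells) throws IOException {
    ByteBuffer cellBlock = util.buildCellBlock(
        new KeyValueCodec(),                // codec: serializes each Cell
        null,                               // compressor: null means uncompressed
        CellUtil.createCellScanner(cells)); // CellScanner over the list
    // null means the scanner produced no cells; otherwise the buffer is flipped
    // and cellBlock.limit() is the total encoded size.
    return cellBlock;
  }
}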
Example 5: buildCellBlock
import org.apache.hadoop.hbase.io.ByteBufferOutputStream; // import the dependent package/class
/**
* Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
* <code>compressor</code>.
* @param codec
* @param compressor
* @param cellScanner
* @return Null, or a byte buffer holding a cellblock of the passed-in Cells encoded using the
* passed-in <code>codec</code> and/or <code>compressor</code>; the returned buffer has been
* flipped and is ready for reading. Use limit to find the total size.
* @throws IOException
*/
@SuppressWarnings("resource")
ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner)
throws IOException {
if (cellScanner == null) return null;
if (codec == null) throw new CellScannerButNoCodecException();
int bufferSize = this.cellBlockBuildingInitialBufferSize;
if (cellScanner instanceof HeapSize) {
long longSize = ((HeapSize)cellScanner).heapSize();
// Just make sure we don't have a size bigger than an int.
if (longSize > Integer.MAX_VALUE) {
throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE);
}
bufferSize = ClassSize.align((int)longSize);
} // TODO: Else, get estimate on size of buffer rather than have the buffer resize.
// See TestIPCUtil main for experiment where we spin through the Cells getting estimate of
// total size before creating the buffer. It costs some small percentage. If we are usually
// within the estimated buffer size, then the cost is not worth it. If we are often well
// outside the guesstimated buffer size, the processing can be done in half the time if we
// go w/ the estimated size rather than let the buffer resize.
ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize);
OutputStream os = baos;
Compressor poolCompressor = null;
try {
if (compressor != null) {
if (compressor instanceof Configurable) ((Configurable)compressor).setConf(this.conf);
poolCompressor = CodecPool.getCompressor(compressor);
os = compressor.createOutputStream(os, poolCompressor);
}
Codec.Encoder encoder = codec.getEncoder(os);
int count = 0;
while (cellScanner.advance()) {
encoder.write(cellScanner.current());
count++;
}
encoder.flush();
// If no cells, don't mess around. Just return null (could be a bunch of existence checking
// gets or something -- stuff that does not return a cell).
if (count == 0) return null;
} finally {
os.close();
if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor);
}
if (LOG.isTraceEnabled()) {
if (bufferSize < baos.size()) {
LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + baos.size() +
"; up hbase.ipc.cellblock.building.initial.buffersize?");
}
}
return baos.getByteBuffer();
}
Example 6: get
import org.apache.hadoop.hbase.io.ByteBufferOutputStream; // import the dependent package/class
@Override
public OutputStream get(int expectedSize) {
baos = new ByteBufferOutputStream(expectedSize);
return baos;
}
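Example 6 is a supplier hook: the caller announces the expected payload size and gets back a stream pre-sized to it, so a good estimate avoids buffer resizes. A minimal sketch of the surrounding shape (the OutputStreamSupplier interface and the result() accessor are hypothetical; only the get(int) override comes from the example):

import java.io.OutputStream;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.ByteBufferOutputStream;

// Hypothetical interface for illustration; the real enclosing type is not shown above.
interface OutputStreamSupplier {
  OutputStream get(int expectedSize);
}

class ByteBufferOutputStreamSupplier implements OutputStreamSupplier {
  private ByteBufferOutputStream baos;

  @Override
  public OutputStream get(int expectedSize) {
    // Pre-size the buffer so a well-estimated payload never forces a resize.
    baos = new ByteBufferOutputStream(expectedSize);
    return baos;
  }

  ByteBuffer result() {
    return baos.getByteBuffer(); // flipped, ready for reading
  }
}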