This article collects typical usage examples of the Java method org.apache.hadoop.io.compress.CodecPool.returnCompressor. If you are wondering what CodecPool.returnCompressor does and how exactly to use it, the curated code samples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.io.compress.CodecPool.
Thirteen code examples of CodecPool.returnCompressor are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
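Before diving into the examples, here is a minimal sketch of the usual CodecPool lifecycle: borrow a Compressor with getCompressor, wrap an output stream with it, and hand it back with returnCompressor in a finally block. The GzipCodec and the byte-array round trip are illustrative assumptions only, not taken from any of the examples below.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecPoolSketch {
  public static byte[] compress(byte[] data) throws IOException {
    Configuration conf = new Configuration();
    CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
    // Borrow a compressor from the pool (one is created if the pool is empty).
    Compressor compressor = CodecPool.getCompressor(codec);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (OutputStream out = codec.createOutputStream(baos, compressor)) {
      out.write(data);
    } finally {
      // Always hand the compressor back so other callers can reuse it.
      CodecPool.returnCompressor(compressor);
    }
    return baos.toByteArray();
  }
}

The same borrow-use-return pattern appears, in one form or another, in every example that follows.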
Example 1: close
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

/** Close the file. */
@Override
public synchronized void close() throws IOException {
  keySerializer.close();
  uncompressedValSerializer.close();
  if (compressedValSerializer != null) {
    compressedValSerializer.close();
  }
  CodecPool.returnCompressor(compressor);
  compressor = null;
  if (out != null) {
    // Close the underlying stream iff we own it...
    if (ownOutputStream) {
      out.close();
    } else {
      out.flush();
    }
    out = null;
  }
}
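A small design note: as far as I know, Hadoop's CodecPool.returnCompressor is a no-op when passed null, which is why this writer can call it unconditionally; nulling the compressor field afterwards guards against returning the same instance twice if close() is called again.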
Example 2: close
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

@Override
public void close() throws IOException {
  serializer.flush();
  serializer.beforeClose();
  if (!isFinished) {
    cmpOut.finish();
    isFinished = true;
  }
  fsOut.flush();
  hflushOrSync(fsOut);
  cmpOut.close();
  if (compressor != null) {
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }
  unregisterCurrentStream();
}
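Here cmpOut.finish() writes the codec's trailer without closing the underlying stream, so the raw stream can still be flushed and synced afterwards; hflushOrSync is presumably a helper in the surrounding class that calls hflush() where the running Hadoop version supports it and falls back to the older sync() call otherwise.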
Example 3: close
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

/** Close the file. */
public synchronized void close() throws IOException {
  keySerializer.close();
  uncompressedValSerializer.close();
  if (compressedValSerializer != null) {
    compressedValSerializer.close();
  }
  CodecPool.returnCompressor(compressor);
  compressor = null;
  if (out != null) {
    // Close the underlying stream iff we own it...
    if (ownOutputStream) {
      out.close();
    } else {
      out.flush();
    }
    out = null;
  }
}
Example 4: returnCompressor
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

public void returnCompressor(Compressor compressor) {
  if (compressor != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Return a compressor: " + compressor.hashCode());
    }
    CodecPool.returnCompressor(compressor);
  }
}
Example 5: close
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

@Override
public void close() throws IOException {
  try {
    writer.close();
  } finally {
    if (compressor != null) {
      CodecPool.returnCompressor(compressor);
    }
  }
}
Example 6: close
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

public void close() throws IOException {
  // When the IFile writer is created by BackupStore, the key and value
  // classes are not set, so check before closing the serializers.
  if (keyClass != null) {
    keySerializer.close();
    valueSerializer.close();
  }
  // Write EOF_MARKER for the key and value lengths.
  WritableUtils.writeVInt(out, EOF_MARKER);
  WritableUtils.writeVInt(out, EOF_MARKER);
  decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
  // Flush the stream.
  out.flush();
  if (compressOutput) {
    // Flush.
    compressedOut.finish();
    compressedOut.resetState();
  }
  // Close the underlying stream iff we own it...
  if (ownOutputStream) {
    out.close();
  } else {
    // Write the checksum.
    checksumOut.finish();
  }
  compressedBytesWritten = rawOut.getPos() - start;
  if (compressOutput) {
    // Return the compressor to the pool.
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }
  out = null;
  if (writtenRecordsCounter != null) {
    writtenRecordsCounter.increment(numRecordsWritten);
  }
}
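For context: in Hadoop's IFile format, EOF_MARKER is -1, which a vint encodes in a single byte, so the two markers add exactly 2 bytes to decompressedBytesWritten. Note also that the compressor is only borrowed when compressOutput is true, which is why it is only returned under that same flag.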
Example 7: returnCompressor
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

public void returnCompressor(Compressor compressor) {
  if (compressor != null) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Returning compressor " + compressor + " to pool.");
    }
    CodecPool.returnCompressor(compressor);
  }
}
Example 8: returnCompressor
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

public void returnCompressor(Compressor compressor) {
  if (compressor != null) {
    CodecPool.returnCompressor(compressor);
  }
}
Example 9: returnCompressor
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

public void returnCompressor(Compressor compressor) {
  if (compressor != null) {
    LOG.debug("Return a compressor: " + compressor.hashCode());
    CodecPool.returnCompressor(compressor);
  }
}
Example 10: release
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

private void release() {
  if (compressor != null) {
    CodecPool.returnCompressor(compressor);
  }
}
Example 11: buildCellBlock
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

/**
 * Puts CellScanner Cells into a cell block using the passed-in <code>codec</code> and/or
 * <code>compressor</code>.
 * @param codec the cell encoder to use
 * @param compressor an optional compression codec; may be null
 * @param cellScanner the cells to serialize
 * @return Null or a byte buffer filled with a cell block of the passed-in Cells encoded using
 * the passed-in <code>codec</code> and/or <code>compressor</code>; the returned buffer has been
 * flipped and is ready for reading. Use limit to find the total size.
 * @throws IOException
 */
@SuppressWarnings("resource")
public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
    final CellScanner cellScanner) throws IOException {
  if (cellScanner == null) return null;
  if (codec == null) throw new CellScannerButNoCodecException();
  int bufferSize = this.cellBlockBuildingInitialBufferSize;
  if (cellScanner instanceof HeapSize) {
    long longSize = ((HeapSize) cellScanner).heapSize();
    // Just make sure we don't have a size bigger than an int.
    if (longSize > Integer.MAX_VALUE) {
      throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE);
    }
    bufferSize = ClassSize.align((int) longSize);
  } // TODO: Else, get an estimate of the buffer size rather than have the buffer resize.
  // See TestIPCUtil main for an experiment where we spin through the Cells getting an estimate
  // of total size before creating the buffer. It costs some small percentage. If we are usually
  // within the estimated buffer size, then the cost is not worth it. If we are often well
  // outside the guesstimated buffer size, the processing can be done in half the time if we
  // go w/ the estimated size rather than let the buffer resize.
  ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize);
  OutputStream os = baos;
  Compressor poolCompressor = null;
  try {
    if (compressor != null) {
      if (compressor instanceof Configurable) ((Configurable) compressor).setConf(this.conf);
      poolCompressor = CodecPool.getCompressor(compressor);
      os = compressor.createOutputStream(os, poolCompressor);
    }
    Codec.Encoder encoder = codec.getEncoder(os);
    int count = 0;
    while (cellScanner.advance()) {
      encoder.write(cellScanner.current());
      count++;
    }
    encoder.flush();
    // If no cells, don't mess around. Just return null (could be a bunch of existence-checking
    // gets or something -- stuff that does not return a cell).
    if (count == 0) return null;
  } finally {
    os.close();
    if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor);
  }
  if (LOG.isTraceEnabled()) {
    if (bufferSize < baos.size()) {
      LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + baos.size() +
          "; up hbase.ipc.cellblock.building.initial.buffersize?");
    }
  }
  return baos.getByteBuffer();
}
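Returning poolCompressor in the finally block guarantees the borrowed compressor goes back to CodecPool even if encoding throws; the @SuppressWarnings("resource") annotation reflects that os is closed explicitly there rather than via try-with-resources.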
Example 12: release
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

private void release() {
  if (compressor != null) {
    CodecPool.returnCompressor(compressor);
  }
}
Example 13: buildCellBlock
import org.apache.hadoop.io.compress.CodecPool; // import the dependent package/class

/**
 * Puts CellScanner Cells into a cell block using the passed-in <code>codec</code> and/or
 * <code>compressor</code>.
 * @param codec the cell encoder to use
 * @param compressor an optional compression codec; may be null
 * @param cellScanner the cells to serialize
 * @return Null or a byte buffer filled with a cell block of the passed-in Cells encoded using
 * the passed-in <code>codec</code> and/or <code>compressor</code>; the returned buffer has been
 * flipped and is ready for reading. Use limit to find the total size.
 * @throws IOException
 */
@SuppressWarnings("resource")
ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
    final CellScanner cellScanner) throws IOException {
  if (cellScanner == null) return null;
  if (codec == null) throw new CellScannerButNoCodecException();
  int bufferSize = this.cellBlockBuildingInitialBufferSize;
  if (cellScanner instanceof HeapSize) {
    long longSize = ((HeapSize) cellScanner).heapSize();
    // Just make sure we don't have a size bigger than an int.
    if (longSize > Integer.MAX_VALUE) {
      throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE);
    }
    bufferSize = ClassSize.align((int) longSize);
  } // TODO: Else, get an estimate of the buffer size rather than have the buffer resize.
  // See TestIPCUtil main for an experiment where we spin through the Cells getting an estimate
  // of total size before creating the buffer. It costs some small percentage. If we are usually
  // within the estimated buffer size, then the cost is not worth it. If we are often well
  // outside the guesstimated buffer size, the processing can be done in half the time if we
  // go w/ the estimated size rather than let the buffer resize.
  ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize);
  OutputStream os = baos;
  Compressor poolCompressor = null;
  try {
    if (compressor != null) {
      if (compressor instanceof Configurable) ((Configurable) compressor).setConf(this.conf);
      poolCompressor = CodecPool.getCompressor(compressor);
      os = compressor.createOutputStream(os, poolCompressor);
    }
    Codec.Encoder encoder = codec.getEncoder(os);
    int count = 0;
    while (cellScanner.advance()) {
      encoder.write(cellScanner.current());
      count++;
    }
    encoder.flush();
    // If no cells, don't mess around. Just return null (could be a bunch of existence-checking
    // gets or something -- stuff that does not return a cell).
    if (count == 0) return null;
  } finally {
    os.close();
    if (poolCompressor != null) CodecPool.returnCompressor(poolCompressor);
  }
  if (LOG.isTraceEnabled()) {
    if (bufferSize < baos.size()) {
      LOG.trace("Buffer grew from initial bufferSize=" + bufferSize + " to " + baos.size() +
          "; up hbase.ipc.cellblock.building.initial.buffersize?");
    }
  }
  return baos.getByteBuffer();
}