This article collects typical usage examples of the Java method org.apache.hadoop.io.compress.Compressor.reset. If you are wondering what Compressor.reset does, how to use it, or what it looks like in practice, the curated method examples below may help. You can also read further about the containing class org.apache.hadoop.io.compress.Compressor.
Seven code examples of Compressor.reset are shown below, sorted by popularity by default.
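Before the examples, here is a minimal sketch (not taken from any of the examples below) of the usual pattern they all follow: borrow a Compressor from CodecPool, call reset() before use so no state leaks over from a previous borrower, and return it to the pool afterwards. The GzipCodec choice, the class name CompressorResetDemo, and the compress() helper are illustrative assumptions.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CompressorResetDemo {
  public static byte[] compress(byte[] data) throws IOException {
    Configuration conf = new Configuration();
    // GzipCodec is only an example; any CompressionCodec can be used here.
    CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
    Compressor compressor = CodecPool.getCompressor(codec);
    try {
      if (compressor != null) {
        // Clear any state left behind by a previous user of this pooled compressor.
        compressor.reset();
      }
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      CompressionOutputStream out = (compressor != null)
          ? codec.createOutputStream(bytes, compressor)
          : codec.createOutputStream(bytes);
      out.write(data, 0, data.length);
      out.finish();
      out.close();
      return bytes.toByteArray();
    } finally {
      // returnCompressor(null) is a no-op, so this is safe either way.
      CodecPool.returnCompressor(compressor);
    }
  }
}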
Example 1: getCompressor
import org.apache.hadoop.io.compress.Compressor; // import the package/class this method depends on
public Compressor getCompressor() throws IOException {
  CompressionCodec codec = getCodec();
  if (codec != null) {
    Compressor compressor = CodecPool.getCompressor(codec);
    if (compressor != null) {
      if (compressor.finished()) {
        // Somebody returned the compressor to CodecPool but is still using it.
        LOG.warn("Compressor obtained from CodecPool already finished()");
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Got a compressor: " + compressor.hashCode());
        }
      }
      /**
       * Following statement is necessary to get around bugs in 0.18 where a
       * compressor is referenced after being returned back to the codec pool.
       */
      compressor.reset();
    }
    return compressor;
  }
  return null;
}
Example 2: getCompressor
import org.apache.hadoop.io.compress.Compressor; // import the package/class this method depends on
public Compressor getCompressor() {
  CompressionCodec codec = getCodec(conf);
  if (codec != null) {
    Compressor compressor = CodecPool.getCompressor(codec);
    if (LOG.isTraceEnabled()) LOG.trace("Retrieved compressor " + compressor + " from pool.");
    if (compressor != null) {
      if (compressor.finished()) {
        // Somebody returned the compressor to CodecPool but is still using it.
        LOG.warn("Compressor obtained from CodecPool is already finished()");
      }
      compressor.reset();
    }
    return compressor;
  }
  return null;
}
Example 3: getCompressedSize
import org.apache.hadoop.io.compress.Compressor; // import the package/class this method depends on
/**
 * Find the size of compressed data assuming that the buffer will be compressed
 * using the given algorithm.
 * @param algo compression algorithm
 * @param compressor compressor already requested from the codec
 * @param inputBuffer array to be compressed
 * @param offset offset to the beginning of the data
 * @param length length to be compressed
 * @return size of the compressed data in bytes
 * @throws IOException
 */
public static int getCompressedSize(Algorithm algo, Compressor compressor,
    byte[] inputBuffer, int offset, int length) throws IOException {
  DataOutputStream compressedStream = new DataOutputStream(
      new IOUtils.NullOutputStream());
  if (compressor != null) {
    compressor.reset();
  }
  OutputStream compressingStream = null;
  try {
    compressingStream = algo.createCompressionStream(
        compressedStream, compressor, 0);
    compressingStream.write(inputBuffer, offset, length);
    compressingStream.flush();
    return compressedStream.size();
  } finally {
    if (compressingStream != null) compressingStream.close();
  }
}
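A hedged usage sketch for getCompressedSize, assuming Algorithm here is HBase's Compression.Algorithm (which the createCompressionStream call suggests); the GZ choice and the all-zero sample buffer are illustrative, not from the original source:

// Measure how well a buffer would compress, without keeping the compressed bytes.
byte[] block = new byte[4096];                        // highly compressible all-zero data, for illustration
Compression.Algorithm algo = Compression.Algorithm.GZ;
Compressor compressor = algo.getCompressor();         // borrow a pooled compressor for this algorithm
try {
  int compressedSize = getCompressedSize(algo, compressor, block, 0, block.length);
  System.out.println("compressed " + block.length + " -> " + compressedSize + " bytes");
} finally {
  algo.returnCompressor(compressor);                  // hand it back to the pool
}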
Example 4: closeAndRelease
import org.apache.hadoop.io.compress.Compressor; // import the package/class this method depends on
@Override
public void closeAndRelease(CompressionOutputStream cout) {
  try {
    // finish quietly
    cout.finish();
  } catch (IOException ioexp) {
    LOG.error(ioexp.toString(), ioexp);
  }
  IOUtils.closeQuietly(cout);
  if (hasCompressors) {
    Compressor comp = usedCompressors.remove(cout);
    comp.reset();
    compressorQueue.offer(comp);
    status.setCounter(COMPRESSOR_STR,
        compressorsUsedCount.decrementAndGet());
  }
}
Example 5: getCompressor
import org.apache.hadoop.io.compress.Compressor; // import the package/class this method depends on
public Compressor getCompressor() {
  CompressionCodec codec = getCodec(conf);
  if (codec != null) {
    Compressor compressor = CodecPool.getCompressor(codec);
    if (compressor != null) {
      if (compressor.finished()) {
        // Somebody returned the compressor to CodecPool but is still using it.
        LOG.warn("Compressor obtained from CodecPool is already finished()");
        // throw new AssertionError(
        //     "Compressor obtained from CodecPool is already finished()");
      }
      compressor.reset();
    }
    return compressor;
  }
  return null;
}
Example 6: getCompressor
import org.apache.hadoop.io.compress.Compressor; // import the package/class this method depends on
public Compressor getCompressor() throws IOException {
  CompressionCodec codec = getCodec();
  if (codec != null) {
    Compressor compressor = CodecPool.getCompressor(codec);
    if (compressor != null) {
      if (compressor.finished()) {
        // Somebody returned the compressor to CodecPool but is still using it.
        LOG.warn("Compressor obtained from CodecPool already finished()");
      } else {
        LOG.debug("Got a compressor: " + compressor.hashCode());
      }
      /**
       * Following statement is necessary to get around bugs in 0.18 where a
       * compressor is referenced after being returned back to the codec pool.
       */
      compressor.reset();
    }
    return compressor;
  }
  return null;
}
Example 7: DataSegmentWriter
import org.apache.hadoop.io.compress.Compressor; // import the package/class this method depends on
/**
 * Create a new data segment from uncompressed data and a codec.
 * This is called by the writer.
 * @param compressor a compressor to reuse; it can be null.
 */
DataSegmentWriter(SimpleSeekableFormat.Buffer uncompressedData,
    CompressionCodec codec,
    Compressor compressor) throws IOException {
  // Try to compress
  if (codec != null) {
    SimpleSeekableFormat.Buffer compressedData = new SimpleSeekableFormat.Buffer();
    OutputStream out;
    if (compressor == null) {
      compressor = codec.createCompressor();
    } else {
      compressor.reset();
    }
    out = codec.createOutputStream(compressedData, compressor);
    out.write(uncompressedData.getData(), 0, uncompressedData.getLength());
    out.close();
    // Don't store the compressed form if it ends up longer than the uncompressed data.
    if (compressedData.getLength() + codec.getClass().getName().length()
        < uncompressedData.getLength() + 8) {
      codecName = codec.getClass().getName();
      storedData = compressedData;
    } else {
      codecName = "";
      storedData = uncompressedData;
    }
  } else {
    // no compression
    codecName = "";
    storedData = uncompressedData;
  }
  codecNameUTF8 = getCodecNameUTF8(codecName);
  // Calculate CRC32 only when there is no compression.
  if (codecName.length() == 0) {
    CRC32 crc32 = new CRC32();
    crc32.update(storedData.getData(), 0, storedData.getLength());
    crc32Value = crc32.getValue();
  } else {
    crc32Value = 0;
  }
}