This article collects typical usage examples of the Java class org.apache.hadoop.io.compress.CompressionOutputStream. If you are wondering what CompressionOutputStream is for or how to use it, the curated code examples below may help.
CompressionOutputStream lives in the org.apache.hadoop.io.compress package. Eleven code examples of the class are shown below, sorted by popularity by default.
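Before the examples, here is a minimal sketch of the class's typical lifecycle: obtain a codec, ask it for a CompressionOutputStream over a raw stream, write, then call finish() to flush the codec trailer. The codec choice (GzipCodec) and the output path are illustrative assumptions, not taken from the examples below.

import java.io.FileOutputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CompressionOutputStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // GzipCodec ships with Hadoop and works without native libraries.
    GzipCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);

    try (OutputStream raw = new FileOutputStream("data.gz");
         CompressionOutputStream out = codec.createOutputStream(raw)) {
      out.write("hello, compressed world".getBytes("UTF-8"));
      // finish() writes the compressed trailer without closing the underlying stream.
      out.finish();
    }
  }
}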
Example 1: createCompressionStream
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
@Override
public synchronized OutputStream createCompressionStream(
    OutputStream downStream, Compressor compressor,
    int downStreamBufferSize) throws IOException {
  if (!isSupported()) {
    throw new IOException(
        "LZO codec class not specified. Did you forget to set property "
            + CONF_LZO_CLASS + "?");
  }
  OutputStream bos1 = null;
  if (downStreamBufferSize > 0) {
    bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
  } else {
    bos1 = downStream;
  }
  conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
  CompressionOutputStream cos =
      codec.createOutputStream(bos1, compressor);
  BufferedOutputStream bos2 =
      new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
          DATA_OBUF_SIZE);
  return bos2;
}
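The example wraps the CompressionOutputStream in a FinishOnFlushCompressionStream before adding the outer buffer. That wrapper belongs to the surrounding project and is not shown here; as an assumption about its intent (emitting a complete compressed block on every flush), a minimal stand-in might look like this:

import java.io.FilterOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.compress.CompressionOutputStream;

// Hypothetical stand-in for the project's FinishOnFlushCompressionStream; not the real class.
class FinishOnFlushSketch extends FilterOutputStream {
  private final CompressionOutputStream compressedOut;

  FinishOnFlushSketch(CompressionOutputStream out) {
    super(out);
    this.compressedOut = out;
  }

  @Override
  public void write(byte[] b, int off, int len) throws IOException {
    compressedOut.write(b, off, len);
  }

  @Override
  public void flush() throws IOException {
    // Finish the current compressed block, push it downstream, then reset the codec
    // state so that subsequent writes can continue on the same stream.
    compressedOut.finish();
    compressedOut.flush();
    compressedOut.resetState();
  }
}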
Example 2: create
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
@Override
public CompressionOutputStream create(OutputStream output, long timeout,
    TimeUnit unit) throws IOException, InterruptedException {
  if (hasCompressors) {
    Compressor compressor = compressorQueue.poll(timeout, unit);
    if (compressor == null) {
      if (adaptiveIncrement) {
        LOG.info("Adaptive increment, creating new compressor");
        compressor = codec.createCompressor();
      } else {
        return null;
      }
    }
    CompressionOutputStream cout = codec.createOutputStream(output,
        compressor);
    usedCompressors.put(cout, compressor);
    status.setCounter(COMPRESSOR_STR,
        compressorsUsedCount.getAndIncrement());
    return cout;
  } else {
    return codec.createOutputStream(output);
  }
}
Example 3: closeAndRelease
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
@Override
public void closeAndRelease(CompressionOutputStream cout) {
  try {
    // finish quietly
    cout.finish();
  } catch (IOException ioexp) {
    LOG.error(ioexp.toString(), ioexp);
  }
  IOUtils.closeQuietly(cout);
  if (hasCompressors) {
    Compressor comp = usedCompressors.remove(cout);
    comp.reset();
    compressorQueue.offer(comp);
    status.setCounter(COMPRESSOR_STR,
        compressorsUsedCount.decrementAndGet());
  }
}
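Examples 2 and 3 form a pair: create() borrows a Compressor from a pool (or creates one adaptively) and closeAndRelease() finishes the stream and returns the compressor for reuse. Hadoop's built-in org.apache.hadoop.io.compress.CodecPool offers the same reuse pattern; the sketch below shows it for comparison (the codec choice and payload are illustrative assumptions, not part of the examples above).

import java.io.ByteArrayOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecPoolSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DefaultCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);

    // Borrow a compressor from the shared pool instead of creating one per stream.
    Compressor compressor = CodecPool.getCompressor(codec);
    try {
      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
      CompressionOutputStream out = codec.createOutputStream(buffer, compressor);
      out.write("pooled compression".getBytes("UTF-8"));
      out.finish();
      out.close();
      System.out.println("compressed size: " + buffer.size());
    } finally {
      // Return the compressor so other callers can reuse it, mirroring closeAndRelease().
      CodecPool.returnCompressor(compressor);
    }
  }
}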
Example 4: copy
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
/**
 * Copies the contents of source into dest, decompressing the input and
 * re-compressing the output with the given codec.
 *
 * @param source the file to read from
 * @param dest the file to write to
 * @param codec the codec used for both decompression and compression
 * @param compressor may be null
 * @param decomp may be null
 * @param mark
 * @return the CompressionOutputStream written to dest (left open for the caller)
 * @throws IOException
 */
public static final CompressionOutputStream copy(File source, File dest,
    CompressionCodec codec, Compressor compressor, Decompressor decomp,
    long mark) throws IOException {
  FileInputStream fileInput = new FileInputStream(source);
  CompressionInputStream in = (decomp == null) ? codec
      .createInputStream(fileInput) : codec.createInputStream(
      fileInput, decomp);
  FileOutputStream fileOut = new FileOutputStream(dest);
  CompressionOutputStream out = (compressor == null) ? codec
      .createOutputStream(fileOut) : codec.createOutputStream(
      fileOut, compressor);
  try {
    copy(in, out, mark);
    return out;
  } finally {
    IOUtils.closeQuietly(in);
    IOUtils.closeQuietly(fileInput);
  }
}
Example 5: createCompressionStream
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
public OutputStream createCompressionStream(
    OutputStream downStream, Compressor compressor, int downStreamBufferSize)
    throws IOException {
  OutputStream bos1 = null;
  if (downStreamBufferSize > 0) {
    bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
  } else {
    bos1 = downStream;
  }
  CompressionOutputStream cos =
      createPlainCompressionStream(bos1, compressor);
  BufferedOutputStream bos2 =
      new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
          DATA_OBUF_SIZE);
  return bos2;
}
Example 6: compress
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
public BytesInput compress(BytesInput bytes) throws IOException {
  final BytesInput compressedBytes;
  if (codec == null) {
    compressedBytes = bytes;
  } else {
    compressedOutBuffer.reset();
    if (compressor != null) {
      // null compressor for non-native gzip
      compressor.reset();
    }
    CompressionOutputStream cos = codec.createOutputStream(compressedOutBuffer, compressor);
    bytes.writeAllTo(cos);
    cos.finish();
    cos.close();
    compressedBytes = BytesInput.from(compressedOutBuffer);
  }
  return compressedBytes;
}
Example 7: compress
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
public BytesInput compress(BytesInput bytes)
    throws IOException
{
  final BytesInput compressedBytes;
  if (codec == null) {
    compressedBytes = bytes;
  }
  else {
    compressedOutBuffer.reset();
    if (compressor != null) {
      compressor.reset();
    }
    CompressionOutputStream outputStream = codec.createOutputStream(compressedOutBuffer, compressor);
    bytes.writeAllTo(outputStream);
    outputStream.finish();
    outputStream.close();
    compressedBytes = BytesInput.from(compressedOutBuffer);
  }
  return compressedBytes;
}
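Examples 6 and 7 compress a block of bytes into an in-memory buffer and rely on finish() followed by close() to complete the compressed output. The sketch below reproduces that round trip with plain streams, including decompression through the matching CompressionInputStream; the codec and sample payload are illustrative assumptions.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class InMemoryCompressSketch {
  public static void main(String[] args) throws Exception {
    DefaultCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, new Configuration());
    byte[] payload = "some bytes worth compressing".getBytes("UTF-8");

    // Compress into an in-memory buffer, as the compress() helpers above do.
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    CompressionOutputStream cos = codec.createOutputStream(compressed);
    cos.write(payload);
    cos.finish();  // flush the codec trailer
    cos.close();

    // Decompress and verify the round trip.
    CompressionInputStream cis =
        codec.createInputStream(new ByteArrayInputStream(compressed.toByteArray()));
    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    IOUtils.copyBytes(cis, restored, 4096, true);
    System.out.println(new String(restored.toByteArray(), "UTF-8"));
  }
}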
Example 8: createCompressionStream
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
@Override
public synchronized OutputStream createCompressionStream(
    OutputStream downStream, Compressor compressor,
    int downStreamBufferSize) throws IOException {
  if (!isSupported()) {
    throw new IOException(
        "LZO codec class not specified. Did you forget to set property "
            + CONF_LZO_CLASS + "?");
  }
  OutputStream bos1 = null;
  if (downStreamBufferSize > 0) {
    bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
  } else {
    bos1 = downStream;
  }
  conf.setInt(IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY,
      IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT);
  CompressionOutputStream cos =
      codec.createOutputStream(bos1, compressor);
  BufferedOutputStream bos2 =
      new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
          DATA_OBUF_SIZE);
  return bos2;
}
Example 9: createCompressionStream
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
public OutputStream createCompressionStream(
    OutputStream downStream, Compressor compressor, int downStreamBufferSize)
    throws IOException {
  CompressionCodec codec = getCodec(conf);
  OutputStream bos1 = null;
  if (downStreamBufferSize > 0) {
    bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
  } else {
    bos1 = downStream;
  }
  ((Configurable) codec).getConf().setInt("io.file.buffer.size", 32 * 1024);
  CompressionOutputStream cos =
      codec.createOutputStream(bos1, compressor);
  BufferedOutputStream bos2 =
      new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
          DATA_OBUF_SIZE);
  return bos2;
}
Example 10: createCompressionStream
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
@Override
public synchronized OutputStream createCompressionStream(
    OutputStream downStream, Compressor compressor,
    int downStreamBufferSize) throws IOException {
  if (!isSupported()) {
    throw new IOException(
        "LZO codec class not specified. Did you forget to set property "
            + CONF_LZO_CLASS + "?");
  }
  OutputStream bos1 = null;
  if (downStreamBufferSize > 0) {
    bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
  } else {
    bos1 = downStream;
  }
  CompressionOutputStream cos =
      codec.createOutputStream(bos1, compressor);
  BufferedOutputStream bos2 =
      new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
          DATA_OBUF_SIZE);
  return bos2;
}
Example 11: compress
import org.apache.hadoop.io.compress.CompressionOutputStream; // import the dependent package/class
@Override
public BytesInput compress(BytesInput bytes) throws IOException {
  final BytesInput compressedBytes;
  if (codec == null) {
    compressedBytes = bytes;
  } else {
    compressedOutBuffer.reset();
    if (compressor != null) {
      // null compressor for non-native gzip
      compressor.reset();
    }
    CompressionOutputStream cos = codec.createOutputStream(compressedOutBuffer, compressor);
    bytes.writeAllTo(cos);
    cos.finish();
    cos.close();
    compressedBytes = BytesInput.from(compressedOutBuffer);
  }
  return compressedBytes;
}