本文整理汇总了Java中org.apache.hadoop.io.compress.Decompressor类的典型用法代码示例。如果您正苦于以下问题:Java Decompressor类的具体用法?Java Decompressor怎么用?Java Decompressor使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Decompressor类属于org.apache.hadoop.io.compress包,在下文中一共展示了Decompressor类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getDecompressor
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Obtains a {@link Decompressor} for this codec from the shared {@link CodecPool}.
 *
 * @return a reset, pooled decompressor, or {@code null} if this instance has no codec
 * @throws IOException if resolving the codec fails
 */
public Decompressor getDecompressor() throws IOException {
  CompressionCodec codec = getCodec();
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      if (decompressor.finished()) {
        // A finished() decompressor straight out of the pool means some caller
        // returned it to the CodecPool while still using it.
        LOG.warn("Decompressor obtained from CodecPool already finished()");
      } else if (LOG.isDebugEnabled()) {
        LOG.debug("Got a decompressor: " + decompressor.hashCode());
      }
      // Reset unconditionally to get around bugs in 0.18 where a decompressor
      // is referenced after being returned to the codec pool.
      decompressor.reset();
    }
    return decompressor;
  }
  return null;
}
示例2: testZlibCompressorDecompressorWithConfiguration
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Round-trips random data through the native zlib compressor/decompressor
 * several times using a default Configuration, then reinitializes the
 * compressor from that configuration.
 */
@Test
public void testZlibCompressorDecompressorWithConfiguration() {
  Configuration conf = new Configuration();
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    final int iterations = 5;
    final int dataSize = 10 * 1024;
    Compressor compressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
    byte[] rawData = generate(dataSize);
    try {
      for (int i = 0; i < iterations; i++) {
        compressDecompressZlib(rawData, (ZlibCompressor) compressor,
            (ZlibDecompressor) decompressor);
      }
      compressor.reinit(conf);
    } catch (Exception ex) {
      fail("testZlibCompressorDecompressorWithConfiguration ex error " + ex);
    }
  } else {
    // Native zlib unavailable: fail with a descriptive message.
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
示例3: testZlibCompressorDecompressorWithCompressionLevels
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Round-trips random data through the native zlib compressor/decompressor with
 * an explicit compression level ("FOUR") set in the Configuration, then
 * reinitializes the compressor from that configuration.
 */
@Test
public void testZlibCompressorDecompressorWithCompressionLevels() {
  Configuration conf = new Configuration();
  conf.set("zlib.compress.level", "FOUR");
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    byte[] rawData;
    int tryNumber = 5;
    int BYTE_SIZE = 10 * 1024;
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
    rawData = generate(BYTE_SIZE);
    try {
      for (int i = 0; i < tryNumber; i++) {
        compressDecompressZlib(rawData, (ZlibCompressor) zlibCompressor,
            (ZlibDecompressor) zlibDecompressor);
      }
      zlibCompressor.reinit(conf);
    } catch (Exception ex) {
      // Fixed copy-paste bug: the message previously named the wrong test
      // (testZlibCompressorDecompressorWithConfiguration).
      fail("testZlibCompressorDecompressorWithCompressionLevels ex error " + ex);
    }
  } else {
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
示例4: testZlibCompressorDecompressorSetDictionary
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Verifies that setDictionary on the native zlib compressor and decompressor
 * rejects null dictionaries and out-of-bounds offsets.
 */
@Test
public void testZlibCompressorDecompressorSetDictionary() {
  Configuration conf = new Configuration();
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    Compressor compressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
    checkSetDictionaryNullPointerException(compressor);
    checkSetDictionaryNullPointerException(decompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(decompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(compressor);
  } else {
    // Native zlib unavailable: fail with a descriptive message.
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
示例5: testZlibCompressorDecompressorWithConfiguration
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Round-trips random data through the native zlib compressor/decompressor
 * with IO_NATIVE_LIB_AVAILABLE_KEY explicitly enabled, then reinitializes
 * the compressor from the configuration.
 */
@Test
public void testZlibCompressorDecompressorWithConfiguration() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    final int iterations = 5;
    final int dataSize = 10 * 1024;
    Compressor compressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
    byte[] rawData = generate(dataSize);
    try {
      for (int i = 0; i < iterations; i++) {
        compressDecompressZlib(rawData, (ZlibCompressor) compressor,
            (ZlibDecompressor) decompressor);
      }
      compressor.reinit(conf);
    } catch (Exception ex) {
      fail("testZlibCompressorDecompressorWithConfiguration ex error " + ex);
    }
  } else {
    // Native zlib unavailable: fail with a descriptive message.
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
示例6: testZlibCompressorDecompressorSetDictionary
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Verifies setDictionary argument validation (null and out-of-bounds) on the
 * native zlib compressor and decompressor, with IO_NATIVE_LIB_AVAILABLE_KEY
 * explicitly enabled.
 */
@Test
public void testZlibCompressorDecompressorSetDictionary() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    Compressor compressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
    checkSetDictionaryNullPointerException(compressor);
    checkSetDictionaryNullPointerException(decompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(decompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(compressor);
  } else {
    // Native zlib unavailable: fail with a descriptive message.
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
示例7: getDecompressor
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Obtains a {@link Decompressor} for this algorithm's codec from the shared
 * {@link CodecPool}.
 *
 * @return a reset, pooled decompressor, or {@code null} if no codec is configured
 */
public Decompressor getDecompressor() {
  CompressionCodec codec = getCodec(conf);
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Retrieved decompressor " + decompressor + " from pool.");
    }
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returned the decompressor to CodecPool but is still using it.
        LOG.warn("Decompressor obtained from CodecPool is already finished()");
      }
      // Always reset before handing out, so stale pool state cannot leak through.
      decompressor.reset();
    }
    return decompressor;
  }
  return null;
}
示例8: decompress
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
* Decompresses data from the given stream using the configured compression algorithm. It will
* throw an exception if the dest buffer does not have enough space to hold the decompressed data.
* @param dest the output bytes buffer
* @param destOffset start writing position of the output buffer
* @param bufferedBoundedStream a stream to read compressed data from, bounded to the exact amount
* of compressed data
* @param compressedSize compressed data size, header not included
* @param uncompressedSize uncompressed data size, header not included
* @param compressAlgo compression algorithm used
* @throws IOException
*/
/**
 * Decompresses data from the given stream using the configured compression algorithm. It will
 * throw an exception if the dest buffer does not have enough space to hold the decompressed data.
 * @param dest the output bytes buffer
 * @param destOffset start writing position of the output buffer
 * @param bufferedBoundedStream a stream to read compressed data from, bounded to the exact amount
 *          of compressed data
 * @param compressedSize compressed data size, header not included
 * @param uncompressedSize uncompressed data size, header not included
 * @param compressAlgo compression algorithm used
 * @throws IOException if reading or decompression fails
 */
public static void decompress(byte[] dest, int destOffset, InputStream bufferedBoundedStream,
    int compressedSize, int uncompressedSize, Compression.Algorithm compressAlgo)
    throws IOException {
  if (dest.length - destOffset < uncompressedSize) {
    throw new IllegalArgumentException("Output buffer does not have enough space to hold "
        + uncompressedSize + " decompressed bytes, available: " + (dest.length - destOffset));
  }
  Decompressor decompressor = null;
  try {
    decompressor = compressAlgo.getDecompressor();
    // try-with-resources closes the stream even when readFully throws;
    // the original only closed it on the success path.
    try (InputStream is =
        compressAlgo.createDecompressionStream(bufferedBoundedStream, decompressor, 0)) {
      IOUtils.readFully(is, dest, destOffset, uncompressedSize);
    }
  } finally {
    // Always return the pooled decompressor, even on failure.
    if (decompressor != null) {
      compressAlgo.returnDecompressor(decompressor);
    }
  }
}
示例9: createDecompressionStream
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Builds a buffered decompression stream over {@code downStream} using the
 * configured LZO codec.
 *
 * @param downStream raw compressed input
 * @param decompressor decompressor to drive the codec with
 * @param downStreamBufferSize if positive, wrap the input in a BufferedInputStream
 *          of this size before decompressing
 * @return a BufferedInputStream of decompressed bytes
 * @throws IOException if the LZO codec class was never configured
 */
@Override
public synchronized InputStream createDecompressionStream(
    InputStream downStream, Decompressor decompressor,
    int downStreamBufferSize) throws IOException {
  if (!isSupported()) {
    throw new IOException(
        "LZO codec class not specified. Did you forget to set property "
            + CONF_LZO_CLASS + "?");
  }
  InputStream source = (downStreamBufferSize > 0)
      ? new BufferedInputStream(downStream, downStreamBufferSize)
      : downStream;
  conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
  CompressionInputStream decompressed = codec.createInputStream(source, decompressor);
  return new BufferedInputStream(decompressed, DATA_IBUF_SIZE);
}
示例10: copy
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
*
* @param source
* @param dest
* @param codec
* @param compressor
* may be null
* @param decomp
* may be null
* @param mark
* @return
* @throws IOException
*/
/**
 * Copies up to {@code mark} bytes from {@code source} through the codec into
 * {@code dest} and returns the still-open compression output stream; the
 * caller is responsible for closing it on success.
 *
 * @param source file to read from
 * @param dest file to write compressed output to
 * @param codec codec used for both input and output streams
 * @param compressor may be null (codec supplies its own)
 * @param decomp may be null (codec supplies its own)
 * @param mark number of bytes to copy
 * @return the open compression output stream wrapping {@code dest}
 * @throws IOException if any stream creation or the copy fails; output streams
 *           are closed before rethrowing (the original leaked them)
 */
public static final CompressionOutputStream copy(File source, File dest,
    CompressionCodec codec, Compressor compressor, Decompressor decomp,
    long mark) throws IOException {
  FileInputStream fileInput = new FileInputStream(source);
  CompressionInputStream in = null;
  FileOutputStream fileOut = null;
  CompressionOutputStream out = null;
  boolean success = false;
  try {
    in = (decomp == null) ? codec.createInputStream(fileInput)
        : codec.createInputStream(fileInput, decomp);
    fileOut = new FileOutputStream(dest);
    out = (compressor == null) ? codec.createOutputStream(fileOut)
        : codec.createOutputStream(fileOut, compressor);
    copy(in, out, mark);
    success = true;
    return out;
  } finally {
    IOUtils.closeQuietly(in);
    IOUtils.closeQuietly(fileInput);
    // On failure, also release the output streams; on success they stay open
    // for the caller, matching the original contract.
    if (!success) {
      IOUtils.closeQuietly(out);
      IOUtils.closeQuietly(fileOut);
    }
  }
}
示例11: openFile
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Opens a file on the mini-cluster, transparently decompressing it when a
 * codec matches the path. Any decompressor obtained from the pool is recorded
 * in {@code openDecompressors} so it can be returned on close.
 *
 * @param path file to open
 * @return a (possibly decompressing) input stream over the file
 * @throws IOException on filesystem or codec errors
 */
private InputStream openFile(Path path) throws IOException {
  CompressionCodec codec = new CompressionCodecFactory(miniCluster.getConfig()).getCodec(path);
  FSDataInputStream rawStream = dfsCluster.getFileSystem().open(path);
  if (codec == null) {
    // Not compressed: hand back the raw stream.
    return rawStream;
  }
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  // Track the pooled decompressor so close() can return it later.
  this.openDecompressors.add(decompressor);
  if (codec instanceof SplittableCompressionCodec) {
    long fileLength = dfsCluster.getFileSystem().getFileStatus(path).getLen();
    return ((SplittableCompressionCodec) codec).createInputStream(rawStream, decompressor, 0,
        fileLength, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
  }
  return codec.createInputStream(rawStream, decompressor);
}
示例12: openFile
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Opens a file on the configured filesystem, transparently decompressing it
 * when a codec matches the path. Pooled decompressors are recorded in
 * {@code openDecompressors} for later return on close.
 *
 * @param path file to open
 * @return a (possibly decompressing) input stream over the file
 * @throws IOException on filesystem or codec errors
 */
public InputStream openFile(Path path) throws IOException {
  CompressionCodec codec = compressionCodecs.getCodec(path);
  FSDataInputStream rawStream = fs.open(path);
  if (codec == null) {
    // Not compressed: hand back the raw stream.
    LOG.debug("Reading from an uncompressed file \"" + path + "\"");
    return rawStream;
  }
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  // Track the pooled decompressor so close() can return it later.
  this.openDecompressors.add(decompressor);
  if (codec instanceof SplittableCompressionCodec) {
    LOG.debug("Reading from a compressed file \"" + path + "\" with splittable compression codec");
    long fileLength = fs.getFileStatus(path).getLen();
    return ((SplittableCompressionCodec) codec).createInputStream(rawStream, decompressor, 0,
        fileLength, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
  }
  LOG.debug("Reading from a compressed file \"" + path + "\" with non-splittable compression codec");
  return codec.createInputStream(rawStream, decompressor);
}
示例13: openFile
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Opens a file on the DFS mini-cluster, transparently decompressing it when a
 * codec matches the path. Pooled decompressors are recorded in
 * {@code openDecompressors} for later return on close.
 *
 * @param path file to open
 * @return a (possibly decompressing) input stream over the file
 * @throws IOException on filesystem or codec errors
 */
private InputStream openFile(Path path) throws IOException {
  CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(path);
  FSDataInputStream rawStream = dfsCluster.getFileSystem().open(path);
  if (codec == null) {
    // Not compressed: hand back the raw stream.
    return rawStream;
  }
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  // Track the pooled decompressor so close() can return it later.
  this.openDecompressors.add(decompressor);
  if (codec instanceof SplittableCompressionCodec) {
    long fileLength = dfsCluster.getFileSystem().getFileStatus(path).getLen();
    return ((SplittableCompressionCodec) codec).createInputStream(rawStream, decompressor, 0,
        fileLength, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
  }
  return codec.createInputStream(rawStream, decompressor);
}
开发者ID:ZuInnoTe,项目名称:hadoopcryptoledger,代码行数:19,代码来源:SparkBitcoinBlockCounterSparkMasterIntegrationTest.java
示例14: getBufferedReader
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Opens a local file as a BufferedReader, transparently decompressing it when
 * the job configuration's codec factory recognizes the path's extension.
 *
 * @param file local file to read
 * @param context MapredContext supplying the job Configuration
 * @return a plain BufferedReader for uncompressed files, or a
 *         BufferedReaderExt that owns the pooled decompressor otherwise
 * @throws IOException if opening or wrapping the file fails; the pooled
 *           decompressor and file stream are released before rethrowing
 *           (the original leaked both on failure)
 */
public static BufferedReader getBufferedReader(File file, MapredContext context)
    throws IOException {
  URI fileuri = file.toURI();
  Path path = new Path(fileuri);
  Configuration conf = context.getJobConf();
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(path);
  if (codec == null) {
    return new BufferedReader(new FileReader(file));
  }
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileInputStream fis = null;
  try {
    fis = new FileInputStream(file);
    CompressionInputStream cis = codec.createInputStream(fis, decompressor);
    // NOTE(review): InputStreamReader uses the platform default charset here;
    // confirm whether UTF-8 should be specified explicitly.
    return new BufferedReaderExt(new InputStreamReader(cis), decompressor);
  } catch (IOException e) {
    // Release resources that would otherwise leak, then rethrow the cause.
    CodecPool.returnDecompressor(decompressor);
    if (fis != null) {
      try {
        fis.close();
      } catch (IOException ignored) {
        // best-effort cleanup; keep the original exception
      }
    }
    throw e;
  }
}
示例15: getDecompressor
import org.apache.hadoop.io.compress.Decompressor; //导入依赖的package包/类
/**
 * Obtains a {@link Decompressor} for this algorithm's codec from the shared
 * {@link CodecPool}.
 *
 * @return a reset, pooled decompressor, or {@code null} if no codec is configured
 */
public Decompressor getDecompressor() {
  CompressionCodec codec = getCodec(conf);
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returned the decompressor to CodecPool but is still using it.
        LOG.warn("Decompressor obtained from CodecPool is already finished()");
      }
      // Always reset before handing out, so stale pool state cannot leak through.
      decompressor.reset();
    }
    return decompressor;
  }
  return null;
}