This article collects typical usage examples of the Java method org.apache.hadoop.io.compress.CompressionInputStream.read. If you have been wondering what CompressionInputStream.read does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the declaring class, org.apache.hadoop.io.compress.CompressionInputStream.
The following presents 7 code examples of CompressionInputStream.read, sorted by popularity by default.
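Before the examples, a quick orientation: CompressionInputStream.read follows the usual java.io.InputStream contract, returning the number of bytes actually read (possibly fewer than requested) and -1 at end of stream, so callers normally drain it in a loop. The minimal sketch below illustrates that pattern; the GzipCodec and the file path are illustrative assumptions, not taken from the examples that follow.

import java.io.ByteArrayOutputStream;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class ReadLoopSketch {
  public static byte[] readAll(String path) throws IOException {
    Configuration conf = new Configuration();
    CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
    ByteArrayOutputStream result = new ByteArrayOutputStream();
    try (CompressionInputStream in = codec.createInputStream(new FileInputStream(path))) {
      byte[] buffer = new byte[4096];
      int n;
      // read returns -1 at end of stream, like any InputStream
      while ((n = in.read(buffer)) != -1) {
        result.write(buffer, 0, n);
      }
    }
    return result.toByteArray();
  }
}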
Example 1: copy
import org.apache.hadoop.io.compress.CompressionInputStream; // import the class/package the method depends on
/**
 * Implements the copy algorithm using a 4k buffer.
 *
 * @param in   the compressed input stream to read from
 * @param out  the compression output stream to write to
 * @param mark the total number of bytes to copy
 * @throws IOException if reading or writing fails
 */
private static final void copy(CompressionInputStream in,
    CompressionOutputStream out, long mark) throws IOException {
  int size = (int) Math.min(4096, mark);
  byte[] buff = new byte[size];
  int len = 0;
  long count = 0;
  long diff = mark;
  do {
    len = in.read(buff, 0, (int) Math.min(diff, size));
    if (len < 0) {
      break; // end of stream reached before mark bytes were copied
    }
    out.write(buff, 0, len);
    count += len;
    diff = mark - count;
  } while (diff > 0);
}
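A hypothetical call site for the helper above, assuming a GzipCodec on both sides; the codec choice and file names are illustrative and do not appear in the original source:

Configuration conf = new Configuration();
CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
try (CompressionInputStream in = codec.createInputStream(new FileInputStream("in.gz"));
     CompressionOutputStream out = codec.createOutputStream(new FileOutputStream("out.gz"))) {
  copy(in, out, 1024 * 1024); // recompress the first 1 MiB of decompressed data
  out.finish();
}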
Example 2: testSnappyCompressionSimple
import org.apache.hadoop.io.compress.CompressionInputStream; // import the class/package the method depends on
@Test
public void testSnappyCompressionSimple() throws IOException
{
  if (checkNativeSnappy()) {
    return;
  }
  File snappyFile = new File(testMeta.getDir(), "snappyTestFile.snappy");
  BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(snappyFile));
  Configuration conf = new Configuration();
  CompressionCodec codec = (CompressionCodec)ReflectionUtils.newInstance(SnappyCodec.class, conf);
  FilterStreamCodec.SnappyFilterStream filterStream = new FilterStreamCodec.SnappyFilterStream(
      codec.createOutputStream(os));
  int ONE_MB = 1024 * 1024;
  String testStr = "TestSnap-16bytes";
  for (int i = 0; i < ONE_MB; i++) { // 1M writes of a 16-byte string, i.e. 16 MiB in total
    filterStream.write(testStr.getBytes());
  }
  filterStream.flush();
  filterStream.close();
  CompressionInputStream is = codec.createInputStream(new FileInputStream(snappyFile));
  byte[] recovered = new byte[testStr.length()];
  int bytesRead = is.read(recovered); // only the first 16 bytes of the payload are read back
  is.close();
  assertEquals(testStr, new String(recovered));
}
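Note that the assertion above only verifies the first 16 bytes of the 16 MiB payload. A stricter variant, sketched here against the same snappyFile and codec fixtures from the test, would stream the whole file back and check the total byte count:

CompressionInputStream is2 = codec.createInputStream(new FileInputStream(snappyFile));
byte[] buf = new byte[64 * 1024];
long totalRead = 0;
int n;
while ((n = is2.read(buf)) != -1) { // drain the stream to end of data
  totalRead += n;
}
is2.close();
assertEquals(16L * 1024 * 1024, totalRead); // 1M writes of a 16-byte string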
Example 3: createInputStream
import org.apache.hadoop.io.compress.CompressionInputStream; // import the class/package the method depends on
@Override
public SplitCompressionInputStream createInputStream(InputStream seekableIn, Decompressor decompressor, long start, long end, READ_MODE readMode) throws IOException {
  if (!(seekableIn instanceof Seekable)) {
    throw new IOException("seekableIn must be an instance of " +
        Seekable.class.getName());
  }
  if (!BlockCompressedInputStream.isValidFile(new BufferedInputStream(seekableIn))) {
    // data is regular gzip, not BGZF
    ((Seekable)seekableIn).seek(0);
    final CompressionInputStream compressionInputStream = createInputStream(seekableIn,
        decompressor);
    // wrap the non-splittable gzip stream, delegating all reads to it
    return new SplitCompressionInputStream(compressionInputStream, start, end) {
      @Override
      public int read(byte[] b, int off, int len) throws IOException {
        return compressionInputStream.read(b, off, len);
      }
      @Override
      public void resetState() throws IOException {
        compressionInputStream.resetState();
      }
      @Override
      public int read() throws IOException {
        return compressionInputStream.read();
      }
    };
  }
  BGZFSplitGuesser splitGuesser = new BGZFSplitGuesser(seekableIn);
  long adjustedStart = splitGuesser.guessNextBGZFBlockStart(start, end);
  ((Seekable)seekableIn).seek(adjustedStart);
  return new BGZFSplitCompressionInputStream(seekableIn, adjustedStart, end);
}
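For context, this override implements the SplittableCompressionCodec contract. A caller such as a record reader would typically obtain the stream along the lines of the following sketch; the helper method and its parameters are illustrative, not part of the original class:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.SplitCompressionInputStream;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;

public class SplitOpenSketch {
  // Open one compressed split the way a record reader typically would.
  static SplitCompressionInputStream openSplit(FileSystem fs, Path file,
      SplittableCompressionCodec codec, long start, long end) throws IOException {
    FSDataInputStream fileIn = fs.open(file); // FSDataInputStream implements Seekable
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    return codec.createInputStream(fileIn, decompressor, start, end,
        SplittableCompressionCodec.READ_MODE.BYBLOCK);
  }
}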
Example 4: runCheck
import org.apache.hadoop.io.compress.CompressionInputStream; // import the class/package the method depends on
@Override
public void runCheck() throws Exception {
  LOG.info("Checking CODEC ");
  checkTrue(codec != null, "No Codec provided");
  // test the codec by writing a stream and reading it back
  File file = File.createTempFile("testCodec",
      "." + codec.getDefaultExtension());
  String testString = "This is a test string to test if the codec actually works by writing and reading the same string";
  byte[] testBytes = testString.getBytes();
  // Compress the string
  FileOutputStream fileOut = new FileOutputStream(file);
  CompressionOutputStream out = codec.createOutputStream(fileOut);
  try {
    out.write(testBytes);
    out.finish();
  } finally {
    IOUtils.closeQuietly(out);
    IOUtils.closeQuietly(fileOut);
  }
  // Decompress the string
  String returnString = null;
  FileInputStream fileIn = new FileInputStream(file);
  CompressionInputStream in = codec.createInputStream(fileIn);
  try {
    byte[] readInBytes = new byte[testBytes.length];
    int bytesRead = in.read(readInBytes); // may return fewer bytes than requested, or -1
    if (bytesRead > 0) {
      returnString = new String(readInBytes, 0, bytesRead);
    }
  } catch (IOException t) {
    checkTrue(false, "Failed to compress and decompress a simple string with the codec "
        + codec + " provided");
  } finally {
    IOUtils.closeQuietly(in);
    IOUtils.closeQuietly(fileIn);
  }
  checkTrue(testString.equals(returnString),
      "Failed to compress and decompress a simple string with the codec "
          + codec + " provided");
  file.deleteOnExit();
  LOG.info("DONE");
}
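One caveat in the check above: a single in.read(readInBytes) call is allowed to return fewer bytes than the buffer holds, so a very strict version would loop until the buffer is full or the stream ends. A minimal helper sketch, not part of the original check (uses java.io.InputStream and java.io.IOException):

// Read until the buffer is full or the stream ends; returns the byte count actually read.
static int readFully(InputStream in, byte[] buf) throws IOException {
  int total = 0;
  int n;
  while (total < buf.length && (n = in.read(buf, total, buf.length - total)) > 0) {
    total += n;
  }
  return total;
}

Hadoop also ships org.apache.hadoop.io.IOUtils.readFully for the same purpose, which throws if the stream ends early.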
Example 5: runCheck
import org.apache.hadoop.io.compress.CompressionInputStream; // import the class/package the method depends on
@Override
public void runCheck() throws Exception {
  LOG.info("Checking CODEC ");
  // test that compression is enabled;
  // if no compression is to be used, this test passes even when no codec is available
  boolean compressionEnabled = configuration.getBoolean(CollectorProperties.WRITER.LOG_COMPRESS_OUTPUT.toString(),
      (Boolean)CollectorProperties.WRITER.LOG_COMPRESS_OUTPUT.getDefaultValue());
  if (compressionEnabled) {
    LOG.info("Compression enabled");
    LOG.info("Using codec: " + codec);
    checkTrue(codec != null, "No Codec provided");
    // test the codec by writing a stream and reading it back
    File file = File.createTempFile("testCodec",
        "." + codec.getDefaultExtension());
    String testString = "This is a test string to test if the codec actually works by writing and reading the same string";
    byte[] testBytes = testString.getBytes();
    // Compress the string
    FileOutputStream fileOut = new FileOutputStream(file);
    CompressionOutputStream out = codec.createOutputStream(fileOut);
    try {
      out.write(testBytes);
      out.finish();
    } finally {
      IOUtils.closeQuietly(out);
      IOUtils.closeQuietly(fileOut);
    }
    // Decompress the string
    String returnString = null;
    FileInputStream fileIn = new FileInputStream(file);
    CompressionInputStream in = codec.createInputStream(fileIn);
    try {
      byte[] readInBytes = new byte[testBytes.length];
      int bytesRead = in.read(readInBytes); // may return fewer bytes than requested, or -1
      if (bytesRead > 0) {
        returnString = new String(readInBytes, 0, bytesRead);
      }
    } catch (IOException t) {
      checkTrue(false, "Failed to compress and decompress a simple string with the codec "
          + codec + " provided");
    } finally {
      IOUtils.closeQuietly(in);
      IOUtils.closeQuietly(fileIn);
    }
    checkTrue(testString.equals(returnString),
        "Failed to compress and decompress a simple string with the codec "
            + codec + " provided");
    file.deleteOnExit();
  } else {
    LOG.info("No compression is enabled");
  }
  LOG.info("DONE");
}
Example 6: TestSnappyStream
import org.apache.hadoop.io.compress.CompressionInputStream; // import the class/package the method depends on
@Test
public void TestSnappyStream() throws IOException {
  SnappyCodec codec = new SnappyCodec();
  codec.setConf(new Configuration());
  int blockSize = 1024;
  int inputSize = blockSize * 1024;
  byte[] input = new byte[inputSize];
  for (int i = 0; i < inputSize; ++i) {
    input[i] = (byte) i;
  }
  ByteArrayOutputStream compressedStream = new ByteArrayOutputStream();
  CompressionOutputStream compressor = codec.createOutputStream(compressedStream);
  int bytesCompressed = 0;
  while (bytesCompressed < inputSize) {
    int len = Math.min(inputSize - bytesCompressed, blockSize);
    compressor.write(input, bytesCompressed, len);
    bytesCompressed += len;
  }
  compressor.finish();
  byte[] rawCompressed = Snappy.compress(input);
  byte[] codecCompressed = compressedStream.toByteArray();
  // Validate that the result from the codec is the same as if we compressed the
  // buffer directly.
  assertArrayEquals(rawCompressed, codecCompressed);
  ByteArrayInputStream inputStream = new ByteArrayInputStream(codecCompressed);
  CompressionInputStream decompressor = codec.createInputStream(inputStream);
  byte[] codecDecompressed = new byte[inputSize];
  int bytesDecompressed = 0;
  int numBytes;
  // read returns -1 at end of stream, so guard with > 0 rather than != 0
  while ((numBytes = decompressor.read(codecDecompressed, bytesDecompressed, blockSize)) > 0) {
    bytesDecompressed += numBytes;
    if (bytesDecompressed == inputSize) {
      break; // buffer is full; a further read would overflow its bounds
    }
  }
  byte[] rawDecompressed = Snappy.uncompress(rawCompressed);
  assertArrayEquals(input, rawDecompressed);
  assertArrayEquals(input, codecDecompressed);
}
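A side note on the assertArrayEquals(rawCompressed, codecCompressed) check: it can only hold because this particular SnappyCodec evidently compresses the buffered stream contents in one shot on finish(), matching a direct Snappy.compress call. A codec that frames its output into length-prefixed blocks, as Hadoop's native Snappy stream does, would produce different, though equally valid, compressed bytes.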
Example 7: TestSnappyStream
import org.apache.hadoop.io.compress.CompressionInputStream; // import the class/package the method depends on
@Test
public void TestSnappyStream() throws IOException {
  SnappyCodec codec = new SnappyCodec();
  codec.setConf(new Configuration());
  int blockSize = 1024;
  int inputSize = blockSize * 1024;
  byte[] input = new byte[inputSize];
  for (int i = 0; i < inputSize; ++i) {
    input[i] = (byte) i;
  }
  ByteArrayOutputStream compressedStream = new ByteArrayOutputStream();
  CompressionOutputStream compressor = codec.createOutputStream(compressedStream);
  int bytesCompressed = 0;
  while (bytesCompressed < inputSize) {
    int len = Math.min(inputSize - bytesCompressed, blockSize);
    compressor.write(input, bytesCompressed, len);
    bytesCompressed += len;
  }
  compressor.finish();
  byte[] rawCompressed = Snappy.compress(input);
  byte[] codecCompressed = compressedStream.toByteArray();
  // Validate that the result from the codec is the same as if we compressed the
  // buffer directly.
  assertArrayEquals(rawCompressed, codecCompressed);
  ByteArrayInputStream inputStream = new ByteArrayInputStream(codecCompressed);
  CompressionInputStream decompressor = codec.createInputStream(inputStream);
  byte[] codecDecompressed = new byte[inputSize];
  int bytesDecompressed = 0;
  int numBytes;
  // read returns -1 at end of stream, so guard with > 0 rather than != 0
  while ((numBytes = decompressor.read(codecDecompressed, bytesDecompressed, blockSize)) > 0) {
    bytesDecompressed += numBytes;
    if (bytesDecompressed == inputSize) {
      break; // buffer is full; a further read would overflow its bounds
    }
  }
  byte[] rawDecompressed = Snappy.uncompress(rawCompressed);
  assertArrayEquals(input, rawDecompressed);
  assertArrayEquals(input, codecDecompressed);
}