

Java CompressionOutputStream.finish Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.io.compress.CompressionOutputStream.finish. If you are wondering how exactly CompressionOutputStream.finish is used, what it does, or what example usages look like, the curated method examples here may help. You can also explore further usage examples of the containing class, org.apache.hadoop.io.compress.CompressionOutputStream.


Below are 14 code examples of the CompressionOutputStream.finish method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
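Before diving into the examples, here is a minimal sketch of the core pattern they all share (the GzipCodec choice and the output path are illustrative assumptions, not taken from any example below): finish() completes the compressed stream, flushing the codec's internal buffers and writing any trailer, but leaves the underlying stream open, whereas close() closes the wrapped stream as well.

import java.io.FileOutputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class FinishSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // GzipCodec is an illustrative choice; any CompressionCodec works here
        CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);

        OutputStream fileOut = new FileOutputStream("example.gz"); // hypothetical path
        CompressionOutputStream out = codec.createOutputStream(fileOut);
        try {
            out.write("hello world".getBytes());
            // finish() flushes the codec and writes the compression trailer,
            // leaving fileOut itself open for further writes
            out.finish();
        } finally {
            out.close(); // close() also closes the wrapped fileOut
        }
    }
}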

Example 1: closeAndRelease

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
@Override
public void closeAndRelease(CompressionOutputStream cout) {

	try {
		// finish quietly
		cout.finish();
	} catch (IOException ioexp) {
		LOG.error(ioexp.toString(), ioexp);
	}

	IOUtils.closeQuietly(cout);

	if (hasCompressors) {
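		// return the pooled Compressor so other output streams can reuse it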
		Compressor comp = usedCompressors.remove(cout);
		comp.reset();
		compressorQueue.offer(comp);
		status.setCounter(COMPRESSOR_STR,
				compressorsUsedCount.decrementAndGet());
	}

}
 
Developer: gerritjvv | Project: bigstreams | Lines: 22 | Source: CompressionPoolImpl.java

Example 2: compress

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
public BytesInput compress(BytesInput bytes) throws IOException {
    final BytesInput compressedBytes;
    if (codec == null) {
        compressedBytes = bytes;
    } else {
        compressedOutBuffer.reset();
        if (compressor != null) {
            // compressor is null for non-native gzip, hence the null check
            compressor.reset();
        }
        CompressionOutputStream cos = codec.createOutputStream(compressedOutBuffer, compressor);
        bytes.writeAllTo(cos);
        cos.finish();
        cos.close();
        compressedBytes = BytesInput.from(compressedOutBuffer);
    }
    return compressedBytes;
}
 
Developer: grokcoder | Project: pbase | Lines: 19 | Source: CodecFactory.java

Example 3: compress

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
public BytesInput compress(BytesInput bytes)
        throws IOException
{
    final BytesInput compressedBytes;
    if (codec == null) {
        compressedBytes = bytes;
    }
    else {
        compressedOutBuffer.reset();
        if (compressor != null) {
            compressor.reset();
        }
        CompressionOutputStream outputStream = codec.createOutputStream(compressedOutBuffer, compressor);
        bytes.writeAllTo(outputStream);
        outputStream.finish();
        outputStream.close();
        compressedBytes = BytesInput.from(compressedOutBuffer);
    }
    return compressedBytes;
}
 
Developer: y-lan | Project: presto | Lines: 21 | Source: ParquetCodecFactory.java

Example 4: compress

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
@Override
public BytesInput compress(BytesInput bytes) throws IOException {
  final BytesInput compressedBytes;
  if (codec == null) {
    compressedBytes = bytes;
  } else {
    compressedOutBuffer.reset();
    if (compressor != null) {
      // compressor is null for non-native gzip, hence the null check
      compressor.reset();
    }
    CompressionOutputStream cos = codec.createOutputStream(compressedOutBuffer, compressor);
    bytes.writeAllTo(cos);
    cos.finish();
    cos.close();
    compressedBytes = BytesInput.from(compressedOutBuffer);
  }
  return compressedBytes;
}
 
Developer: apache | Project: parquet-mr | Lines: 20 | Source: CodecFactory.java

Example 5: flush

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
@Override
public void flush() throws IOException {
  CompressionOutputStream cout = (CompressionOutputStream) out;
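  // finish() writes the compression trailer so the bytes emitted so far form
  // a complete compressed stream; resetState() below then readies the codec
  // for further writes to the same underlying stream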
  cout.finish();
  cout.flush();
  cout.resetState();
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 8 | Source: Compression.java

Example 6: send

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
/**
 * Write the protocol header and start bytes.<br/>
 * 4 bytes length of header codec class name.<br/>
 * string which is header codec class name.<br/>
 * 4 bytes length of header.<br/>
 * compressed json object representing the header.<br/>
 * 
 * @throws InterruptedException
 */
public void send(Header header, CompressionCodec codec, DataOutput dataOut)
		throws IOException, InterruptedException {

	CompressionPool pool = compressionPoolFactory.get(codec);

	ByteArrayOutputStream byteOut = new ByteArrayOutputStream(100);

	CompressionOutputStream compressionOut = pool.create(byteOut,
			waitForCompressionResource, TimeUnit.MILLISECONDS);

	try {
		compressionOut.write(header.toJsonString().getBytes());
	} finally {
		compressionOut.finish();
		pool.closeAndRelease(compressionOut);
	}

	byte[] headerBytes = byteOut.toByteArray();

	byte[] compressCodecNameBytes = codec.getClass().getName().getBytes();

	dataOut.writeInt(compressCodecNameBytes.length);
	dataOut.write(compressCodecNameBytes);

	dataOut.writeInt(headerBytes.length);
	dataOut.write(headerBytes);

}
 
Developer: gerritjvv | Project: bigstreams | Lines: 38 | Source: ProtocolImpl.java
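For context, here is a hedged sketch of the matching read side of this small protocol; the source only shows the write side, so the method name readHeaderJson and its overall shape are assumptions:

import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.IOException;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.util.ReflectionUtils;

// hypothetical counterpart to send(): decodes the header written above
public String readHeaderJson(DataInput dataIn, Configuration conf)
		throws IOException, ClassNotFoundException {

	// 4 bytes: length of the codec class name, then the name itself
	byte[] codecNameBytes = new byte[dataIn.readInt()];
	dataIn.readFully(codecNameBytes);
	CompressionCodec codec = (CompressionCodec) ReflectionUtils.newInstance(
			Class.forName(new String(codecNameBytes)), conf);

	// 4 bytes: length of the compressed header, then the header bytes
	byte[] headerBytes = new byte[dataIn.readInt()];
	dataIn.readFully(headerBytes);

	// decompress the JSON header with the codec named in the stream
	CompressionInputStream in = codec.createInputStream(
			new ByteArrayInputStream(headerBytes));
	try {
		return new String(IOUtils.toByteArray(in));
	} finally {
		in.close();
	}
}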

Example 7: main

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
public static void main(String... args) throws Exception {
  String codecClassname = args[0];
  Class<?> codecClass = Class.forName(codecClassname);
  Configuration conf = new Configuration();
  CompressionCodec codec = (CompressionCodec)
    ReflectionUtils.newInstance(codecClass, conf);
  
  CompressionOutputStream out = codec.createOutputStream(System.out);
  IOUtils.copyBytes(System.in, out, 4096, false);
  out.finish();
}
 
Developer: xuzhikethinker | Project: t4f-data | Lines: 12 | Source: StreamCompressor.java
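A typical invocation of this small utility (assuming the compiled class is on the Hadoop classpath) compresses standard input with the named codec; piping the result through gunzip round-trips the data:

% echo "Text" | hadoop StreamCompressor org.apache.hadoop.io.compress.GzipCodec | gunzip -
Text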

Example 8: streamContent

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
/**
 * The output stream will receive approximately the number of bytes
 * specified by the bytesUpperLimit variable.<br/>
 * It is impossible to write out exactly the amount specified by
 * bytesUpperLimit, but the method respects it by no longer reading
 * lines once that limit has been passed.
 * 
 * @param fileLinePointer
 * @param input
 *            The input stream is expected to already be at the correct line
 *            number, and that the first byte will be that of the start of
 *            the line to read
 * @param output
 *            the stream to send the compressed data to
 * @return boolean true if lines were read, false if none were read because
 *         of EOF.
 * @throws InterruptedException
 */
public boolean streamContent(FileLinePointer fileLinePointer,
		BufferedReader reader, OutputStream output) throws IOException,
		InterruptedException {

	boolean readLines = false;

	// used to send compressed data
	CompressionOutputStream compressionOutput = pool.create(output,
			waitForCompressionResource, TimeUnit.MILLISECONDS);

	if (compressionOutput == null) {
		throw new IOException("No Compression Resource available for "
				+ codec.getClass().getName());
	}

	try {

		// used to read lines from the input stream correctly
		String line = null;
		int byteCount = 0;
		byte[] lineBytes = null;

		int lineCount = 0;
		// read while lines are available and byteCount is smaller than
		// the bytesUpperLimit
		while ((line = reader.readLine()) != null) {

			readLines = true;
			lineBytes = line.getBytes();
			compressionOutput.write(lineBytes);
			compressionOutput.write(NEW_LINE_BYTES);

			lineCount++;
			byteCount += lineBytes.length + NEW_LINE_BYTES.length;

			// do not put this in the while condition,
			// it will cause lines to be read and skipped
			if (byteCount >= bufferSize)
				break;

		}

		fileLinePointer.incFilePointer(byteCount);
		fileLinePointer.incLineReadPointer(lineCount);

	} finally {
		// cleanup always
		compressionOutput.finish();
		pool.closeAndRelease(compressionOutput);
	}

	return readLines;
}
 
Developer: gerritjvv | Project: bigstreams | Lines: 73 | Source: FileLineStreamerImpl.java

Example 9: runCheck

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
@Override
public void runCheck() throws Exception {

	LOG.info("Checking CODEC ");

	checkTrue(codec != null, "No Codec provided");

	// test codec by writing a stream and reading it
	File file = File.createTempFile("testCodec",
			"." + codec.getDefaultExtension());

	String testString = "This is a test string to test if the codec actually works by writing and reading the same string";
	byte[] testBytes = testString.getBytes();

	// Compress String
	FileOutputStream fileOut = new FileOutputStream(file);
	CompressionOutputStream out = codec.createOutputStream(fileOut);
	try {
		out.write(testString.getBytes());
		out.finish();
	} finally {
		IOUtils.closeQuietly(out);
		IOUtils.closeQuietly(fileOut);
	}

	// Un-Compress String
	String returnString = null;

	FileInputStream fileIn = new FileInputStream(file);
	CompressionInputStream in = codec.createInputStream(fileIn);
	try {
		byte[] readInBytes = new byte[testBytes.length];
		int bytesRead = in.read(readInBytes);
		returnString = new String(readInBytes, 0, bytesRead);
	} catch (IOException t) {
		checkTrue(false, "Failed to compress and decompress a simple string with the codec "
				+ codec + " provided");
	} finally {
		IOUtils.closeQuietly(in);
		IOUtils.closeQuietly(fileIn);
	}

	checkTrue(testString.equals(returnString),
			"Failed to compress and decompress a simple string with the codec "
					+ codec + " provided");

	file.deleteOnExit();

	LOG.info("DONE");
}
 
Developer: gerritjvv | Project: bigstreams | Lines: 51 | Source: CodecCheck.java

Example 10: runCheck

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
@Override
public void runCheck() throws Exception {

	LOG.info("Checking CODEC ");

	//test that compression is enabled
	//if no compression is to be used this test will pass even if no codec is available
	boolean compressionEnabled = configuration.getBoolean(CollectorProperties.WRITER.LOG_COMPRESS_OUTPUT.toString(),
			(Boolean)CollectorProperties.WRITER.LOG_COMPRESS_OUTPUT.getDefaultValue());
	
	if(compressionEnabled){
	
		LOG.info("Compression enabled");
		LOG.info("Using codec: " + codec);
		
		checkTrue(codec != null, "No Codec provided");

		// test codec by writing a stream and reading it
		File file = File.createTempFile("testCodec",
				"." + codec.getDefaultExtension());

		String testString = "This is a test string to test if the codec actually works by writing and reading the same string";
		byte[] testBytes = testString.getBytes();

		// Compress String
		FileOutputStream fileOut = new FileOutputStream(file);
		CompressionOutputStream out = codec.createOutputStream(fileOut);
		try {
			out.write(testString.getBytes());
			out.finish();
		} finally {
			IOUtils.closeQuietly(out);
			IOUtils.closeQuietly(fileOut);
		}

		// Un-Compress String
		String returnString = null;

		FileInputStream fileIn = new FileInputStream(file);
		CompressionInputStream in = codec.createInputStream(fileIn);
		try {
			byte[] readInBytes = new byte[testBytes.length];
			int bytesRead = in.read(readInBytes);
			returnString = new String(readInBytes, 0, bytesRead);
		} catch (IOException t) {
			checkTrue(false, "Failed to compress and decompress a simple string with the codec "
					+ codec + " provided");
		} finally {
			IOUtils.closeQuietly(in);
			IOUtils.closeQuietly(fileIn);
		}

		checkTrue(testString.equals(returnString),
				"Failed to compress and decompress a simple string with the codec "
						+ codec + " provided");

		file.deleteOnExit();
	}else{
		LOG.info("No compression is enabled");
	}
	
	LOG.info("DONE");
}
 
Developer: gerritjvv | Project: bigstreams | Lines: 64 | Source: CodecCheck.java

Example 11: codecTest

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
private static void codecTest(Configuration conf, int seed, int count, 
                              String codecClass) 
  throws IOException {
  
  // Create the codec
  CompressionCodec codec = null;
  try {
    codec = (CompressionCodec)
      ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException("Illegal codec!");
  }
  LOG.info("Created a Codec object of type: " + codecClass);

  // Generate data
  DataOutputBuffer data = new DataOutputBuffer();
  RandomDatum.Generator generator = new RandomDatum.Generator(seed);
  for(int i=0; i < count; ++i) {
    generator.next();
    RandomDatum key = generator.getKey();
    RandomDatum value = generator.getValue();
    
    key.write(data);
    value.write(data);
  }
  DataInputBuffer originalData = new DataInputBuffer();
  DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData));
  originalData.reset(data.getData(), 0, data.getLength());
  
  LOG.info("Generated " + count + " records");
  
  // Compress data
  DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
  CompressionOutputStream deflateFilter = 
    codec.createOutputStream(compressedDataBuffer);
  DataOutputStream deflateOut = 
    new DataOutputStream(new BufferedOutputStream(deflateFilter));
  deflateOut.write(data.getData(), 0, data.getLength());
  deflateOut.flush();
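  // flush the buffered wrapper first so every byte reaches the codec stream,
  // then finish() writes the compression trailer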
  deflateFilter.finish();
  LOG.info("Finished compressing data");
  
  // De-compress data
  DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
  deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0, 
                               compressedDataBuffer.getLength());
  CompressionInputStream inflateFilter = 
    codec.createInputStream(deCompressedDataBuffer);
  DataInputStream inflateIn = 
    new DataInputStream(new BufferedInputStream(inflateFilter));

  // Check
  for(int i=0; i < count; ++i) {
    RandomDatum k1 = new RandomDatum();
    RandomDatum v1 = new RandomDatum();
    k1.readFields(originalIn);
    v1.readFields(originalIn);
    
    RandomDatum k2 = new RandomDatum();
    RandomDatum v2 = new RandomDatum();
    k2.readFields(inflateIn);
    v2.readFields(inflateIn);
  }
  LOG.info("SUCCESS! Completed checking " + count + " records");
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 66 | Source: TestCodec.java

Example 12: testCompressionDecompression

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
@org.junit.Test
public void testCompressionDecompression() throws Exception {
    System.out.println("Compression/Decompression");

    XZCodec codec = new XZCodec();

    // Generate data
    DataOutputBuffer data = new DataOutputBuffer();
    RandomDatum.Generator generator = new RandomDatum.Generator(seed);
    for (int i = 0; i < count; ++i) {
        generator.next();
        RandomDatum key = generator.getKey();
        RandomDatum value = generator.getValue();
        key.write(data);
        value.write(data);
    }

    // Compress data
    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
    CompressionOutputStream deflateFilter
            = codec.createOutputStream(compressedDataBuffer);
    DataOutputStream deflateOut
            = new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(data.getData(), 0, data.getLength());
    deflateOut.flush();
    deflateFilter.finish();

    // De-compress data
    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
            compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter
            = codec.createInputStream(deCompressedDataBuffer);
    DataInputStream inflateIn
            = new DataInputStream(new BufferedInputStream(inflateFilter));

    // Check
    DataInputBuffer originalData = new DataInputBuffer();
    originalData.reset(data.getData(), 0, data.getLength());
    DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData));
    for (int i = 0; i < count; ++i) {
        RandomDatum k1 = new RandomDatum();
        RandomDatum v1 = new RandomDatum();
        k1.readFields(originalIn);
        v1.readFields(originalIn);
        RandomDatum k2 = new RandomDatum();
        RandomDatum v2 = new RandomDatum();
        k2.readFields(inflateIn);
        v2.readFields(inflateIn);
        assertTrue("original and compressed-then-decompressed-output not equal",
                k1.equals(k2) && v1.equals(v2));

        // original and compressed-then-decompressed-output have the same hashCode
        Map<RandomDatum, String> m = new HashMap<>();
        m.put(k1, k1.toString());
        m.put(v1, v1.toString());
        String result = m.get(k2);
        assertEquals("k1 and k2 hashcode not equal", result, k1.toString());
        result = m.get(v2);
        assertEquals("v1 and v2 hashcode not equal", result, v1.toString());
    }

    // De-compress data byte-at-a-time
    originalData.reset(data.getData(), 0, data.getLength());
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
            compressedDataBuffer.getLength());
    inflateFilter
            = codec.createInputStream(deCompressedDataBuffer);

    // Check
    originalIn = new DataInputStream(new BufferedInputStream(originalData));
    int expected;
    do {
        expected = originalIn.read();
        assertEquals("Inflated stream read by byte does not match",
                expected, inflateFilter.read());
    } while (expected != -1);
}
 
Developer: yongtang | Project: hadoop-xz | Lines: 79 | Source: XZCodecTest.java

Example 13: TestSnappyStream

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
@Test
public void TestSnappyStream() throws IOException {
    SnappyCodec codec = new SnappyCodec();
    codec.setConf(new Configuration());

    int blockSize = 1024;
    int inputSize = blockSize * 1024;

    byte[] input = new byte[inputSize];
    for (int i = 0; i < inputSize; ++i) {
        input[i] = (byte) i;
    }

    ByteArrayOutputStream compressedStream = new ByteArrayOutputStream();

    CompressionOutputStream compressor = codec.createOutputStream(compressedStream);
    int bytesCompressed = 0;
    while (bytesCompressed < inputSize) {
        int len = Math.min(inputSize - bytesCompressed, blockSize);
        compressor.write(input, bytesCompressed, len);
        bytesCompressed += len;
    }
    compressor.finish();

    byte[] rawCompressed = Snappy.compress(input);
    byte[] codecCompressed = compressedStream.toByteArray();

    // Validate that the result from the codec is the same as if we compressed the
    // buffer directly.
    assertArrayEquals(rawCompressed, codecCompressed);

    ByteArrayInputStream inputStream = new ByteArrayInputStream(codecCompressed);
    CompressionInputStream decompressor = codec.createInputStream(inputStream);
    byte[] codecDecompressed = new byte[inputSize];
    int bytesDecompressed = 0;
    int numBytes;
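    // read back in blockSize chunks; the break below exits once the full
    // input has been reassembled, before read() would signal end of stream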
    while ((numBytes = decompressor.read(codecDecompressed, bytesDecompressed, blockSize)) != 0) {
        bytesDecompressed += numBytes;
        if (bytesDecompressed == inputSize) break;
    }

    byte[] rawDecompressed = Snappy.uncompress(rawCompressed);

    assertArrayEquals(input, rawDecompressed);
    assertArrayEquals(input, codecDecompressed);
}
 
Developer: grokcoder | Project: pbase | Lines: 47 | Source: TestSnappyCodec.java

Example 14: TestSnappyStream

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the package/class the method depends on
@Test
public void TestSnappyStream() throws IOException {
  SnappyCodec codec = new SnappyCodec();
  codec.setConf(new Configuration());
  
  int blockSize = 1024;
  int inputSize = blockSize * 1024;
 
  byte[] input = new byte[inputSize];
  for (int i = 0; i < inputSize; ++i) {
    input[i] = (byte)i;
  }

  ByteArrayOutputStream compressedStream = new ByteArrayOutputStream();
  
  CompressionOutputStream compressor = codec.createOutputStream(compressedStream);
  int bytesCompressed = 0;
  while (bytesCompressed < inputSize) {
    int len = Math.min(inputSize - bytesCompressed, blockSize);
    compressor.write(input, bytesCompressed, len);
    bytesCompressed += len;
  }
  compressor.finish();
  
  byte[] rawCompressed = Snappy.compress(input);
  byte[] codecCompressed = compressedStream.toByteArray();
  
  // Validate that the result from the codec is the same as if we compressed the 
  // buffer directly.
  assertArrayEquals(rawCompressed, codecCompressed);

  ByteArrayInputStream inputStream = new ByteArrayInputStream(codecCompressed);    
  CompressionInputStream decompressor = codec.createInputStream(inputStream);
  byte[] codecDecompressed = new byte[inputSize];
  int bytesDecompressed = 0;
  int numBytes;
  while ((numBytes = decompressor.read(codecDecompressed, bytesDecompressed, blockSize)) != 0) {
    bytesDecompressed += numBytes;
    if (bytesDecompressed == inputSize) break;
  }
  
  byte[] rawDecompressed = Snappy.uncompress(rawCompressed);
  
  assertArrayEquals(input, rawDecompressed);
  assertArrayEquals(input, codecDecompressed);
}
 
Developer: apache | Project: parquet-mr | Lines: 47 | Source: TestSnappyCodec.java


Note: The org.apache.hadoop.io.compress.CompressionOutputStream.finish method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For distribution and use, refer to each project's License; do not republish without permission.