

Java CompressionOutputStream.write Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.compress.CompressionOutputStream.write. If you have been wondering what CompressionOutputStream.write does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.compress.CompressionOutputStream.


The following presents eight code examples of the CompressionOutputStream.write method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
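Before the project-specific examples, here is a minimal, self-contained round trip illustrating the method itself. The gzip codec choice and the in-memory streams are our own assumptions for illustration, not code taken from any of the projects below:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CompressionWriteDemo {
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		// Instantiate the codec via ReflectionUtils so it picks up the Configuration.
		CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);

		byte[] payload = "hello, compressed world".getBytes("UTF-8");

		// Compress: write() feeds the codec's compressor; finish() flushes the
		// final compressed block without closing the underlying stream.
		ByteArrayOutputStream sink = new ByteArrayOutputStream();
		CompressionOutputStream out = codec.createOutputStream(sink);
		out.write(payload, 0, payload.length);
		out.finish();
		out.close();

		// Decompress and verify the round trip.
		CompressionInputStream in =
				codec.createInputStream(new ByteArrayInputStream(sink.toByteArray()));
		byte[] buf = new byte[payload.length];
		int n = 0, read;
		while (n < buf.length && (read = in.read(buf, n, buf.length - n)) > 0) {
			n += read;
		}
		in.close();
		System.out.println(new String(buf, 0, n, "UTF-8"));
	}
}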

Example 1: copy

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the class the method depends on
/**
 * Implements the copy algorithm using a 4k buffer.
 * 
 * @param in the compressed input to copy from
 * @param out the compressed output to copy to
 * @param mark the number of bytes to copy
 * @throws IOException
 */
private final static void copy(CompressionInputStream in,
		CompressionOutputStream out, long mark) throws IOException {
	// Cast after min() so a mark larger than Integer.MAX_VALUE cannot
	// produce a negative buffer size.
	int size = (int) Math.min(4096L, mark);
	byte[] buff = new byte[size];
	int len = 0;

	int diff = (int) mark;
	long count = 0;

	do {
		len = in.read(buff, 0, Math.min(diff, size));
		if (len < 0) { // EOF reached before mark bytes were copied
			break;
		}
		out.write(buff, 0, len);

		count += len;
		diff = (int) (mark - count);

	} while (diff > 0);

}
 
Developer: gerritjvv, Project: bigstreams, Lines: 28, Source: CompressionRollBackHelper.java
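For context, a hedged sketch of how such a helper might be driven. The file names, the gzip codec, and the assumption that copy(...) is accessible are ours, for illustration only:

// Hypothetical driver fragment: re-compress the first `mark` uncompressed
// bytes of src.gz into dst.gz (paths and codec are assumptions).
Configuration conf = new Configuration();
CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);

long mark = 8192L; // roll back to the first 8 KiB of uncompressed data
try (CompressionInputStream in = codec.createInputStream(new FileInputStream("src.gz"));
     CompressionOutputStream out = codec.createOutputStream(new FileOutputStream("dst.gz"))) {
	copy(in, out, mark);
	out.finish(); // flush the final compressed block before close
}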

Example 2: toReport

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the class the method depends on
public void toReport(BamQualityControlOptions options, FileSystem fs, Configuration conf, String sampleName) throws IOException {
	for (int i = 0; i < depths.length; i++) {
		Map<String, WrappedIntArray> sampleDepth = depths[i].laneDepth;
		for (String chrName : sampleDepth.keySet()) {
			StringBuffer cnvDepthFilePath = new StringBuffer();
			cnvDepthFilePath.append(options.getOutputPath());
			cnvDepthFilePath.append("/");
			cnvDepthFilePath.append("cnvDepth");
			cnvDepthFilePath.append("/");
			cnvDepthFilePath.append(sampleName);
			cnvDepthFilePath.append("-lane");
			cnvDepthFilePath.append(i);
			cnvDepthFilePath.append("/");
			cnvDepthFilePath.append(chrName);
			cnvDepthFilePath.append(".dep.gz");
			Path cnvDepthPath = new Path(cnvDepthFilePath.toString());
			FSDataOutputStream cnvDepthStream = fs.create(cnvDepthPath);
			CompressionCodecFactory codecFactory = new CompressionCodecFactory(conf);
			CompressionCodec codec = codecFactory.getCodec(cnvDepthPath); // resolved from the .gz extension
			CompressionOutputStream compressedOutput = codec.createOutputStream(cnvDepthStream);
			int[] depth = sampleDepth.get(chrName).getArray();
			StringBuilder sb = new StringBuilder();
			// The array appears to hold (position, depth) pairs; positions are shifted to 1-based.
			for (int j = 0; j < depth.length; j += 2) {
				sb.append(chrName);
				sb.append("\t");
				sb.append(depth[j] + 1);
				sb.append("\t");
				sb.append(depth[j + 1]);
				sb.append("\n");
			}
			compressedOutput.write(sb.toString().getBytes());
			compressedOutput.close();
			cnvDepthStream.close();
		}
	}
}
 
Developer: BGI-flexlab, Project: SOAPgaea, Lines: 39, Source: CNVDepthReport.java
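As a complement, a hedged sketch of reading such a .dep.gz report back through the same codec factory. This fragment reuses the fs, conf, options, and sampleName variables of toReport above; the concrete path and the field meanings are assumptions:

// Fragment: read one generated report back.
Path reportPath = new Path(options.getOutputPath() + "/cnvDepth/" + sampleName + "-lane0/chr1.dep.gz");
CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(reportPath);
try (BufferedReader reader = new BufferedReader(
		new InputStreamReader(codec.createInputStream(fs.open(reportPath))))) {
	String line;
	while ((line = reader.readLine()) != null) {
		String[] fields = line.split("\t"); // chrName, 1-based position, depth
		// ... consume fields ...
	}
}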

Example 3: send

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the class the method depends on
/**
 * Writes the protocol header and start bytes:<br/>
 * 4 bytes: length of the header codec class name.<br/>
 * string: the header codec class name.<br/>
 * 4 bytes: length of the header.<br/>
 * compressed JSON object representing the header.<br/>
 * 
 * @throws IOException
 * @throws InterruptedException
 */
public void send(Header header, CompressionCodec codec, DataOutput dataOut)
		throws IOException, InterruptedException {

	CompressionPool pool = compressionPoolFactory.get(codec);

	ByteArrayOutputStream byteOut = new ByteArrayOutputStream(100);

	CompressionOutputStream compressionOut = pool.create(byteOut,
			waitForCompressionResource, TimeUnit.MILLISECONDS);

	try {
		compressionOut.write(header.toJsonString().getBytes());
	} finally {
		compressionOut.finish();
		pool.closeAndRelease(compressionOut);
	}

	byte[] headerBytes = byteOut.toByteArray();

	byte[] compressCodecNameBytes = codec.getClass().getName().getBytes();

	dataOut.writeInt(compressCodecNameBytes.length);
	dataOut.write(compressCodecNameBytes);

	dataOut.writeInt(headerBytes.length);
	dataOut.write(headerBytes);

}
 
Developer: gerritjvv, Project: bigstreams, Lines: 38, Source: ProtocolImpl.java
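Because the Javadoc above fully specifies the frame layout, the read side follows mechanically. A hedged sketch of such a decoder; the method name, returning the raw JSON string instead of a Header object, and the use of a plain Configuration are our assumptions:

// Hypothetical decoder for the frame written by send(...).
public String readHeaderJson(DataInput dataIn, Configuration conf) throws IOException {
	// 4 bytes: codec class name length, then the name itself.
	byte[] codecNameBytes = new byte[dataIn.readInt()];
	dataIn.readFully(codecNameBytes);

	// 4 bytes: compressed header length, then the compressed JSON bytes.
	byte[] compressedHeader = new byte[dataIn.readInt()];
	dataIn.readFully(compressedHeader);

	CompressionCodec codec;
	try {
		codec = (CompressionCodec) ReflectionUtils.newInstance(
				conf.getClassByName(new String(codecNameBytes)), conf);
	} catch (ClassNotFoundException e) {
		throw new IOException("Unknown codec: " + new String(codecNameBytes), e);
	}

	// Decompress the JSON header.
	ByteArrayOutputStream json = new ByteArrayOutputStream();
	try (CompressionInputStream cin =
			codec.createInputStream(new ByteArrayInputStream(compressedHeader))) {
		byte[] buf = new byte[1024];
		int n;
		while ((n = cin.read(buf)) > 0) {
			json.write(buf, 0, n);
		}
	}
	return json.toString();
}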

Example 4: run

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the class the method depends on
public void run() {
	try {
		CompressionOutputStream cout = pool.create(out, 1000L, TimeUnit.MILLISECONDS);
		try {
			cout.write("TestString".getBytes());
		} finally {
			pool.closeAndRelease(cout);
		}

		ByteArrayInputStream input = new ByteArrayInputStream(out.toByteArray());

		CompressionInputStream cin = pool.create(input, 1000L, TimeUnit.MILLISECONDS);
		try {
			Reader reader = new InputStreamReader(cin);
			char[] ch = new char[10];
			int len = 0;
			StringBuilder buff = new StringBuilder();

			while ((len = reader.read(ch)) > 0) {
				buff.append(ch, 0, len);
			}

			System.out.println(buff.toString());
		} finally {
			pool.closeAndRelease(cin);
		}
	} catch (Throwable t) {
		t.printStackTrace();
	}

	latch.countDown();
}
 
Developer: gerritjvv, Project: bigstreams, Lines: 39, Source: TestCompressorPool.java
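Such custom pools exist because Hadoop compressors hold native resources that are expensive to allocate per write. Stock Hadoop ships org.apache.hadoop.io.compress.CodecPool for the same purpose; a hedged sketch of the equivalent compression step with it (gzip chosen for illustration):

// Fragment using Hadoop's built-in CodecPool instead of the project-specific pool.
CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, new Configuration());
ByteArrayOutputStream sink = new ByteArrayOutputStream();

Compressor compressor = CodecPool.getCompressor(codec);
try {
	CompressionOutputStream cout = codec.createOutputStream(sink, compressor);
	cout.write("TestString".getBytes());
	cout.finish();
	cout.close();
} finally {
	CodecPool.returnCompressor(compressor); // hand the native compressor back to the pool
}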

Example 5: streamContent

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the class the method depends on
/**
 * The output stream will only be sent approximately the number of bytes
 * given by the configured upper limit (the bufferSize field).<br/>
 * It is impossible to write out exactly that amount, but the method tries
 * to respect the limit by reading no further lines once it has been
 * passed.
 * 
 * @param fileLinePointer
 * @param reader
 *            expected to already be positioned at the correct line
 *            number, so that the first byte read is the start of that
 *            line
 * @param output
 *            the stream to send the compressed data to
 * @return boolean true if lines were read, false if none were read because
 *         of EOF.
 * @throws IOException
 * @throws InterruptedException
 */
public boolean streamContent(FileLinePointer fileLinePointer,
		BufferedReader reader, OutputStream output) throws IOException,
		InterruptedException {

	boolean readLines = false;

	// used to send compressed data
	CompressionOutputStream compressionOutput = pool.create(output,
			waitForCompressionResource, TimeUnit.MILLISECONDS);

	if (compressionOutput == null) {
		throw new IOException("No Compression Resource available for "
				+ codec.getClass().getName());
	}

	try {

		// used to read lines from the input stream correctly
		String line = null;
		int byteCount = 0;
		byte[] lineBytes = null;

		int lineCount = 0;
		// read while lines are available and the byteCount is smaller than
		// the
		// bytesUpperLimit
		while ((line = reader.readLine()) != null) {

			readLines = true;
			lineBytes = line.getBytes();
			compressionOutput.write(lineBytes);
			compressionOutput.write(NEW_LINE_BYTES);

			lineCount++;
			byteCount += lineBytes.length + NEW_LINE_BYTES.length;

			// do not put this in the while condition,
			// it will cause lines to be read and skipped
			if (byteCount >= bufferSize)
				break;

		}

		fileLinePointer.incFilePointer(byteCount);
		fileLinePointer.incLineReadPointer(lineCount);

	} finally {
		// cleanup always
		compressionOutput.finish();
		pool.closeAndRelease(compressionOutput);
	}

	return readLines;
}
 
Developer: gerritjvv, Project: bigstreams, Lines: 73, Source: FileLineStreamerImpl.java
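On the receiving side, the payload produced here is simply a compressed block of newline-delimited lines. A hedged consumer sketch; the chunk byte array and the pool-free codec use are assumptions:

// Fragment: decompress one streamed chunk and count its lines.
CompressionInputStream cin = codec.createInputStream(new ByteArrayInputStream(chunk));
BufferedReader reader = new BufferedReader(new InputStreamReader(cin));
int lines = 0;
String line;
while ((line = reader.readLine()) != null) {
	lines++; // each line was written as lineBytes followed by NEW_LINE_BYTES
}
reader.close();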

Example 6: runCheck

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the class the method depends on
@Override
public void runCheck() throws Exception {

	LOG.info("Checking CODEC ");

	checkTrue(codec != null, "No Codec provided");

	// test codec by writing a stream and reading it
	File file = File.createTempFile("testCodec",
			"." + codec.getDefaultExtension());

	String testString = "This is a test string to test if the codec actually works by writing and reading the same string";
	byte[] testBytes = testString.getBytes();

	// Compress String
	FileOutputStream fileOut = new FileOutputStream(file);
	CompressionOutputStream out = codec.createOutputStream(fileOut);
	try {
		out.write(testString.getBytes());
		out.finish();
	} finally {
		IOUtils.closeQuietly(out);
		IOUtils.closeQuietly(fileOut);
	}

	// Un-Compress String
	String returnString = null;

	FileInputStream fileIn = new FileInputStream(file);
	CompressionInputStream in = codec.createInputStream(fileIn);
	try {
		byte[] readInBytes = new byte[testBytes.length];
		int bytesRead = in.read(readInBytes);
		returnString = new String(readInBytes, 0, bytesRead);
	} catch (IOException t) {
		checkTrue(false, "Failed to compress and decompress a simple string with the codec "
				+ codec + " provided");
	} finally {
		IOUtils.closeQuietly(in);
		IOUtils.closeQuietly(fileIn);
	}

	checkTrue(testString.equals(returnString),
			"Failed to compress and decompress a simple string with the codec "
					+ codec + " provided");

	file.deleteOnExit();

	LOG.info("DONE");
}
 
Developer: gerritjvv, Project: bigstreams, Lines: 51, Source: CodecCheck.java
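When such a check fails, the usual first question is which codecs the Configuration actually exposes. A hedged fragment enumerating them (the output format is illustrative):

// List every codec class registered in the configuration (io.compression.codecs).
Configuration conf = new Configuration();
for (Class<? extends CompressionCodec> codecClass :
		CompressionCodecFactory.getCodecClasses(conf)) {
	CompressionCodec c = ReflectionUtils.newInstance(codecClass, conf);
	System.out.println(codecClass.getName() + " -> " + c.getDefaultExtension());
}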

Example 7: runCheck

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the class the method depends on
@Override
public void runCheck() throws Exception {

	LOG.info("Checking CODEC ");

	//test that compression is enabled
	//if no compression is to be used this test will pass even if no codec is available
	boolean compressionEnabled = configuration.getBoolean(CollectorProperties.WRITER.LOG_COMPRESS_OUTPUT.toString(),
			(Boolean)CollectorProperties.WRITER.LOG_COMPRESS_OUTPUT.getDefaultValue());
	
	if(compressionEnabled){
	
		LOG.info("Compression enabled");
		LOG.info("Using codec: " + codec);
		
		checkTrue(codec != null, "No Codec provided");

		// test codec by writing a stream and reading it
		File file = File.createTempFile("testCodec",
				"." + codec.getDefaultExtension());

		String testString = "This is a test string to test if the codec actually works by writing and reading the same string";
		byte[] testBytes = testString.getBytes();

		// Compress String
		FileOutputStream fileOut = new FileOutputStream(file);
		CompressionOutputStream out = codec.createOutputStream(fileOut);
		try {
			out.write(testString.getBytes());
			out.finish();
		} finally {
			IOUtils.closeQuietly(out);
			IOUtils.closeQuietly(fileOut);
		}

		// Un-Compress String
		String returnString = null;

		FileInputStream fileIn = new FileInputStream(file);
		CompressionInputStream in = codec.createInputStream(fileIn);
		try {
			byte[] readInBytes = new byte[testBytes.length];
			int bytesRead = in.read(readInBytes);
			returnString = new String(readInBytes, 0, bytesRead);
		} catch (IOException t) {
			checkTrue(false, "Failed to compress and decompress a simple string with the codec "
					+ codec + " provided");
		} finally {
			IOUtils.closeQuietly(in);
			IOUtils.closeQuietly(fileIn);
		}

		checkTrue(testString.equals(returnString),
				"Failed to compress and decompress a simple string with the codec "
						+ codec + " provided");

		file.deleteOnExit();
	} else {
		LOG.info("No compression is enabled");
	}
	
	LOG.info("DONE");
}
 
Developer: gerritjvv, Project: bigstreams, Lines: 64, Source: CodecCheck.java

Example 8: TestSnappyStream

import org.apache.hadoop.io.compress.CompressionOutputStream; // import the class the method depends on
@Test
public void TestSnappyStream() throws IOException {
    SnappyCodec codec = new SnappyCodec();
    codec.setConf(new Configuration());

    int blockSize = 1024;
    int inputSize = blockSize * 1024;

    byte[] input = new byte[inputSize];
    for (int i = 0; i < inputSize; ++i) {
        input[i] = (byte) i;
    }

    ByteArrayOutputStream compressedStream = new ByteArrayOutputStream();

    CompressionOutputStream compressor = codec.createOutputStream(compressedStream);
    int bytesCompressed = 0;
    while (bytesCompressed < inputSize) {
        int len = Math.min(inputSize - bytesCompressed, blockSize);
        compressor.write(input, bytesCompressed, len);
        bytesCompressed += len;
    }
    compressor.finish();

    byte[] rawCompressed = Snappy.compress(input);
    byte[] codecCompressed = compressedStream.toByteArray();

    // Validate that the result from the codec is the same as if we compressed the
    // buffer directly.
    assertArrayEquals(rawCompressed, codecCompressed);

    ByteArrayInputStream inputStream = new ByteArrayInputStream(codecCompressed);
    CompressionInputStream decompressor = codec.createInputStream(inputStream);
    byte[] codecDecompressed = new byte[inputSize];
    int bytesDecompressed = 0;
    int numBytes;
    // read() returns -1 at end of stream, so loop only while bytes are delivered.
    while ((numBytes = decompressor.read(codecDecompressed, bytesDecompressed, blockSize)) > 0) {
        bytesDecompressed += numBytes;
        if (bytesDecompressed == inputSize) break;
    }

    byte[] rawDecompressed = Snappy.uncompress(rawCompressed);

    assertArrayEquals(input, rawDecompressed);
    assertArrayEquals(input, codecDecompressed);
}
 
Developer: grokcoder, Project: pbase, Lines: 47, Source: TestSnappyCodec.java (the same test also appears in apache/parquet-mr)
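The block-by-block write loop above is the point of the test: chunked CompressionOutputStream.write calls produce the same bytes as compressing the buffer in one shot. A hedged equivalence fragment reusing the test's codec, input, inputSize, and codecCompressed variables:

// Compress the same input with a single bulk write and compare with the chunked result.
ByteArrayOutputStream oneShotStream = new ByteArrayOutputStream();
CompressionOutputStream oneShot = codec.createOutputStream(oneShotStream);
oneShot.write(input, 0, inputSize); // one call instead of 1024-byte chunks
oneShot.finish();
oneShot.close();
assertArrayEquals(codecCompressed, oneShotStream.toByteArray());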



Note: The org.apache.hadoop.io.compress.CompressionOutputStream.write examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors, who retain copyright over the code; consult each project's License before distributing or reusing it. Do not reproduce without permission.