當前位置: 首頁>>代碼示例>>Java>>正文


Java GzipCodec.createOutputStream方法代碼示例

本文整理匯總了Java中org.apache.hadoop.io.compress.GzipCodec.createOutputStream方法的典型用法代碼示例。如果您正苦於以下問題:Java GzipCodec.createOutputStream方法的具體用法?Java GzipCodec.createOutputStream怎麽用?Java GzipCodec.createOutputStream使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在org.apache.hadoop.io.compress.GzipCodec的用法示例。


在下文中一共展示了GzipCodec.createOutputStream方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: testGzCompressedInput

import org.apache.hadoop.io.compress.GzipCodec; //導入方法依賴的package包/類
@Test
public void testGzCompressedInput() throws IOException
{
	// Write gzip-compressed FASTQ test data. PrintWriter swallows
	// IOExceptions, so close in a finally block and surface any write
	// failure explicitly via checkError() — otherwise a bad write would
	// show up only as a confusing read failure below.
	GzipCodec codec = new GzipCodec();
	PrintWriter fastqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
	try {
		fastqOut.write(twoFastq);
	} finally {
		fastqOut.close();
	}
	if (fastqOut.checkError())
		throw new IOException("failed to write gzip-compressed test data to " + tempGz);

	// Now read it back through the record reader, covering the whole file
	// (offset 0). Length is the uncompressed text length, as in the
	// sibling tests.
	split = new FileSplit(new Path(tempGz.toURI().toString()), 0, twoFastq.length(), null);
	FastqRecordReader reader = new FastqRecordReader(conf, split);

	// First record: id and sequence must round-trip through compression.
	boolean retval = reader.next(key, fragment);
	assertTrue(retval);
	assertEquals("ERR020229.10880 HWI-ST168_161:1:1:1373:2042/1", key.toString());
	assertEquals("TTGGATGATAGGGATTATTTGACTCGAATATTGGAAATAGCTGTTTATATTTTTTAAAAATGGTCTGTAACTGGTGACAGGACGCTTCGAT", fragment.getSequence().toString());

	// Second record.
	retval = reader.next(key, fragment);
	assertTrue(retval);
	assertEquals("ERR020229.10883 HWI-ST168_161:1:1:1796:2044/1", key.toString());
	assertEquals("TGAGCAGATGTGCTAAAGCTGCTTCTCCCCTAGGATCATTTGTACCTACCAGACTCAGGGAAAGGGGTGAGAATTGGGCCGTGGGGCAAGG", fragment.getSequence().toString());
}
 
開發者ID:HadoopGenomics,項目名稱:Hadoop-BAM,代碼行數:24,代碼來源:TestFastqInputFormat.java

示例2: testGzCompressedInput

import org.apache.hadoop.io.compress.GzipCodec; //導入方法依賴的package包/類
@Test
public void testGzCompressedInput() throws IOException
{
	// Write gzip-compressed qseq test data. PrintWriter swallows
	// IOExceptions, so close in a finally block and surface any write
	// failure explicitly via checkError() — otherwise a bad write would
	// show up only as a confusing read failure below.
	GzipCodec codec = new GzipCodec();
	PrintWriter qseqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
	try {
		qseqOut.write(twoQseq);
	} finally {
		qseqOut.close();
	}
	if (qseqOut.checkError())
		throw new IOException("failed to write gzip-compressed test data to " + tempGz);

	// Now read it back through the record reader, covering the whole file
	// (offset 0). Length is the uncompressed text length, as in the
	// sibling tests.
	split = new FileSplit(new Path(tempGz.toURI().toString()), 0, twoQseq.length(), null);
	QseqRecordReader reader = new QseqRecordReader(conf, split);

	// First record: key and sequence must round-trip through compression.
	boolean retval = reader.next(key, fragment);
	assertTrue(retval);
	assertEquals("ERR020229:10880:1:1:1373:2042:1", key.toString());
	assertEquals("TTGGATGATAGGGATTATTTGACTCGAATATTGGAAATAGCTGTTTATATTTTTTAAAAATGGTCTGTAACTGGTGACAGGACGCTTCGAT", fragment.getSequence().toString());

	// Second record.
	retval = reader.next(key, fragment);
	assertTrue(retval);
	assertEquals("ERR020229:10883:1:1:1796:2044:2", key.toString());
	assertEquals("TGAGCAGATGTGCTAAAGCTGCTTCTCCCCTAGGATCATTTGTACCTACCAGACTCAGGGAAAGGGGTGAGAATTGGGCCGTGGGGCAAGG", fragment.getSequence().toString());
}
 
開發者ID:HadoopGenomics,項目名稱:Hadoop-BAM,代碼行數:24,代碼來源:TestQseqInputFormat.java

示例3: EmoSplitInputStream

import org.apache.hadoop.io.compress.GzipCodec; //導入方法依賴的package包/類
// Opens a stream over a single table split: the split's rows are pulled from
// the DataStore by a background thread, written through a gzip compressor
// into a pipe, and the compressed bytes are then readable from _gzipIn.
private EmoSplitInputStream(String table, String split)
        throws IOException {
    // An empty split produces no rows; skip the DataStore round-trip entirely.
    if (isEmptySplit(split)) {
        _rows = Iterators.emptyIterator();
    } else {
        // Get the DataStore and begin streaming the split's rows.
        CloseableDataStore dataStore = HadoopDataStoreManager.getInstance().getDataStore(_uri, _apiKey, _metricRegistry);
        _closer.register(dataStore);

        _rows = DataStoreStreaming.getSplit(dataStore, table, split, ReadConsistency.STRONG).iterator();
    }

    // Start with an empty, fully-drained buffer (limit 0) so the first read
    // triggers a refill rather than returning stale contents.
    _buffer.clear();
    _buffer.limit(0);
    GzipCodec gzipCodec = new GzipCodec();
    // GzipCodec is Configurable; it needs a Configuration before use.
    gzipCodec.setConf(new Configuration());

    // Set up the pipes: raw bytes written to _rawOut are gzip-compressed into
    // pipeRawToGzip, and the compressed stream is read back from _gzipIn
    // (10 MB pipe buffer between producer and consumer).
    PipedOutputStream pipeRawToGzip = new PipedOutputStream();
    _gzipIn = new PipedInputStream(pipeRawToGzip, 10 * 1024 * 1024);
    _rawOut = gzipCodec.createOutputStream(pipeRawToGzip);
    _closer.register(_gzipIn);
    _closer.register(pipeRawToGzip);

    // Start the asynchronous buffering thread that drains _rows and writes
    // them through the compression pipe. Started last, after all pipe and
    // buffer state above is fully initialized.
    _bufferThread = new Thread(new Runnable() {
        @Override
        public void run() {
            streamAndCompressInput();
        }
    });
    _bufferThread.start();
}
 
開發者ID:bazaarvoice,項目名稱:emodb,代碼行數:34,代碼來源:EmoFileSystem.java

示例4: testCompressedSplit

import org.apache.hadoop.io.compress.GzipCodec; //導入方法依賴的package包/類
@Test(expected=RuntimeException.class)
public void testCompressedSplit() throws IOException
{
	// Write gzip-compressed FASTQ test data. PrintWriter swallows
	// IOExceptions, so close in a finally block and surface any write
	// failure explicitly — a silent write failure here could mask the
	// RuntimeException this test is asserting.
	GzipCodec codec = new GzipCodec();
	PrintWriter fastqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
	try {
		fastqOut.write(twoFastq);
	} finally {
		fastqOut.close();
	}
	if (fastqOut.checkError())
		throw new IOException("failed to write gzip-compressed test data to " + tempGz);

	// A gzip stream cannot be read from a non-zero offset; constructing a
	// reader over a mid-file split must throw the expected RuntimeException.
	// (No need to keep the reader in a local — only the constructor matters.)
	split = new FileSplit(new Path(tempGz.toURI().toString()), 10, twoFastq.length(), null);
	new FastqRecordReader(conf, split);
}
 
開發者ID:HadoopGenomics,項目名稱:Hadoop-BAM,代碼行數:14,代碼來源:TestFastqInputFormat.java

示例5: testCompressedSplit

import org.apache.hadoop.io.compress.GzipCodec; //導入方法依賴的package包/類
@Test(expected=RuntimeException.class)
public void testCompressedSplit() throws IOException
{
	// Write gzip-compressed qseq test data. PrintWriter swallows
	// IOExceptions, so close in a finally block and surface any write
	// failure explicitly — a silent write failure here could mask the
	// RuntimeException this test is asserting.
	GzipCodec codec = new GzipCodec();
	PrintWriter qseqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
	try {
		qseqOut.write(twoQseq);
	} finally {
		qseqOut.close();
	}
	if (qseqOut.checkError())
		throw new IOException("failed to write gzip-compressed test data to " + tempGz);

	// A gzip stream cannot be read from a non-zero offset; constructing a
	// reader over a mid-file split must throw the expected RuntimeException.
	// (No need to keep the reader in a local — only the constructor matters.)
	split = new FileSplit(new Path(tempGz.toURI().toString()), 10, twoQseq.length(), null);
	new QseqRecordReader(conf, split);
}
 
開發者ID:HadoopGenomics,項目名稱:Hadoop-BAM,代碼行數:14,代碼來源:TestQseqInputFormat.java

示例6: writeTestData

import org.apache.hadoop.io.compress.GzipCodec; //導入方法依賴的package包/類
@Override
public void writeTestData(File file, int recordCounts, int columnCount,
        String colSeparator) throws IOException {

    // Writes recordCounts rows of columnCount random numeric columns,
    // separated by colSeparator, gzip-compressed into the given file.
    //
    // try-with-resources replaces the original manual finally block, which
    // could leak `out` when writer.close() threw (out.close() was only
    // reached after writer.close() returned normally). Resources are closed
    // in reverse declaration order: the writer flushes and closes first,
    // then the compression stream.
    GzipCodec gzipCodec = new GzipCodec();
    try (CompressionOutputStream out = gzipCodec
                .createOutputStream(new FileOutputStream(file));
            BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
                    out))) {

        for (int r = 0; r < recordCounts; r++) {
            // For each row, write n columns joined by the separator.
            for (int c = 0; c < columnCount; c++) {

                if (c != 0) {
                    writer.append(colSeparator);
                }

                writer.append(String.valueOf(Math.random()));
            }
            writer.append("\n");
        }
    }
}
 
開發者ID:sigmoidanalytics,項目名稱:spork-streaming,代碼行數:36,代碼來源:TestAllLoader.java


注:本文中的org.apache.hadoop.io.compress.GzipCodec.createOutputStream方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。