

Java GzipCodec.createOutputStream Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.compress.GzipCodec.createOutputStream. If you are wondering what GzipCodec.createOutputStream does or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.compress.GzipCodec.


The following presents 6 code examples of GzipCodec.createOutputStream, ordered by popularity.
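Before the collected examples, here is a minimal sketch of the basic pattern, assuming a local file and a default Configuration (the path "example.gz" is hypothetical): instantiate GzipCodec, hand it a Configuration (the codec is Configurable and consults it when selecting a compressor), wrap a raw OutputStream with createOutputStream, and close the stream so the gzip trailer is flushed.

import java.io.FileOutputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.GzipCodec;

public class GzipCodecSketch {
    public static void main(String[] args) throws Exception {
        GzipCodec codec = new GzipCodec();
        codec.setConf(new Configuration()); // GzipCodec is Configurable; give it a conf

        // Wrap any raw OutputStream; everything written comes out gzip-compressed.
        try (OutputStream out = codec.createOutputStream(new FileOutputStream("example.gz"))) {
            out.write("hello, gzip\n".getBytes(StandardCharsets.UTF_8));
        } // close() finishes the deflater and writes the gzip trailer
    }
}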

Example 1: testGzCompressedInput

import org.apache.hadoop.io.compress.GzipCodec; // import the class the method depends on
@Test
public void testGzCompressedInput() throws IOException
{
	// write gzip-compressed data
	GzipCodec codec = new GzipCodec();
	PrintWriter fastqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
	fastqOut.write(twoFastq);
	fastqOut.close();

	// now try to read it
	split = new FileSplit(new Path(tempGz.toURI().toString()), 0, twoFastq.length(), null);
	FastqRecordReader reader = new FastqRecordReader(conf, split);

	boolean retval = reader.next(key, fragment);
	assertTrue(retval);
	assertEquals("ERR020229.10880 HWI-ST168_161:1:1:1373:2042/1", key.toString());
	assertEquals("TTGGATGATAGGGATTATTTGACTCGAATATTGGAAATAGCTGTTTATATTTTTTAAAAATGGTCTGTAACTGGTGACAGGACGCTTCGAT", fragment.getSequence().toString());

	retval = reader.next(key, fragment);
	assertTrue(retval);
	assertEquals("ERR020229.10883 HWI-ST168_161:1:1:1796:2044/1", key.toString());
	assertEquals("TGAGCAGATGTGCTAAAGCTGCTTCTCCCCTAGGATCATTTGTACCTACCAGACTCAGGGAAAGGGGTGAGAATTGGGCCGTGGGGCAAGG", fragment.getSequence().toString());
}
 
Developer: HadoopGenomics | Project: Hadoop-BAM | Lines: 24 | Source: TestFastqInputFormat.java

Example 2: testGzCompressedInput

import org.apache.hadoop.io.compress.GzipCodec; // import the class the method depends on
@Test
public void testGzCompressedInput() throws IOException
{
	// write gzip-compressed data
	GzipCodec codec = new GzipCodec();
	PrintWriter qseqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
	qseqOut.write(twoQseq);
	qseqOut.close();

	// now try to read it
	split = new FileSplit(new Path(tempGz.toURI().toString()), 0, twoQseq.length(), null);
	QseqRecordReader reader = new QseqRecordReader(conf, split);

	boolean retval = reader.next(key, fragment);
	assertTrue(retval);
	assertEquals("ERR020229:10880:1:1:1373:2042:1", key.toString());
	assertEquals("TTGGATGATAGGGATTATTTGACTCGAATATTGGAAATAGCTGTTTATATTTTTTAAAAATGGTCTGTAACTGGTGACAGGACGCTTCGAT", fragment.getSequence().toString());

	retval = reader.next(key, fragment);
	assertTrue(retval);
	assertEquals("ERR020229:10883:1:1:1796:2044:2", key.toString());
	assertEquals("TGAGCAGATGTGCTAAAGCTGCTTCTCCCCTAGGATCATTTGTACCTACCAGACTCAGGGAAAGGGGTGAGAATTGGGCCGTGGGGCAAGG", fragment.getSequence().toString());
}
 
Developer: HadoopGenomics | Project: Hadoop-BAM | Lines: 24 | Source: TestQseqInputFormat.java

Example 3: EmoSplitInputStream

import org.apache.hadoop.io.compress.GzipCodec; // import the class the method depends on
private EmoSplitInputStream(String table, String split)
        throws IOException {
    if (isEmptySplit(split)) {
        _rows = Iterators.emptyIterator();
    } else {
        // Get the DataStore and begin streaming the split's rows.
        CloseableDataStore dataStore = HadoopDataStoreManager.getInstance().getDataStore(_uri, _apiKey, _metricRegistry);
        _closer.register(dataStore);

        _rows = DataStoreStreaming.getSplit(dataStore, table, split, ReadConsistency.STRONG).iterator();
    }

    _buffer.clear();
    _buffer.limit(0);
    GzipCodec gzipCodec = new GzipCodec();
    gzipCodec.setConf(new Configuration());

    // Set up the pipes
    PipedOutputStream pipeRawToGzip = new PipedOutputStream();
    _gzipIn = new PipedInputStream(pipeRawToGzip, 10 * 1024 * 1024);
    _rawOut = gzipCodec.createOutputStream(pipeRawToGzip);
    _closer.register(_gzipIn);
    _closer.register(pipeRawToGzip);

    // Start the asynchronous buffering thread
    _bufferThread = new Thread(new Runnable() {
        @Override
        public void run() {
            streamAndCompressInput();
        }
    });
    _bufferThread.start();
}
 
Developer: bazaarvoice | Project: emodb | Lines: 34 | Source: EmoFileSystem.java
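Example 3's interesting move is pairing createOutputStream with a PipedOutputStream/PipedInputStream so that rows compressed by a background thread can be consumed elsewhere as a ready-made gzip byte stream. A stripped-down sketch of that pattern, with hypothetical names and buffer sizes, might look like this:

import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.GzipCodec;

public class PipedGzipSketch {
    public static void main(String[] args) throws Exception {
        GzipCodec codec = new GzipCodec();
        codec.setConf(new Configuration());

        // Raw bytes written to rawOut come out of gzipIn gzip-compressed.
        PipedOutputStream pipe = new PipedOutputStream();
        PipedInputStream gzipIn = new PipedInputStream(pipe, 64 * 1024);
        CompressionOutputStream rawOut = codec.createOutputStream(pipe);

        // Producer thread: compress rows into the pipe, then close to finish the stream.
        Thread producer = new Thread(() -> {
            try (CompressionOutputStream out = rawOut) {
                for (int i = 0; i < 1000; i++) {
                    out.write(("row-" + i + "\n").getBytes(StandardCharsets.UTF_8));
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        });
        producer.start();

        // Consumer: read the compressed bytes (e.g., to serve them as file content).
        byte[] buf = new byte[8192];
        int n, total = 0;
        while ((n = gzipIn.read(buf)) != -1) {
            total += n;
        }
        producer.join();
        System.out.println("compressed bytes: " + total);
    }
}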

Example 4: testCompressedSplit

import org.apache.hadoop.io.compress.GzipCodec; // import the class the method depends on
@Test(expected=RuntimeException.class)
public void testCompressedSplit() throws IOException
{
	// write gzip-compressed data
	GzipCodec codec = new GzipCodec();
	PrintWriter fastqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
	fastqOut.write(twoFastq);
	fastqOut.close();

	// now try to read it starting from the middle
	split = new FileSplit(new Path(tempGz.toURI().toString()), 10, twoFastq.length(), null);
	FastqRecordReader reader = new FastqRecordReader(conf, split);
}
 
Developer: HadoopGenomics | Project: Hadoop-BAM | Lines: 14 | Source: TestFastqInputFormat.java

Example 5: testCompressedSplit

import org.apache.hadoop.io.compress.GzipCodec; // import the class the method depends on
@Test(expected=RuntimeException.class)
public void testCompressedSplit() throws IOException
{
	// write gzip-compressed data
	GzipCodec codec = new GzipCodec();
	PrintWriter qseqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
	qseqOut.write(twoQseq);
	qseqOut.close();

	// now try to read it starting from the middle
	split = new FileSplit(new Path(tempGz.toURI().toString()), 10, twoQseq.length(), null);
	QseqRecordReader reader = new QseqRecordReader(conf, split);
}
 
Developer: HadoopGenomics | Project: Hadoop-BAM | Lines: 14 | Source: TestQseqInputFormat.java
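Both testCompressedSplit examples expect a RuntimeException for the same reason: a plain gzip stream is not splittable, since decompression must begin at byte 0, so a record reader handed a split that starts in the middle of a .gz file cannot honor it. A hedged sketch of the kind of guard such a reader might perform (the helper and message are hypothetical; only the codec-factory lookup and the splittability check mirror real Hadoop APIs):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;

public class SplitGuardSketch {
    // Hypothetical helper: reject splits that start inside a non-splittable compressed file.
    static void checkSplit(Configuration conf, Path file, long splitStart) {
        CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(file);
        boolean compressed = codec != null;
        boolean splittable = codec instanceof SplittableCompressionCodec; // e.g. BZip2Codec
        if (compressed && !splittable && splitStart != 0) {
            throw new RuntimeException(
                "Cannot start reading " + file + " at offset " + splitStart
                + ": " + codec.getClass().getSimpleName() + " streams are not splittable");
        }
    }
}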

Example 6: writeTestData

import org.apache.hadoop.io.compress.GzipCodec; // import the class the method depends on
@Override
public void writeTestData(File file, int recordCounts, int columnCount,
        String colSeparator) throws IOException {

    // write random test data
    GzipCodec gzipCodec = new GzipCodec();
    CompressionOutputStream out = gzipCodec
            .createOutputStream(new FileOutputStream(file));
    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
            out));

    try {

        for (int r = 0; r < recordCounts; r++) {
            // foreach row write n columns

            for (int c = 0; c < columnCount; c++) {

                if (c != 0) {
                    writer.append(colSeparator);
                }

                writer.append(String.valueOf(Math.random()));

            }
            writer.append("\n");

        }

    } finally {
        writer.close();
        out.close();
    }

}
 
Developer: sigmoidanalytics | Project: spork-streaming | Lines: 36 | Source: TestAllLoader.java
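The natural counterpart for verifying data written through createOutputStream is CompressionCodec.createInputStream. A minimal read-back sketch under the same assumptions as above (local file, default Configuration; "testdata.gz" is a hypothetical path for a file produced by something like writeTestData):

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.GzipCodec;

public class GzipReadBackSketch {
    public static void main(String[] args) throws Exception {
        GzipCodec codec = new GzipCodec();
        codec.setConf(new Configuration());

        // createInputStream decompresses transparently while we read lines.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(
                codec.createInputStream(new FileInputStream("testdata.gz")),
                StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}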


Note: The org.apache.hadoop.io.compress.GzipCodec.createOutputStream examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects and remain the copyright of their original authors; consult each project's license before distributing or using the code. Do not reproduce without permission.