This article collects typical usage examples of the Java method org.apache.hadoop.io.compress.GzipCodec.createOutputStream. If you are unsure what GzipCodec.createOutputStream does or how to call it, the curated examples below should help. You can also explore the containing class, org.apache.hadoop.io.compress.GzipCodec, for more context.
Six code examples of GzipCodec.createOutputStream are shown below, sorted by popularity.
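Before diving into the examples, here is a minimal, self-contained sketch of the basic pattern (an illustrative sketch, not taken from any of the projects below; the file name output.gz and the sample payload are placeholders): instantiate GzipCodec, hand it a Configuration via setConf (as Example 3 below does; depending on the Hadoop version the codec may fail without one), then wrap a raw OutputStream with createOutputStream and write through the wrapper. Closing the wrapper finishes the gzip stream.

import java.io.FileOutputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.GzipCodec;

public class GzipCodecSketch {
    public static void main(String[] args) throws Exception {
        GzipCodec codec = new GzipCodec();
        codec.setConf(new Configuration()); // the codec reads its settings from a Configuration

        // Wrap a plain file stream; everything written to 'out' lands in output.gz gzip-compressed.
        try (OutputStream out = codec.createOutputStream(new FileOutputStream("output.gz"))) {
            out.write("hello, gzip".getBytes("UTF-8")); // placeholder payload
        } // close() flushes the compressor and writes the gzip trailer
    }
}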
Example 1: testGzCompressedInput
import org.apache.hadoop.io.compress.GzipCodec; // import the package/class the method depends on
@Test
public void testGzCompressedInput() throws IOException
{
    // write gzip-compressed data
    GzipCodec codec = new GzipCodec();
    PrintWriter fastqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
    fastqOut.write(twoFastq);
    fastqOut.close();

    // now try to read it
    split = new FileSplit(new Path(tempGz.toURI().toString()), 0, twoFastq.length(), null);
    FastqRecordReader reader = new FastqRecordReader(conf, split);

    boolean retval = reader.next(key, fragment);
    assertTrue(retval);
    assertEquals("ERR020229.10880 HWI-ST168_161:1:1:1373:2042/1", key.toString());
    assertEquals("TTGGATGATAGGGATTATTTGACTCGAATATTGGAAATAGCTGTTTATATTTTTTAAAAATGGTCTGTAACTGGTGACAGGACGCTTCGAT", fragment.getSequence().toString());

    retval = reader.next(key, fragment);
    assertTrue(retval);
    assertEquals("ERR020229.10883 HWI-ST168_161:1:1:1796:2044/1", key.toString());
    assertEquals("TGAGCAGATGTGCTAAAGCTGCTTCTCCCCTAGGATCATTTGTACCTACCAGACTCAGGGAAAGGGGTGAGAATTGGGCCGTGGGGCAAGG", fragment.getSequence().toString());
}
Example 2: testGzCompressedInput
import org.apache.hadoop.io.compress.GzipCodec; // import the package/class the method depends on
@Test
public void testGzCompressedInput() throws IOException
{
    // write gzip-compressed data
    GzipCodec codec = new GzipCodec();
    PrintWriter qseqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
    qseqOut.write(twoQseq);
    qseqOut.close();

    // now try to read it
    split = new FileSplit(new Path(tempGz.toURI().toString()), 0, twoQseq.length(), null);
    QseqRecordReader reader = new QseqRecordReader(conf, split);

    boolean retval = reader.next(key, fragment);
    assertTrue(retval);
    assertEquals("ERR020229:10880:1:1:1373:2042:1", key.toString());
    assertEquals("TTGGATGATAGGGATTATTTGACTCGAATATTGGAAATAGCTGTTTATATTTTTTAAAAATGGTCTGTAACTGGTGACAGGACGCTTCGAT", fragment.getSequence().toString());

    retval = reader.next(key, fragment);
    assertTrue(retval);
    assertEquals("ERR020229:10883:1:1:1796:2044:2", key.toString());
    assertEquals("TGAGCAGATGTGCTAAAGCTGCTTCTCCCCTAGGATCATTTGTACCTACCAGACTCAGGGAAAGGGGTGAGAATTGGGCCGTGGGGCAAGG", fragment.getSequence().toString());
}
Example 3: EmoSplitInputStream
import org.apache.hadoop.io.compress.GzipCodec; // import the package/class the method depends on
private EmoSplitInputStream(String table, String split)
        throws IOException {
    if (isEmptySplit(split)) {
        _rows = Iterators.emptyIterator();
    } else {
        // Get the DataStore and begin streaming the split's rows.
        CloseableDataStore dataStore = HadoopDataStoreManager.getInstance().getDataStore(_uri, _apiKey, _metricRegistry);
        _closer.register(dataStore);
        _rows = DataStoreStreaming.getSplit(dataStore, table, split, ReadConsistency.STRONG).iterator();
    }

    _buffer.clear();
    _buffer.limit(0);

    GzipCodec gzipCodec = new GzipCodec();
    gzipCodec.setConf(new Configuration());

    // Set up the pipes
    PipedOutputStream pipeRawToGzip = new PipedOutputStream();
    _gzipIn = new PipedInputStream(pipeRawToGzip, 10 * 1024 * 1024);
    _rawOut = gzipCodec.createOutputStream(pipeRawToGzip);
    _closer.register(_gzipIn);
    _closer.register(pipeRawToGzip);

    // Start the asynchronous buffering thread
    _bufferThread = new Thread(new Runnable() {
        @Override
        public void run() {
            streamAndCompressInput();
        }
    });
    _bufferThread.start();
}
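The pipe wiring in Example 3 is the subtlest part: bytes written to _rawOut are compressed by the codec and emitted into pipeRawToGzip, from which the connected _gzipIn hands back a well-formed gzip byte stream, typically to a consumer on another thread. The standalone sketch below isolates just that pattern (the class name, buffer size, and payload are illustrative assumptions, not from the original project):

import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.GzipCodec;

public class PipedGzipSketch {
    public static void main(String[] args) throws Exception {
        GzipCodec codec = new GzipCodec();
        codec.setConf(new Configuration());

        // Connect the pipe: whatever the codec emits becomes readable from gzipIn.
        PipedOutputStream pipeRawToGzip = new PipedOutputStream();
        PipedInputStream gzipIn = new PipedInputStream(pipeRawToGzip, 64 * 1024);
        CompressionOutputStream rawOut = codec.createOutputStream(pipeRawToGzip);

        // Writer thread: raw bytes in, compressed bytes into the pipe.
        Thread writer = new Thread(() -> {
            try {
                rawOut.write("some raw input".getBytes("UTF-8")); // placeholder payload
                rawOut.close(); // writes the gzip trailer and closes the pipe, unblocking the reader
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        writer.start();

        // Reader side: gzipIn now yields a complete .gz byte stream.
        int compressedBytes = 0;
        while (gzipIn.read() != -1) {
            compressedBytes++;
        }
        writer.join();
        System.out.println("compressed size: " + compressedBytes + " bytes");
    }
}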
Example 4: testCompressedSplit
import org.apache.hadoop.io.compress.GzipCodec; // import the package/class the method depends on
@Test(expected=RuntimeException.class)
public void testCompressedSplit() throws IOException
{
    // write gzip-compressed data
    GzipCodec codec = new GzipCodec();
    PrintWriter fastqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
    fastqOut.write(twoFastq);
    fastqOut.close();

    // now try to read it starting from the middle; a gzip stream cannot be
    // decoded from a nonzero offset, so the reader's constructor is expected to throw
    split = new FileSplit(new Path(tempGz.toURI().toString()), 10, twoFastq.length(), null);
    FastqRecordReader reader = new FastqRecordReader(conf, split);
}
Example 5: testCompressedSplit
import org.apache.hadoop.io.compress.GzipCodec; // import the package/class the method depends on
@Test(expected=RuntimeException.class)
public void testCompressedSplit() throws IOException
{
    // write gzip-compressed data
    GzipCodec codec = new GzipCodec();
    PrintWriter qseqOut = new PrintWriter( new BufferedOutputStream( codec.createOutputStream( new FileOutputStream(tempGz) ) ) );
    qseqOut.write(twoQseq);
    qseqOut.close();

    // now try to read it starting from the middle; a gzip stream cannot be
    // decoded from a nonzero offset, so the reader's constructor is expected to throw
    split = new FileSplit(new Path(tempGz.toURI().toString()), 10, twoQseq.length(), null);
    QseqRecordReader reader = new QseqRecordReader(conf, split);
}
Example 6: writeTestData
import org.apache.hadoop.io.compress.GzipCodec; // import the package/class the method depends on
@Override
public void writeTestData(File file, int recordCounts, int columnCount,
        String colSeparator) throws IOException {
    // write random test data
    GzipCodec gzipCodec = new GzipCodec();
    CompressionOutputStream out = gzipCodec.createOutputStream(new FileOutputStream(file));
    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
    try {
        for (int r = 0; r < recordCounts; r++) {
            // for each row, write n columns
            for (int c = 0; c < columnCount; c++) {
                if (c != 0) {
                    writer.append(colSeparator);
                }
                writer.append(String.valueOf(Math.random()));
            }
            writer.append("\n");
        }
    } finally {
        writer.close();
        out.close();
    }
}
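Example 6 closes its streams by hand in a finally block, with the BufferedWriter closed before the CompressionOutputStream it wraps so that buffered data is flushed through the codec. On Java 7+ the same method can use try-with-resources, which closes resources in reverse declaration order automatically; a sketch under the same assumed signature:

@Override
public void writeTestData(File file, int recordCounts, int columnCount,
        String colSeparator) throws IOException {
    GzipCodec gzipCodec = new GzipCodec();
    // try-with-resources closes writer first, then out, even if a write throws
    try (CompressionOutputStream out = gzipCodec.createOutputStream(new FileOutputStream(file));
         BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out))) {
        for (int r = 0; r < recordCounts; r++) {
            for (int c = 0; c < columnCount; c++) {
                if (c != 0) {
                    writer.append(colSeparator);
                }
                writer.append(String.valueOf(Math.random()));
            }
            writer.append("\n");
        }
    }
}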