

Java GzipCodec.setConf Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.compress.GzipCodec.setConf. If you are wondering what exactly GzipCodec.setConf does, how it is used, or what it looks like in real code, the selected examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.compress.GzipCodec.


The following presents 7 code examples of the GzipCodec.setConf method, sorted by popularity by default.
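All of the examples below follow the same basic pattern: instantiate a GzipCodec, hand it a Hadoop Configuration via setConf, and only then ask the codec to wrap a stream. The following minimal sketch illustrates that pattern end to end; the file path and payload are placeholders chosen for illustration, not taken from any of the projects below.

import java.io.FileOutputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.GzipCodec;

public class GzipCodecSetConfSketch {
	public static void main(String[] args) throws Exception {
		// setConf must be called before createOutputStream/createInputStream,
		// otherwise the codec has no Configuration to read compression settings from.
		GzipCodec gzipCodec = new GzipCodec();
		gzipCodec.setConf(new Configuration());

		// Wrap a plain file stream so everything written to it is gzip-compressed.
		// "example.txt.gz" is a placeholder path.
		try (OutputStream rawOut = new FileOutputStream("example.txt.gz");
				CompressionOutputStream gzipOut = gzipCodec.createOutputStream(rawOut)) {
			gzipOut.write("hello gzip\n".getBytes("UTF-8"));
			gzipOut.finish(); // write the gzip trailer before the streams close
		}
	}
}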

Example 1: setUp

import org.apache.hadoop.io.compress.GzipCodec; // import the package/class required by this method
@Before
public void setUp() throws Exception {

	conf = new SystemConfiguration();

	// Create Gzip codec
	org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
	GzipCodec gzipCodec = new GzipCodec();
	gzipCodec.setConf(hadoopConf);

	codec = gzipCodec;

	// Write out test file
	baseDir = new File(".", "target/testClientServerFailures/");
	baseDir.mkdirs();

	fileToStream = new File(baseDir, "test.txt");

	if (fileToStream.exists())
		fileToStream.delete();

	fileToStream.createNewFile();

	FileWriter writer = new FileWriter(fileToStream);
	BufferedWriter buffWriter = new BufferedWriter(writer);
	try {
		for (int i = 0; i < testLineCount; i++) {

			buffWriter.write(testString);
			buffWriter.write('\n');
		}
	} finally {
		buffWriter.close();
		writer.close();
	}
}
 
Author: gerritjvv, Project: bigstreams, Lines of code: 37, Source: TestFilesSendWorkerServerFailures.java

Example 2: setUp

import org.apache.hadoop.io.compress.GzipCodec; // import the package/class required by this method
@Before
public void setUp() throws Exception {

	// Create Gzip codec
	Configuration conf = new Configuration();
	GzipCodec gzipCodec = new GzipCodec();
	gzipCodec.setConf(conf);

	codec = gzipCodec;

	// Write out test file
	baseDir = new File(".", "target/testSendClientFiles/");
	baseDir.mkdirs();

	fileToStream = new File(baseDir, "test.txt");

	if (fileToStream.exists())
		fileToStream.delete();

	fileToStream.createNewFile();

	FileWriter writer = new FileWriter(fileToStream);
	BufferedWriter buffWriter = new BufferedWriter(writer);
	try {
		for (int i = 0; i < testLineCount; i++) {

			buffWriter.write(testString);
			buffWriter.write('\n');
		}
	} finally {
		buffWriter.close();
		writer.close();
	}

	// wait for file to be created
	while (!fileToStream.exists())
		;
}
 
Author: gerritjvv, Project: bigstreams, Lines of code: 39, Source: TestFilesSendWorker.java

Example 3: setUp

import org.apache.hadoop.io.compress.GzipCodec; // import the package/class required by this method
@Before
public void setUp() throws Exception {

	// Create Gzip codec
	Configuration conf = new Configuration();
	GzipCodec gzipCodec = new GzipCodec();
	gzipCodec.setConf(conf);

	codec = gzipCodec;

	// Write out test file
	baseDir = new File(".", "target/testSendClientFilesConflict/");
	baseDir.mkdirs();

	fileToStream = new File(baseDir, "test.txt");

	if (fileToStream.exists())
		fileToStream.delete();

	fileToStream.createNewFile();

	FileWriter writer = new FileWriter(fileToStream);
	BufferedWriter buffWriter = new BufferedWriter(writer);
	try {
		for (int i = 0; i < testLineCount; i++) {

			buffWriter.write(testString);
			buffWriter.write('\n');
		}
	} finally {
		buffWriter.close();
		writer.close();
	}

	// wait for file to be created
	while (!fileToStream.exists())
		;
}
 
Author: gerritjvv, Project: bigstreams, Lines of code: 39, Source: TestFilesSendWorkerConflict.java

Example 4: EmoSplitInputStream

import org.apache.hadoop.io.compress.GzipCodec; // import the package/class required by this method
private EmoSplitInputStream(String table, String split)
        throws IOException {
    if (isEmptySplit(split)) {
        _rows = Iterators.emptyIterator();
    } else {
        // Get the DataStore and begin streaming the split's rows.
        CloseableDataStore dataStore = HadoopDataStoreManager.getInstance().getDataStore(_uri, _apiKey, _metricRegistry);
        _closer.register(dataStore);

        _rows = DataStoreStreaming.getSplit(dataStore, table, split, ReadConsistency.STRONG).iterator();
    }

    _buffer.clear();
    _buffer.limit(0);
    GzipCodec gzipCodec = new GzipCodec();
    gzipCodec.setConf(new Configuration());

    // Set up the pipes
    PipedOutputStream pipeRawToGzip = new PipedOutputStream();
    _gzipIn = new PipedInputStream(pipeRawToGzip, 10 * 1024 * 1024);
    _rawOut = gzipCodec.createOutputStream(pipeRawToGzip);
    _closer.register(_gzipIn);
    _closer.register(pipeRawToGzip);

    // Start the asynchronous buffering thread
    _bufferThread = new Thread(new Runnable() {
        @Override
        public void run() {
            streamAndCompressInput();
        }
    });
    _bufferThread.start();
}
 
Author: bazaarvoice, Project: emodb, Lines of code: 34, Source: EmoFileSystem.java
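Example 4 only shows the write side of the pipe: raw rows go into _rawOut and come out gzip-compressed on _gzipIn. For completeness, the same configured codec can undo the compression with createInputStream. The sketch below is not part of EmoFileSystem; it is a standalone illustration of the read side, with illustrative names.

import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.GzipCodec;

public class GzipReadSideSketch {
	// Decompresses a stream that was produced by the same codec's createOutputStream.
	static void copyDecompressed(InputStream gzippedIn, OutputStream plainOut) throws Exception {
		GzipCodec codec = new GzipCodec();
		codec.setConf(new Configuration()); // required before creating streams

		try (InputStream decompressed = codec.createInputStream(gzippedIn)) {
			byte[] chunk = new byte[8 * 1024];
			int n;
			while ((n = decompressed.read(chunk)) != -1) {
				plainOut.write(chunk, 0, n);
			}
		}
	}
}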

Example 5: buildCodec

import org.apache.hadoop.io.compress.GzipCodec; // import the package/class required by this method
private GzipCodec buildCodec(Configuration conf) {
  GzipCodec gzcodec = new ReusableStreamGzipCodec();
  gzcodec.setConf(new Configuration(conf));
  return gzcodec;
}
 
Author: fengchen8086, Project: ditb, Lines of code: 6, Source: Compression.java
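Example 5 passes a defensive copy (new Configuration(conf)) to setConf, so later changes to the caller's configuration cannot leak into the codec. A common alternative in Hadoop code is to let ReflectionUtils construct the codec and inject the configuration in one step, since newInstance calls setConf on any Configurable class. The sketch below shows that variant; it is not taken from the ditb project.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecFactorySketch {
	static GzipCodec buildCodec(Configuration conf) {
		// ReflectionUtils.newInstance calls setConf automatically because
		// GzipCodec (via DefaultCodec) implements Configurable.
		return ReflectionUtils.newInstance(GzipCodec.class, new Configuration(conf));
	}
}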

Example 6: setUp

import org.apache.hadoop.io.compress.GzipCodec; // import the package/class required by this method
@Before
public void setUp() throws Exception {

	bootstrap = new Bootstrap();
	bootstrap.loadProfiles(CommandLineProcessorFactory.PROFILE.DB,
			CommandLineProcessorFactory.PROFILE.AGENT);

	conf = new SystemConfiguration();

	// Create Gzip codec
	org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
	GzipCodec gzipCodec = new GzipCodec();
	gzipCodec.setConf(hadoopConf);

	codec = gzipCodec;

	// Write out test file
	baseDir = new File(".", "target/testSendClientFiles/");
	baseDir.mkdirs();

	fileToStream = new File(baseDir, "test.txt");

	if (fileToStream.exists())
		fileToStream.delete();

	fileToStream.createNewFile();

	FileWriter writer = new FileWriter(fileToStream);
	BufferedWriter buffWriter = new BufferedWriter(writer);
	try {
		for (int i = 0; i < testLineCount; i++) {

			buffWriter.write(testString);
			buffWriter.write('\n');
		}
	} finally {
		buffWriter.close();
		writer.close();
	}

}
 
Author: gerritjvv, Project: bigstreams, Lines of code: 42, Source: TestSendClientFiles.java

Example 7: setUp

import org.apache.hadoop.io.compress.GzipCodec; // import the package/class required by this method
@Before
public void setUp() throws Exception {

	String arch = System.getProperty("os.arch");
	String libPath = null;
	if (arch.contains("i386")) {
		libPath = new File(".", "src/main/resources/native/Linux-i386-32")
				.getAbsolutePath();
	} else {
		libPath = new File(".", "src/main/resources/native/Linux-amd64-64")
				.getAbsolutePath();
	}

	System.setProperty("java.library.path", libPath);

	// Create Gzip codec
	Configuration conf = new Configuration();
	GzipCodec gzipCodec = new GzipCodec();
	gzipCodec.setConf(conf);

	codec = gzipCodec;

	// Write out test file
	baseDir = new File(".", "target/fileLineStreamerTest/");
	baseDir.mkdirs();

	fileToStream = new File(baseDir, "test.txt");

	if (fileToStream.exists())
		fileToStream.delete();

	fileToStream.createNewFile();

	FileWriter writer = new FileWriter(fileToStream);
	BufferedWriter buffWriter = new BufferedWriter(writer);
	try {
		for (int i = 0; i < testLineCount; i++) {

			buffWriter.write(testString);
			buffWriter.write('\n');
		}
	} finally {
		buffWriter.close();
		writer.close();
	}

}
 
Author: gerritjvv, Project: bigstreams, Lines of code: 48, Source: TestFileLineStreamerImpl.java


Note: The org.apache.hadoop.io.compress.GzipCodec.setConf examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution or use should follow the license of the corresponding project. Do not reproduce without permission.