This article collects typical usage examples of the Java method org.apache.hadoop.io.compress.zlib.ZlibFactory.isNativeZlibLoaded. If you have been wondering what ZlibFactory.isNativeZlibLoaded does and how to use it, the curated code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.io.compress.zlib.ZlibFactory.
The following 15 code examples of ZlibFactory.isNativeZlibLoaded are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
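Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: pass a Configuration to ZlibFactory.isNativeZlibLoaded and branch on the result before relying on a native-backed codec. The class name and printed messages are illustrative assumptions, not taken from any of the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;

public class NativeZlibCheck { // hypothetical class name, for illustration only
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Returns true only when the native hadoop library and its zlib bindings are loaded.
    if (ZlibFactory.isNativeZlibLoaded(conf)) {
      System.out.println("Native zlib loaded: GzipCodec/DefaultCodec can use native compressors.");
    } else {
      System.out.println("Native zlib not loaded: falling back to the built-in Java zlib implementation.");
    }
  }
}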
Example 1: isAvailable
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
/**
 * Method for compressor availability check
 */
private static <T extends Compressor, E extends Decompressor> boolean isAvailable(TesterPair<T, E> pair) {
  Compressor compressor = pair.compressor;
  if (compressor.getClass().isAssignableFrom(Lz4Compressor.class)
      && (NativeCodeLoader.isNativeCodeLoaded()))
    return true;
  else if (compressor.getClass().isAssignableFrom(BuiltInZlibDeflater.class)
      && NativeCodeLoader.isNativeCodeLoaded())
    return true;
  else if (compressor.getClass().isAssignableFrom(ZlibCompressor.class)) {
    return ZlibFactory.isNativeZlibLoaded(new Configuration());
  }
  else if (compressor.getClass().isAssignableFrom(SnappyCompressor.class)
      && isNativeSnappyLoadable())
    return true;
  return false;
}
Example 2: testCodecPoolGzipReuse
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
@Test
public void testCodecPoolGzipReuse() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  Compressor c1 = CodecPool.getCompressor(gzc);
  Compressor c2 = CodecPool.getCompressor(dfc);
  CodecPool.returnCompressor(c1);
  CodecPool.returnCompressor(c2);
  assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
}
Example 3: testCodecInitWithCompressionLevel
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
@Test
public void testCodecInitWithCompressionLevel() throws Exception {
  Configuration conf = new Configuration();
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.info("testCodecInitWithCompressionLevel with native");
    codecTestWithNOCompression(conf,
        "org.apache.hadoop.io.compress.GzipCodec");
    codecTestWithNOCompression(conf,
        "org.apache.hadoop.io.compress.DefaultCodec");
  } else {
    LOG.warn("testCodecInitWithCompressionLevel for native skipped"
        + ": native libs not loaded");
  }
  conf = new Configuration();
  // don't use native libs
  ZlibFactory.setNativeZlibLoaded(false);
  codecTestWithNOCompression(conf,
      "org.apache.hadoop.io.compress.DefaultCodec");
}
Example 4: testCodecInitWithCompressionLevel
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
@Test
public void testCodecInitWithCompressionLevel() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.info("testCodecInitWithCompressionLevel with native");
    codecTestWithNOCompression(conf,
        "org.apache.hadoop.io.compress.GzipCodec");
    codecTestWithNOCompression(conf,
        "org.apache.hadoop.io.compress.DefaultCodec");
  } else {
    LOG.warn("testCodecInitWithCompressionLevel for native skipped"
        + ": native libs not loaded");
  }
  conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  codecTestWithNOCompression(conf,
      "org.apache.hadoop.io.compress.DefaultCodec");
}
Example 5: createWriter
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
/**
 * Construct the preferred type of 'raw' SequenceFile Writer.
 * @param out The stream on top of which the writer is to be constructed.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param compress Compress data?
 * @param blockCompress Compress blocks?
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
private static Writer
    createWriter(Configuration conf, FSDataOutputStream out,
                 Class keyClass, Class valClass, boolean compress, boolean blockCompress,
                 CompressionCodec codec, Metadata metadata)
    throws IOException {
  if (codec != null && (codec instanceof GzipCodec) &&
      !NativeCodeLoader.isNativeCodeLoaded() &&
      !ZlibFactory.isNativeZlibLoaded(conf)) {
    throw new IllegalArgumentException("SequenceFile doesn't work with " +
        "GzipCodec without native-hadoop code!");
  }
  Writer writer = null;
  if (!compress) {
    writer = new Writer(conf, out, keyClass, valClass, metadata);
  } else if (compress && !blockCompress) {
    writer = new RecordCompressWriter(conf, out, keyClass, valClass, codec, metadata);
  } else {
    writer = new BlockCompressWriter(conf, out, keyClass, valClass, codec, metadata);
  }
  return writer;
}
Example 6: createWriter
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
/**
 * Construct the preferred type of 'raw' SequenceFile Writer.
 * @param conf The configuration.
 * @param out The stream on top of which the writer is to be constructed.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
public static Writer
    createWriter(Configuration conf, FSDataOutputStream out,
                 Class keyClass, Class valClass, CompressionType compressionType,
                 CompressionCodec codec, Metadata metadata)
    throws IOException {
  if ((codec instanceof GzipCodec) &&
      !NativeCodeLoader.isNativeCodeLoaded() &&
      !ZlibFactory.isNativeZlibLoaded(conf)) {
    throw new IllegalArgumentException("SequenceFile doesn't work with " +
        "GzipCodec without native-hadoop code!");
  }
  Writer writer = null;
  if (compressionType == CompressionType.NONE) {
    writer = new Writer(conf, out, keyClass, valClass, metadata);
  } else if (compressionType == CompressionType.RECORD) {
    writer = new RecordCompressWriter(conf, out, keyClass, valClass, codec, metadata);
  } else if (compressionType == CompressionType.BLOCK) {
    writer = new BlockCompressWriter(conf, out, keyClass, valClass, codec, metadata);
  }
  return writer;
}
Example 7: testCodecInitWithCompressionLevel
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
public void testCodecInitWithCompressionLevel() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean("io.native.lib.available", true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.info("testCodecInitWithCompressionLevel with native");
    codecTestWithNOCompression(conf,
        "org.apache.hadoop.io.compress.GzipCodec");
    codecTestWithNOCompression(conf,
        "org.apache.hadoop.io.compress.DefaultCodec");
  } else {
    LOG.warn("testCodecInitWithCompressionLevel for native skipped"
        + ": native libs not loaded");
  }
  conf = new Configuration();
  conf.setBoolean("io.native.lib.available", false);
  codecTestWithNOCompression(conf,
      "org.apache.hadoop.io.compress.DefaultCodec");
}
Example 8: testCodecPoolCompressorReinit
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
@Test
public void testCodecPoolCompressorReinit() throws Exception {
  Configuration conf = new Configuration();
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
    gzipReinitTest(conf, gzc);
  } else {
    LOG.warn("testCodecPoolCompressorReinit skipped: native libs not loaded");
  }
  // don't use native libs
  ZlibFactory.setNativeZlibLoaded(false);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  gzipReinitTest(conf, dfc);
}
Example 9: testCodecPoolCompressorReinit
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
@Test
public void testCodecPoolCompressorReinit() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
    gzipReinitTest(conf, gzc);
  } else {
    LOG.warn("testCodecPoolCompressorReinit skipped: native libs not loaded");
  }
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  gzipReinitTest(conf, dfc);
}
Example 10: testNativeGzipConcat
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
@Test
public void testNativeGzipConcat() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("skipped: native libs not loaded");
    return;
  }
  GzipConcatTest(conf, GzipCodec.GzipZlibDecompressor.class);
}
Example 11: createOutputStream
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
    throws IOException {
  if (ZlibFactory.isNativeZlibLoaded(getConf())) {
    return super.createOutputStream(out);
  }
  return new ReusableGzipOutputStream(out);
}
Example 12: testCodecPoolGzipReuse
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
public void testCodecPoolGzipReuse() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  Compressor c1 = CodecPool.getCompressor(gzc);
  Compressor c2 = CodecPool.getCompressor(dfc);
  CodecPool.returnCompressor(c1);
  CodecPool.returnCompressor(c2);
  assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
}
Example 13: setupOutput
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
protected void setupOutput(final Job job, final SampleDataForSplitPoints operation, final Store store) throws IOException {
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  SequenceFileOutputFormat.setOutputPath(job, new Path(operation.getOutputPath()));
  if (null != operation.getCompressionCodec()) {
    if (GzipCodec.class.isAssignableFrom(operation.getCompressionCodec())
        && !NativeCodeLoader.isNativeCodeLoaded()
        && !ZlibFactory.isNativeZlibLoaded(job.getConfiguration())) {
      LOGGER.warn("SequenceFile doesn't work with GzipCodec without native-hadoop code!");
    } else {
      SequenceFileOutputFormat.setCompressOutput(job, true);
      SequenceFileOutputFormat.setOutputCompressorClass(job, operation.getCompressionCodec());
      SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
    }
  }
}
Example 14: testGzipCodecCompressionData
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
@Test
public void testGzipCodecCompressionData() throws IOException {
  if (storeType == StoreType.RCFILE) {
    if (ZlibFactory.isNativeZlibLoaded(conf)) {
      storageCompressionTest(storeType, GzipCodec.class);
    }
  } else {
    storageCompressionTest(storeType, GzipCodec.class);
  }
}
Example 15: testGzipCodecCompressionData
import org.apache.hadoop.io.compress.zlib.ZlibFactory; // import the package/class this method depends on
@Test
public void testGzipCodecCompressionData() throws IOException {
  if (storeType == StoreType.RCFILE) {
    if (ZlibFactory.isNativeZlibLoaded(conf)) {
      storageCompressionTest(storeType, GzipCodec.class);
    }
  } else if (storeType == StoreType.SEQUENCEFILE) {
    if (ZlibFactory.isNativeZlibLoaded(conf)) {
      storageCompressionTest(storeType, GzipCodec.class);
    }
  } else {
    storageCompressionTest(storeType, GzipCodec.class);
  }
}