本文整理汇总了Java中org.apache.hadoop.hbase.io.hfile.Compression.getCompressionAlgorithmByName方法的典型用法代码示例。如果您正苦于以下问题:Java Compression.getCompressionAlgorithmByName方法的具体用法?Java Compression.getCompressionAlgorithmByName怎么用?Java Compression.getCompressionAlgorithmByName使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.io.hfile.Compression
的用法示例。
在下文中一共展示了Compression.getCompressionAlgorithmByName方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: colDescFromThrift
import org.apache.hadoop.hbase.io.hfile.Compression; //导入方法依赖的package包/类
/**
* This utility method creates a new Hbase HColumnDescriptor object based on a
* Thrift ColumnDescriptor "struct".
*
* @param in
* Thrift ColumnDescriptor object
* @return HColumnDescriptor
* @throws IllegalArgument
*/
/**
 * This utility method creates a new Hbase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor populated from the Thrift fields
 * @throws IllegalArgument if the column name is null or empty
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  // Validate the name first: the original order dereferenced in.compression
  // and in.bloomFilterType before this check, so a malformed struct could
  // NPE instead of raising the intended IllegalArgument.
  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  Compression.Algorithm comp =
      Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
      BloomType.valueOf(in.bloomFilterType);
  // parseColumn splits "family:qualifier"; element 0 is the family name.
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive)
      .setBloomFilterType(bt);
  return col;
}
示例2: testCompression
import org.apache.hadoop.hbase.io.hfile.Compression; //导入方法依赖的package包/类
/**
 * Checks whether the named compression codec is known to HBase and can
 * actually be instantiated in the current runtime.
 *
 * @param codec compression codec name (matched case-insensitively)
 * @return true if the codec resolves and a trial compression succeeds,
 *         false otherwise (the failure is logged, not thrown)
 */
public static boolean testCompression(String codec) {
  final String name = codec.toLowerCase();
  final Compression.Algorithm algorithm;
  try {
    algorithm = Compression.getCompressionAlgorithmByName(name);
  } catch (IllegalArgumentException e) {
    // Unknown codec name: report and treat as unsupported.
    LOG.warn("Codec type: " + name + " is not known");
    return false;
  }
  try {
    // Exercise the codec once; missing native libraries surface here.
    testCompression(algorithm);
  } catch (IOException ignored) {
    LOG.warn("Can't instantiate codec: " + name, ignored);
    return false;
  }
  return true;
}
示例3: colDescFromThrift
import org.apache.hadoop.hbase.io.hfile.Compression; //导入方法依赖的package包/类
/**
* This utility method creates a new Hbase HColumnDescriptor object based on a
* Thrift ColumnDescriptor "struct".
*
* @param in
* Thrift ColumnDescriptor object
* @return HColumnDescriptor
* @throws IllegalArgument
*/
/**
 * This utility method creates a new Hbase HColumnDescriptor object based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in
 *          Thrift ColumnDescriptor object
 * @return HColumnDescriptor populated from the Thrift fields
 * @throws IllegalArgument if the column name is null or empty
 */
static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in)
    throws IllegalArgument {
  // Validate the name first: the original order dereferenced in.compression
  // and in.bloomFilterType before this check, so a malformed struct could
  // NPE instead of raising the intended IllegalArgument.
  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  Compression.Algorithm comp =
      Compression.getCompressionAlgorithmByName(in.compression.toLowerCase());
  StoreFile.BloomType bt =
      BloomType.valueOf(in.bloomFilterType);
  // parseColumn splits "family:qualifier"; element 0 is the family name.
  byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
  HColumnDescriptor col = new HColumnDescriptor(parsedName,
      in.maxVersions, comp.getName(), in.inMemory, in.blockCacheEnabled,
      in.timeToLive, bt.toString());
  return col;
}
示例4: getSupportedCompressionAlgorithms
import org.apache.hadoop.hbase.io.hfile.Compression; //导入方法依赖的package包/类
/**
* Get supported compression algorithms.
* @return supported compression algorithms.
*/
/**
 * Get supported compression algorithms.
 *
 * Probes every algorithm name reported by HFile and keeps only those whose
 * compressor can actually be instantiated in this runtime.
 *
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  List<Compression.Algorithm> available = new ArrayList<Compression.Algorithm>();
  for (String name : HFile.getSupportedCompressionAlgorithms()) {
    try {
      Compression.Algorithm candidate = Compression.getCompressionAlgorithmByName(name);
      // Creating a compressor proves the codec's libraries are present.
      candidate.getCompressor();
      available.add(candidate);
    } catch (Throwable ignored) {
      // Best-effort probe: any failure (missing native lib, link error,
      // unknown name) simply means this algorithm is not available.
    }
  }
  return available.toArray(new Compression.Algorithm[available.size()]);
}
示例5: DataBlockEncodingTool
import org.apache.hadoop.hbase.io.hfile.Compression; //导入方法依赖的package包/类
/**
* @param compressionAlgorithmName What kind of algorithm should be used
* as baseline for comparison (e.g. lzo, gz).
*/
/**
 * Creates a tool configured with the baseline compression codec that
 * encoded-block sizes will be compared against.
 *
 * @param compressionAlgorithmName What kind of algorithm should be used
 * as baseline for comparison (e.g. lzo, gz).
 * NOTE(review): presumably throws IllegalArgumentException for an unknown
 * name, as getCompressionAlgorithmByName does elsewhere — confirm.
 */
public DataBlockEncodingTool(String compressionAlgorithmName) {
this.compressionAlgorithmName = compressionAlgorithmName;
// Resolve the codec once and cache a compressor/decompressor pair for reuse.
this.compressionAlgorithm = Compression.getCompressionAlgorithmByName(
compressionAlgorithmName);
this.compressor = this.compressionAlgorithm.getCompressor();
this.decompressor = this.compressionAlgorithm.getDecompressor();
}
示例6: Writer
import org.apache.hadoop.hbase.io.hfile.Compression; //导入方法依赖的package包/类
/**
* Constructor that takes a Path.
* @param fs
* @param path
* @param blocksize
* @param compress
* @param comparator
* @throws IOException
* @throws IOException
*/
/**
 * Constructor that takes a Path.
 *
 * Resolves the codec name to a Compression.Algorithm and delegates to the
 * algorithm-based constructor.
 *
 * @param fs filesystem to write to
 * @param path path of the file to create
 * @param replication replication factor for the file
 * @param blocksize block size for the written file
 * @param compress compression algorithm name; null selects
 *          DEFAULT_COMPRESSION_ALGORITHM
 * @param comparator comparator used to order index keys
 * @throws IOException if the underlying writer cannot be created
 */
public Writer(FileSystem fs, Path path, short replication, int blocksize,
    String compress, final IndexKeyComparator comparator)
    throws IOException {
  this(fs, path, replication, blocksize,
      compress == null ? DEFAULT_COMPRESSION_ALGORITHM :
          Compression.getCompressionAlgorithmByName(compress),
      comparator);
}