This article compiles typical usage examples of the Java method org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf. If you are unsure what DataBlockEncoding.valueOf does, how to call it, or what real-world uses look like, the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.
Nine code examples of DataBlockEncoding.valueOf are shown below, ordered by popularity by default.
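Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of what the call itself does: DataBlockEncoding.valueOf is the standard enum lookup, so it maps an exact encoding name such as "NONE", "PREFIX", "DIFF" or "FAST_DIFF" to the corresponding constant and throws IllegalArgumentException for anything else. The class and helper method names here are illustrative only.
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
public class DataBlockEncodingValueOfSketch {
  // Illustrative helper (not part of the HBase API): resolve a user-supplied
  // encoding name, falling back to NONE when the name is missing or unknown.
  static DataBlockEncoding parseEncoding(String name) {
    if (name == null || name.isEmpty()) {
      return DataBlockEncoding.NONE;
    }
    try {
      // Standard enum lookup; the name must match a constant exactly.
      return DataBlockEncoding.valueOf(name);
    } catch (IllegalArgumentException ex) {
      // Unknown names throw IllegalArgumentException, which several of the
      // examples below wrap into an IOException instead.
      return DataBlockEncoding.NONE;
    }
  }
  public static void main(String[] args) {
    System.out.println(parseEncoding("FAST_DIFF")); // FAST_DIFF
    System.out.println(parseEncoding("bogus"));     // NONE
  }
}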
Example 1: createFromFileInfo
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class the method depends on
public static HFileDataBlockEncoder createFromFileInfo(
    FileInfo fileInfo) throws IOException {
  DataBlockEncoding encoding = DataBlockEncoding.NONE;
  // The encoding name is stored in the HFile's file info as a UTF-8 string.
  byte[] dataBlockEncodingType = fileInfo.get(DATA_BLOCK_ENCODING);
  if (dataBlockEncodingType != null) {
    String dataBlockEncodingStr = Bytes.toString(dataBlockEncodingType);
    try {
      encoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
    } catch (IllegalArgumentException ex) {
      throw new IOException("Invalid data block encoding type in file info: "
          + dataBlockEncodingStr, ex);
    }
  }
  if (encoding == DataBlockEncoding.NONE) {
    // No encoding configured: use the shared no-op encoder.
    return NoOpDataBlockEncoder.INSTANCE;
  }
  return new HFileDataBlockEncoderImpl(encoding);
}
Example 2: parseColumnFamilyOptions
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class the method depends on
private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);
  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);
  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? BloomType.ROW :
      BloomType.valueOf(bloomStr);
  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }
}
Example 3: createTable
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class the method depends on
private void createTable() throws Exception {
  deleteTable();
  LOG.info("Creating table");
  Configuration conf = util.getConfiguration();
  String encodingKey = String.format(ENCODING_KEY, this.getClass().getSimpleName());
  DataBlockEncoding blockEncoding = DataBlockEncoding.valueOf(conf.get(encodingKey, "FAST_DIFF"));
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  for (byte[] cf : dataGen.getColumnFamilies()) {
    HColumnDescriptor hcd = new HColumnDescriptor(cf);
    hcd.setDataBlockEncoding(blockEncoding);
    htd.addFamily(hcd);
  }
  int serverCount = util.getHBaseClusterInterface().getClusterStatus().getServersSize();
  byte[][] splits = new RegionSplitter.HexStringSplit().split(serverCount * REGIONS_PER_SERVER);
  util.getHBaseAdmin().createTable(htd, splits);
  LOG.info("Created table");
}
Example 4: parseColumnFamilyOptions
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class the method depends on
protected void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);
  if (dataBlockEncodingAlgo == DataBlockEncoding.NONE && encodeInCacheOnly) {
    throw new IllegalArgumentException("-" + OPT_ENCODE_IN_CACHE_ONLY + " " +
        "does not make sense when data block encoding is not used");
  }
  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);
  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? null :
      StoreFile.BloomType.valueOf(bloomStr);
  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
}
Example 5: parseColumnFamilyOptions
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class the method depends on
private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);
  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);
  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? null :
      BloomType.valueOf(bloomStr);
  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }
}
Example 6: main
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class the method depends on
/**
 * Command line interface:
 * @param args
 * @throws IOException if there is a bug while reading from disk
 */
public static void main(final String[] args) throws Exception {
  Option encodingOption = new Option("e", "blockEncoding", true,
      "Data block encoding; Default: FAST_DIFF");
  encodingOption.setRequired(false);
  options.addOption(encodingOption);
  Option ratioOption = new Option("r", "selectionRatio", true,
      "Ratio of selected rows using essential column family");
  ratioOption.setRequired(false);
  options.addOption(ratioOption);
  Option widthOption = new Option("w", "valueWidth", true,
      "Width of value for non-essential column family");
  widthOption.setRequired(false);
  options.addOption(widthOption);
  CommandLineParser parser = new GnuParser();
  CommandLine cmd = parser.parse(options, args);
  if (args.length < 1) {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp("TestJoinedScanners", options, true);
  }
  if (cmd.hasOption("e")) {
    blockEncoding = DataBlockEncoding.valueOf(cmd.getOptionValue("e"));
  }
  if (cmd.hasOption("r")) {
    selectionRatio = Integer.parseInt(cmd.getOptionValue("r"));
  }
  if (cmd.hasOption("w")) {
    valueWidth = Integer.parseInt(cmd.getOptionValue("w"));
  }
  // run the test
  TestJoinedScanners test = new TestJoinedScanners();
  test.testJoinedScanners();
}
Example 7: getDataBlockEncoding
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class the method depends on
/**
 * @return the data block encoding algorithm used in block cache and
 *         optionally on disk
 */
public DataBlockEncoding getDataBlockEncoding() {
  String type = getValue(DATA_BLOCK_ENCODING);
  if (type == null) {
    type = DEFAULT_DATA_BLOCK_ENCODING;
  }
  return DataBlockEncoding.valueOf(type);
}
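As a quick usage sketch (mine, not from the article): the string that getDataBlockEncoding() parses is normally written by HColumnDescriptor.setDataBlockEncoding, so the enum round-trips through its name. The column family name below is a placeholder.
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
public class EncodingRoundTripSketch {
  public static void main(String[] args) {
    // "cf" is a placeholder column family name.
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    // Stored internally under the DATA_BLOCK_ENCODING key as the string "FAST_DIFF"...
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    // ...and parsed back with DataBlockEncoding.valueOf, as Example 7 shows.
    System.out.println(hcd.getDataBlockEncoding()); // FAST_DIFF
  }
}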
Example 8: createFromFileInfo
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class the method depends on
public static HFileDataBlockEncoder createFromFileInfo(
    FileInfo fileInfo, DataBlockEncoding preferredEncodingInCache)
    throws IOException {
  boolean hasPreferredCacheEncoding = preferredEncodingInCache != null
      && preferredEncodingInCache != DataBlockEncoding.NONE;
  byte[] dataBlockEncodingType = fileInfo.get(DATA_BLOCK_ENCODING);
  if (dataBlockEncodingType == null && !hasPreferredCacheEncoding) {
    return NoOpDataBlockEncoder.INSTANCE;
  }
  DataBlockEncoding onDisk;
  if (dataBlockEncodingType == null) {
    onDisk = DataBlockEncoding.NONE;
  } else {
    String dataBlockEncodingStr = Bytes.toString(dataBlockEncodingType);
    try {
      onDisk = DataBlockEncoding.valueOf(dataBlockEncodingStr);
    } catch (IllegalArgumentException ex) {
      throw new IOException("Invalid data block encoding type in file info: "
          + dataBlockEncodingStr, ex);
    }
  }
  DataBlockEncoding inCache;
  if (onDisk == DataBlockEncoding.NONE) {
    // This is an "in-cache-only" encoding or fully-unencoded scenario.
    // Either way, we use the given encoding (possibly NONE) specified by
    // the column family in cache.
    inCache = preferredEncodingInCache;
  } else {
    // Leave blocks in cache encoded the same way as they are on disk.
    // If we switch encoding type for the CF or the in-cache-only encoding
    // flag, old files will keep their encoding both on disk and in cache,
    // but new files will be generated with the new encoding.
    inCache = onDisk;
  }
  return new HFileDataBlockEncoderImpl(onDisk, inCache);
}
Example 9: createColumnFamily
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; // import the package/class the method depends on
public static HColumnDescriptor createColumnFamily(KylinConfig kylinConfig, String cfName, boolean isMemoryHungry) {
  HColumnDescriptor cf = new HColumnDescriptor(cfName);
  cf.setMaxVersions(1);
  if (isMemoryHungry) {
    cf.setBlocksize(kylinConfig.getHbaseDefaultBlockSize());
  } else {
    cf.setBlocksize(kylinConfig.getHbaseSmallFamilyBlockSize());
  }
  String hbaseDefaultCC = kylinConfig.getHbaseDefaultCompressionCodec().toLowerCase();
  switch (hbaseDefaultCC) {
    case "snappy": {
      logger.info("hbase will use snappy to compress data");
      cf.setCompressionType(Algorithm.SNAPPY);
      break;
    }
    case "lzo": {
      logger.info("hbase will use lzo to compress data");
      cf.setCompressionType(Algorithm.LZO);
      break;
    }
    case "gz":
    case "gzip": {
      logger.info("hbase will use gzip to compress data");
      cf.setCompressionType(Algorithm.GZ);
      break;
    }
    case "lz4": {
      logger.info("hbase will use lz4 to compress data");
      cf.setCompressionType(Algorithm.LZ4);
      break;
    }
    case "none":
    default: {
      logger.info("hbase will not use any compression algorithm to compress data");
      cf.setCompressionType(Algorithm.NONE);
    }
  }
  try {
    String encodingStr = kylinConfig.getHbaseDefaultEncoding();
    DataBlockEncoding encoding = DataBlockEncoding.valueOf(encodingStr);
    cf.setDataBlockEncoding(encoding);
  } catch (Exception e) {
    logger.info("hbase will not use any encoding", e);
    cf.setDataBlockEncoding(DataBlockEncoding.NONE);
  }
  cf.setInMemory(false);
  cf.setBloomFilterType(BloomType.NONE);
  return cf;
}