This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl. If you are unsure what HFileDataBlockEncoderImpl does or how to use it, the examples selected below should help.
The HFileDataBlockEncoderImpl class belongs to the org.apache.hadoop.hbase.io.hfile package. Fifteen code examples of the class are shown below, ordered by popularity by default.
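Before the examples, here is a minimal sketch of how the class is typically constructed. It assumes a recent HBase version (roughly 0.96 and later) in which the single-argument constructor and getDataBlockEncoding() are available; the wrapper class EncoderConstructionDemo is illustrative only. Several of the older examples below instead use a two-argument constructor that takes separate on-disk and in-cache encodings.

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;

public class EncoderConstructionDemo {
  public static void main(String[] args) {
    // Wrap a data block encoding algorithm so HFile writers can encode data blocks.
    HFileDataBlockEncoder encoder =
        new HFileDataBlockEncoderImpl(DataBlockEncoding.FAST_DIFF);
    System.out.println(encoder.getDataBlockEncoding()); // prints FAST_DIFF
  }
}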
Example 1: majorCompactionWithDataBlockEncoding
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
    throws Exception {
  // Remember each store's original encoder so it can be restored afterwards.
  Map<Store, HFileDataBlockEncoder> replaceBlockCache =
      new HashMap<Store, HFileDataBlockEncoder>();
  for (Store store : r.getStores()) {
    HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
    replaceBlockCache.put(store, blockEncoder);
    final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
    final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
        inCache;
    ((HStore) store).setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
  }
  majorCompaction();
  // restore settings
  for (Entry<Store, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
    ((HStore) entry.getKey()).setDataBlockEncoderInTest(entry.getValue());
  }
}
Example 2: majorCompactionWithDataBlockEncoding
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly) throws Exception {
  // Remember each store's original encoder so it can be restored afterwards.
  Map<Store, HFileDataBlockEncoder> replaceBlockCache =
      new HashMap<Store, HFileDataBlockEncoder>();
  for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
    Store store = pair.getValue();
    HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
    replaceBlockCache.put(store, blockEncoder);
    final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
    final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE : inCache;
    store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk, inCache));
  }
  majorCompaction();
  // restore settings
  for (Entry<Store, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
    entry.getKey().setDataBlockEncoderInTest(entry.getValue());
  }
}
Example 3: main
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
/**
 * Command line interface:
 * @param args Takes one argument: the path of the HFile to read.
 * @throws IOException if an error occurs while reading from disk
 */
public static void main(final String[] args) throws IOException {
  if (args.length < 1) {
    printUsage();
    System.exit(-1);
  }
  Path path = new Path(args[0]);
  List<HFileDataBlockEncoder> encoders =
      new ArrayList<HFileDataBlockEncoder>();
  // Baseline with no encoding, then one encoder per available algorithm
  // (unencoded on disk, encoded in the block cache).
  encoders.add(new HFileDataBlockEncoderImpl(DataBlockEncoding.NONE));
  for (DataBlockEncoding encodingAlgo : DataBlockEncoding.values()) {
    encoders.add(new HFileDataBlockEncoderImpl(DataBlockEncoding.NONE,
        encodingAlgo));
  }
  EncodedSeekPerformanceTest utility = new EncodedSeekPerformanceTest();
  utility.runTests(path, encoders);
  System.exit(0);
}
Example 4: majorCompactionWithDataBlockEncoding
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
    throws Exception {
  // Remember each store's original encoder so it can be restored afterwards.
  Map<HStore, HFileDataBlockEncoder> replaceBlockCache =
      new HashMap<HStore, HFileDataBlockEncoder>();
  for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
    HStore store = (HStore) pair.getValue();
    HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
    replaceBlockCache.put(store, blockEncoder);
    final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
    final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
        inCache;
    store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
  }
  majorCompaction();
  // restore settings
  for (Entry<HStore, HFileDataBlockEncoder> entry :
      replaceBlockCache.entrySet()) {
    entry.getKey().setDataBlockEncoderInTest(entry.getValue());
  }
}
Example 5: majorCompactionWithDataBlockEncoding
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
    throws Exception {
  // Remember each store's original encoder so it can be restored afterwards.
  Map<Store, HFileDataBlockEncoder> replaceBlockCache =
      new HashMap<Store, HFileDataBlockEncoder>();
  for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
    Store store = pair.getValue();
    HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
    replaceBlockCache.put(store, blockEncoder);
    final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
    final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
        inCache;
    store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(
        onDisk, inCache));
  }
  majorCompaction();
  // restore settings
  for (Entry<Store, HFileDataBlockEncoder> entry :
      replaceBlockCache.entrySet()) {
    entry.getKey().setDataBlockEncoderInTest(entry.getValue());
  }
}
Example 6: majorCompactionWithDataBlockEncoding
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
    throws Exception {
  // Remember each store's original encoder so it can be restored afterwards.
  Map<HStore, HFileDataBlockEncoder> replaceBlockCache = new HashMap<>();
  for (HStore store : r.getStores()) {
    HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
    replaceBlockCache.put(store, blockEncoder);
    final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
    final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
        inCache;
    store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
  }
  majorCompaction();
  // restore settings
  for (Entry<HStore, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
    entry.getKey().setDataBlockEncoderInTest(entry.getValue());
  }
}
Example 7: prepValueForStoreFileWriter
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
private void prepValueForStoreFileWriter(Configuration conf,
    String columnFamily, HTableDescriptor largeTD,
    String outputPath) throws IOException {
  // Pull all writer settings from the column family descriptor.
  familyDescriptor = largeTD.getFamily(Bytes.toBytes(columnFamily));
  blocksize = familyDescriptor.getBlocksize();
  fs = FileSystem.get(conf);
  this.columnFamily = columnFamily;
  cacheConf = new CacheConfig(conf);
  compression = familyDescriptor.getCompression();
  bloomFilterType = familyDescriptor.getBloomFilterType();
  // Two-argument form: separate on-disk and in-cache encodings.
  dataBlockEncoder = new HFileDataBlockEncoderImpl(
      familyDescriptor.getDataBlockEncodingOnDisk(),
      familyDescriptor.getDataBlockEncoding());
  this.outputPath = outputPath;
}
Example 8: majorCompactionWithDataBlockEncoding
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
    throws Exception {
  // Remember each store's original encoder so it can be restored afterwards.
  Map<HStore, HFileDataBlockEncoder> replaceBlockCache =
      new HashMap<HStore, HFileDataBlockEncoder>();
  for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
    HStore store = (HStore) pair.getValue();
    HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
    replaceBlockCache.put(store, blockEncoder);
    final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
    final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
        inCache;
    store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(
        onDisk, inCache));
  }
  majorCompaction();
  // restore settings
  for (Entry<HStore, HFileDataBlockEncoder> entry :
      replaceBlockCache.entrySet()) {
    entry.getKey().setDataBlockEncoderInTest(entry.getValue());
  }
}
Example 9: main
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
/**
 * Command line interface:
 * @param args Takes one argument: the path of the HFile to read.
 * @throws IOException if an error occurs while reading from disk
 */
public static void main(final String[] args) throws IOException {
  if (args.length < 1) {
    printUsage();
    System.exit(-1);
  }
  Path path = new Path(args[0]);
  List<HFileDataBlockEncoder> encoders =
      new ArrayList<HFileDataBlockEncoder>();
  // One encoder per available algorithm (unencoded on disk, encoded in the block cache).
  for (DataBlockEncoding encodingAlgo : DataBlockEncoding.values()) {
    encoders.add(new HFileDataBlockEncoderImpl(DataBlockEncoding.NONE,
        encodingAlgo));
  }
  EncodedSeekPerformanceTest utility = new EncodedSeekPerformanceTest();
  utility.runTests(path, encoders);
  System.exit(0);
}
Example 10: testDataBlockEncodingMetaData
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
/**
 * Check that data block encoding information is saved correctly in the
 * HFile's file info.
 */
public void testDataBlockEncodingMetaData() throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
  Path path = new Path(dir, "1234567890");
  DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
  HFileDataBlockEncoder dataBlockEncoder =
      new HFileDataBlockEncoderImpl(dataBlockEncoderAlgo);
  cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
      .withChecksumType(CKTYPE)
      .withBytesPerCheckSum(CKBYTES)
      .withDataBlockEncoding(dataBlockEncoderAlgo)
      .build();
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
      .withFilePath(path)
      .withMaxKeyCount(2000)
      .withFileContext(meta)
      .build();
  writer.close();
  // Re-open the file and verify the encoding recorded in its file info.
  StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
      cacheConf, BloomType.NONE);
  StoreFile.Reader reader = storeFile.createReader();
  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
Example 11: testDataBlockEncodingMetaData
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
/**
 * Check that data block encoding information is saved correctly in the
 * HFile's file info.
 */
public void testDataBlockEncodingMetaData() throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
  Path path = new Path(dir, "1234567890");
  DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
  HFileDataBlockEncoder dataBlockEncoder =
      new HFileDataBlockEncoderImpl(
          dataBlockEncoderAlgo,
          dataBlockEncoderAlgo);
  cacheConf = new CacheConfig(conf);
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      HFile.DEFAULT_BLOCKSIZE)
      .withFilePath(path)
      .withDataBlockEncoder(dataBlockEncoder)
      .withMaxKeyCount(2000)
      .withChecksumType(CKTYPE)
      .withBytesPerChecksum(CKBYTES)
      .build();
  writer.close();
  // Re-open the file and verify the encoding recorded in its file info.
  StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
      cacheConf, BloomType.NONE, dataBlockEncoder);
  StoreFile.Reader reader = storeFile.createReader();
  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
Example 12: testDataBlockEncodingMetaData
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
/**
 * Check that data block encoding information is saved correctly in the
 * HFile's file info.
 */
@Test
public void testDataBlockEncodingMetaData() throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path dir = new Path(new Path(testDir, "7e0102"), "familyname");
  Path path = new Path(dir, "1234567890");
  DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
  HFileDataBlockEncoder dataBlockEncoder =
      new HFileDataBlockEncoderImpl(dataBlockEncoderAlgo);
  cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
      .withChecksumType(CKTYPE)
      .withBytesPerCheckSum(CKBYTES)
      .withDataBlockEncoding(dataBlockEncoderAlgo)
      .build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(path)
      .withMaxKeyCount(2000)
      .withFileContext(meta)
      .build();
  writer.close();
  // Re-open the file and verify the encoding recorded in its file info.
  HStoreFile storeFile =
      new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
  storeFile.initReader();
  StoreFileReader reader = storeFile.getReader();
  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertArrayEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
Example 13: testDataBlockEncodingMetaData
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
/**
 * Check that data block encoding information is saved correctly in the
 * HFile's file info.
 */
public void testDataBlockEncodingMetaData() throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
  Path path = new Path(dir, "1234567890");
  DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
  HFileDataBlockEncoder dataBlockEncoder =
      new HFileDataBlockEncoderImpl(dataBlockEncoderAlgo);
  cacheConf = new CacheConfig(conf);
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      HConstants.DEFAULT_BLOCKSIZE)
      .withFilePath(path)
      .withDataBlockEncoder(dataBlockEncoder)
      .withMaxKeyCount(2000)
      .withChecksumType(CKTYPE)
      .withBytesPerChecksum(CKBYTES)
      .build();
  writer.close();
  // Re-open the file and verify the encoding recorded in its file info.
  StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
      cacheConf, BloomType.NONE);
  StoreFile.Reader reader = storeFile.createReader();
  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
Example 14: testDataBlockEncodingMetaData
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
/**
 * Check that data block encoding information is saved correctly in the
 * HFile's file info.
 */
public void testDataBlockEncodingMetaData() throws IOException {
  Path dir = new Path(new Path(this.testDir, "regionname"), "familyname");
  Path path = new Path(dir, "1234567890");
  DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
  HFileDataBlockEncoder dataBlockEncoder =
      new HFileDataBlockEncoderImpl(
          dataBlockEncoderAlgo,
          dataBlockEncoderAlgo);
  cacheConf = new CacheConfig(conf);
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      HFile.DEFAULT_BLOCKSIZE)
      .withFilePath(path)
      .withDataBlockEncoder(dataBlockEncoder)
      .withMaxKeyCount(2000)
      .withChecksumType(CKTYPE)
      .withBytesPerChecksum(CKBYTES)
      .build();
  writer.close();
  // Re-open the file and verify the encoding recorded in its file info.
  StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
      cacheConf, BloomType.NONE, dataBlockEncoder);
  StoreFile.Reader reader = storeFile.createReader();
  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
Example 15: HStore
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; // import the required package/class
/**
 * Constructor
 * @param region the HRegion this store belongs to
 * @param family HColumnDescriptor for this column family
 * @param confParam configuration object; can be null
 * @throws IOException
 */
protected HStore(final HRegion region, final HColumnDescriptor family,
    final Configuration confParam) throws IOException {
  HRegionInfo info = region.getRegionInfo();
  this.fs = region.getRegionFileSystem();
  // Assemble the store's home directory and ensure it exists.
  fs.createStoreDir(family.getNameAsString());
  this.region = region;
  this.family = family;
  // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
  // CompoundConfiguration will look for keys in reverse order of addition, so we'd
  // add global config first, then table and cf overrides, then cf metadata.
  this.conf = new CompoundConfiguration()
      .add(confParam)
      .addStringMap(region.getTableDesc().getConfiguration())
      .addStringMap(family.getConfiguration())
      .addWritableMap(family.getValues());
  this.blocksize = family.getBlocksize();
  this.dataBlockEncoder =
      new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());
  this.comparator = info.getComparator();
  // used by ScanQueryMatcher
  long timeToPurgeDeletes =
      Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
  LOG.trace("Time to purge deletes set to " + timeToPurgeDeletes +
      "ms in store " + this);
  // Get TTL
  long ttl = determineTTLFromFamily(family);
  // Why not just pass a HColumnDescriptor in here altogether? Even if we have
  // to clone it?
  scanInfo = new ScanInfo(family, ttl, timeToPurgeDeletes, this.comparator);
  this.memstore = new MemStore(conf, this.comparator);
  this.offPeakHours = OffPeakHours.getInstance(conf);
  // Setting up cache configuration for this family
  this.cacheConf = new CacheConfig(conf, family);
  this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false);
  this.blockingFileCount =
      conf.getInt(BLOCKING_STOREFILES_KEY, DEFAULT_BLOCKING_STOREFILE_COUNT);
  this.compactionCheckMultiplier = conf.getInt(
      COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY, DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER);
  if (this.compactionCheckMultiplier <= 0) {
    LOG.error("Compaction check period multiplier must be positive, setting default: "
        + DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER);
    this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER;
  }
  if (HStore.closeCheckInterval == 0) {
    HStore.closeCheckInterval = conf.getInt(
        "hbase.hstore.close.check.interval", 10 * 1000 * 1000 /* 10 MB */);
  }
  this.storeEngine = StoreEngine.create(this, this.conf, this.comparator);
  this.storeEngine.getStoreFileManager().loadFiles(loadStoreFiles());
  // Initialize checksum type from name. The names are CRC32, CRC32C, etc.
  this.checksumType = getChecksumType(conf);
  // Initialize bytes per checksum
  this.bytesPerChecksum = getBytesPerChecksum(conf);
  flushRetriesNumber = conf.getInt(
      "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
  pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE);
  if (flushRetriesNumber <= 0) {
    throw new IllegalArgumentException(
        "hbase.hstore.flush.retries.number must be > 0, not "
            + flushRetriesNumber);
  }
}
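The CompoundConfiguration comment in the constructor above deserves a quick illustration: keys are resolved in reverse order of addition, so column-family overrides win over the table-level and global settings added before them. Below is a minimal sketch assuming only that HBase is on the classpath; CompoundConfiguration, add, and addStringMap are real HBase APIs, while the demo class and the chosen key are just for illustration.

import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;

public class CompoundConfigDemo {
  public static void main(String[] args) {
    Configuration global = new Configuration(false);
    global.set("hbase.hstore.blockingStoreFiles", "10"); // cluster-wide default

    CompoundConfiguration conf = new CompoundConfiguration()
        .add(global) // added first, so lowest priority
        .addStringMap(Collections.singletonMap(
            "hbase.hstore.blockingStoreFiles", "25")); // added last, so it wins

    System.out.println(conf.get("hbase.hstore.blockingStoreFiles")); // prints 25
  }
}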