This article collects typical usage examples of the Java enum constant org.apache.hadoop.hbase.regionserver.BloomType.NONE. If you are unsure what BloomType.NONE is for or how to use it, the curated examples below should help; you can also read more about the enclosing class, org.apache.hadoop.hbase.regionserver.BloomType.
The 15 code examples of BloomType.NONE that follow are sorted by popularity by default.
Example 1: initRowKeyList
private List<byte[]> initRowKeyList(FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  // init
  StoreFile bucketStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpBucketFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFile secondaryStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpSecondaryFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFileScanner bucketScanner = getStoreFileScanner(bucketStoreFile);
  StoreFileScanner secondaryScanner = getStoreFileScanner(secondaryStoreFile);
  // get hit buckets
  MDRange[] ranges = getRanges(indexFamilyMap, rangeList);
  List<LMDBucket> bucketList = getBucketRanges(bucketScanner, ranges);
  // scan rowkeys based on the buckets
  List<byte[]> rowkeyList = getRawRowkeyList(secondaryScanner, bucketList, ranges);
  // deinit
  bucketScanner.close();
  bucketStoreFile.closeReader(true);
  secondaryScanner.close();
  secondaryStoreFile.closeReader(true);
  return rowkeyList;
}
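The getStoreFileScanner helper called above is not part of the listing. A minimal sketch of what it plausibly does, written against the same pre-2.0 StoreFile API the example uses (an illustration only, not the project's actual helper, and a fragment of the enclosing class rather than standalone code):

private StoreFileScanner getStoreFileScanner(StoreFile storeFile) throws IOException {
  // Assumed behavior: open the file's reader and create a positional-read scanner
  // that does not populate the block cache.
  StoreFile.Reader reader = storeFile.createReader();
  return reader.getStoreFileScanner(false /* cacheBlocks */, true /* pread */);
}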
Example 2: validateMobFile
/**
 * Validates a mob file by opening and closing it.
 * @param conf The current configuration.
 * @param fs The current file system.
 * @param path The path where the mob file is saved.
 * @param cacheConfig The current cache config.
 */
private static void validateMobFile(Configuration conf, FileSystem fs, Path path,
    CacheConfig cacheConfig, boolean primaryReplica) throws IOException {
  HStoreFile storeFile = null;
  try {
    storeFile = new HStoreFile(fs, path, conf, cacheConfig, BloomType.NONE, primaryReplica);
    storeFile.initReader();
  } catch (IOException e) {
    LOG.error("Failed to open mob file[" + path + "], keep it in temp directory.", e);
    throw e;
  } finally {
    if (storeFile != null) {
      storeFile.closeStoreFile(false);
    }
  }
}
Example 3: testGetScanner
@Test
public void testGetScanner() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir)
      .withFileContext(meta)
      .build();
  MobTestUtil.writeStoreFile(writer, testName.getMethodName());
  MobFile mobFile =
      new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
Example 4: verifyEncryption
private boolean verifyEncryption(TableName tableName, String familyName) throws IOException {
  Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
  boolean hasFiles = false;
  if (fs.exists(mobDirPath)) {
    FileStatus[] files = fs.listStatus(mobDirPath);
    hasFiles = files != null && files.length > 0;
    Assert.assertTrue(hasFiles);
    Path path = files[0].getPath();
    CacheConfig cacheConf = new CacheConfig(conf);
    HStoreFile sf = new HStoreFile(TEST_UTIL.getTestFileSystem(), path, conf, cacheConf,
        BloomType.NONE, true);
    sf.initReader();
    HFile.Reader reader = sf.getReader().getHFileReader();
    byte[] encryptionKey = reader.getTrailer().getEncryptionKey();
    Assert.assertTrue(null != encryptionKey);
    Assert.assertTrue(reader.getFileContext().getEncryptionContext().getCipher().getName()
        .equals(HConstants.CIPHER_AES));
  }
  return hasFiles;
}
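For context, the trailer only carries an encryption key when the column family was created with encryption enabled (and, for this mob test, with MOB turned on). A hedged sketch of such a family descriptor using the HBase 2.x builder API; the family name and mob threshold are illustrative assumptions:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EncryptedMobFamilySketch {
  static ColumnFamilyDescriptor build() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info")) // assumed family name
        .setEncryptionType("AES")   // cipher name; compare HConstants.CIPHER_AES in the test above
        .setMobEnabled(true)        // store large values as mob files
        .setMobThreshold(100L)      // assumed threshold: values over 100 bytes go to the mob path
        .build();
  }
}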
Example 5: countDelCellsInDelFiles
/**
 * Gets the number of del cells in the del files.
 * @param paths the del file paths
 * @return the number of del cells
 */
private int countDelCellsInDelFiles(List<Path> paths) throws IOException {
  List<HStoreFile> sfs = new ArrayList<>();
  int size = 0;
  for (Path path : paths) {
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
    sfs.add(sf);
  }
  List<KeyValueScanner> scanners = new ArrayList<>(StoreFileScanner.getScannersForStoreFiles(sfs,
      false, true, false, false, HConstants.LATEST_TIMESTAMP));
  long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
  long ttl = HStore.determineTTLFromFamily(hcd);
  ScanInfo scanInfo = new ScanInfo(conf, hcd, ttl, timeToPurgeDeletes, CellComparatorImpl.COMPARATOR);
  StoreScanner scanner = new StoreScanner(scanInfo, ScanType.COMPACT_RETAIN_DELETES, scanners);
  List<Cell> results = new ArrayList<>();
  boolean hasMore = true;
  while (hasMore) {
    hasMore = scanner.next(results);
    size += results.size();
    results.clear();
  }
  scanner.close();
  return size;
}
Example 6: validateMobFile
/**
 * Validates a mob file by opening and closing it.
 * @param conf The current configuration.
 * @param fs The current file system.
 * @param path The path where the mob file is saved.
 * @param cacheConfig The current cache config.
 */
private static void validateMobFile(Configuration conf, FileSystem fs, Path path,
    CacheConfig cacheConfig) throws IOException {
  StoreFile storeFile = null;
  try {
    storeFile = new StoreFile(fs, path, conf, cacheConfig, BloomType.NONE);
    storeFile.createReader();
  } catch (IOException e) {
    LOG.error("Failed to open mob file[" + path + "], keep it in temp directory.", e);
    throw e;
  } finally {
    if (storeFile != null) {
      storeFile.closeReader(false);
    }
  }
}
Example 7: testGetScanner
@Test
public void testGetScanner() throws Exception {
  FileSystem fs = FileSystem.get(conf);
  Path testDir = FSUtils.getRootDir(conf);
  Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
      .withOutputDir(outputDir)
      .withFileContext(meta)
      .build();
  MobTestUtil.writeStoreFile(writer, getName());
  MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(),
      conf, cacheConf, BloomType.NONE));
  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
Example 8: createGeneralBloomAtWrite
/**
 * Creates a new general (Row or RowCol) Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 *
 * @param conf
 * @param cacheConf
 * @param bloomType
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when failed to create one.
 */
public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, BloomType bloomType, int maxKeys,
    HFile.Writer writer) {
  if (!isGeneralBloomEnabled(conf)) {
    LOG.trace("Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  } else if (bloomType == BloomType.NONE) {
    LOG.trace("Bloom filter is turned off for the column family");
    return null;
  }
  float err = getErrorRate(conf);
  // In case of row/column Bloom filter lookups, each lookup is an OR of two
  // separate lookups. Therefore, if each lookup's false positive rate is p,
  // the resulting false positive rate is err = 1 - (1 - p)^2, and
  // p = 1 - sqrt(1 - err).
  if (bloomType == BloomType.ROWCOL) {
    err = (float) (1 - Math.sqrt(1 - err));
  }
  int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD,
      MAX_ALLOWED_FOLD_FACTOR);
  // Do we support compound bloom filters?
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      bloomType == BloomType.ROWCOL ? KeyValue.COMPARATOR : KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
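To make the comment's arithmetic concrete, here is a small standalone sketch (not HBase code) of the same adjustment: for a ROWCOL Bloom filter a read performs two OR-ed lookups, so each individual lookup must be sized with p = 1 - sqrt(1 - err) for the combined rate to come out at the configured err.

public class BloomErrorRateSketch {
  // err = 1 - (1 - p)^2  =>  p = 1 - sqrt(1 - err)
  static float perLookupRate(float err) {
    return (float) (1 - Math.sqrt(1 - err));
  }

  public static void main(String[] args) {
    float err = 0.01f;                         // an example configured error rate
    float p = perLookupRate(err);              // rate used to size each individual lookup
    double combined = 1 - Math.pow(1 - p, 2);  // recombining two lookups recovers ~err
    System.out.printf("err=%.4f per-lookup p=%.6f combined=%.4f%n", err, p, combined);
  }
}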
Example 9: create
public static CachedMobFile create(FileSystem fs, Path path, Configuration conf,
    CacheConfig cacheConf) throws IOException {
  // XXX: primaryReplica is only used for constructing the key of block cache so it is not a
  // critical problem if we pass the wrong value, so here we always pass true. Need to fix later.
  HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
  return new CachedMobFile(sf);
}
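A brief usage sketch (assumed caller code, not taken from the HBase source): CachedMobFile layers reference counting on top of MobFile, so a caller is expected to pair open() with close() around each read. The readCell signature matches the one used in Example 12 below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.mob.CachedMobFile;

public class CachedMobFileUsageSketch {
  // Hypothetical helper: read one cell through a CachedMobFile, balancing open()/close()
  // so the underlying reader can be released once the reference count reaches zero.
  static Cell readOne(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf,
      Cell search) throws IOException {
    CachedMobFile mobFile = CachedMobFile.create(fs, path, conf, cacheConf);
    mobFile.open();                            // increases the reference count, opens the reader
    try {
      return mobFile.readCell(search, false);  // false: do not cache mob blocks
    } finally {
      mobFile.close();                         // decreases the reference count
    }
  }
}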
Example 10: createGeneralBloomAtWrite
/**
 * Creates a new general (Row or RowCol) Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
 *
 * @param conf
 * @param cacheConf
 * @param bloomType
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when failed to create one.
 */
public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, BloomType bloomType, int maxKeys,
    HFile.Writer writer) {
  if (!isGeneralBloomEnabled(conf)) {
    LOG.trace("Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  } else if (bloomType == BloomType.NONE) {
    LOG.trace("Bloom filter is turned off for the column family");
    return null;
  }
  float err = getErrorRate(conf);
  // In case of row/column Bloom filter lookups, each lookup is an OR of two
  // separate lookups. Therefore, if each lookup's false positive rate is p,
  // the resulting false positive rate is err = 1 - (1 - p)^2, and
  // p = 1 - sqrt(1 - err).
  if (bloomType == BloomType.ROWCOL) {
    err = (float) (1 - Math.sqrt(1 - err));
  }
  int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD,
      MAX_ALLOWED_FOLD_FACTOR);
  // Do we support compound bloom filters?
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      bloomType == BloomType.ROWCOL ? CellComparatorImpl.COMPARATOR : null, bloomType);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
Example 11: compactMobFilePartition
/**
 * Compacts a partition of selected small mob files and all the del files.
 * @param request The compaction request.
 * @param partition A compaction partition.
 * @param delFiles The del files.
 * @param connection The connection to use.
 * @param table The current table.
 * @return The paths of new mob files after compactions.
 * @throws IOException if IO failure is encountered
 */
private List<Path> compactMobFilePartition(PartitionedMobCompactionRequest request,
                                           CompactionPartition partition,
                                           List<HStoreFile> delFiles,
                                           Connection connection,
                                           Table table) throws IOException {
  if (MobUtils.isMobFileExpired(column, EnvironmentEdgeManager.currentTime(),
      partition.getPartitionId().getDate())) {
    // If the files in the partition are expired, do not compact them and directly
    // return an empty list.
    return Collections.emptyList();
  }
  List<Path> newFiles = new ArrayList<>();
  List<FileStatus> files = partition.listFiles();
  int offset = 0;
  Path bulkloadPathOfPartition = new Path(bulkloadPath, partition.getPartitionId().toString());
  Path bulkloadColumnPath = new Path(bulkloadPathOfPartition, column.getNameAsString());
  while (offset < files.size()) {
    int batch = compactionBatchSize;
    if (files.size() - offset < compactionBatchSize) {
      batch = files.size() - offset;
    }
    if (batch == 1 && delFiles.isEmpty()) {
      // only one file left and no del files, do not compact it,
      // and directly add it to the new files.
      newFiles.add(files.get(offset).getPath());
      offset++;
      continue;
    }
    // clean the bulkload directory to avoid loading old files.
    fs.delete(bulkloadPathOfPartition, true);
    // add the selected mob files and del files into filesToCompact
    List<HStoreFile> filesToCompact = new ArrayList<>();
    for (int i = offset; i < batch + offset; i++) {
      HStoreFile sf = new HStoreFile(fs, files.get(i).getPath(), conf, compactionCacheConfig,
          BloomType.NONE, true);
      filesToCompact.add(sf);
    }
    filesToCompact.addAll(delFiles);
    // compact the mob files in a batch.
    compactMobFilesInBatch(request, partition, connection, table, filesToCompact, batch,
        bulkloadPathOfPartition, bulkloadColumnPath, newFiles);
    // move to the next batch.
    offset += batch;
  }
  LOG.info("Compaction is finished. The number of mob files is changed from " + files.size()
      + " to " + newFiles.size());
  return newFiles;
}
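The batching loop above is easier to follow with concrete numbers. A standalone sketch of the same arithmetic (the batch size of 100 is an assumed value of the compactionBatchSize field): with 230 selected files the partition is processed in batches of 100, 100 and 30.

public class MobCompactionBatchSketch {
  public static void main(String[] args) {
    int compactionBatchSize = 100;  // assumed batch size
    int fileCount = 230;            // files selected for this partition
    int offset = 0;
    while (offset < fileCount) {
      // Same rule as compactMobFilePartition: take a full batch unless fewer files remain.
      int batch = Math.min(compactionBatchSize, fileCount - offset);
      System.out.println("compact files [" + offset + ", " + (offset + batch) + ")");
      offset += batch;
    }
    // Prints the ranges [0, 100), [100, 200), [200, 230).
  }
}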
Example 12: testReadKeyValue
@Test
public void testReadKeyValue() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir)
      .withFileContext(meta)
      .build();
  String caseName = testName.getMethodName();
  MobTestUtil.writeStoreFile(writer, caseName);
  MobFile mobFile =
      new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
  byte[] family = Bytes.toBytes(caseName);
  byte[] qualify = Bytes.toBytes(caseName);
  // Test the start key
  byte[] startKey = Bytes.toBytes("aa"); // The start key bytes
  KeyValue expectedKey =
      new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  KeyValue seekKey = expectedKey.createKeyOnly(false);
  Cell cell = mobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test the end key
  byte[] endKey = Bytes.toBytes("zz"); // The end key bytes
  expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = mobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test the random key
  byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
  expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = mobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test the key which is less than the start key
  byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa"
  expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
  cell = mobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test the key which is more than the end key
  byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz"
  seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
  cell = mobFile.readCell(seekKey, false);
  assertNull(cell);
}
Example 13: create
public static CachedMobFile create(FileSystem fs, Path path, Configuration conf,
    CacheConfig cacheConf) throws IOException {
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
  return new CachedMobFile(sf);
}
Example 14: testReadKeyValue
@Test
public void testReadKeyValue() throws Exception {
  FileSystem fs = FileSystem.get(conf);
  Path testDir = FSUtils.getRootDir(conf);
  Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
      .withOutputDir(outputDir)
      .withFileContext(meta)
      .build();
  String caseName = getName();
  MobTestUtil.writeStoreFile(writer, caseName);
  MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(),
      conf, cacheConf, BloomType.NONE));
  byte[] family = Bytes.toBytes(caseName);
  byte[] qualify = Bytes.toBytes(caseName);
  // Test the start key
  byte[] startKey = Bytes.toBytes("aa"); // The start key bytes
  KeyValue expectedKey =
      new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  KeyValue seekKey = expectedKey.createKeyOnly(false);
  KeyValue kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
  MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
  // Test the end key
  byte[] endKey = Bytes.toBytes("zz"); // The end key bytes
  expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
  seekKey = expectedKey.createKeyOnly(false);
  kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
  MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
  // Test the random key
  byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
  expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
  seekKey = expectedKey.createKeyOnly(false);
  kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
  MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
  // Test the key which is less than the start key
  byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa"
  expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
  kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
  MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
  // Test the key which is more than the end key
  byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz"
  seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
  kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
  assertNull(kv);
}
Example 15: create
/**
 * Creates an instance of the MobFile.
 * @param fs The file system.
 * @param path The path of the underlying StoreFile.
 * @param conf The configuration.
 * @param cacheConf The CacheConfig.
 * @return An instance of the MobFile.
 * @throws IOException
 */
public static MobFile create(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf)
    throws IOException {
  // XXX: primaryReplica is only used for constructing the key of block cache so it is not a
  // critical problem if we pass the wrong value, so here we always pass true. Need to fix later.
  HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
  return new MobFile(sf);
}