This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.StoreFileScanner. If you are wondering what exactly StoreFileScanner does, how to use it, or what working examples look like, the curated class code examples here may help.
The StoreFileScanner class belongs to the org.apache.hadoop.hbase.regionserver package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
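Before the examples, here is a minimal sketch of the typical StoreFileScanner lifecycle: open scanners over store files with getScannersForStoreFiles, seek to a start key, iterate with next, and close. It assumes an HBase 2.x classpath (the HStoreFile-based API used in Examples 3 and 5); the class and method names dumpStoreFile/StoreFileScannerSketch are illustrative, not part of the examples below.

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;

public class StoreFileScannerSketch {
  // Prints every cell of one store file; `storeFile` must already be initialized.
  static void dumpStoreFile(HStoreFile storeFile) throws IOException {
    List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
        Collections.singletonList(storeFile),
        /* cacheBlocks = */ false,
        /* usePread = */ true,
        /* isCompaction = */ false,
        /* useDropBehind = */ false,
        /* readPt = */ HConstants.LATEST_TIMESTAMP);
    StoreFileScanner scanner = scanners.get(0);
    try {
      scanner.seek(KeyValue.LOWESTKEY); // position at the first cell in the file
      Cell cell;
      while ((cell = scanner.next()) != null) {
        System.out.println(cell);
      }
    } finally {
      scanner.close();
    }
  }
}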
Example 1: preCreateCoprocScanner

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
protected InternalScanner preCreateCoprocScanner(final CompactionRequest request,
    final ScanType scanType, final long earliestPutTs, final List<StoreFileScanner> scanners,
    User user) throws IOException {
  if (store.getCoprocessorHost() == null) return null;
  if (user == null) {
    return store.getCoprocessorHost()
        .preCompactScannerOpen(store, scanners, scanType, earliestPutTs, request);
  } else {
    try {
      return user.getUGI().doAs(new PrivilegedExceptionAction<InternalScanner>() {
        @Override
        public InternalScanner run() throws Exception {
          return store.getCoprocessorHost()
              .preCompactScannerOpen(store, scanners, scanType, earliestPutTs, request);
        }
      });
    } catch (InterruptedException ie) {
      InterruptedIOException iioe = new InterruptedIOException();
      iioe.initCause(ie);
      throw iioe;
    }
  }
}
Example 2: initRowKeyList

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
private List<byte[]> initRowKeyList(FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  // init
  StoreFile bucketStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpBucketFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFile secondaryStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpSecondaryFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFileScanner bucketScanner = getStoreFileScanner(bucketStoreFile);
  StoreFileScanner secondaryScanner = getStoreFileScanner(secondaryStoreFile);
  // get hit buckets
  MDRange[] ranges = getRanges(indexFamilyMap, rangeList);
  List<LMDBucket> bucketList = getBucketRanges(bucketScanner, ranges);
  // scan rowkeys based on the buckets
  List<byte[]> rowkeyList = getRawRowkeyList(secondaryScanner, bucketList, ranges);
  // deinit
  bucketScanner.close();
  bucketStoreFile.closeReader(true);
  secondaryScanner.close();
  secondaryStoreFile.closeReader(true);
  return rowkeyList;
}
Example 3: readCell

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
/**
 * Reads a cell from the mob file.
 * @param search The cell to be searched for in the mob file.
 * @param cacheMobBlocks Whether this scanner should cache blocks.
 * @param readPt the read point.
 * @return The cell found in the mob file, or null if none matches.
 * @throws IOException
 */
public Cell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException {
  Cell result = null;
  StoreFileScanner scanner = null;
  List<HStoreFile> sfs = new ArrayList<>();
  sfs.add(sf);
  try {
    List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(sfs,
        cacheMobBlocks, true, false, false, readPt);
    if (!sfScanners.isEmpty()) {
      scanner = sfScanners.get(0);
      if (scanner.seek(search)) {
        result = scanner.peek();
      }
    }
  } finally {
    if (scanner != null) {
      scanner.close();
    }
  }
  return result;
}
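A hedged usage sketch for the method above: build the smallest possible cell on the wanted row so that seek() lands on the first real cell of that row. The lookup helper is hypothetical; in HBase itself `readPt` would come from the region's MVCC.

// Hypothetical helper around MobFile.readCell; `mobFile` comes from wherever
// the mob file was opened, `readPt` from the region's current read point.
static org.apache.hadoop.hbase.Cell lookup(org.apache.hadoop.hbase.mob.MobFile mobFile,
    byte[] row, long readPt) throws java.io.IOException {
  // Smallest cell on `row`, so seek() positions at the row's first real cell.
  org.apache.hadoop.hbase.Cell search =
      org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(row);
  return mobFile.readCell(search, /* cacheMobBlocks = */ false, readPt);
}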
Example 4: testGetScanner

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
@Test
public void testGetScanner() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir)
      .withFileContext(meta)
      .build();
  MobTestUtil.writeStoreFile(writer, testName.getMethodName());
  MobFile mobFile =
      new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
Example 5: countDelCellsInDelFiles

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
/**
 * Gets the number of delete cells in the del files.
 * @param paths the del file paths
 * @return the number of delete cells
 */
private int countDelCellsInDelFiles(List<Path> paths) throws IOException {
  List<HStoreFile> sfs = new ArrayList<>();
  int size = 0;
  for (Path path : paths) {
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
    sfs.add(sf);
  }
  List<KeyValueScanner> scanners = new ArrayList<>(StoreFileScanner.getScannersForStoreFiles(sfs,
      false, true, false, false, HConstants.LATEST_TIMESTAMP));
  long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
  long ttl = HStore.determineTTLFromFamily(hcd);
  ScanInfo scanInfo =
      new ScanInfo(conf, hcd, ttl, timeToPurgeDeletes, CellComparatorImpl.COMPARATOR);
  StoreScanner scanner = new StoreScanner(scanInfo, ScanType.COMPACT_RETAIN_DELETES, scanners);
  List<Cell> results = new ArrayList<>();
  boolean hasMore = true;
  while (hasMore) {
    hasMore = scanner.next(results);
    size += results.size();
    results.clear();
  }
  scanner.close();
  return size;
}
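Note the scan type: ScanType.COMPACT_RETAIN_DELETES keeps delete markers visible to the scanner, which is what makes counting them possible here; with COMPACT_DROP_DELETES, markers older than hbase.hstore.time.to.purge.deletes would be purged instead. A minimal, hypothetical sketch of tuning that knob:

// Hypothetical tuning sketch: keep delete markers for 24 hours after their
// timestamp before a compaction is allowed to purge them.
org.apache.hadoop.conf.Configuration conf = org.apache.hadoop.hbase.HBaseConfiguration.create();
conf.setLong("hbase.hstore.time.to.purge.deletes", 24L * 60 * 60 * 1000);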
Example 6: readCell

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
/**
 * Reads a cell from the mob file.
 * @param search The KeyValue to be searched for in the mob file.
 * @param cacheMobBlocks Whether this scanner should cache blocks.
 * @return The KeyValue found in the mob file, or null if none matches.
 * @throws IOException
 */
public KeyValue readCell(KeyValue search, boolean cacheMobBlocks) throws IOException {
  KeyValue result = null;
  StoreFileScanner scanner = null;
  List<StoreFile> sfs = new ArrayList<StoreFile>();
  sfs.add(sf);
  try {
    List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(sfs,
        cacheMobBlocks, true, false, null, sf.getMaxMemstoreTS());
    if (!sfScanners.isEmpty()) {
      scanner = sfScanners.get(0);
      if (scanner.seek(search)) {
        result = scanner.peek();
      }
    }
  } finally {
    if (scanner != null) {
      scanner.close();
    }
  }
  return result;
}
Example 7: testGetScanner

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
@Test
public void testGetScanner() throws Exception {
  FileSystem fs = FileSystem.get(conf);
  Path testDir = FSUtils.getRootDir(conf);
  Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
      .withOutputDir(outputDir)
      .withFileContext(meta)
      .build();
  MobTestUtil.writeStoreFile(writer, getName());
  MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(),
      conf, cacheConf, BloomType.NONE));
  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
Example 8: createFileScanners

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
/**
 * Creates file scanners for compaction.
 *
 * @param filesToCompact Files.
 * @param smallestReadPoint the smallest read point still in use by ongoing scanners.
 * @param useDropBehind whether to hint the filesystem to drop cached pages after reading.
 * @return Scanners.
 */
protected List<StoreFileScanner> createFileScanners(final Collection<StoreFile> filesToCompact,
    long smallestReadPoint, boolean useDropBehind) throws IOException {
  return StoreFileScanner.getScannersForStoreFiles(filesToCompact,
      /* cache blocks = */ false,
      /* use pread = */ false,
      /* is compaction */ true,
      /* use Drop Behind */ useDropBehind, smallestReadPoint);
}
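The flag choices are deliberate: a compaction reads each input file exactly once from start to end, so caching its blocks would only churn the block cache, and sequential (non-pread) reads are faster for a full sweep. A sketch of how the result might be consumed, reusing the (ScanInfo, ScanType, scanners) StoreScanner constructor shown in Example 5; scanInfo, filesToCompact, and smallestReadPoint are assumed to exist in scope:

// Sketch only: drive a major-compaction-style scan over the file scanners.
List<StoreFileScanner> scanners =
    createFileScanners(filesToCompact, smallestReadPoint, /* useDropBehind = */ true);
InternalScanner compactionScanner =
    new StoreScanner(scanInfo, ScanType.COMPACT_DROP_DELETES, scanners);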
Example 9: winterTestingStoreFile

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
private void winterTestingStoreFile(StoreFile sf) throws IOException {
  StoreFileScanner compactedFileScanner = sf.getReader().getStoreFileScanner(false, false);
  KeyValue startKey =
      KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP);
  compactedFileScanner.seek(startKey);
  KeyValue kv;
  int n = 0;
  while ((kv = (KeyValue) compactedFileScanner.next()) != null) {
    LOG.info("LCDBG, show kv: " + Bytes.toInt(kv.getRow()));
    ++n;
  }
  LOG.info("LCDBG, reader has: " + n + " in " + sf.getPath());
  compactedFileScanner.close();
}
Example 10: getStoreFileScanner

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
private StoreFileScanner getStoreFileScanner(StoreFile storeFile) throws IOException {
  StoreFile.Reader r = storeFile.createReader(canUseDrop);
  r.setReplicaStoreFile(isPrimaryReplica);
  StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt);
  scanner.setScanQueryMatcher(matcher);
  return scanner;
}
Example 11: getRawRowkeyList

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
private List<byte[]> getRawRowkeyList(StoreFileScanner secondaryScanner,
    List<LMDBucket> bucketList, MDRange[] ranges) throws IOException {
  List<byte[]> rowkeyList = new ArrayList<>();
  for (LMDBucket bucket : bucketList) {
    // reseek only if the scanner is not already positioned at the bucket start
    Cell peekCell = secondaryScanner.peek();
    if (peekCell == null || Bytes.compareTo(bucket.getStartKey(), peekCell.getRow()) != 0) {
      secondaryScanner.reseek(new KeyValue(bucket.getStartKey(), LMDIndexConstants.FAMILY,
          LMDIndexConstants.QUALIFIER));
    }
    Cell cell;
    while ((cell = secondaryScanner.peek()) != null) {
      if (Bytes.compareTo(bucket.getStopKey(), cell.getRow()) < 0) {
        break;
      }
      boolean included = true;
      int[] values = MDUtils.bitwiseUnzip(cell.getRow(), ranges.length);
      for (int i = 0; i < ranges.length; i++) {
        if (!ranges[i].include(values[i])) {
          included = false;
          break;
        }
      }
      if (included) {
        // System.out.println("adding key: " + Bytes.toInt(cell.getQualifier()));
        rowkeyList.add(cell.getQualifier());
        secondaryScanner.next();
      } else {
        // System.out.println("skipped key: " + Bytes.toInt(cell.getQualifier()));
        secondaryScanner.reseek(
            new KeyValue(cell.getRow(), LMDIndexConstants.FAMILY, LMDIndexConstants.QUALIFIER));
      }
    }
  }
  return rowkeyList;
}
Example 12: createFile

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
private static StoreFile createFile(long size) throws Exception {
  StoreFile sf = mock(StoreFile.class);
  when(sf.getPath()).thenReturn(new Path("moo"));
  StoreFile.Reader r = mock(StoreFile.Reader.class);
  when(r.getEntries()).thenReturn(size);
  when(r.length()).thenReturn(size);
  when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
  when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
  when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong())).thenReturn(
      mock(StoreFileScanner.class));
  when(sf.getReader()).thenReturn(r);
  when(sf.createReader(anyBoolean())).thenReturn(r);
  when(sf.createReader()).thenReturn(r);
  return sf;
}
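A hedged sketch of how such a Mockito stub is typically used: fabricate a list of mock files with chosen sizes and hand it to whatever compaction-selection logic is under test (the policy call named below is illustrative, not taken from this example).

// Hypothetical test body: three mock store files of increasing size.
List<StoreFile> candidates = Arrays.asList(createFile(100), createFile(200), createFile(400));
// e.g. assert on compactionPolicy.applyCompactionPolicy(candidates, false, false);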
Example 13: readHFile

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
private void readHFile(Configuration hadoopConf, Configuration hbaseConf, String fsStr,
    String fileName) throws IOException {
  CacheConfig tmpCacheConfig = new CacheConfig(hbaseConf);
  FileSystem fs = null;
  if (fsStr.equalsIgnoreCase("local")) {
    fs = LocalFileSystem.getLocal(hadoopConf);
  } else {
    fs = FileSystem.get(hadoopConf);
  }
  Path path = new Path(fileName);
  if (!fs.exists(path)) {
    System.out.println("WinterTestAID file not exists: " + path);
  } else {
    System.out.println("WinterTestAID reading lccindex hfile: " + path);
    StoreFile sf = new StoreFile(fs, path, hbaseConf, tmpCacheConfig, BloomType.NONE, null);
    Reader reader = sf.createReader();
    System.out.println("WinterTestAID store file attr: " + sf.mWinterGetAttribute());
    StoreFileScanner sss = reader.getStoreFileScanner(false, false);
    sss.seek(KeyValue.LOWESTKEY);
    System.out.println("WinterTestAID store peek value: "
        + LCCIndexConstant.mWinterToPrint(sss.peek()));
    KeyValue kv;
    int counter = 0, printInterval = 1, totalSize = 0;
    while ((kv = sss.next()) != null) {
      if (counter == 0) {
        counter = printInterval;
        System.out.println("WinterTestAID hfile keyvalue: " + LCCIndexConstant.mWinterToPrint(kv));
      }
      --counter;
      ++totalSize;
    }
    sss.close();
    reader.close(false);
    System.out.println("WinterTestAID total size: " + totalSize);
    System.out.println("WinterTestAID winter inner mWinterGetScannersForStoreFiles start: "
        + LCCIndexConstant.convertUnknownBytes(reader.getFirstKey()));
  }
}
Example 14: readHFile

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
public static void readHFile(Configuration hbaseConf, Path hfilePath) throws IOException {
  CacheConfig tmpCacheConfig = new CacheConfig(hbaseConf);
  FileSystem hdfs = getHDFS();
  if (!hdfs.exists(hfilePath)) {
    System.out.println("WinterTestAID file not exists: " + hfilePath);
  } else {
    System.out.println("WinterTestAID reading lccindex hfile: " + hfilePath);
    StoreFile sf = new StoreFile(hdfs, hfilePath, hbaseConf, tmpCacheConfig, BloomType.NONE, null);
    Reader reader = sf.createReader();
    System.out.println("WinterTestAID store file attr: " + sf.mWinterGetAttribute());
    StoreFileScanner sss = reader.getStoreFileScanner(false, false);
    sss.seek(KeyValue.LOWESTKEY);
    System.out.println("WinterTestAID store peek value: "
        + LCCIndexConstant.mWinterToPrint(sss.peek()));
    KeyValue kv;
    int counter = 0, printInterval = 1, totalSize = 0;
    while ((kv = sss.next()) != null) {
      if (counter == 0) {
        counter = printInterval;
        System.out.println("WinterTestAID hfile keyvalue: " + LCCIndexConstant.mWinterToPrint(kv));
      }
      --counter;
      ++totalSize;
    }
    sss.close();
    reader.close(false);
    System.out.println("WinterTestAID total size: " + totalSize);
    System.out.println("WinterTestAID winter inner mWinterGetScannersForStoreFiles start: "
        + LCCIndexConstant.convertUnknownBytes(reader.getFirstKey()));
  }
}
Example 15: createFile

import org.apache.hadoop.hbase.regionserver.StoreFileScanner; // import the required package/class
private static StoreFile createFile(long size) throws Exception {
  StoreFile sf = mock(StoreFile.class);
  when(sf.getPath()).thenReturn(new Path("moo"));
  StoreFile.Reader r = mock(StoreFile.Reader.class);
  when(r.getEntries()).thenReturn(size);
  when(r.length()).thenReturn(size);
  when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
  when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
  when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong())).thenReturn(
      mock(StoreFileScanner.class));
  when(sf.getReader()).thenReturn(r);
  when(sf.createReader()).thenReturn(r);
  return sf;
}