This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.StoreFile.Reader. If you are unsure what the Reader class does or how to use it, the selected code examples below may help.
The Reader class belongs to the org.apache.hadoop.hbase.regionserver.StoreFile package. 15 code examples of the class are shown below, ordered by popularity by default.
Example 1: getLCIndexScannersFromFiles
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
public static List<StoreFileScanner> getLCIndexScannersFromFiles(Collection<StoreFile> files,
    boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
    long readPt, ScanRange scanRange, HStore store) throws IOException {
  List<StoreFileScanner> scanners = new ArrayList<>(files.size());
  for (StoreFile file : files) {
    if (file.getPath().getName().contains(".")) {
      System.out.println("LCDBG trying to find file as a ref: " + file);
    }
    StoreFile.Reader r = file.createLCIndexReader(scanRange, store);
    if (r == null) {
      // ThrowWhenCalled is expected to throw, so r is non-null past this block.
      WinterOptimizer
          .ThrowWhenCalled("LCDBG StoreFileScanner failed in opening a reader locally");
    }
    StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt);
    scanner.setScanQueryMatcher(matcher);
    scanners.add(scanner);
  }
  return scanners;
}
Example 2: recalculateSize
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
/**
 * Recalculate the size of the compaction based on the current files to compact.
 */
private void recalculateSize() {
  long sz = 0;
  for (StoreFile sf : this.filesToCompact) {
    Reader r = sf.getReader();
    sz += r == null ? 0 : r.length();
  }
  this.totalSize = sz;
}
Example 3: parseKeyValueFromReader
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
private Queue<KeyValue> parseKeyValueFromReader() throws IOException {
  HFile.Reader reader = HFile
      .createReader(store.getFileSystem(), majorMovedPath, store.getCacheConfig(), store.conf);
  HFileScanner scanner = reader.getScanner(false, false, false);
  Queue<KeyValue> queue = new LinkedList<>();
  if (scanner.seekTo()) { // seekTo() returns false when the file has no entries
    do {
      KeyValue cell = (KeyValue) scanner.getKeyValue();
      queue.offer(cell);
    } while (scanner.next());
  }
  reader.close();
  return queue;
}
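A hedged sketch of how the returned queue might be consumed; the verification step is an assumption about the surrounding test code, not shown in the original:

// Hypothetical consumer of the queue built by parseKeyValueFromReader().
Queue<KeyValue> kvs = parseKeyValueFromReader();
KeyValue kv;
while ((kv = kvs.poll()) != null) {
  // e.g. compare each KeyValue against the cells originally written,
  // as a unit test on the moved file would.
  System.out.println("parsed: " + kv);
}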
Example 4: winterTestingHFile
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
private void winterTestingHFile(Path file) throws IOException {
  HFile.Reader reader =
      HFile.createReader(store.getFileSystem(), file, store.getCacheConfig(), store.conf);
  HFileScanner scanner = reader.getScanner(false, false, false);
  int n = 0;
  if (scanner.seekTo()) { // guard against an empty file
    do {
      ++n;
    } while (scanner.next());
  }
  reader.close();
  LOG.info("LCDBG, HFile has: " + n + " in " + file);
}
Example 5: StoreFileScanner
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
/**
 * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner}.
 *
 * @param reader store file reader backing this scanner
 * @param hfs HFile scanner
 * @param useMVCC whether MVCC read-point enforcement is required
 * @param hasMVCC whether the underlying file carries MVCC information
 * @param readPt the MVCC read point to enforce
 */
public StoreFileScanner(StoreFile.Reader reader, HFileScanner hfs, boolean useMVCC,
    boolean hasMVCC, long readPt) {
  this.readPt = readPt;
  this.reader = reader;
  this.hfs = hfs;
  this.enforceMVCC = useMVCC;
  this.hasMVCCInfo = hasMVCC;
}
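In the other examples this constructor is not invoked directly; the scanner is obtained from the reader, which fills in the MVCC flags itself. A minimal sketch, assuming an open StoreFile.Reader r and a read point readPt from the surrounding scan context:

// Equivalent construction through the Reader, as in Examples 1 and 6.
StoreFileScanner scanner = r.getStoreFileScanner(
    true,   // cacheBlocks
    false,  // usePread
    false,  // isCompaction
    readPt);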
Example 6: getScannersForStoreFiles
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
/**
 * Return a list of scanners corresponding to the given set of store files,
 * and set the ScanQueryMatcher on each store file scanner for further
 * optimization.
 */
public static List<StoreFileScanner> getScannersForStoreFiles(Collection<StoreFile> files,
    boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop,
    ScanQueryMatcher matcher, long readPt, boolean isPrimaryReplica) throws IOException {
  List<StoreFileScanner> scanners = new ArrayList<StoreFileScanner>(files.size());
  for (StoreFile file : files) {
    StoreFile.Reader r = file.createReader(canUseDrop);
    r.setReplicaStoreFile(isPrimaryReplica);
    StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt);
    scanner.setScanQueryMatcher(matcher);
    scanners.add(scanner);
  }
  return scanners;
}
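Callers own the returned scanners and should close them once the read is finished; a hedged usage sketch, assuming the helper is the static method on StoreFileScanner (as in stock HBase) and that storeFiles, matcher, and readPt come from the surrounding store context:

List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
    storeFiles, true /* cacheBlocks */, false /* usePread */, false /* isCompaction */,
    false /* canUseDrop */, matcher, readPt, true /* isPrimaryReplica */);
try {
  // hand the scanners to a StoreScanner / KeyValueHeap for the actual read
} finally {
  for (StoreFileScanner s : scanners) {
    s.close(); // close each scanner once the read is finished
  }
}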
Example 7: preStoreFileReaderOpen
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
@Override
public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
    FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
    Reference r, Reader reader) throws IOException {
  ctPreStoreFileReaderOpen.incrementAndGet();
  return null; // null: do not substitute a custom reader, use the default open path
}
Example 8: postStoreFileReaderOpen
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
@Override
public Reader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
    FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
    Reference r, Reader reader) throws IOException {
  ctPostStoreFileReaderOpen.incrementAndGet();
  return reader; // return the reader unchanged; the returned value is the one HBase will use
}
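Returning reader unchanged keeps the reader HBase just opened; returning a different value replaces it. A minimal hypothetical variant that records which store files get readers (openedPaths is an assumed thread-safe collection, not part of the original test):

@Override
public Reader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
    FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
    Reference r, Reader reader) throws IOException {
  openedPaths.add(p); // hypothetical bookkeeping of opened store file paths
  return reader;      // unchanged: keep the reader HBase opened
}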
Example 9: StoreFileScanner
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
/**
 * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner}.
 *
 * @param reader store file reader backing this scanner
 * @param hfs HFile scanner
 * @param useMVCC whether MVCC read-point enforcement is required
 * @param hasMVCC whether the underlying file carries MVCC information
 */
public StoreFileScanner(StoreFile.Reader reader, HFileScanner hfs, boolean useMVCC,
    boolean hasMVCC) {
  this.reader = reader;
  this.hfs = hfs;
  this.enforceMVCC = useMVCC;
  this.hasMVCCInfo = hasMVCC;
}
Example 10: getScannersForStoreFiles
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
/**
 * Return a list of scanners corresponding to the given set of store files,
 * and set the ScanQueryMatcher on each store file scanner for further
 * optimization.
 */
public static List<StoreFileScanner> getScannersForStoreFiles(Collection<StoreFile> files,
    boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher)
    throws IOException {
  List<StoreFileScanner> scanners = new ArrayList<StoreFileScanner>(files.size());
  for (StoreFile file : files) {
    StoreFile.Reader r = file.createReader();
    StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction);
    scanner.setScanQueryMatcher(matcher);
    scanners.add(scanner);
  }
  return scanners;
}
Example 11: mWinterGetScannersFromFiles
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
public static List<StoreFileScanner> mWinterGetScannersFromFiles(Collection<StoreFile> files,
    boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
    byte[] targetLCCIndexCF, Store store) throws IOException {
  List<StoreFileScanner> scanners = new ArrayList<StoreFileScanner>(files.size());
  for (StoreFile file : files) {
    if (file.getPath().getName().contains(".")) {
      System.out.println("winter trying to find file as a ref: " + file);
    }
    StoreFile.Reader r = null;
    if (store.localfs != null) {
      // System.out.println("winter local file system: " + store.localfs);
      r = file.mWinterCreateLCCLocalReader(targetLCCIndexCF, store);
    } else if (store.hasLCCIndex) {
      if (!file.hasLCCIndexOnHDFS) {
        WinterOptimizer.ThrowWhenCalled("winter not table " + file.getTableName() + ", column: "
            + file.getColumnFamilyName() + " does not contain lccindex, file is: "
            + file.getPath() + " lccindex file is: "
            + StoreFile.mWinterGetIndexPathFromPath(file.getPath()));
      }
      r = file.mWinterCreateLCCIndexReader(targetLCCIndexCF);
    }
    if (r == null) {
      // ThrowWhenCalled is expected to throw, so r is non-null past this block.
      if (store.localfs == null) {
        WinterOptimizer
            .ThrowWhenCalled("winter StoreFileScanner failed in opening a reader locally, home is: "
                + store.lccLocalHome);
      } else {
        WinterOptimizer
            .ThrowWhenCalled("winter StoreFileScanner failed in opening a reader on HDFS");
      }
    }
    StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction);
    scanner.setScanQueryMatcher(matcher);
    scanners.add(scanner);
  }
  return scanners;
}
Example 12: readHFile
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
private void readHFile(Configuration hadoopConf, Configuration hbaseConf, String fsStr,
    String fileName) throws IOException {
  CacheConfig tmpCacheConfig = new CacheConfig(hbaseConf);
  FileSystem fs = null;
  if (fsStr.equalsIgnoreCase("local")) {
    fs = LocalFileSystem.getLocal(hadoopConf);
  } else {
    fs = FileSystem.get(hadoopConf);
  }
  Path path = new Path(fileName);
  if (!fs.exists(path)) {
    System.out.println("WinterTestAID file not exists: " + path);
  } else {
    System.out.println("WinterTestAID reading lccindex hfile: " + path);
    StoreFile sf = new StoreFile(fs, path, hbaseConf, tmpCacheConfig, BloomType.NONE, null);
    Reader reader = sf.createReader();
    System.out.println("WinterTestAID store file attr: " + sf.mWinterGetAttribute());
    StoreFileScanner sss = reader.getStoreFileScanner(false, false);
    sss.seek(KeyValue.LOWESTKEY);
    System.out.println("WinterTestAID store peek value: "
        + LCCIndexConstant.mWinterToPrint(sss.peek()));
    KeyValue kv;
    int counter = 0, printInterval = 1, totalSize = 0;
    while ((kv = sss.next()) != null) {
      if (counter == 0) {
        counter = printInterval;
        System.out
            .println("WinterTestAID hfile keyvalue: " + LCCIndexConstant.mWinterToPrint(kv));
      }
      --counter;
      ++totalSize;
    }
    sss.close();
    reader.close(false);
    System.out.println("WinterTestAID total size: " + totalSize);
    System.out.println("WinterTestAID winter inner mWinterGetScannersForStoreFiles start: "
        + LCCIndexConstant.convertUnknownBytes(reader.getFirstKey()));
  }
}
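Distilled from the example above, the open/scan/close skeleton is short; this is a sketch under the same assumptions (BloomType.NONE, no custom comparator), and the boolean on Reader.close is taken to be HBase's evictOnClose flag:

StoreFile sf = new StoreFile(fs, path, hbaseConf, new CacheConfig(hbaseConf),
    BloomType.NONE, null);
Reader reader = sf.createReader();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false); // no cache, no pread
scanner.seek(KeyValue.LOWESTKEY); // position at the smallest possible key
KeyValue kv;
while ((kv = scanner.next()) != null) {
  // process kv
}
scanner.close();
reader.close(false); // false: keep this file's blocks in the block cache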
Example 13: readHFile
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
public static void readHFile(Configuration hbaseConf, Path hfilePath) throws IOException {
  CacheConfig tmpCacheConfig = new CacheConfig(hbaseConf);
  FileSystem hdfs = getHDFS();
  if (!hdfs.exists(hfilePath)) {
    System.out.println("WinterTestAID file not exists: " + hfilePath);
  } else {
    System.out.println("WinterTestAID reading lccindex hfile: " + hfilePath);
    StoreFile sf = new StoreFile(hdfs, hfilePath, hbaseConf, tmpCacheConfig, BloomType.NONE, null);
    Reader reader = sf.createReader();
    System.out.println("WinterTestAID store file attr: " + sf.mWinterGetAttribute());
    StoreFileScanner sss = reader.getStoreFileScanner(false, false);
    sss.seek(KeyValue.LOWESTKEY);
    System.out.println("WinterTestAID store peek value: "
        + LCCIndexConstant.mWinterToPrint(sss.peek()));
    KeyValue kv;
    int counter = 0, printInterval = 1, totalSize = 0;
    while ((kv = sss.next()) != null) {
      if (counter == 0) {
        counter = printInterval;
        System.out
            .println("WinterTestAID hfile keyvalue: " + LCCIndexConstant.mWinterToPrint(kv));
      }
      --counter;
      ++totalSize;
    }
    sss.close();
    reader.close(false);
    System.out.println("WinterTestAID total size: " + totalSize);
    System.out.println("WinterTestAID winter inner mWinterGetScannersForStoreFiles start: "
        + LCCIndexConstant.convertUnknownBytes(reader.getFirstKey()));
  }
}
Example 14: StoreFileScanner
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
/**
 * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner}.
 *
 * @param reader store file reader backing this scanner
 * @param hfs HFile scanner
 * @param useMVCC whether MVCC read-point enforcement is required
 * @param hasMVCC whether the underlying file carries MVCC information
 * @param readPt the MVCC read point to enforce
 */
public StoreFileScanner(StoreFile.Reader reader, HFileScanner hfs, boolean useMVCC,
    boolean hasMVCC, long readPt) {
  this.readPt = readPt;
  this.reader = reader;
  this.hfs = hfs;
  this.enforceMVCC = useMVCC;
  this.hasMVCCInfo = hasMVCC;
}
Example 15: getScannersForStoreFiles
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; // import the required package/class
/**
 * Return a list of scanners corresponding to the given set of store files,
 * and set the ScanQueryMatcher on each store file scanner for further
 * optimization.
 */
public static List<StoreFileScanner> getScannersForStoreFiles(
    Collection<StoreFile> files, boolean cacheBlocks, boolean usePread,
    boolean isCompaction, ScanQueryMatcher matcher, long readPt) throws IOException {
  List<StoreFileScanner> scanners = new ArrayList<StoreFileScanner>(files.size());
  for (StoreFile file : files) {
    StoreFile.Reader r = file.createReader();
    StoreFileScanner scanner =
        r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt);
    scanner.setScanQueryMatcher(matcher);
    scanners.add(scanner);
  }
  return scanners;
}