本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.KeyValueScanner类的典型用法代码示例。如果您正苦于以下问题:Java KeyValueScanner类的具体用法?Java KeyValueScanner怎么用?Java KeyValueScanner使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
KeyValueScanner类属于org.apache.hadoop.hbase.regionserver包,在下文中一共展示了KeyValueScanner类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: preFlushScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public InternalScanner preFlushScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c,
Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
if (newTtl != null) {
System.out.println("PreFlush:" + newTtl);
}
Integer newVersions = versions.get(store.getTableName());
ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
HConstants.OLDEST_TIMESTAMP);
}
示例2: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public InternalScanner preCompactScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c,
Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
long earliestPutTs, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
Integer newVersions = versions.get(store.getTableName());
ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, scanners, scanType,
store.getSmallestReadPoint(), earliestPutTs);
}
示例3: preStoreScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public KeyValueScanner preStoreScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
TableName tn = store.getTableName();
if (!tn.isSystemTable()) {
Long newTtl = ttls.get(store.getTableName());
Integer newVersions = versions.get(store.getTableName());
ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
return new StoreScanner(store, scanInfo, scan, targetCols,
((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
} else {
return s;
}
}
示例4: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s,
CompactionRequest request)
throws IOException {
// Get the latest tx snapshot state for the compaction
TransactionVisibilityState snapshot = cache.getLatestState();
// Record tx state before the compaction
if (compactionState != null) {
compactionState.record(request, snapshot);
}
// Also make sure to use the same snapshot for the compaction
return createStoreScanner(c.getEnvironment(), "compaction", snapshot, store, scanners, scanType, earliestPutTs);
}
示例5: createStoreScanner
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
TransactionVisibilityState snapshot, Store store,
List<? extends KeyValueScanner> scanners, ScanType type,
long earliestPutTs) throws IOException {
if (snapshot == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + env.getRegion().getRegionInfo().getRegionNameAsString() +
", no current transaction state found, defaulting to normal " + action + " scanner");
}
return null;
}
// construct a dummy transaction from the latest snapshot
Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
Scan scan = new Scan();
// need to see all versions, since we filter out excludes and applications may rely on multiple versions
scan.setMaxVersions();
scan.setFilter(
new IncludeInProgressFilter(dummyTx.getVisibilityUpperBound(),
snapshot.getInvalid(),
getTransactionFilter(dummyTx, type, null)));
return new StoreScanner(store, store.getScanInfo(), scan, scanners,
type, store.getSmallestReadPoint(), earliestPutTs);
}
示例6: createStoreScanner
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
TransactionVisibilityState snapshot, Store store,
List<? extends KeyValueScanner> scanners, ScanType type,
long earliestPutTs) throws IOException {
if (snapshot == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + env.getRegion().getRegionNameAsString() +
", no current transaction state found, defaulting to normal " + action + " scanner");
}
return null;
}
// construct a dummy transaction from the latest snapshot
Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
Scan scan = new Scan();
// need to see all versions, since we filter out excludes and applications may rely on multiple versions
scan.setMaxVersions();
scan.setFilter(
new IncludeInProgressFilter(dummyTx.getVisibilityUpperBound(),
snapshot.getInvalid(),
getTransactionFilter(dummyTx, type, null)));
return new StoreScanner(store, store.getScanInfo(), scan, scanners,
type, store.getSmallestReadPoint(), earliestPutTs);
}
示例7: LCCIndexMemStoreScanner
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
public LCCIndexMemStoreScanner(KeyValueScanner scanner,
TreeMap<byte[], DataType> lccIndexQualifier, byte[] target) throws IOException {
super();
dataList = new LinkedList<KeyValue>();
this.lccIndexQualifier = new TreeMap<byte[], DataType>(Bytes.BYTES_COMPARATOR);
if (target == null || !lccIndexQualifier.containsKey(target)) {
throw new IOException("winter index column " + Bytes.toString(target) + " is uknown type");
}
this.lccIndexQualifier.put(target, lccIndexQualifier.get(target));
// this.lccIndexQualifier = lccIndexQualifier;
long start = System.currentTimeMillis();
init(scanner);
System.out.println("winter LCCIndexMemStoreScanner cost "
+ (System.currentTimeMillis() - start) / 1000.0
+ " seconds to build lcc memstore scanner from memstore, the size of this scanner is: "
+ dataList.size());
}
示例8: init
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
private void init(KeyValueScanner scanner) {
KeyValue kv;
LCCIndexGenerator generator = new LCCIndexGenerator(lccIndexQualifier, null);
try {
while ((kv = scanner.next()) != null) {
generator.processKeyValue(kv);
}
} catch (IOException e) {
e.printStackTrace();
}
KeyValue[] kvArray = generator.generatedSortedKeyValueArray();
if (kvArray != null && kvArray.length != 0) {
for (KeyValue t : kvArray) {
dataList.add(t);
}
}
currentIndexPoint = 0;
}
示例9: preFlushScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
if (newTtl != null) {
System.out.println("PreFlush:" + newTtl);
}
Integer newVersions = versions.get(store.getTableName());
Store.ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
ScanType.MINOR_COMPACT, store.getHRegion().getSmallestReadPoint(),
HConstants.OLDEST_TIMESTAMP);
}
示例10: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
long earliestPutTs, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
Integer newVersions = versions.get(store.getTableName());
Store.ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, scanners, scanType, store.getHRegion()
.getSmallestReadPoint(), earliestPutTs);
}
示例11: preFlushScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public InternalScanner preFlushScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c,
Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
if (newTtl != null) {
System.out.println("PreFlush:" + newTtl);
}
Integer newVersions = versions.get(store.getTableName());
ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
HConstants.OLDEST_TIMESTAMP);
}
示例12: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public InternalScanner preCompactScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c,
Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
long earliestPutTs, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
Integer newVersions = versions.get(store.getTableName());
ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, scanners, scanType,
store.getSmallestReadPoint(), earliestPutTs);
}
示例13: preStoreScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public KeyValueScanner preStoreScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
TableName tn = store.getTableName();
if (!tn.isSystemTable()) {
Long newTtl = ttls.get(store.getTableName());
Integer newVersions = versions.get(store.getTableName());
ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
return new StoreScanner(store, scanInfo, scan, targetCols,
((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
} else {
return s;
}
}
示例14: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
@Override
public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
InternalScanner s, CompactionRequest request) throws IOException {
HRegionServer rs = (HRegionServer) c.getEnvironment().getRegionServerServices();
if (!IndexUtils.isIndexTable(store.getTableName())) {
// Not an index table
return null;
}
long smallestReadPoint = c.getEnvironment().getRegion().getSmallestReadPoint();
String actualTableName = IndexUtils.getActualTableName(store.getTableName().getNameAsString());
TTLStoreScanner ttlStoreScanner =
new TTLStoreScanner(store, smallestReadPoint, earliestPutTs, scanType, scanners,
new TTLExpiryChecker(), actualTableName, rs);
return ttlStoreScanner;
}
示例15: TTLStoreScanner
import org.apache.hadoop.hbase.regionserver.KeyValueScanner; //导入依赖的package包/类
public TTLStoreScanner(Store store, long smallestReadPoint, long earliestTS, ScanType type,
List<? extends KeyValueScanner> scanners, TTLExpiryChecker ttlExpiryChecker,
String actualTableName, HRegionServer rs) throws IOException {
this.store = store;
this.smallestReadPoint = smallestReadPoint;
this.earliestTS = earliestTS;
this.type = type;
Scan scan = new Scan();
scan.setMaxVersions(store.getFamily().getMaxVersions());
delegate =
new StoreScanner(store, store.getScanInfo(), scan, scanners, type, this.smallestReadPoint,
this.earliestTS);
this.ttlExpiryChecker = ttlExpiryChecker;
this.actualTableName = actualTableName;
this.rs = rs;
}