This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.InternalScanner. If you are wondering what InternalScanner is for, or how to use it, the curated examples below should help.
The InternalScanner class belongs to the org.apache.hadoop.hbase.regionserver package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
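Before the examples, here is a minimal sketch of the read loop that nearly all of them share. It is an illustration only, not taken from any of the projects below: it assumes an already-opened HRegion named region (hypothetical), and relies on InternalScanner.next(List) filling the list with the cells of one row and returning true while more rows remain.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

// Hedged sketch: 'region' is an assumed, already-opened HRegion.
static void scanAllRows(HRegion region) throws IOException {
  InternalScanner scanner = region.getScanner(new Scan());
  try {
    List<Cell> cells = new ArrayList<Cell>();
    boolean hasMore;
    do {
      // next() appends the cells of one row and reports whether more rows follow
      hasMore = scanner.next(cells);
      // ... process 'cells' for this row ...
      cells.clear();
    } while (hasMore);
  } finally {
    scanner.close();
  }
}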
Example 1: preCreateCoprocScanner
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
protected InternalScanner preCreateCoprocScanner(final CompactionRequest request,
final ScanType scanType, final long earliestPutTs, final List<StoreFileScanner> scanners,
User user) throws IOException {
if (store.getCoprocessorHost() == null) return null;
if (user == null) {
return store.getCoprocessorHost()
.preCompactScannerOpen(store, scanners, scanType, earliestPutTs, request);
} else {
try {
return user.getUGI().doAs(new PrivilegedExceptionAction<InternalScanner>() {
@Override public InternalScanner run() throws Exception {
return store.getCoprocessorHost()
.preCompactScannerOpen(store, scanners, scanType, earliestPutTs, request);
}
});
} catch (InterruptedException ie) {
InterruptedIOException iioe = new InterruptedIOException();
iioe.initCause(ie);
throw iioe;
}
}
}
Example 2: postCreateCoprocScanner
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
/**
 * Calls the coprocessor, if any, to create a scanner after normal scanner creation.
 *
 * @param request Compaction request.
 * @param scanType Scan type.
 * @param scanner The default scanner created for the compaction.
 * @param user The user running the compaction, or null.
 * @return The scanner to use (usually the default); null if the compaction should not proceed.
 */
protected InternalScanner postCreateCoprocScanner(final CompactionRequest request,
final ScanType scanType, final InternalScanner scanner, User user) throws IOException {
if (store.getCoprocessorHost() == null) return scanner;
if (user == null) {
return store.getCoprocessorHost().preCompact(store, scanner, scanType, request);
} else {
try {
return user.getUGI().doAs(new PrivilegedExceptionAction<InternalScanner>() {
@Override public InternalScanner run() throws Exception {
return store.getCoprocessorHost().preCompact(store, scanner, scanType, request);
}
});
} catch (InterruptedException ie) {
InterruptedIOException iioe = new InterruptedIOException();
iioe.initCause(ie);
throw iioe;
}
}
}
Example 3: getFromStoreFile
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
/**
 * Do a small get/scan against one store. This is required because the store
 * has no query methods of its own and relies on StoreScanner.
 */
public static List<Cell> getFromStoreFile(HStore store,
Get get) throws IOException {
Scan scan = new Scan(get);
InternalScanner scanner = (InternalScanner) store.getScanner(scan,
scan.getFamilyMap().get(store.getFamily().getName()),
// originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
// the read point to 0.
0);
List<Cell> result = new ArrayList<Cell>();
scanner.next(result);
if (!result.isEmpty()) {
// verify that we are on the row we want:
Cell kv = result.get(0);
if (!CellUtil.matchingRow(kv, get.getRow())) {
result.clear();
}
}
scanner.close();
return result;
}
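A hedged usage sketch for the helper above, assuming a test HStore already seeded with a row key "row1" (the store and row key are hypothetical):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical usage: 'store' is an assumed HStore already populated with "row1".
static void assertRowPresent(HStore store) throws IOException {
  Get get = new Get(Bytes.toBytes("row1"));
  get.addFamily(store.getFamily().getName());
  List<Cell> cells = getFromStoreFile(store, get);
  if (cells.isEmpty()) {
    throw new AssertionError("expected at least one cell for row1");
  }
}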
Example 4: preFlushScannerOpen
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
@Override
public InternalScanner preFlushScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c,
Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
if (newTtl != null) {
System.out.println("PreFlush:" + newTtl);
}
Integer newVersions = versions.get(store.getTableName());
ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
HConstants.OLDEST_TIMESTAMP);
}
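The ttls and versions maps used in this and the next example are not part of the excerpt; a plausible declaration, purely an assumption about the surrounding test class, would be:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hbase.TableName;

// Assumed declarations (not shown in the excerpt): per-table overrides consulted
// when the coprocessor rebuilds ScanInfo.
private static final Map<TableName, Long> ttls = new ConcurrentHashMap<TableName, Long>();
private static final Map<TableName, Integer> versions = new ConcurrentHashMap<TableName, Integer>();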
Example 5: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
@Override
public InternalScanner preCompactScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c,
Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
long earliestPutTs, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
Integer newVersions = versions.get(store.getTableName());
ScanInfo oldSI = store.getScanInfo();
HColumnDescriptor family = store.getFamily();
ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
family.getName(), family.getMinVersions(),
newVersions == null ? family.getMaxVersions() : newVersions,
newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
return new StoreScanner(store, scanInfo, scan, scanners, scanType,
store.getSmallestReadPoint(), earliestPutTs);
}
Example 6: verifyScan
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
/**
 * Not to be confused with TestFilter#verifyScan: expectedCells here is the
 * total for the whole scan, not the per-row count.
 *
 * @param s the scan to run
 * @param expectedRows the expected number of rows
 * @param expectedCells the expected total number of cells across all rows
 * @throws IOException
 */
private void verifyScan(Scan s, long expectedRows, long expectedCells)
throws IOException {
InternalScanner scanner = this.region.getScanner(s);
List<Cell> results = new ArrayList<Cell>();
int i = 0;
int cells = 0;
for (boolean done = true; done; i++) {
done = scanner.next(results);
Arrays.sort(results.toArray(new KeyValue[results.size()]),
KeyValue.COMPARATOR);
LOG.info("counter=" + i + ", " + results);
if (results.isEmpty()) break;
cells += results.size();
assertTrue("Scanned too many rows! Only expected " + expectedRows +
" total but already scanned " + (i+1), expectedRows > i);
assertTrue("Expected " + expectedCells + " cells total but " +
"already scanned " + cells, expectedCells >= cells);
results.clear();
}
assertEquals("Expected " + expectedRows + " rows but scanned " + i +
" rows", expectedRows, i);
assertEquals("Expected " + expectedCells + " cells but scanned " + cells +
" cells", expectedCells, cells);
}
Example 7: testWhileMatchFilterWithFilterRowKeyWithReverseScan
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
public void testWhileMatchFilterWithFilterRowKeyWithReverseScan()
throws Exception {
Scan s = new Scan();
String prefix = "testRowOne";
WhileMatchFilter filter = new WhileMatchFilter(new PrefixFilter(
Bytes.toBytes(prefix)));
s.setFilter(filter);
s.setReversed(true);
InternalScanner scanner = this.region.getScanner(s);
while (true) {
ArrayList<Cell> values = new ArrayList<Cell>();
boolean isMoreResults = scanner.next(values);
if (!isMoreResults
|| !Bytes.toString(CellUtil.cloneRow(values.get(0))).startsWith(prefix)) {
Assert.assertTrue(
"The WhileMatchFilter should now filter all remaining",
filter.filterAllRemaining());
}
if (!isMoreResults) {
break;
}
}
scanner.close();
}
Example 8: testWhileMatchFilterWithFilterRow
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
/**
 * Tests that the {@link WhileMatchFilter} works in combination with a
 * {@link Filter} that uses the {@link Filter#filterRow()} method.
 *
 * See HBASE-2258.
 *
 * @throws Exception
 */
@Test
public void testWhileMatchFilterWithFilterRow() throws Exception {
final int pageSize = 4;
Scan s = new Scan();
WhileMatchFilter filter = new WhileMatchFilter(new PageFilter(pageSize));
s.setFilter(filter);
InternalScanner scanner = this.region.getScanner(s);
int scannerCounter = 0;
while (true) {
boolean isMoreResults = scanner.next(new ArrayList<Cell>());
scannerCounter++;
if (scannerCounter >= pageSize) {
assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining());
}
if (!isMoreResults) {
break;
}
}
assertEquals("The page filter returned more rows than expected", pageSize, scannerCounter);
}
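The same filter composition works on a client-side Scan as well; a minimal sketch, assuming an open Connection and an existing table (both hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;

// Hypothetical client-side counterpart of the test above.
static int countPagedRows(Connection connection, TableName tableName, int pageSize)
    throws IOException {
  Scan scan = new Scan();
  scan.setFilter(new WhileMatchFilter(new PageFilter(pageSize)));
  int rows = 0;
  try (Table table = connection.getTable(tableName);
       ResultScanner results = table.getScanner(scan)) {
    for (Result ignored : results) {
      rows++; // the scan stops once PageFilter has passed pageSize rows
    }
  }
  return rows;
}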
Example 9: testWhileMatchFilterWithFilterRowKey
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
/**
 * Tests that the {@link WhileMatchFilter} works in combination with a
 * {@link Filter} that uses the {@link Filter#filterRowKey(byte[], int, int)} method.
 *
 * See HBASE-2258.
 *
 * @throws Exception
 */
@Test
public void testWhileMatchFilterWithFilterRowKey() throws Exception {
Scan s = new Scan();
String prefix = "testRowOne";
WhileMatchFilter filter = new WhileMatchFilter(new PrefixFilter(Bytes.toBytes(prefix)));
s.setFilter(filter);
InternalScanner scanner = this.region.getScanner(s);
while (true) {
ArrayList<Cell> values = new ArrayList<Cell>();
boolean isMoreResults = scanner.next(values);
if (!isMoreResults || !Bytes.toString(CellUtil.cloneRow(values.get(0))).startsWith(prefix)) {
assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining());
}
if (!isMoreResults) {
break;
}
}
}
Example 10: testWhileMatchFilterWithFilterKeyValue
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
/**
 * Tests that the {@link WhileMatchFilter} works in combination with a
 * {@link Filter} that uses the
 * {@link Filter#filterKeyValue(org.apache.hadoop.hbase.KeyValue)} method.
 *
 * See HBASE-2258.
 *
 * @throws Exception
 */
@Test
public void testWhileMatchFilterWithFilterKeyValue() throws Exception {
Scan s = new Scan();
WhileMatchFilter filter = new WhileMatchFilter(
new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], CompareOp.EQUAL, Bytes.toBytes("foo"))
);
s.setFilter(filter);
InternalScanner scanner = this.region.getScanner(s);
while (true) {
ArrayList<Cell> values = new ArrayList<Cell>();
boolean isMoreResults = scanner.next(values);
assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining());
if (!isMoreResults) {
break;
}
}
}
Example 11: verifyScan
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
private void verifyScan(Scan s, long expectedRows, long expectedKeys)
throws IOException {
InternalScanner scanner = this.region.getScanner(s);
List<Cell> results = new ArrayList<Cell>();
int i = 0;
for (boolean done = true; done; i++) {
done = scanner.next(results);
Arrays.sort(results.toArray(new KeyValue[results.size()]),
KeyValue.COMPARATOR);
LOG.info("counter=" + i + ", " + results);
if (results.isEmpty()) break;
assertTrue("Scanned too many rows! Only expected " + expectedRows +
" total but already scanned " + (i+1), expectedRows > i);
assertEquals("Expected " + expectedKeys + " keys per row but " +
"returned " + results.size(), expectedKeys, results.size());
results.clear();
}
assertEquals("Expected " + expectedRows + " rows but scanned " + i +
" rows", expectedRows, i);
}
Example 12: verifyScanNoEarlyOut
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
private void verifyScanNoEarlyOut(Scan s, long expectedRows,
long expectedKeys)
throws IOException {
InternalScanner scanner = this.region.getScanner(s);
List<Cell> results = new ArrayList<Cell>();
int i = 0;
for (boolean done = true; done; i++) {
done = scanner.next(results);
Arrays.sort(results.toArray(new KeyValue[results.size()]),
KeyValue.COMPARATOR);
LOG.info("counter=" + i + ", " + results);
if (results.isEmpty()) break;
assertTrue("Scanned too many rows! Only expected " + expectedRows +
" total but already scanned " + (i+1), expectedRows > i);
assertEquals("Expected " + expectedKeys + " keys per row but " +
"returned " + results.size(), expectedKeys, results.size());
results.clear();
}
assertEquals("Expected " + expectedRows + " rows but scanned " + i +
" rows", expectedRows, i);
}
Example 13: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
@Override
public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s,
CompactionRequest request)
throws IOException {
// Get the latest tx snapshot state for the compaction
TransactionVisibilityState snapshot = cache.getLatestState();
// Record tx state before the compaction
if (compactionState != null) {
compactionState.record(request, snapshot);
}
// Also make sure to use the same snapshot for the compaction
return createStoreScanner(c.getEnvironment(), "compaction", snapshot, store, scanners, scanType, earliestPutTs);
}
Example 14: createStoreScanner
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
TransactionVisibilityState snapshot, Store store,
List<? extends KeyValueScanner> scanners, ScanType type,
long earliestPutTs) throws IOException {
if (snapshot == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + env.getRegion().getRegionInfo().getRegionNameAsString() +
", no current transaction state found, defaulting to normal " + action + " scanner");
}
return null;
}
// construct a dummy transaction from the latest snapshot
Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
Scan scan = new Scan();
// need to see all versions, since we filter out excludes and applications may rely on multiple versions
scan.setMaxVersions();
scan.setFilter(
new IncludeInProgressFilter(dummyTx.getVisibilityUpperBound(),
snapshot.getInvalid(),
getTransactionFilter(dummyTx, type, null)));
return new StoreScanner(store, store.getScanInfo(), scan, scanners,
type, store.getSmallestReadPoint(), earliestPutTs);
}
Example 15: createStoreScanner
import org.apache.hadoop.hbase.regionserver.InternalScanner; // import the required package/class
protected InternalScanner createStoreScanner(RegionCoprocessorEnvironment env, String action,
TransactionVisibilityState snapshot, Store store,
List<? extends KeyValueScanner> scanners, ScanType type,
long earliestPutTs) throws IOException {
if (snapshot == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + env.getRegion().getRegionNameAsString() +
", no current transaction state found, defaulting to normal " + action + " scanner");
}
return null;
}
// construct a dummy transaction from the latest snapshot
Transaction dummyTx = TxUtils.createDummyTransaction(snapshot);
Scan scan = new Scan();
// need to see all versions, since we filter out excludes and applications may rely on multiple versions
scan.setMaxVersions();
scan.setFilter(
new IncludeInProgressFilter(dummyTx.getVisibilityUpperBound(),
snapshot.getInvalid(),
getTransactionFilter(dummyTx, type, null)));
return new StoreScanner(store, store.getScanInfo(), scan, scanners,
type, store.getSmallestReadPoint(), earliestPutTs);
}