This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.isGetScan. If you are wondering what Scan.isGetScan does or how to use it, the curated code examples below may help. You can also read further about the enclosing class, org.apache.hadoop.hbase.client.Scan.
Below are 4 code examples of Scan.isGetScan, sorted by popularity by default.
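Before the examples, it may help to see what makes a Scan a "get scan" in the first place. The following is a minimal standalone sketch, assuming the HBase 1.x client API these examples are written against (in 1.x, isGetScan() is true when the scan was built from a Get, or when its start row equals its non-empty stop row):

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class IsGetScanDemo {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row-42");

    // Start row equals stop row: treated as a single-row "get" scan.
    Scan singleRow = new Scan(row, row);
    System.out.println(singleRow.isGetScan()); // true

    // A Scan constructed from a Get is also a get scan.
    Scan fromGet = new Scan(new Get(row));
    System.out.println(fromGet.isGetScan()); // true

    // An ordinary range scan is not.
    Scan range = new Scan(Bytes.toBytes("a"), Bytes.toBytes("z"));
    System.out.println(range.isGetScan()); // false
  }
}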
Example 1: passesBloomFilter
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
* Checks whether the given scan passes the Bloom filter (if present). Only checks Bloom filters
* for single-row or single-row-column scans. Bloom filter checking for multi-gets is
* implemented as part of the store scanner system (see {@link StoreFileScanner#seekExactly})
* and uses the lower-level API
* {@link #passesGeneralBloomFilter(byte[], int, int, byte[], int, int)}.
*
* @param scan the scan specification. Used to determine the row, and to check whether this is a
* single-row ("get") scan.
* @param columns the set of columns. Only used for row-column Bloom filters.
* @return true if the scan with the given column set passes the Bloom filter, or if the Bloom
* filter is not applicable for the scan. False if the Bloom filter is applicable and
* the scan fails it.
*/
boolean passesBloomFilter(Scan scan, final SortedSet<byte[]> columns) {
// Multi-column non-get scans will use Bloom filters through the
// lower-level API function that this function calls.
if (!scan.isGetScan()) {
return true;
}
byte[] row = scan.getStartRow();
switch (this.bloomFilterType) {
case ROW:
return passesGeneralBloomFilter(row, 0, row.length, null, 0, 0);
case ROWCOL:
if (columns != null && columns.size() == 1) {
byte[] column = columns.first();
return passesGeneralBloomFilter(row, 0, row.length, column, 0, column.length);
}
// For multi-column queries the Bloom filter is checked from the
// seekExact operation.
return true;
default:
return true;
}
}
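A caller-side sketch of the inputs this method acts on. The names here are hypothetical, and passesBloomFilter itself is package-private inside the HBase server module, so this only illustrates how the single-row scan and one-column set required by the ROWCOL branch would be built:

import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class BloomFilterScanSetup {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("user#1001");

    // start == stop, so scan.isGetScan() is true and the method does not
    // bail out early with "true".
    Scan scan = new Scan(row, row);

    // Exactly one column: the ROWCOL branch can consult the Bloom filter.
    // With zero or several columns it returns true and defers to seekExactly.
    SortedSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    columns.add(Bytes.toBytes("q1"));

    // A StoreFile reader would now evaluate:
    //   reader.passesBloomFilter(scan, columns)
    System.out.println(scan.isGetScan() + ", columns=" + columns.size());
  }
}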
Example 2: StoreScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
* An internal constructor.
*/
protected StoreScanner(Store store, Scan scan, final ScanInfo scanInfo,
final NavigableSet<byte[]> columns, long readPt, boolean cacheBlocks) {
this.readPt = readPt;
this.store = store;
this.cacheBlocks = cacheBlocks;
get = scan.isGetScan();
int numCol = columns == null ? 0 : columns.size();
explicitColumnQuery = numCol > 0;
this.scan = scan;
this.columns = columns;
this.now = EnvironmentEdgeManager.currentTime();
this.oldestUnexpiredTS = now - scanInfo.getTtl();
this.minVersions = scanInfo.getMinVersions();
// We look up row-column Bloom filters for multi-column queries as part of
// the seek operation. However, we also look up the row-column Bloom filter
// for multi-row (non-"get") scans because this is not done in
// StoreFile.passesBloomFilter(Scan, SortedSet<byte[]>).
this.useRowColBloom = numCol > 1 || (!get && numCol == 1);
this.maxRowSize = scanInfo.getTableMaxRowSize();
this.scanUsePread = scan.isSmall() || scanInfo.isUsePread();
this.cellsPerHeartbeatCheck = scanInfo.getCellsPerTimeoutCheck();
// Parallel seeking is on if the config allows it and there is more than one store file.
if (this.store != null && this.store.getStorefilesCount() > 1) {
RegionServerServices rsService = ((HStore) store).getHRegion().getRegionServerServices();
if (rsService != null && scanInfo.isParallelSeekEnabled()) {
this.parallelSeekEnabled = true;
this.executor = rsService.getExecutorService();
}
}
}
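The useRowColBloom flag above packs three cases into one expression. A minimal standalone restatement (the method name and wrapper class are mine, added for illustration) makes the truth table explicit:

public class RowColBloomFlag {
  /** Restates the useRowColBloom computation from the constructor above. */
  static boolean useRowColBloom(boolean isGetScan, int numColumns) {
    // numColumns > 1: multi-column queries always consult the row-col Bloom
    // filter during seeks (the StoreFileScanner.seekExactly path).
    // !isGetScan && numColumns == 1: a single-column, multi-row scan also
    // needs the lookup here, because StoreFile.passesBloomFilter only
    // covers single-row ("get") scans.
    return numColumns > 1 || (!isGetScan && numColumns == 1);
  }

  public static void main(String[] args) {
    System.out.println(useRowColBloom(true, 1));  // false: handled in Example 1
    System.out.println(useRowColBloom(false, 1)); // true
    System.out.println(useRowColBloom(false, 3)); // true
    System.out.println(useRowColBloom(true, 0));  // false: no explicit columns
  }
}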
Example 3: StoreIndexScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
public StoreIndexScanner(Store store, List<KeyValueScanner> scanners, KVComparator comparator,
IndexKVComparator indexComparator, Range range, Scan scan, Set<ByteArray> joinSet,
boolean isAND) throws IOException {
// winter: is the scanner count always 1? In my tests it is indeed 1.
this.store = store;
this.joinSet = joinSet;
this.isAND = isAND;
this.memstoreScanner = scanners;
this.comparator = comparator;
this.indexComparator = indexComparator;
this.range = range;
this.isGet = scan.isGetScan();
this.cacheBlocks = scan.getCacheBlocks();
if (isAND) {
this.isEmptySet = this.joinSet.isEmpty();
this.indexSet = new HashSet<ByteArray>(10000);
}
this.startRow = scan.getStartRow();
this.startKV = KeyValue.createFirstOnRow(startRow);
this.stopRow = Bytes.compareTo(scan.getStopRow(), HConstants.EMPTY_BYTE_ARRAY) == 0
    ? null : scan.getStopRow();
this.stopKV = Bytes.compareTo(scan.getStopRow(), HConstants.EMPTY_BYTE_ARRAY) == 0
    ? null : KeyValue.createLastOnRow(scan.getStopRow());
this.stopRowCmpValue = scan.isGetScan() ? -1 : 0;
if (range.getStartValue() != null) {
switch (range.getStartType()) {
case EQUAL:
startIKV =
IndexKeyValue.createFirstOnQualifier(range.getQualifier(), range.getStartValue());
stopIKV = startIKV;
stopIKVCmpValue = -1;
break;
case GREATER_OR_EQUAL:
startIKV =
IndexKeyValue.createFirstOnQualifier(range.getQualifier(), range.getStartValue());
stopIKV = null;
stopIKVCmpValue = 0;
break;
case GREATER:
startIKV = IndexKeyValue.createLastOnQualifier(range.getQualifier(), range.getStartValue());
stopIKV = null;
stopIKVCmpValue = 0;
break;
default:
throw new IOException("Invalid Range:" + range);
}
} else {
startIKV = IndexKeyValue.createFirstOnQualifier(range.getQualifier());
stopIKV = null;
}
if (range.getStopValue() != null) {
switch (range.getStopType()) {
case LESS:
stopIKV = IndexKeyValue.createFirstOnQualifier(range.getQualifier(), range.getStopValue());
stopIKVCmpValue = 0;
break;
case LESS_OR_EQUAL:
stopIKV = IndexKeyValue.createFirstOnQualifier(range.getQualifier(), range.getStopValue());
stopIKVCmpValue = -1;
break;
default:
throw new IOException("Invalid Range:" + range);
}
}
this.needToRefresh = false;
getScanners();
}
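The boundary handling at the top of this constructor follows a common HBase 1.x pattern: seek to the smallest possible KeyValue on the start row, and treat an empty stop row as "unbounded on the right". A self-contained sketch of just that pattern (the row values are made up):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanBoundaryDemo {
  public static void main(String[] args) {
    byte[] startRow = Bytes.toBytes("k100");
    byte[] stopRow = Bytes.toBytes("k200");

    // Smallest KeyValue that sorts on the start row: the seek target.
    KeyValue startKV = KeyValue.createFirstOnRow(startRow);

    // Largest KeyValue on the stop row; an empty stop row means the scan
    // is unbounded, which the constructor encodes as null.
    KeyValue stopKV = Bytes.compareTo(stopRow, HConstants.EMPTY_BYTE_ARRAY) == 0
        ? null : KeyValue.createLastOnRow(stopRow);

    System.out.println(startKV + " .. " + stopKV);
  }
}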
Example 4: testScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private void testScan(final int[] columnArr, final boolean lazySeekEnabled,
final int startRow, final int endRow, int maxVersions)
throws IOException {
StoreScanner.enableLazySeekGlobally(lazySeekEnabled);
final Scan scan = new Scan();
final Set<String> qualSet = new HashSet<String>();
for (int iColumn : columnArr) {
String qualStr = getQualStr(iColumn);
scan.addColumn(FAMILY_BYTES, Bytes.toBytes(qualStr));
qualSet.add(qualStr);
}
scan.setMaxVersions(maxVersions);
scan.setStartRow(rowBytes(startRow));
// Adjust for the fact that for multi-row queries the end row is exclusive.
{
final byte[] scannerStopRow =
rowBytes(endRow + (startRow != endRow ? 1 : 0));
scan.setStopRow(scannerStopRow);
}
final long initialSeekCount = StoreFileScanner.getSeekCount();
final InternalScanner scanner = region.getScanner(scan);
final List<Cell> results = new ArrayList<Cell>();
final List<Cell> actualKVs = new ArrayList<Cell>();
// Such a clumsy do-while loop appears to be the official way to use an
// InternalScanner: the return value of scanner.next() refers to the _next_
// result, not to the one already added to results.
boolean hasNext;
do {
hasNext = scanner.next(results);
actualKVs.addAll(results);
results.clear();
} while (hasNext);
List<Cell> filteredKVs = filterExpectedResults(qualSet,
rowBytes(startRow), rowBytes(endRow), maxVersions);
final String rowRestrictionStr =
(startRow == -1 && endRow == -1) ? "all rows" : (
startRow == endRow ? ("row=" + startRow) : ("startRow="
+ startRow + ", " + "endRow=" + endRow));
final String columnRestrictionStr =
columnArr.length == 0 ? "all columns"
: ("columns=" + Arrays.toString(columnArr));
final String testDesc =
"Bloom=" + bloomType + ", compr=" + comprAlgo + ", "
+ (scan.isGetScan() ? "Get" : "Scan") + ": "
+ columnRestrictionStr + ", " + rowRestrictionStr
+ ", maxVersions=" + maxVersions + ", lazySeek=" + lazySeekEnabled;
long seekCount = StoreFileScanner.getSeekCount() - initialSeekCount;
if (VERBOSE) {
System.err.println("Seek count: " + seekCount + ", KVs returned: "
+ actualKVs.size() + ". " + testDesc +
(lazySeekEnabled ? "\n" : ""));
}
if (lazySeekEnabled) {
totalSeekLazy += seekCount;
} else {
totalSeekDiligent += seekCount;
}
assertKVListsEqual(testDesc, filteredKVs, actualKVs);
}
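The do-while loop in this test is specific to the server-side InternalScanner. Ordinary client code reads the same rows through Table.getScanner, whose ResultScanner is a plain Iterable; a minimal sketch (the table and scan are assumed to exist):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ClientScanDemo {
  static void printRows(Table table, Scan scan) throws IOException {
    // try-with-resources closes the scanner and releases server-side resources.
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        System.out.println(result);
      }
    }
  }
}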