This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.setId. If you are unsure what Scan.setId does, how to call it, or what real-world uses of it look like, the curated code samples below should help. You can also look further into other usage examples of the containing class, org.apache.hadoop.hbase.client.Scan.
The following shows 9 code examples of the Scan.setId method, sorted by popularity by default.
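Before diving into the excerpts, here is a minimal sketch of Scan.setId in isolation: it attaches a string identifier to a scan, which the client keeps available via Scan.getId() for logging and for deriving labelled sub-scans, as most of the examples below do. The table name and column family are placeholder assumptions, not taken from any of the examples.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSetIdSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder table and family names; any existing table works the same way.
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo_table"))) {
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("cf"));
      scan.setId("demo-scan"); // label the scan; Scan.getId() returns it later
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toStringBinary(result.getRow()) + " from scan " + scan.getId());
        }
      }
    }
  }
}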
Example 1: splitScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private ResultScanner[] splitScan(Table table, Scan originalScan,
    AbstractRowKeyDistributor rowKeyDistributor) throws IOException {
  Scan[] scans = rowKeyDistributor.getDistributedScans(originalScan);
  final int length = scans.length;
  for (int i = 0; i < length; i++) {
    Scan scan = scans[i];
    // other properties are already set upon construction
    scan.setId(scan.getId() + "-" + i);
  }
  ResultScanner[] scanners = new ResultScanner[length];
  boolean success = false;
  try {
    for (int i = 0; i < length; i++) {
      scanners[i] = table.getScanner(scans[i]);
    }
    success = true;
  } finally {
    if (!success) {
      closeScanner(scanners);
    }
  }
  return scanners;
}
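The closeScanner helper used in the finally block is not part of the excerpt; a plausible sketch (an assumption, not the original implementation) simply closes whatever scanners were opened before the failure:

private void closeScanner(ResultScanner[] scanners) {
  // Close only the scanners that were actually opened; later slots are still null.
  for (ResultScanner scanner : scanners) {
    if (scanner != null) {
      scanner.close();
    }
  }
}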
Example 2: doRawScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private void doRawScan() throws IOException {
  FilterList filterList = new FilterList();
  CompareFilter.CompareOp startOp = CompareFilter.CompareOp.GREATER_OR_EQUAL;
  CompareFilter.CompareOp stopOp = CompareFilter.CompareOp.LESS_OR_EQUAL;
  for (int i = 0; i < indexColumnNames.length && i < scanValues.length; i++) {
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), startOp,
            Bytes.toBytes(scanValues[i][0])));
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), stopOp,
            Bytes.toBytes(scanValues[i][1])));
  }
  Scan scan = new Scan();
  scan.setFilter(filterList);
  scan.setId("raw-scan");
  Table table = conn.getTable(tableName);
  ResultScanner scanner = table.getScanner(scan);
  Result result;
  int count = 0;
  while ((result = scanner.next()) != null) {
    ++count;
    if (PRINT_RESULT) printResult(result);
  }
  scanner.close();
  System.out.println("raw scan has " + count + " records");
}
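One detail worth noting when reading this example: a FilterList built with the no-argument constructor defaults to Operator.MUST_PASS_ALL, so the paired GREATER_OR_EQUAL and LESS_OR_EQUAL filters act as a conjunction, i.e. a closed range per indexed column. Spelling the operator out makes the intent explicit and is equivalent:

// Equivalent to `new FilterList()`: a row must satisfy every filter in the list.
FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);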
Example 3: getNextScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private ResultScanner getNextScanner() throws IOException {
  if (INIT_REGION_SIZE != getRegionNumber()) {
    throw new IOException(
        "region number changed from " + INIT_REGION_SIZE + " to " + getRegionNumber());
  }
  if (regionLocationQueue.isEmpty()) return null;
  HRegionLocation regionLocation = regionLocationQueue.poll();
  Scan newScan = new Scan(rawScan);
  byte[] key = regionLocation.getRegionInfo().getStartKey();
  if (key != null && key.length > 0) newScan.setStartRow(key);
  key = regionLocation.getRegionInfo().getEndKey();
  if (key != null && key.length > 0) newScan.setStopRow(key);
  newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
  newScan.setId(rawScan.getId());
  newScan.setCacheBlocks(rawScan.getCacheBlocks());
  newScan.setCaching(rawScan.getCaching());
  return table.getScanner(newScan);
}
Example 4: getNextScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private ResultScanner getNextScanner() throws IOException {
  if (INIT_REGION_SIZE != getRegionNumber()) {
    throw new IOException(
        "region number changed from " + INIT_REGION_SIZE + " to " + getRegionNumber());
  }
  if (regionLocationQueue.isEmpty()) return null;
  HRegionLocation regionLocation = regionLocationQueue.poll();
  Scan newScan = new Scan(rawScan);
  if (regionLocation.getRegionInfo().getStartKey() != null)
    newScan.setStartRow(regionLocation.getRegionInfo().getStartKey());
  if (regionLocation.getRegionInfo().getEndKey() != null)
    newScan.setStopRow(regionLocation.getRegionInfo().getEndKey());
  newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
  newScan.setFilter(rangeList.toFilterList());
  newScan.setAttribute(IndexConstants.MAX_SCAN_SCALE, Bytes.toBytes(1.0f));
  newScan.setId(rawScan.getId());
  newScan.setCacheBlocks(rawScan.getCacheBlocks());
  newScan.setCaching(rawScan.getCaching());
  return table.getScanner(newScan);
}
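Examples 3 and 4 copy the per-region boundaries with setStartRow/setStopRow. In newer HBase client versions those setters are deprecated in favor of withStartRow/withStopRow; if you port this code, the boundary-copy section would look roughly like this (same logic, assuming a 2.x-era client):

Scan newScan = new Scan(rawScan);
byte[] startKey = regionLocation.getRegionInfo().getStartKey();
if (startKey != null && startKey.length > 0) {
  newScan.withStartRow(startKey); // inclusive start, replaces setStartRow
}
byte[] endKey = regionLocation.getRegionInfo().getEndKey();
if (endKey != null && endKey.length > 0) {
  newScan.withStopRow(endKey);    // exclusive stop, replaces setStopRow
}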
Example 5: splitScans
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private Scan[] splitScans(Scan originalScan) throws IOException {
  Scan[] scans = this.keyDistributor.getDistributedScans(originalScan);
  for (int i = 0; i < scans.length; ++i) {
    Scan scan = scans[i];
    scan.setId(originalScan.getId() + "-" + i);
  }
  return scans;
}
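Note that originalScan.getId() returns null when the caller never set an id, which would label the derived scans "null-0", "null-1", and so on. A defensive variant (an assumption, not in the original) falls back to a default label:

String baseId = originalScan.getId() != null ? originalScan.getId() : "scan";
for (int i = 0; i < scans.length; ++i) {
  scans[i].setId(baseId + "-" + i);
}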
Example 6: doIndexScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private void doIndexScan() throws IOException {
  ScanRange.ScanRangeList rangeList = new ScanRange.ScanRangeList();
  FilterList filterList = new FilterList();
  CompareFilter.CompareOp startOp = CompareFilter.CompareOp.GREATER_OR_EQUAL;
  CompareFilter.CompareOp stopOp = CompareFilter.CompareOp.LESS_OR_EQUAL;
  for (int i = 0; i < indexColumnNames.length && i < scanValues.length; i++) {
    rangeList.addScanRange(new ScanRange(familyName, Bytes.toBytes(indexColumnNames[i]),
        Bytes.toBytes(scanValues[i][0]), Bytes.toBytes(scanValues[i][1]), startOp, stopOp,
        DataType.INT));
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), startOp,
            Bytes.toBytes(scanValues[i][0])));
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), stopOp,
            Bytes.toBytes(scanValues[i][1])));
  }
  Scan scan = new Scan();
  scan.setFilter(filterList);
  if (rangeList.getRanges().size() > 0) {
    scan.setAttribute(ScanRange.SCAN_RANGE_ATTRIBUTE_STR, rangeList.toBytesAttribute());
  }
  scan.setId("LMD-scan");
  scan.setCaching(1);
  ResultScanner scanner = BaseIndexScanner.getIndexScanner(conn, relation, scan);
  Result result;
  int count = 0;
  while ((result = scanner.next()) != null) {
    count++;
    if (PRINT_RESULT) printResult(result);
  }
  scanner.close();
  System.out.println("LMDIndex scan has " + count + " records");
}
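scan.setCaching(1) asks the client to fetch a single row per RPC round trip, which keeps per-row memory low but makes bulk index scans slow. For a throughput-oriented scan you would typically raise the value, for example:

scan.setCaching(500);        // rows fetched per RPC round trip
scan.setCacheBlocks(false);  // avoid polluting the block cache with a one-off scan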
Example 7: initScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * Initialize the selected range and scanner.
 *
 * @throws IOException
 */
private void initScanner() throws IOException {
  ScanRange selectedRange = null;
  int selectedRegionNumber = Integer.MAX_VALUE;
  for (ScanRange range : rangeList.getRanges()) {
    int cover = countCoveringRegions(conn,
        relation.getIndexTableName(range.getFamily(), range.getQualifier()), range.getStart(),
        range.getStop());
    LOG.info("LCDBG, " + cover + " regions are covered by range " + range);
    if (selectedRegionNumber > cover) {
      selectedRegionNumber = cover;
      selectedRange = range;
    }
  }
  LOG.info("LCDBG, GC Scanner using range " + selectedRange + " with " + selectedRegionNumber
      + " regions for scan id= " + rawScan.getId());
  indexFamily = selectedRange.getFamily();
  indexQualifier = selectedRange.getQualifier();
  List<ScanRange> list = new ArrayList<>(rangeList.getRanges());
  list.remove(selectedRange);
  Scan scan = new Scan();
  scan.setStartRow(selectedRange.getStart());
  scan.setStopRow(selectedRange.getStop());
  scan.setFamilyMap(rawScan.getFamilyMap());
  scan.setCaching(rawScan.getCaching());
  scan.setCacheBlocks(rawScan.getCacheBlocks());
  scan.setId(rawScan.getId());
  scan.setFilter(new ScanRange.ScanRangeList(list).toFilterList());
  Table table = conn.getTable(
      relation.getIndexTableName(selectedRange.getFamily(), selectedRange.getQualifier()));
  scanner = table.getScanner(scan);
}
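countCoveringRegions is not shown in the excerpt. Assuming it counts how many regions of the given index table intersect the [start, stop) range, a sketch built on the public RegionLocator API might look like this (an illustration, not the original helper):

private static int countCoveringRegions(Connection conn, TableName tableName, byte[] start,
    byte[] stop) throws IOException {
  int count = 0;
  try (RegionLocator locator = conn.getRegionLocator(tableName)) {
    for (HRegionLocation location : locator.getAllRegionLocations()) {
      byte[] regionStart = location.getRegionInfo().getStartKey();
      byte[] regionEnd = location.getRegionInfo().getEndKey();
      // Region lies entirely before the range: its end key is <= start.
      boolean before = start != null && start.length > 0 && regionEnd.length > 0
          && Bytes.compareTo(regionEnd, start) <= 0;
      // Region lies entirely after the range: its start key is >= stop.
      boolean after = stop != null && stop.length > 0
          && Bytes.compareTo(regionStart, stop) >= 0;
      if (!before && !after) count++;
    }
  }
  return count;
}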
Example 8: runOneTime
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
private ScanResult runOneTime(IndexType indexType, String fileName, Configuration conf,
    int cacheSize, AbstractWorkload workload) throws IOException {
  ScanRange.ScanRangeList rangeList = ScanRange.ScanRangeList.getScanRangeList(fileName);
  System.out.println("scan range in file " + fileName + " is: " + rangeList);
  DITBScanBase scanner;
  if (indexType == IndexType.MDIndex) {
    scanner = new DITBMDIndexScanner(workload.getTableName(), indexType, workload);
  } else {
    scanner = new DITBNormalScanner(workload.getTableName(), indexType, workload);
  }
  Scan scan = ScanRange.ScanRangeList.getScan(fileName);
  scan.setCacheBlocks(false);
  scan.setCaching(cacheSize);
  scan.setId(fileName);
  System.out.println("scan filter " + scan.getFilter());
  // ScanResult scanResult = new ScanResult(fileName + ": " + rangeList);
  ScanResult scanResult = new ScanResult();
  int reportCount = 0;
  Result[] results;
  long timeStart = System.currentTimeMillis();
  ResultScanner resultScanner = scanner.getScanner(scan);
  try {
    while (true) {
      results = resultScanner.next(cacheSize);
      if (scanResult.firstLatency == -1)
        scanResult.firstLatency = System.currentTimeMillis() - timeStart;
      if (results == null || results.length == 0) break;
      if (PRINT_RESULT) {
        for (Result result : results)
          System.out.println(workload.parseResult(result));
      }
      scanResult.nbResults += results.length;
      reportCount += results.length;
      if (reportCount >= reportInterval) {
        System.out.println(
            "finish scan for " + results.length + " records, " + scanResult.nbResults
                + " in total");
        reportCount = 0;
      }
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
  scanResult.totalTime = System.currentTimeMillis() - timeStart;
  if (scanResult.nbResults == 0) scanResult.nbResults = 1;
  return scanResult;
}
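One thing to watch in this example: resultScanner is never closed, so the open scanner holds its server-side lease until it times out if the method returns or throws mid-loop. ResultScanner implements Closeable, so wrapping it in try-with-resources makes the cleanup automatic:

try (ResultScanner resultScanner = scanner.getScanner(scan)) {
  Result[] results;
  while ((results = resultScanner.next(cacheSize)) != null && results.length > 0) {
    // ... process the batch exactly as in the loop above ...
  }
}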
Example 9: createRowkeyQueueBySecondaryIndex
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on
/**
 * Scan all index tables; the common rowkeys are collected into rowkeySet.
 * This can be optimized in two ways:
 * 1. scan the index tables in ascending order of candidate rowkey count, similar to CCIndex
 * 2. scan the index tables in parallel
 *
 * @throws IOException
 */
public static Queue<byte[]> createRowkeyQueueBySecondaryIndex(Connection conn,
    IndexTableRelation relation, Map<byte[], NavigableSet<byte[]>> familyMap,
    ScanRange.ScanRangeList rangeList, Scan rawScan) throws IOException {
  TreeSet<byte[]> rowkeySet = null;
  long timeToMerge = 0;
  for (ScanRange range : rangeList.getRanges()) {
    Scan scan = new Scan();
    scan.setStartRow(range.getStart());
    scan.setStopRow(range.getStop());
    scan.setFamilyMap(familyMap);
    scan.setCaching(rawScan.getCaching());
    scan.setCacheBlocks(rawScan.getCacheBlocks());
    scan.setId(rawScan.getId());
    if (range.getStartTs() != -1 && range.getStopTs() != -1) {
      scan.setTimeRange(range.getStartTs(), range.getStopTs());
    }
    TableName tableName = relation.getIndexTableName(range.getFamily(), range.getQualifier());
    Table table = conn.getTable(tableName);
    ResultScanner scanner = table.getScanner(scan);
    Result res;
    long timeStart = System.currentTimeMillis();
    TreeSet<byte[]> candidateSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    while ((res = scanner.next()) != null) {
      candidateSet.add(IndexPutParser.parseIndexRowKey(res.getRow())[0]);
    }
    System.out.println(String
        .format("get %d candidate rowkeys from %s in scan %s, cost %.2f seconds",
            candidateSet.size(), range.toString(), scan.getId(),
            (System.currentTimeMillis() - timeStart) / 1000.0));
    if (rowkeySet == null) {
      rowkeySet = candidateSet;
    } else {
      timeStart = System.currentTimeMillis();
      rowkeySet = getCommonSet(rowkeySet, candidateSet);
      timeToMerge += (System.currentTimeMillis() - timeStart);
    }
    System.out.println(
        "common key set size " + rowkeySet.size() + " after " + range + " in scan " + scan
            .getId());
    if (rowkeySet.isEmpty()) { // no common keys at all, can ignore the rest of the index tables
      break;
    }
  }
  System.out.println(String
      .format("get %d result rowkeys in scan %s, cost %.2f seconds", rowkeySet.size(),
          rawScan.getId(), timeToMerge / 1000.0));
  if (rowkeySet != null && !rowkeySet.isEmpty()) {
    Queue<byte[]> rowkeyQueue = new LinkedList<>();
    for (byte[] rowkey : rowkeySet)
      rowkeyQueue.add(rowkey);
    return rowkeyQueue;
  }
  return null;
}
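getCommonSet is not part of the excerpt. Given how it is used, i.e. intersecting the accumulated rowkey set with each new candidate set, a minimal sketch (an assumption about its behavior) could rely on TreeSet.retainAll with the byte-array comparator:

private static TreeSet<byte[]> getCommonSet(TreeSet<byte[]> rowkeySet,
    TreeSet<byte[]> candidateSet) {
  // Both sets use Bytes.BYTES_COMPARATOR, so contains()/retainAll() compare
  // byte arrays by content rather than by reference.
  TreeSet<byte[]> common = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  common.addAll(rowkeySet);
  common.retainAll(candidateSet);
  return common;
}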