This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.RegionScanner. If you are wondering what the RegionScanner class does, how to use it, or what real code that uses it looks like, the curated examples here may help.
The RegionScanner class belongs to the org.apache.hadoop.hbase.regionserver package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
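All of the examples share one basic pattern: obtain a RegionScanner from a region with getScanner(Scan), drain it row by row with next(List<Cell>), and close it when done. The following is a minimal sketch of that pattern, assuming the HBase 1.x Region API (some examples below use HRegion directly); the method name countRows and the family parameter are placeholders, not code from any of the quoted projects.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

public class RegionScannerUsageSketch {
  // Counts the rows of one column family by scanning the region server-side.
  static int countRows(Region region, byte[] family) throws IOException {
    Scan scan = new Scan();
    scan.addFamily(family);
    RegionScanner scanner = region.getScanner(scan); // server-side scanner, not a client ResultScanner
    int rows = 0;
    try {
      List<Cell> cells = new ArrayList<Cell>();
      boolean hasMore;
      do {
        hasMore = scanner.next(cells); // fills "cells" with one row; true while more rows remain
        if (!cells.isEmpty()) {
          rows++;
        }
        cells.clear(); // reuse the buffer between rows
      } while (hasMore);
    } finally {
      scanner.close();
    }
    return rows;
  }
}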
Example 1: getExistingLabelsWithAuths
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
protected List<List<Cell>> getExistingLabelsWithAuths() throws IOException {
  Scan scan = new Scan();
  RegionScanner scanner = labelsRegion.getScanner(scan);
  List<List<Cell>> existingLabels = new ArrayList<List<Cell>>();
  try {
    while (true) {
      List<Cell> cells = new ArrayList<Cell>();
      scanner.next(cells);
      if (cells.isEmpty()) {
        break;
      }
      existingLabels.add(cells);
    }
  } finally {
    scanner.close();
  }
  return existingLabels;
}
Example 2: testHBASE12817
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
@Test
public void testHBASE12817() throws IOException {
  for (int i = 0; i < 100; i++) {
    region
        .put(new Put(Bytes.toBytes("obj" + (2900 + i))).addColumn(fam, qual1, Bytes.toBytes(i)));
  }
  region.put(new Put(Bytes.toBytes("obj299")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj29")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj2")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj3")).addColumn(fam, qual1, Bytes.toBytes("whatever")));
  region.flush(true);
  Scan scan = new Scan(Bytes.toBytes("obj29995"));
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> cells = new ArrayList<Cell>();
  assertFalse(scanner.next(cells));
  assertArrayEquals(Bytes.toBytes("obj3"), Result.create(cells).getRow());
}
Example 3: postScannerOpen
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
@Override
public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e,
                                     Scan scan,
                                     RegionScanner s) throws IOException {
  byte[] byteTransaction = scan.getAttribute(CellUtils.TRANSACTION_ATTRIBUTE);
  if (byteTransaction == null) {
    return s;
  }
  TSOProto.Transaction transaction = TSOProto.Transaction.parseFrom(byteTransaction);
  long id = transaction.getTimestamp();
  long readTs = transaction.getReadTimestamp();
  long epoch = transaction.getEpoch();
  VisibilityLevel visibilityLevel = VisibilityLevel.fromInteger(transaction.getVisibilityLevel());
  HBaseTransaction hbaseTransaction = new HBaseTransaction(id, readTs, visibilityLevel, epoch, new HashSet<HBaseCellId>(), null);
  RegionAccessWrapper regionAccessWrapper = new RegionAccessWrapper(HBaseShims.getRegionCoprocessorRegion(e.getEnvironment()));
  snapshotFilter.setTableAccessWrapper(regionAccessWrapper);
  return new OmidRegionScanner(snapshotFilter, s, hbaseTransaction, 1);
}
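Several coprocessor examples in this list (OmidRegionScanner above, AggregationScanner in Example 11, CountingRegionScanner in Example 14) return a scanner that wraps the RegionScanner HBase created and adds behaviour around next(). The class below is a minimal, hedged sketch of that delegation pattern, not code from any of those projects; it assumes the HBase 1.1/1.2 RegionScanner interface, and the exact set of methods to delegate differs between HBase versions.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;

// Hypothetical base class: forwards every call to the wrapped scanner.
// A real coprocessor would subclass it and override next()/nextRaw()
// to filter, transform or count cells before returning them.
public class DelegatingRegionScanner implements RegionScanner {
  protected final RegionScanner delegate;

  public DelegatingRegionScanner(RegionScanner delegate) {
    this.delegate = delegate;
  }

  @Override public boolean next(List<Cell> results) throws IOException {
    return delegate.next(results);
  }
  @Override public boolean next(List<Cell> results, ScannerContext ctx) throws IOException {
    return delegate.next(results, ctx);
  }
  @Override public boolean nextRaw(List<Cell> results) throws IOException {
    return delegate.nextRaw(results);
  }
  @Override public boolean nextRaw(List<Cell> results, ScannerContext ctx) throws IOException {
    return delegate.nextRaw(results, ctx);
  }
  @Override public void close() throws IOException { delegate.close(); }
  @Override public HRegionInfo getRegionInfo() { return delegate.getRegionInfo(); }
  @Override public boolean isFilterDone() throws IOException { return delegate.isFilterDone(); }
  @Override public boolean reseek(byte[] row) throws IOException { return delegate.reseek(row); }
  @Override public long getMaxResultSize() { return delegate.getMaxResultSize(); }
  @Override public long getMvccReadPoint() { return delegate.getMvccReadPoint(); }
  @Override public int getBatch() { return delegate.getBatch(); }
}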
Example 4: testHBASE12817
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
@Test
public void testHBASE12817() throws IOException {
  for (int i = 0; i < 100; i++) {
    region.put(new Put(Bytes.toBytes("obj" + (2900 + i))).add(fam, qual1, Bytes.toBytes(i)));
  }
  region.put(new Put(Bytes.toBytes("obj299")).add(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj29")).add(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj2")).add(fam, qual1, Bytes.toBytes("whatever")));
  region.put(new Put(Bytes.toBytes("obj3")).add(fam, qual1, Bytes.toBytes("whatever")));
  region.flushcache();
  Scan scan = new Scan(Bytes.toBytes("obj29995"));
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> cells = new ArrayList<Cell>();
  assertFalse(scanner.next(cells));
  assertArrayEquals(Bytes.toBytes("obj3"), Result.create(cells).getRow());
}
Example 5: preScannerOpen
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
    RegionScanner s) throws IOException {
  HRegion region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  // If a super user issues a scan, he should be able to scan the cells
  // irrespective of the Visibility labels
  if (checkIfScanOrGetFromSuperUser()) {
    return s;
  }
  try {
    authorizations = scan.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  Filter visibilityLabelFilter = createVisibilityLabelFilter(region, authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = scan.getFilter();
    if (filter != null) {
      scan.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      scan.setFilter(visibilityLabelFilter);
    }
  }
  return s;
}
Example 6: getExistingLabelsWithAuths
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
private List<List<Cell>> getExistingLabelsWithAuths() throws IOException {
  Scan scan = new Scan();
  RegionScanner scanner = this.regionEnv.getRegion().getScanner(scan);
  List<List<Cell>> existingLabels = new ArrayList<List<Cell>>();
  try {
    while (true) {
      List<Cell> cells = new ArrayList<Cell>();
      scanner.next(cells);
      if (cells.isEmpty()) {
        break;
      }
      existingLabels.add(cells);
    }
  } finally {
    scanner.close();
  }
  return existingLabels;
}
Example 7: getUserAuthsFromLabelsTable
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
private List<String> getUserAuthsFromLabelsTable(byte[] user) throws IOException {
  Scan s = new Scan();
  s.addColumn(LABELS_TABLE_FAMILY, user);
  Filter filter = createVisibilityLabelFilter(this.regionEnv.getRegion(), new Authorizations(
      SYSTEM_LABEL));
  s.setFilter(filter);
  List<String> auths = new ArrayList<String>();
  // We do ACL check here as we create scanner directly on region. It will not make calls to
  // AccessController CP methods.
  performACLCheck();
  RegionScanner scanner = this.regionEnv.getRegion().getScanner(s);
  List<Cell> results = new ArrayList<Cell>(1);
  while (true) {
    scanner.next(results);
    if (results.isEmpty()) break;
    Cell cell = results.get(0);
    int ordinal = Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
    String label = this.visibilityManager.getLabel(ordinal);
    if (label != null) {
      auths.add(label);
    }
    results.clear();
  }
  return auths;
}
Example 8: preScannerOpen
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
@Override
public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Scan scan, final RegionScanner s) throws IOException {
  try {
    Long themisStartTs = getStartTsFromAttribute(scan);
    if (themisStartTs != null) {
      ThemisCpUtil.prepareScan(scan, e.getEnvironment().getRegion().getTableDesc().getFamilies());
      checkFamily(e.getEnvironment().getRegion(), scan);
      ThemisProtocolImpl.checkReadTTL(System.currentTimeMillis(), themisStartTs,
          PRE_SCANNER_OPEN_FEEK_ROW);
      Scan internalScan = ThemisCpUtil.constructLockAndWriteScan(scan, themisStartTs);
      ThemisServerScanner pScanner = new ThemisServerScanner(e.getEnvironment().getRegion()
          .getScanner(internalScan), internalScan, themisStartTs, scan);
      e.bypass();
      return pScanner;
    }
    return s;
  } catch (Throwable ex) {
    throw new DoNotRetryIOException("themis exception in preScannerOpen", ex);
  }
}
Example 9: getExistingLabelsWithAuths
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
protected List<List<Cell>> getExistingLabelsWithAuths() throws IOException {
  Scan scan = new Scan();
  RegionScanner scanner = labelsRegion.getScanner(scan);
  List<List<Cell>> existingLabels = new ArrayList<>();
  try {
    while (true) {
      List<Cell> cells = new ArrayList<>();
      scanner.next(cells);
      if (cells.isEmpty()) {
        break;
      }
      existingLabels.add(cells);
    }
  } finally {
    scanner.close();
  }
  return existingLabels;
}
Example 10: initiateScan
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
private void initiateScan(HRegion region) throws IOException {
  Scan scan = new Scan();
  scan.setCaching(1);
  RegionScanner resScanner = null;
  try {
    resScanner = region.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    boolean next = resScanner.next(results);
    try {
      counter.incrementAndGet();
      latch.await();
    } catch (InterruptedException e) {
    }
    while (next) {
      next = resScanner.next(results);
    }
  } finally {
    scanCompletedCounter.incrementAndGet();
    resScanner.close();
  }
}
Example 11: scanWithCoprocessorIfBeneficial
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
public static ResultScanner scanWithCoprocessorIfBeneficial(CubeSegment segment, Cuboid cuboid, TupleFilter tupleFiler, //
    Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders, StorageContext context, HTableInterface table, Scan scan) throws IOException {
  if (context.isCoprocessorEnabled() == false) {
    return table.getScanner(scan);
  }
  CoprocessorRowType type = CoprocessorRowType.fromCuboid(segment, cuboid);
  CoprocessorFilter filter = CoprocessorFilter.fromFilter(segment, tupleFiler);
  CoprocessorProjector projector = CoprocessorProjector.makeForObserver(segment, cuboid, groupBy);
  ObserverAggregators aggrs = ObserverAggregators.fromValueDecoders(rowValueDecoders);
  if (DEBUG_LOCAL_COPROCESSOR) {
    RegionScanner innerScanner = new RegionScannerAdapter(table.getScanner(scan));
    AggregationScanner aggrScanner = new AggregationScanner(type, filter, projector, aggrs, innerScanner);
    return new ResultScannerAdapter(aggrScanner);
  } else {
    scan.setAttribute(AggregateRegionObserver.COPROCESSOR_ENABLE, new byte[] { 0x01 });
    scan.setAttribute(AggregateRegionObserver.TYPE, CoprocessorRowType.serialize(type));
    scan.setAttribute(AggregateRegionObserver.PROJECTOR, CoprocessorProjector.serialize(projector));
    scan.setAttribute(AggregateRegionObserver.AGGREGATORS, ObserverAggregators.serialize(aggrs));
    scan.setAttribute(AggregateRegionObserver.FILTER, CoprocessorFilter.serialize(filter));
    return table.getScanner(scan);
  }
}
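In the non-debug branch above, all coprocessor state travels to the region server purely through Scan attributes, which the observer later reads back with Scan.getAttribute() (Example 3 uses the same mechanism for its transaction payload). The snippet below is a small, self-contained sketch of that round trip; the key name my.coprocessor.enable is made up for illustration, whereas Kylin's real keys are the AggregateRegionObserver constants used above.

import org.apache.hadoop.hbase.client.Scan;

public final class ScanAttributeSketch {
  // Hypothetical attribute key used only in this sketch.
  static final String ENABLE_KEY = "my.coprocessor.enable";

  // Client side: mark the scan so the server-side observer knows to kick in.
  static void markEnabled(Scan scan) {
    scan.setAttribute(ENABLE_KEY, new byte[] { 0x01 });
  }

  // Server side (e.g. inside preScannerOpen/postScannerOpen): read the flag back.
  static boolean isEnabled(Scan scan) {
    byte[] flag = scan.getAttribute(ENABLE_KEY); // null if the client never set it
    return flag != null && flag.length == 1 && flag[0] == 0x01;
  }
}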
Example 12: postScannerOpen
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
@Override
public final RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> ctxt, final Scan scan, final RegionScanner innerScanner) throws IOException {
  boolean copAbortOnError = ctxt.getEnvironment().getConfiguration().getBoolean(RegionCoprocessorHost.ABORT_ON_ERROR_KEY, RegionCoprocessorHost.DEFAULT_ABORT_ON_ERROR);
  // never throw an exception that could abort the region server
  if (copAbortOnError) {
    try {
      return doPostScannerObserver(ctxt, scan, innerScanner);
    } catch (Throwable e) {
      LOG.error("Kylin Coprocessor Error", e);
      return innerScanner;
    }
  } else {
    return doPostScannerObserver(ctxt, scan, innerScanner);
  }
}
Example 13: testRegionScanner
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
/**
 * Test region scanner.
 *
 * @throws IOException Signals that an I/O exception has occurred.
 */
public void testRegionScanner() throws IOException
{
  LOG.info("Test Region scanner");
  Scan scan = new Scan();
  scan.setStartRow(region.getStartKey());
  scan.setStopRow(region.getEndKey());
  RegionScanner scanner = region.getScanner(scan);
  //Store store = region.getStore(CF);
  //StoreScanner scanner = new StoreScanner(store, store.getScanInfo(), scan, null);
  long start = System.currentTimeMillis();
  int total = 0;
  List<Cell> result = new ArrayList<Cell>();
  while (scanner.next(result)) {
    total++; result.clear();
  }
  LOG.info("Test Region scanner finished. Found " + total + " in " + (System.currentTimeMillis() - start) + "ms");
  LOG.info("cache hits =" + cache.getStats().getHitCount() + " miss=" + cache.getStats().getMissCount());
}
Example 14: updateScanner
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
/**
 * Refresh the underlying RegionScanner. We call this when a new store file gets
 * created by a MemStore flush, or when the current scanner fails due to a compaction.
 */
public void updateScanner() throws IOException {
  if (LOG.isDebugEnabled()) {
    SpliceLogUtils.debug(LOG,
        "updateScanner with hregionInfo=%s, tableName=%s, rootDir=%s, scan=%s",
        hri, htd.getNameAsString(), rootDir, scan);
  }
  if (flushed) {
    if (LOG.isDebugEnabled())
      SpliceLogUtils.debug(LOG, "Flush occurred");
    if (this.topCell != null) {
      if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "setting start row to %s", topCell);
      //noinspection deprecation
      scan.setStartRow(Bytes.add(topCell.getRow(), new byte[]{0}));
    }
  }
  memScannerList.add(getMemStoreScanner());
  this.region = openHRegion();
  RegionScanner regionScanner = new CountingRegionScanner(BaseHRegionUtil.getScanner(region, scan, memScannerList), region, scan);
  if (flushed) {
    if (scanner != null)
      scanner.close();
  }
  scanner = regionScanner;
}
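The setStartRow call above relies on a common resume idiom: appending a single zero byte to the last row key seen produces the smallest row key that sorts strictly after it, so reopening the scan from that key skips the rows already returned without skipping any real successor row. A minimal standalone sketch of the idiom (resumeAfter is a hypothetical helper, not Splice Machine code):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public final class ResumeScanSketch {
  // Builds a scan that starts immediately after lastRowSeen.
  static Scan resumeAfter(byte[] lastRowSeen) {
    byte[] nextRow = Bytes.add(lastRowSeen, new byte[] { 0 }); // lastRowSeen + 0x00
    Scan scan = new Scan();
    scan.setStartRow(nextRow); // newer HBase versions prefer withStartRow(nextRow)
    return scan;
  }
}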
Example 15: preScannerOpen
import org.apache.hadoop.hbase.regionserver.RegionScanner; // import the required package/class
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
    RegionScanner s) throws IOException {
  if (!initialized) {
    throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!");
  }
  // Nothing to do if authorization is not enabled
  if (!authorizationEnabled) {
    return s;
  }
  Region region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  try {
    authorizations = scan.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  if (authorizations == null) {
    // No Authorizations present for this scan/Get!
    // In case of system tables other than "labels" just scan without visibility check and
    // filtering. Checking visibility labels for META and NAMESPACE table is not needed.
    TableName table = region.getRegionInfo().getTable();
    if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
      return s;
    }
  }
  Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(region,
      authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = scan.getFilter();
    if (filter != null) {
      scan.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      scan.setFilter(visibilityLabelFilter);
    }
  }
  return s;
}