本文整理汇总了Java中org.apache.hadoop.hbase.client.Scan类的典型用法代码示例。如果您正苦于以下问题:Java Scan类的具体用法?Java Scan怎么用?Java Scan使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Scan类属于org.apache.hadoop.hbase.client包,在下文中一共展示了Scan类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: wipeOutMeta
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Corrupts hbase:meta by deleting every row that does not belong to a
 * system-namespace table, unassigning each affected region first.
 *
 * @throws IOException if scanning or deleting from meta fails
 */
protected void wipeOutMeta() throws IOException {
// Mess it up by blowing up meta.
Admin admin = TEST_UTIL.getHBaseAdmin();
Scan s = new Scan();
Table meta = new HTable(conf, TableName.META_TABLE_NAME);
try {
ResultScanner scanner = meta.getScanner(s);
List<Delete> dels = new ArrayList<Delete>();
try {
for (Result r : scanner) {
HRegionInfo info =
HRegionInfo.getHRegionInfo(r);
// Leave system-namespace tables (e.g. hbase:namespace) intact.
if(info != null && !info.getTable().getNamespaceAsString()
.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
Delete d = new Delete(r.getRow());
dels.add(d);
admin.unassign(r.getRow(), true);
}
}
} finally {
scanner.close(); // original leaked the scanner if the loop threw
}
meta.delete(dels);
} finally {
meta.close(); // original leaked the table handle on any exception
}
}
示例2: testRowCountWithInvalidRange2
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Verifies the row count when startrow equals endrow and both are non-null.
 * Because a non-get query with an empty range matches nothing, the expected
 * count is 0.
 * @throws Throwable
 */
@Test (timeout=300000)
public void testRowCountWithInvalidRange2() {
AggregationClient aggregationClient = new AggregationClient(conf);
Scan emptyRangeScan = new Scan();
// Identical start and stop rows form an empty (non-get) range.
emptyRangeScan.setStartRow(ROWS[5]);
emptyRangeScan.setStopRow(ROWS[5]);
final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> interpreter =
new LongColumnInterpreter();
long count = -1;
try {
count = aggregationClient.rowCount(TEST_TABLE, interpreter, emptyRangeScan);
} catch (Throwable t) {
// The empty range is reported via an exception; treat it as zero rows.
count = 0;
}
assertEquals(0, count);
}
示例3: testVisibilityLabelsOnRSRestart
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Aborts every region server, starts a fresh one, waits for the labels
 * region to come back, then verifies a labelled scan still honors
 * authorizations after the restart.
 */
@Test(timeout = 60 * 1000)
public void testVisibilityLabelsOnRSRestart() throws Exception {
final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster()
.getRegionServerThreads();
// Kill every live RS so the labels region must be reopened elsewhere.
for (RegionServerThread rsThread : regionServerThreads) {
rsThread.getRegionServer().abort("Aborting ");
}
// Start one new RS
RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer();
waitForLabelsRegionAvailability(rs.getRegionServer());
try (Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL
+ ")", PRIVATE);) {
Scan s = new Scan();
s.setAuthorizations(new Authorizations(SECRET));
// Close the scanner too; the original leaked it.
try (ResultScanner scanner = table.getScanner(s)) {
Result[] next = scanner.next(3);
assertTrue(next.length == 1);
}
}
}
示例4: assertExpectedTable
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Checks that all columns have the expected value and that there is the
 * expected number of rows.
 * @param connection connection used to obtain the table
 * @param table name of the table to scan
 * @param count expected number of rows
 * @param value expected cell value (encoded via {@code value(int)})
 * @throws IOException if listing tables fails
 */
void assertExpectedTable(final Connection connection, TableName table, int count, int value)
throws IOException {
HTableDescriptor [] htds = util.getHBaseAdmin().listTables(table.getNameAsString());
// JUnit's contract is (expected, actual); the original had them reversed.
assertEquals(1, htds.length);
Table t = null;
try {
t = connection.getTable(table);
Scan s = new Scan();
ResultScanner sr = t.getScanner(s);
int i = 0;
for (Result r : sr) {
i++;
for (NavigableMap<byte[], byte[]> nm : r.getNoVersionMap().values()) {
for (byte[] val : nm.values()) {
assertTrue(Bytes.equals(val, value(value)));
}
}
}
assertEquals(count, i);
} catch (IOException e) {
// Keep the cause visible instead of swallowing it entirely.
fail("Failed due to exception: " + e);
} finally {
if (t != null) t.close();
}
}
示例5: testSumWithInvalidRange
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Runs a sum aggregation over a backwards range (start row after stop row)
 * and verifies that no result is produced.
 */
@Test(timeout = 300000)
public void testSumWithInvalidRange() {
AggregationClient aggregationClient = new AggregationClient(conf);
Scan invalidScan = new Scan();
invalidScan.addFamily(TEST_FAMILY);
// Start row sorts after the stop row, making the range invalid.
invalidScan.setStartRow(ROWS[6]);
invalidScan.setStopRow(ROWS[2]);
final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> interpreter =
new DoubleColumnInterpreter();
Double sum = null;
try {
sum = aggregationClient.sum(TEST_TABLE, interpreter, invalidScan);
} catch (Throwable ignored) {
// Expected: the invalid range throws before sum is ever assigned.
}
assertEquals(null, sum);// control should go to the catch block
}
示例6: createScanOnIFile
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Builds a Scan over the IFile: the primary range bounds the scan itself,
 * while every remaining range is pushed down as a filter list.
 */
private Scan createScanOnIFile(Scan rawScan, ScanRange primaryRange,
ScanRange.ScanRangeList allRangeList) throws IOException {
// Gather all ranges except the primary (identity comparison on purpose:
// the primary range object is drawn from this same list).
ScanRange.ScanRangeList secondaryRanges = new ScanRange.ScanRangeList();
for (ScanRange range : allRangeList.getRanges()) {
if (range != primaryRange) {
secondaryRanges.addScanRange(range);
}
}
Scan ifileScan = new Scan();
ifileScan.setStartRow(primaryRange.getStart());
ifileScan.setStopRow(primaryRange.getStop());
// Remaining ranges become a filter on top of the primary-range scan.
if (!secondaryRanges.getRanges().isEmpty()) {
ifileScan.setFilter(secondaryRanges.toFilterList());
}
ifileScan.setCacheBlocks(false);
return ifileScan;
}
示例7: testMultiRowRangeFilterWithoutRangeOverlap
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Scans with a MultiRowRangeFilter built from three disjoint ranges
 * (added out of order) and checks the total against three independent
 * single-range scans.
 */
@Test
public void testMultiRowRangeFilterWithoutRangeOverlap() throws IOException {
tableName = Bytes.toBytes("testMultiRowRangeFilterWithoutRangeOverlap");
HTable table = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
generateRows(numRows, table, family, qf, value);
Scan scan = new Scan();
scan.setMaxVersions();
// Three non-overlapping [start, stop) ranges, deliberately unsorted.
List<RowRange> ranges = new ArrayList<RowRange>();
ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false));
scan.setFilter(new MultiRowRangeFilter(ranges));
int filteredCount = getResultsSize(table, scan);
LOG.info("found " + filteredCount + " results");
// The filtered total must equal the sum of the three plain range scans.
List<Cell> firstRange = getScanResult(Bytes.toBytes(10), Bytes.toBytes(20), table);
List<Cell> secondRange = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), table);
List<Cell> thirdRange = getScanResult(Bytes.toBytes(60), Bytes.toBytes(70), table);
assertEquals(firstRange.size() + secondRange.size() + thirdRange.size(), filteredCount);
table.close();
}
示例8: countRows
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Counts the rows in the given region by scanning it with an empty Scan.
 *
 * @param r region to scan
 * @return number of rows observed
 * @throws IOException if the scan fails
 */
private int countRows(final HRegion r) throws IOException {
int rowcount = 0;
InternalScanner scanner = r.getScanner(new Scan());
try {
List<Cell> kvs = new ArrayList<Cell>();
boolean hasNext = true;
while (hasNext) {
hasNext = scanner.next(kvs);
if (!kvs.isEmpty())
rowcount++;
// next() appends into kvs; without clearing, the list stays non-empty
// after the first populated row and every later iteration is counted,
// inflating the result. (Compare countDeleteMarkers, which clears.)
kvs.clear();
}
} finally {
scanner.close();
}
return rowcount;
}
示例9: getRowOrBefore
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Returns the row equal to or immediately before {@code row} in the given
 * family, using a reversed small scan; also accumulates timing stats.
 *
 * @throws IOException if the scan fails
 */
public Result getRowOrBefore(Table table, byte[] row, byte[] family) throws IOException {
long start = System.currentTimeMillis();
Scan scan = new Scan();
scan.addFamily(family);
scan.setReversed(true); // walk backwards so the first hit is row-or-before
scan.setStartRow(row);
scan.setCacheBlocks(false);
scan.setCaching(1);
scan.setSmall(true);
ResultScanner scanner = table.getScanner(scan);
Result ret;
try {
ret = scanner.next();
} finally {
scanner.close(); // original leaked the scanner if next() threw
}
prevRowTotalTime += System.currentTimeMillis() - start;
prevRowTotalCount++;
return ret;
}
示例10: testSkipColumn
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Verifies that a StoreScanner restricted to columns "a" and "d" skips the
 * other columns: two cells in the first row, one in the second, then EOF.
 */
public void testSkipColumn() throws IOException {
List<KeyValueScanner> scanners = scanFixture(kvs);
StoreScanner scan = new StoreScanner(new Scan(), scanInfo, scanType,
getCols("a", "d"), scanners);
try {
List<Cell> results = new ArrayList<Cell>();
// First row: only columns a and d survive.
assertEquals(true, scan.next(results));
assertEquals(2, results.size());
assertEquals(kvs[0], results.get(0));
assertEquals(kvs[3], results.get(1));
results.clear();
// Second row: a single matching cell.
assertEquals(true, scan.next(results));
assertEquals(1, results.size());
assertEquals(kvs[kvs.length-1], results.get(0));
results.clear();
assertEquals(false, scan.next(results));
} finally {
scan.close(); // original never closed the scanner
}
}
示例11: testStdWithInvalidRange
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Runs a standard-deviation aggregation over a backwards range (start row
 * after stop row) and verifies that no result is produced.
 */
@Test (timeout=300000)
public void testStdWithInvalidRange() {
AggregationClient aggregationClient = new AggregationClient(conf);
Scan invalidScan = new Scan();
invalidScan.addFamily(TEST_FAMILY);
// Start row sorts after the stop row, making the range invalid.
invalidScan.setStartRow(ROWS[6]);
invalidScan.setStopRow(ROWS[1]);
final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> interpreter =
new LongColumnInterpreter();
Double std = null;
try {
std = aggregationClient.std(TEST_TABLE, interpreter, invalidScan);
} catch (Throwable ignored) {
// Expected: the invalid range throws before std is ever assigned.
}
assertEquals(null, std);// control should go to the catch block
}
示例12: testGetScanner_WithNotOkFamilies
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Verifies that requesting a scanner on a column family the region does not
 * contain fails with an exception.
 */
@Test
public void testGetScanner_WithNotOkFamilies() throws IOException {
byte[] fam1 = Bytes.toBytes("fam1");
byte[] fam2 = Bytes.toBytes("fam2");
// Region is created with fam1 only; fam2 stays unknown to it.
byte[][] families = { fam1 };
// Setting up region
String method = this.getName();
this.region = initHRegion(tableName, method, CONF, families);
try {
Scan scan = new Scan();
scan.addFamily(fam2);
boolean threw = false;
try {
region.getScanner(scan);
} catch (Exception e) {
// Expected: scanning an absent family must be rejected.
threw = true;
}
assertTrue("Families could not be found in Region", threw);
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
示例13: testScanForSuperUserWithFewerLabelAuths
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Grants the admin user only SECRET, then scans as the superuser requesting
 * SECRET and CONFIDENTIAL; the superuser should still see both labelled rows.
 */
@Test
public void testScanForSuperUserWithFewerLabelAuths() throws Throwable {
String[] auths = { SECRET };
String user = "admin";
try (Connection conn = ConnectionFactory.createConnection(conf)) {
VisibilityClient.setAuths(conn, auths, user);
}
TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL
+ "&!" + PRIVATE, SECRET + "&!" + PRIVATE);
PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
Scan s = new Scan();
s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
// Include the scanner in try-with-resources; the original leaked it.
try (Connection connection = ConnectionFactory.createConnection(conf);
Table t = connection.getTable(table.getName());
ResultScanner scanner = t.getScanner(s)) {
Result[] result = scanner.next(5);
assertTrue(result.length == 2);
}
return null;
}
};
SUPERUSER.runAs(scanAction);
}
示例14: countDeleteMarkers
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Counts delete markers in the region via a raw scan at the store's max
 * versions.
 *
 * @param region region to scan
 * @return number of delete-marker cells observed
 * @throws IOException if the scan fails
 */
private int countDeleteMarkers(Region region) throws IOException {
Scan s = new Scan();
s.setRaw(true); // raw scan surfaces delete markers instead of applying them
// use max versions from the store(s)
s.setMaxVersions(region.getStores().iterator().next().getScanInfo().getMaxVersions());
InternalScanner scan = region.getScanner(s);
int res = 0;
try {
List<Cell> kvs = new ArrayList<Cell>();
boolean hasMore;
do {
hasMore = scan.next(kvs);
for (Cell kv : kvs) {
if(CellUtil.isDelete(kv)) res++;
}
kvs.clear();
} while (hasMore);
} finally {
scan.close(); // original leaked the scanner if next() threw
}
return res;
}
示例15: ScanTask
import org.apache.hadoop.hbase.client.Scan; //导入依赖的package包/类
/**
 * Creates a scan task configured from {@code scanTaskConfig} over the given scans.
 *
 * @param scanTaskConfig supplies the table name/factory, row-key distributor
 *        and result-queue size; must not be null
 * @param scans one or more scans to execute; must be non-null and non-empty
 * @throws NullPointerException if scanTaskConfig or scans is null
 * @throws IllegalArgumentException if scans is empty
 */
public ScanTask(ScanTaskConfig scanTaskConfig, Scan... scans) {
if (scanTaskConfig == null) {
throw new NullPointerException("scanTaskConfig must not be null");
}
if (scans == null) {
throw new NullPointerException("scans must not be null");
}
if (scans.length == 0) {
throw new IllegalArgumentException("scans must not be empty");
}
this.tableName = scanTaskConfig.getTableName();
this.tableFactory = scanTaskConfig.getTableFactory();
this.rowKeyDistributor = scanTaskConfig.getRowKeyDistributor();
// Defensive copy: the caller could otherwise mutate the varargs array
// after handing it in, bypassing the emptiness check above.
this.scans = scans.clone();
this.resultQueue = new ArrayBlockingQueue<>(scanTaskConfig.getScanTaskQueueSize());
}