本文整理汇总了Java中org.apache.hadoop.hbase.client.Scan.setReversed方法的典型用法代码示例。如果您正苦于以下问题:Java Scan.setReversed方法的具体用法?Java Scan.setReversed怎么用?Java Scan.setReversed使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.client.Scan
的用法示例。
在下文中一共展示了Scan.setReversed方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testWhileMatchFilterWithFilterRowKeyWithReverseScan
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
public void testWhileMatchFilterWithFilterRowKeyWithReverseScan()
    throws Exception {
  // Verifies that a WhileMatchFilter wrapping a PrefixFilter terminates a
  // reversed scan: once a row no longer matches the prefix (or the scan is
  // exhausted), filterAllRemaining() must report true.
  Scan s = new Scan();
  String prefix = "testRowOne";
  WhileMatchFilter filter = new WhileMatchFilter(new PrefixFilter(
      Bytes.toBytes(prefix)));
  s.setFilter(filter);
  s.setReversed(true);
  InternalScanner scanner = this.region.getScanner(s);
  try {
    while (true) {
      ArrayList<Cell> values = new ArrayList<Cell>();
      boolean isMoreResults = scanner.next(values);
      // Short-circuit keeps values.get(0) safe: it is only evaluated when
      // isMoreResults is true, i.e. when at least one cell was returned.
      if (!isMoreResults
          || !Bytes.toString(values.get(0).getRow()).startsWith(prefix)) {
        Assert.assertTrue(
            "The WhileMatchFilter should now filter all remaining",
            filter.filterAllRemaining());
      }
      if (!isMoreResults) {
        break;
      }
    }
  } finally {
    // Fix: close the scanner even when an assertion or scan error is thrown;
    // the original only closed it on the success path.
    scanner.close();
  }
}
示例2: testExpectedValuesOfPartialResults
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
public void testExpectedValuesOfPartialResults(boolean reversed) throws Exception {
  // Runs a scan whose RPC chunk size is a single cell and checks that the
  // client still hands back fully reassembled (non-partial) Results, one per
  // row, in the expected row order.
  Scan partialScan = new Scan();
  partialScan.setMaxVersions();
  // Max result size of 1 ensures that each RPC request will return a single
  // cell. The scanner will need to reconstruct the results into a complete
  // result before returning to the caller.
  partialScan.setMaxResultSize(1);
  partialScan.setReversed(reversed);
  // Fix: try-with-resources closes the scanner even when an assertion fails
  // mid-loop; the original leaked it on that path.
  try (ResultScanner partialScanner = TABLE.getScanner(partialScan)) {
    // Iterate rows forward (0..len-1) or backward (len-1..0) to match the
    // order a reversed scanner returns them in.
    final int startRow = reversed ? ROWS.length - 1 : 0;
    final int endRow = reversed ? -1 : ROWS.length;
    final int loopDelta = reversed ? -1 : 1;
    for (int row = startRow; row != endRow; row = row + loopDelta) {
      String message = "Ensuring the expected keyValues are present for row " + row;
      List<Cell> expectedKeyValues =
          createKeyValuesForRow(ROWS[row], FAMILIES, QUALIFIERS, VALUE);
      Result result = partialScanner.next();
      assertFalse(result.isPartial());
      verifyResult(result, expectedKeyValues, message);
    }
  }
}
示例3: getRowOrBefore
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
public Result getRowOrBefore(Table table, byte[] row, byte[] family) throws IOException {
  // Emulates the removed HTable#getRowOrBefore: a reversed, small, single-row
  // scan starting at `row` yields the row at or immediately before it.
  // Also accumulates wall-clock time/count into prevRowTotal* counters.
  long start = System.currentTimeMillis();
  Scan scan = new Scan();
  scan.addFamily(family);
  scan.setReversed(true);
  scan.setStartRow(row);
  scan.setCacheBlocks(false);
  scan.setCaching(1);
  scan.setSmall(true);
  Result ret;
  // Fix: try-with-resources guarantees the scanner is closed even if next()
  // throws; the original leaked it on that path.
  try (ResultScanner scanner = table.getScanner(scan)) {
    ret = scanner.next();
  }
  prevRowTotalTime += System.currentTimeMillis() - start;
  prevRowTotalCount++;
  return ret;
}
示例4: insertOneRecord
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
@Override protected void insertOneRecord(AbstractDITBRecord record) throws IOException {
  // note, MD-HBase get before update, we summarize the time in get separately
  MDPoint point = record.toMDPoint();
  byte[] row = MDUtils.bitwiseZip(point.values, mdAdmin.getDimensions());
  // get before row: a reversed single-row scan finds the bucket row at or
  // immediately before the zipped key
  long startTime = System.currentTimeMillis();
  Scan scan = new Scan();
  scan.addFamily(MDHBaseAdmin.BUCKET_FAMILY);
  scan.setReversed(true);
  scan.setStartRow(row);
  scan.setCacheBlocks(false);
  scan.setCaching(1);
  scan.setSmall(true);
  Result result;
  // Fix: try-with-resources closes the scanner on error paths too; the
  // original leaked it if next() threw.
  try (ResultScanner scanner = table.getScanner(scan)) {
    result = scanner.next();
  }
  gbuTime += System.currentTimeMillis() - startTime;
  gbuCount++;
  // default scan: no bucket at/before this key, fall back to the suffix row
  if (result == null) {
    row = mdAdmin.getBucketSuffixRow(point);
  } else {
    row = result.getRow();
  }
  table.incrementColumnValue(row, MDHBaseAdmin.BUCKET_FAMILY,
      MDHBaseAdmin.BUCKET_SIZE_QUALIFIER, 1);
}
示例5: processGet
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
@Override protected Result processGet(Table table, Get get) throws IOException {
  // Serves a Get via a reversed, small, single-row scan starting at the
  // Get's row: returns the row at or immediately before get.getRow().
  Scan scan = new Scan();
  scan.addFamily(MDHBaseAdmin.BUCKET_FAMILY);
  scan.setReversed(true);
  scan.setStartRow(get.getRow());
  scan.setCacheBlocks(false);
  scan.setCaching(1);
  scan.setSmall(true);
  // Fix: close the scanner on all paths; the original leaked it if next()
  // threw before close() was reached.
  try (ResultScanner scanner = table.getScanner(scan)) {
    return scanner.next();
  }
}
示例6: getRowOrBefore
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
private Result getRowOrBefore(byte[] tableName, byte[] row, byte[] family) throws IOException {
  // Reversed scan starting at `row` so the first next() yields the row at or
  // immediately before it.
  // Fixes: dropped the redundant setStartRow(row) (the Scan(row) constructor
  // already sets the start row), dropped the dead `table != null` check
  // (getTable has either returned or thrown before the try is entered), and
  // put the scanner in try-with-resources.
  Scan scan = new Scan(row);
  scan.setReversed(true);
  scan.addFamily(family);
  Table table = getTable(tableName);
  try (ResultScanner scanner = table.getScanner(scan)) {
    return scanner.next();
  } finally {
    table.close();
  }
}
示例7: testPrefixFilterWithReverseScan
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
public void testPrefixFilterWithReverseScan() throws Exception {
  // Half of the rows belong to group one ("testRowOne..."), so a reversed
  // scan with a prefix filter on that group should yield numRows / 2 rows,
  // each carrying the full per-row column count.
  long rowsExpected = this.numRows / 2;
  long keysExpected = this.colsPerRow;
  Scan reverseScan = new Scan();
  reverseScan.setReversed(true);
  reverseScan.setFilter(new PrefixFilter(Bytes.toBytes("testRowOne")));
  verifyScan(reverseScan, rowsExpected, keysExpected);
}
示例8: testWhileMatchFilterWithFilterRowWithReverseScan
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
public void testWhileMatchFilterWithFilterRowWithReverseScan()
    throws Exception {
  // A WhileMatchFilter around a PageFilter must stop a reversed scan after
  // exactly pageSize rows and then report filterAllRemaining().
  final int pageSize = 4;
  Scan s = new Scan();
  s.setReversed(true);
  WhileMatchFilter filter = new WhileMatchFilter(new PageFilter(pageSize));
  s.setFilter(filter);
  InternalScanner scanner = this.region.getScanner(s);
  int scannerCounter = 0;
  try {
    while (true) {
      boolean isMoreResults = scanner.next(new ArrayList<Cell>());
      scannerCounter++;
      if (scannerCounter >= pageSize) {
        Assert.assertTrue(
            "The WhileMatchFilter should now filter all remaining",
            filter.filterAllRemaining());
      }
      if (!isMoreResults) {
        break;
      }
    }
  } finally {
    // Fix: close the scanner even when an assertion fails inside the loop;
    // the original only closed it after a clean exit.
    scanner.close();
  }
  Assert.assertEquals("The page filter returned more rows than expected",
      pageSize, scannerCounter);
}
示例9: testReverseScanWithoutPadding
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
@Test
// Writes three unpadded row keys and checks a reversed scan returns them in
// descending byte order: "b", "ab", "a".
public void testReverseScanWithoutPadding() throws Exception {
byte[] row1 = Bytes.toBytes("a");
byte[] row2 = Bytes.toBytes("ab");
byte[] row3 = Bytes.toBytes("b");
Put put1 = new Put(row1);
put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
Put put2 = new Put(row2);
put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
Put put3 = new Put(row3);
put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
region.put(put1);
region.put(put2);
region.put(put3);
// Flush so the reversed scan reads from an HFile rather than the memstore.
region.flush(true);
Scan scan = new Scan();
scan.setCacheBlocks(false);
scan.setReversed(true);
// FirstKeyOnlyFilter: one cell per row is enough to verify row ordering.
scan.setFilter(new FirstKeyOnlyFilter());
scan.addFamily(cfName);
RegionScanner scanner = region.getScanner(scan);
// `res` is never cleared, so it accumulates one cell per row across calls;
// the index-based asserts below rely on that.
List<Cell> res = new ArrayList<Cell>();
// count starts at 1 because next() returns false on the call that delivers
// the final row's cells — that last delivery is not counted by the loop body.
int count = 1;
while (scanner.next(res)) {
count++;
}
assertEquals(Bytes.toString(res.get(0).getRowArray(), res.get(0).getRowOffset(), res.get(0)
.getRowLength()), "b");
assertEquals(Bytes.toString(res.get(1).getRowArray(), res.get(1).getRowOffset(), res.get(1)
.getRowLength()), "ab");
assertEquals(Bytes.toString(res.get(2).getRowArray(), res.get(2).getRowOffset(), res.get(2)
.getRowLength()), "a");
assertEquals(3, count);
}
示例10: testReverseScanWithPadding
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
@Test
// Same shape as testReverseScanWithoutPadding, but each key is bit-inverted
// (via the project helper `invert` — presumably flips every byte; confirm in
// the enclosing class) and padded with a 0xFF terminator, then counted by a
// reversed scan.
public void testReverseScanWithPadding() throws Exception {
byte[] terminator = new byte[] { -1 };
byte[] row1 = Bytes.add(invert(Bytes.toBytes("a")), terminator);
byte[] row2 = Bytes.add(invert(Bytes.toBytes("ab")), terminator);
byte[] row3 = Bytes.add(invert(Bytes.toBytes("b")), terminator);
Put put1 = new Put(row1);
put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
Put put2 = new Put(row2);
put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
Put put3 = new Put(row3);
put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
region.put(put1);
region.put(put2);
region.put(put3);
// Flush so the reversed scan reads from an HFile rather than the memstore.
region.flush(true);
Scan scan = new Scan();
scan.setCacheBlocks(false);
scan.setReversed(true);
scan.setFilter(new FirstKeyOnlyFilter());
scan.addFamily(cfName);
RegionScanner scanner = region.getScanner(scan);
List<Cell> res = new ArrayList<Cell>();
// count starts at 1 because next() returns false on the call that delivers
// the final row's cells — that last delivery is not counted by the loop body.
int count = 1;
while (scanner.next(res)) {
count++;
}
// Only the row count is asserted here; ordering is covered by the
// no-padding variant of this test.
assertEquals(3, count);
}
示例11: testOrderingOfCellsInPartialResults
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Order of cells in partial results matches the ordering of cells from complete results.
 * Sweeps the max result size from one cell's worth up to a full row's worth,
 * checking both scan directions at each size.
 * @throws Exception
 */
@Test
public void testOrderingOfCellsInPartialResults() throws Exception {
  Scan scan = new Scan();
  for (int col = 1; col <= NUM_COLS; col++) {
    scan.setMaxResultSize(getResultSizeForNumberOfCells(col));
    // Fix: reset the flag each iteration — previously the setReversed(true)
    // below leaked into the next iteration, so the "forward" pass actually
    // ran reversed for every col >= 2.
    scan.setReversed(false);
    testOrderingOfCellsInPartialResults(scan);
    // Test again with a reversed scanner
    scan.setReversed(true);
    testOrderingOfCellsInPartialResults(scan);
  }
}
示例12: testExpectedNumberOfCellsPerPartialResult
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Setting the max result size allows us to control how many cells we expect to see on each call
 * to next on the scanner. Test a variety of different sizes for correctness.
 * @throws Exception
 */
@Test
public void testExpectedNumberOfCellsPerPartialResult() throws Exception {
  // Run the same checks forward and then reversed on a single Scan object.
  Scan scan = new Scan();
  for (boolean reversed : new boolean[] { false, true }) {
    scan.setReversed(reversed);
    testExpectedNumberOfCellsPerPartialResult(scan);
  }
}
示例13: testPartialResultsReassembly
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Test the method {@link Result#createCompleteResult(List)}.
 * @throws Exception
 */
@Test
public void testPartialResultsReassembly() throws Exception {
  // Exercise reassembly in both scan directions with a single Scan object.
  Scan scan = new Scan();
  for (boolean reversed : new boolean[] { false, true }) {
    scan.setReversed(reversed);
    testPartialResultsReassembly(scan);
  }
}
示例14: testSmallScansDoNotAllowPartials
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
/**
 * Small scans should not return partial results: allowing partials would prevent a small scan
 * from retrieving all of its results in a single RPC request, which is what makes small scans
 * useful. Thus, ensure that even when {@link Scan#getAllowPartialResults()} is true, small
 * scans do not return partial results.
 * @throws Exception
 */
@Test
public void testSmallScansDoNotAllowPartials() throws Exception {
  // Check both scan directions with a single Scan object.
  Scan scan = new Scan();
  for (boolean reversed : new boolean[] { false, true }) {
    scan.setReversed(reversed);
    testSmallScansDoNotAllowPartials(scan);
  }
}
示例15: testReversedPartialResultWhenRegionMove
import org.apache.hadoop.hbase.client.Scan; //导入方法依赖的package包/类
@Test
// Verifies that a reversed scan allowing partial results survives region
// moves mid-scan: after each move the scanner must resume at the correct
// cell, and results fetched after a mid-row move are flagged partial.
public void testReversedPartialResultWhenRegionMove() throws IOException {
Table table=createTestTable(TableName.valueOf("testReversedPartialResultWhenRegionMove"),
ROWS, FAMILIES, QUALIFIERS, VALUE);
moveRegion(table, 1);
Scan scan = new Scan();
// Max result size of 1 forces each next() to fetch a single cell.
scan.setMaxResultSize(1);
scan.setAllowPartialResults(true);
scan.setReversed(true);
ResultScanner scanner = table.getScanner(scan);
// Consume all but the last cell of the first (i.e. highest-keyed) row; a
// reversed scan starts at ROWS[NUM_ROWS - 1].
for (int i = 0; i < NUM_FAMILIES * NUM_QUALIFIERS-1; i++) {
scanner.next();
}
// The final cell of that row: completing the row means the assembled result
// is not partial.
Result result1 = scanner.next();
assertEquals(1, result1.rawCells().length);
Cell c1 = result1.rawCells()[0];
assertCell(c1, ROWS[NUM_ROWS-1], FAMILIES[NUM_FAMILIES - 1], QUALIFIERS[NUM_QUALIFIERS - 1]);
assertFalse(result1.isPartial());
// Move the region between rows; the scanner must resume at the first cell
// of the next row down.
moveRegion(table, 2);
Result result2 = scanner.next();
assertEquals(1, result2.rawCells().length);
Cell c2 = result2.rawCells()[0];
assertCell(c2, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[0]);
assertTrue(result2.isPartial());
// Move the region mid-row; the scanner must resume at the next qualifier of
// the same row, still reporting a partial result.
moveRegion(table, 3);
Result result3 = scanner.next();
assertEquals(1, result3.rawCells().length);
Cell c3 = result3.rawCells()[0];
assertCell(c3, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[1]);
assertTrue(result3.isPartial());
}