This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.setMaxVersions. If you are wondering what exactly Scan.setMaxVersions does and how to use it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Scan.
The following 15 code examples of Scan.setMaxVersions are sorted by popularity by default.
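Before the collected examples, here is a minimal, self-contained sketch of what setMaxVersions controls: by default a scan returns only the newest version of each cell, and setMaxVersions(n) raises that limit to n (the no-argument overload requests all versions). The table name "demo_table" and column family "cf" are placeholders, and the sketch assumes an HBase 1.x client on the classpath; note that in HBase 2.x this method was deprecated in favor of Scan.readVersions(int).
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MaxVersionsDemo {
  public static void main(String[] args) throws IOException {
    // "demo_table" and "cf" are placeholder names for this sketch.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("demo_table"))) {
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("cf"));
      // Request up to 3 versions of every cell instead of the default single
      // newest version; scan.setMaxVersions() with no argument would request all.
      scan.setMaxVersions(3);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          for (Cell cell : r.listCells()) {
            System.out.println(Bytes.toString(CellUtil.cloneRow(cell))
                + " @" + cell.getTimestamp() + " = "
                + Bytes.toString(CellUtil.cloneValue(cell)));
          }
        }
      }
    }
  }
}
The same pattern recurs throughout the examples below: construct a Scan, set the version limit, then hand the Scan to a scanner.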
Example 1: getAll
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
/**
 * Fetch all records of a single table for one column.
 *
 * @throws IOException
 */
public static Map<byte[], byte[]> getAll(String TableName, String ColumnFamily, String ColumnName)
    throws IOException {
  Map<byte[], byte[]> tableContent = new HashMap<byte[], byte[]>();
  Scan s = new Scan();
  s.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
  s.setMaxVersions(1); // only the newest version of each cell
  s.setCacheBlocks(false); // full-table scan: avoid polluting the block cache
  ResultScanner rs = hbase_table.getScanner(s); // hbase_table: a pre-initialized table field
  for (Result r : rs) {
    byte[] key = r.getRow();
    byte[] value = r.getValue(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
    tableContent.put(key, value);
  }
  rs.close();
  return tableContent;
}
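The method above relies on a static hbase_table field that the snippet does not show. A hypothetical initialization under the HBase 1.x client API might look like the following; the field type and the table name "my_table" are assumptions for illustration only.
// Hypothetical setup for the hbase_table field used in getAll (HBase 1.x API);
// the table name "my_table" is a placeholder.
private static Table hbase_table;

static {
  try {
    Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
    hbase_table = conn.getTable(TableName.valueOf("my_table"));
  } catch (IOException e) {
    throw new ExceptionInInitializerError(e);
  }
}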
Example 2: initScan
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
Scan initScan() throws IOException {
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  if (startTime != 0 || endTime != 0) {
    scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
  }
  if (scanBatch > 0) {
    scan.setBatch(scanBatch);
  }
  if (versions >= 0) {
    scan.setMaxVersions(versions);
  }
  if (!isTableStartRow(startRow)) {
    scan.setStartRow(startRow);
  }
  if (!isTableEndRow(stopRow)) {
    scan.setStopRow(stopRow);
  }
  if (families != null) {
    for (String fam : families.split(",")) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  return scan;
}
Example 3: countDeleteMarkers
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
private int countDeleteMarkers(Region region) throws IOException {
  Scan s = new Scan();
  s.setRaw(true);
  // use max versions from the store(s)
  s.setMaxVersions(region.getStores().iterator().next().getScanInfo().getMaxVersions());
  InternalScanner scan = region.getScanner(s);
  List<Cell> kvs = new ArrayList<Cell>();
  int res = 0;
  boolean hasMore;
  do {
    hasMore = scan.next(kvs);
    for (Cell kv : kvs) {
      if (CellUtil.isDelete(kv)) res++;
    }
    kvs.clear();
  } while (hasMore);
  scan.close();
  return res;
}
Example 4: testMultiRowRangeFilterWithEmptyStartRow
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
@Test
public void testMultiRowRangeFilterWithEmptyStartRow() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithEmptyStartRow");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();
  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(10), false));
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  List<Cell> results1 = getScanResult(Bytes.toBytes(""), Bytes.toBytes(10), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), ht);
  assertEquals(results1.size() + results2.size(), resultsSize);
  ht.close();
}
Example 5: testMultiRowRangeFilterWithExclusive
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
@Test
public void testMultiRowRangeFilterWithExclusive() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithExclusive");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();
  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(40), false));
  ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false));
  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  LOG.info("found " + resultsSize + " results");
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(40), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(65), Bytes.toBytes(75), ht);
  assertEquals((results1.size() - 1) + results2.size(), resultsSize);
  ht.close();
}
Example 6: testExpectedValuesOfPartialResults
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
public void testExpectedValuesOfPartialResults(boolean reversed) throws Exception {
  Scan partialScan = new Scan();
  partialScan.setMaxVersions();
  // Max result size of 1 ensures that each RPC request will return a single cell. The scanner
  // will need to reconstruct the results into a complete result before returning to the caller
  partialScan.setMaxResultSize(1);
  partialScan.setReversed(reversed);
  ResultScanner partialScanner = TABLE.getScanner(partialScan);
  final int startRow = reversed ? ROWS.length - 1 : 0;
  final int endRow = reversed ? -1 : ROWS.length;
  final int loopDelta = reversed ? -1 : 1;
  String message;
  for (int row = startRow; row != endRow; row = row + loopDelta) {
    message = "Ensuring the expected keyValues are present for row " + row;
    List<Cell> expectedKeyValues = createKeyValuesForRow(ROWS[row], FAMILIES, QUALIFIERS, VALUE);
    Result result = partialScanner.next();
    assertFalse(result.isPartial());
    verifyResult(result, expectedKeyValues, message);
  }
  partialScanner.close();
}
Example 7: doAction
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
@Override
protected boolean doAction() throws Exception {
  ResultScanner rs = null;
  try {
    Scan s = new Scan();
    s.setBatch(2);
    s.addFamily(FAMILY);
    s.setFilter(new KeyOnlyFilter());
    s.setMaxVersions(1);
    rs = table.getScanner(s);
    Result result = rs.next();
    return result != null && result.size() > 0;
  } finally {
    if (rs != null) {
      rs.close();
    }
  }
}
Example 8: testMultiRowRangeFilterWithEmptyStopRow
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
@Test
public void testMultiRowRangeFilterWithEmptyStopRow() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithEmptyStopRow");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();
  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(""), false));
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(""), ht);
  assertEquals(results1.size(), resultsSize);
  ht.close();
}
Example 9: _testBlocksScanned
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  Region r = createNewHRegion(table, START_KEY, END_KEY, TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flush(true);
  CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats();
  long before = stats.getHitCount() + stats.getMissCount();
  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);
  InternalScanner s = r.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  while (s.next(results)) {
    // keep scanning; all cells accumulate in results
  }
  s.close();
  int expectResultSize = 'z' - 'a';
  assertEquals(expectResultSize, results.size());
  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE /
      (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength());
  Assert.assertEquals(2, kvPerBlock);
  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;
  assertEquals(expectIndexBlockRead + expectDataBlockRead,
      stats.getHitCount() + stats.getMissCount() - before);
}
Example 10: testMultiRowRangeFilterWithRangeOverlap
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
@Test
public void testMultiRowRangeFilterWithRangeOverlap() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithRangeOverlap");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();
  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges.add(new RowRange(Bytes.toBytes(15), true, Bytes.toBytes(40), false));
  ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false));
  ranges.add(new RowRange(Bytes.toBytes(60), true, null, false));
  ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(80), false));
  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  LOG.info("found " + resultsSize + " results");
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(40), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(60), Bytes.toBytes(""), ht);
  assertEquals(results1.size() + results2.size(), resultsSize);
  ht.close();
}
Example 11: testMultiRowRangeFilterWithInclusive
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
@Test
public void testMultiRowRangeFilterWithInclusive() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithInclusive");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();
  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges.add(new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(40), false));
  ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false));
  ranges.add(new RowRange(Bytes.toBytes(60), true, null, false));
  ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(80), false));
  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  LOG.info("found " + resultsSize + " results");
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(40), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(60), Bytes.toBytes(""), ht);
  assertEquals(results1.size() + results2.size(), resultsSize);
  ht.close();
}
Example 12: testWideScanBatching
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
public void testWideScanBatching() throws IOException {
  final int batch = 256;
  try {
    this.r = createNewHRegion(TESTTABLEDESC, null, null);
    int inserted = addWideContent(this.r);
    List<Cell> results = new ArrayList<Cell>();
    Scan scan = new Scan();
    scan.addFamily(A);
    scan.addFamily(B);
    scan.addFamily(C);
    scan.setMaxVersions(100);
    scan.setBatch(batch);
    InternalScanner s = r.getScanner(scan);
    int total = 0;
    int i = 0;
    boolean more;
    do {
      more = s.next(results);
      i++;
      LOG.info("iteration #" + i + ", results.size=" + results.size());
      // assert that the result set is no larger
      assertTrue(results.size() <= batch);
      total += results.size();
      if (results.size() > 0) {
        // assert that all results are from the same row
        byte[] row = CellUtil.cloneRow(results.get(0));
        for (Cell kv : results) {
          assertTrue(Bytes.equals(row, CellUtil.cloneRow(kv)));
        }
      }
      results.clear();
      // trigger ChangedReadersObservers
      Iterator<KeyValueScanner> scanners =
          ((HRegion.RegionScannerImpl) s).storeHeap.getHeap().iterator();
      while (scanners.hasNext()) {
        StoreScanner ss = (StoreScanner) scanners.next();
        ss.updateReaders();
      }
    } while (more);
    // assert that the scanner returned all values
    LOG.info("inserted " + inserted + ", scanned " + total);
    assertEquals(total, inserted);
    s.close();
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Example 13: testDeleteFamilySpecificTimeStampWithMulipleVersions
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
@Test
public void testDeleteFamilySpecificTimeStampWithMulipleVersions() throws Exception {
  setAuths();
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = doPuts(tableName)) {
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|("
              + SECRET + "&" + TOPSECRET + ")"));
          d.addFamily(fam, 126L);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(6);
    assertTrue(next.length == 2);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 127L);
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 125L);
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 123L);
    cellScanner = next[1].cellScanner();
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row2, 0, row2.length));
  }
}
Example 14: testScanAfterCompaction
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
@Test
public void testScanAfterCompaction() throws Exception {
  setAuths();
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = doPuts(tableName)) {
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" +
              SECRET + "&" + TOPSECRET + ")"));
          d.addFamily(fam, 126L);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    Put put = new Put(Bytes.toBytes("row3"));
    put.add(fam, qual, 127L, value);
    put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "&" + PRIVATE));
    table.put(put);
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    TEST_UTIL.getHBaseAdmin().compact(tableName);
    // Sleep to ensure compaction happens. Need to do it in a better way
    Thread.sleep(5000);
    Scan s = new Scan();
    s.setMaxVersions(5);
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 3);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row1, 0, row1.length));
    assertEquals(current.getTimestamp(), 127L);
    cellScanner = next[1].cellScanner();
    cellScanner.advance();
    current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row2, 0, row2.length));
  }
}
Example 15: createSubmittableJob
import org.apache.hadoop.hbase.client.Scan; // package/class required by this method
/**
 * Sets up the actual job.
 *
 * @param conf The current configuration.
 * @param args The command line parameters.
 * @return The newly created job.
 * @throws java.io.IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args)
    throws IOException {
  if (!doCommandLine(args)) {
    return null;
  }
  if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY,
      HConstants.REPLICATION_ENABLE_DEFAULT)) {
    throw new IOException("Replication needs to be enabled to verify it.");
  }
  conf.set(NAME + ".peerId", peerId);
  conf.set(NAME + ".tableName", tableName);
  conf.setLong(NAME + ".startTime", startTime);
  conf.setLong(NAME + ".endTime", endTime);
  if (families != null) {
    conf.set(NAME + ".families", families);
  }
  Pair<ReplicationPeerConfig, Configuration> peerConfigPair = getPeerQuorumConfig(conf);
  ReplicationPeerConfig peerConfig = peerConfigPair.getFirst();
  String peerQuorumAddress = peerConfig.getClusterKey();
  LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " +
      peerConfig.getConfiguration());
  conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
  HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX,
      peerConfig.getConfiguration().entrySet());
  conf.setInt(NAME + ".versions", versions);
  LOG.info("Number of versions: " + versions);
  Job job = new Job(conf, NAME + "_" + tableName);
  job.setJarByClass(VerifyReplication.class);
  Scan scan = new Scan();
  scan.setTimeRange(startTime, endTime);
  if (versions >= 0) {
    scan.setMaxVersions(versions);
    LOG.info("Number of versions set to " + versions);
  }
  if (families != null) {
    String[] fams = families.split(",");
    for (String fam : fams) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  TableMapReduceUtil.initTableMapperJob(tableName, scan,
      Verifier.class, null, null, job);
  Configuration peerClusterConf = peerConfigPair.getSecond();
  // Obtain the auth token from peer cluster
  TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setNumReduceTasks(0);
  return job;
}