This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.PageFilter. If you are unsure what PageFilter does, how it is used, or what real-world code using it looks like, the curated class examples below may help.
The PageFilter class belongs to the org.apache.hadoop.hbase.filter package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
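Before turning to the individual examples, here is a minimal, self-contained sketch of the typical PageFilter paging pattern. The table name, page size, and class name below are illustrative assumptions rather than code from any of the quoted projects. The key point is that PageFilter is evaluated independently on each region server, so a single scan may return more than pageSize rows in total; the usual client-side pattern is to restart the scan from the last row key seen, suffixed with a zero byte, to fetch the next page.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class PageFilterSketch {
  // Appended to the last seen row key so the next scan starts strictly after it.
  private static final byte[] POSTFIX = new byte[] { 0x00 };

  public static void scanInPages(Connection conn, long pageSize) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("demo_table"))) { // hypothetical table name
      byte[] lastRow = null;
      while (true) {
        Scan scan = new Scan();
        // PageFilter limits the row count per region server, not globally.
        scan.setFilter(new PageFilter(pageSize));
        if (lastRow != null) {
          // Resume just after the last row of the previous page.
          scan.setStartRow(Bytes.add(lastRow, POSTFIX));
        }
        int rowsInPage = 0;
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result result : scanner) {
            lastRow = result.getRow();
            rowsInPage++;
            // process the row here
          }
        }
        if (rowsInPage == 0) {
          break; // no more rows
        }
      }
    }
  }
}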
Example 1: testRow
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  scan.setCaching(opts.caching);
  FilterList list = new FilterList();
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  // WhileMatchFilter stops the whole scan once the wrapped PageFilter has let 120 rows through.
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) {
    updateValueSize(rr);
  }
  s.close();
}
Example 2: getVertexIndexScanWithLimit
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
private Scan getVertexIndexScanWithLimit(String label, boolean isUnique, String key, Object from, int limit, boolean reversed) {
  byte[] prefix = serializeForRead(label, isUnique, key, null);
  byte[] startRow = from != null
      ? serializeForRead(label, isUnique, key, from)
      : prefix;
  byte[] stopRow = HConstants.EMPTY_END_ROW;
  if (graph.configuration().getInstanceType() == HBaseGraphConfiguration.InstanceType.BIGTABLE) {
    if (reversed) {
      throw new UnsupportedOperationException("Reverse scans not supported by Bigtable");
    } else {
      // PrefixFilter in Bigtable does not automatically stop
      // See https://github.com/GoogleCloudPlatform/cloud-bigtable-client/issues/1087
      stopRow = HBaseGraphUtils.incrementBytes(prefix);
    }
  }
  if (reversed) startRow = HBaseGraphUtils.incrementBytes(startRow);
  Scan scan = new Scan(startRow, stopRow);
  FilterList filterList = new FilterList();
  filterList.addFilter(new PrefixFilter(prefix));
  filterList.addFilter(new PageFilter(limit));
  scan.setFilter(filterList);
  scan.setReversed(reversed);
  return scan;
}
Example 3: testRow
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  FilterList list = new FilterList();
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) {
    updateValueSize(rr);
  }
  s.close();
}
Example 4: testRow
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows))
      .setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
      .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType);
  FilterList list = new FilterList();
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) {
    updateValueSize(rr);
  }
  s.close();
}
Example 5: generateScan
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
/**
 * Helper method that generates a scan object for
 * use in a query
 * @param theQuery the query to generate a scan for
 * @param theTableMap the table the query is against
 * @return the generated scan
 */
private <T> Scan generateScan( Query<T> theQuery, HBaseTableMap theTableMap ) {
  Scan scan = new Scan( );
  for( Filter filter : theQuery.getFilters( ) ) {
    if( filter instanceof RowFilter ) {
      RowFilter<?> rowFilter = ( RowFilter<?> )filter;
      HBaseKeyMap keyMap = theTableMap.getKey();
      if( rowFilter.getLimit() > 0 ) {
        scan.setFilter( new PageFilter( rowFilter.getLimit( ) ) );
      }
      if( rowFilter.getEndKey() != null ) {
        scan.setStopRow( ( byte[] )keyMap.getKeyTranslator().translate( rowFilter.getEndKey( ) ) );
      }
      if( rowFilter.getStartKey() != null ) {
        scan.setStartRow( ( byte[] )keyMap.getKeyTranslator().translate( rowFilter.getStartKey( ) ) );
      }
    }
  }
  return scan;
}
Example 6: testRow
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  ResultScanner s = this.table.getScanner(scan);
  s.close();
}
Example 7: getFilter
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
/**
 * Gets the filter.
 *
 * @return the filter.
 */
public synchronized Filter getFilter() {
  if (pageSize > 0) {
    addFilter(new PageFilter(pageSize));
    // Reset so the PageFilter is only added once.
    pageSize = 0L;
  }
  return filters;
}
Example 8: edges
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
public Iterator<Edge> edges(Object fromId, int limit) {
  final EdgeReader parser = new EdgeReader(graph);
  Scan scan = fromId != null ? new Scan(ValueUtils.serializeWithSalt(fromId)) : new Scan();
  scan.setFilter(new PageFilter(limit));
  ResultScanner scanner = null;
  try {
    scanner = table.getScanner(scan);
    return CloseableIteratorUtils.limit(HBaseGraphUtils.mapWithCloseAtEnd(scanner, parser::parse), limit);
  } catch (IOException e) {
    throw new HBaseGraphException(e);
  }
}
Example 9: vertices
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
public Iterator<Vertex> vertices(Object fromId, int limit) {
  final VertexReader parser = new VertexReader(graph);
  Scan scan = fromId != null ? new Scan(ValueUtils.serializeWithSalt(fromId)) : new Scan();
  scan.setFilter(new PageFilter(limit));
  ResultScanner scanner = null;
  try {
    scanner = table.getScanner(scan);
    return CloseableIteratorUtils.limit(HBaseGraphUtils.mapWithCloseAtEnd(scanner, parser::parse), limit);
  } catch (IOException e) {
    throw new HBaseGraphException(e);
  }
}
Example 10: testRow
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) ;
  s.close();
}
Example 11: testRow
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  ResultScanner s = this.table.getScanner(scan);
  //int count = 0;
  for (Result rr = null; (rr = s.next()) != null;) {
    // LOG.info("" + count++ + " " + rr.toString());
  }
  s.close();
}
Example 12: ScanRows
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
/**
 * Specifies a range of rows to retrieve based on a starting row key
 * and retrieves up to limit rows. Each row is passed to the supplied
 * DataScanner.
 */
public void ScanRows(String startDate, String symbol,
    int limit, DataScanner scanner) throws IOException {
  ResultScanner results = null;
  try (Connection conn = ConnectionFactory.createConnection(config)) {
    // Get the table
    Table table = conn.getTable(TableName.valueOf(TABLE_NAME));
    // Create the scan
    Scan scan = new Scan();
    // Start at a specific row key.
    scan.setStartRow(makeKey(startDate, symbol));
    // Tell the server not to cache more than limit rows
    // since we won't need them
    scan.setCaching(limit);
    // Can also set a server-side filter
    scan.setFilter(new PageFilter(limit));
    // Get the scan results
    results = table.getScanner(scan);
    // Iterate over the scan results and break at the limit
    int count = 0;
    for (Result r : results) {
      scanner.ProcessRow(r);
      if (++count >= limit) break;
    }
  } finally {
    // ResultScanner must be closed.
    if (results != null) results.close();
  }
}
Example 13: setFilter
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
public HbaseScanManager setFilter(Filter filter) {
  if (filter instanceof PageFilter && isPageLimitExceeded((PageFilter) filter)) {
    throw new IllegalArgumentException("Page size limit is too big, should be smaller than: "
        + MAX_DATA_PER_SCAN);
  }
  scan.setFilter(filter);
  return this;
}
Example 14: find
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
@Override
public List<User> find(String startRow, long pageSize) {
  Scan scan = new Scan();
  startRow = startRow == null ? "" : startRow;
  scan.setStartRow(Bytes.toBytes(startRow));
  PageFilter filter = new PageFilter(pageSize);
  // TODO order and sort
  // scan.setStartRow(startRow).setMaxResultSize(pageSize);
  scan.setFilter(filter);
  List<User> userList = find(User.TB_NAME, scan, getRowMapper(User.CF_KEY, type));
  return userList;
}
Example 15: testRow
import org.apache.hadoop.hbase.filter.PageFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  FilterList list = new FilterList();
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) ;
  s.close();
}