当前位置: 首页>>代码示例>>Java>>正文


Java PageFilter类代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.filter.PageFilter的典型用法代码示例。如果您正苦于以下问题:Java PageFilter类的具体用法?Java PageFilter怎么用?Java PageFilter使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


PageFilter类属于org.apache.hadoop.hbase.filter包,在下文中一共展示了PageFilter类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testRow

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
@Override
void testRow(final int i) throws IOException {
  // Random-read scan: start at a random row and read either one column or the
  // whole family, depending on the command-line options.
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  scan.setCaching(opts.caching);
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  // Server-side filters: optional filter-all plus a 120-row page cap; the
  // WhileMatchFilter ends the scan as soon as the page filter stops matching.
  FilterList filters = new FilterList();
  if (opts.filterAll) {
    filters.addFilter(new FilterAllFilter());
  }
  filters.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(filters);
  ResultScanner resultScanner = this.table.getScanner(scan);
  Result row;
  while ((row = resultScanner.next()) != null) {
    updateValueSize(row);
  }
  resultScanner.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:22,代码来源:PerformanceEvaluation.java

示例2: getVertexIndexScanWithLimit

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
/**
 * Builds a Scan over a vertex index for the given label and property key,
 * bounded to the index's row-key prefix and capped at {@code limit} rows via
 * a server-side {@code PageFilter}.
 *
 * @param label    vertex label whose index is scanned
 * @param isUnique whether the index is a unique index
 * @param key      indexed property key
 * @param from     value to start scanning from, or null to start at the prefix
 * @param limit    maximum number of rows the server should return
 * @param reversed whether to scan in reverse key order (unsupported on Bigtable)
 * @return the configured Scan
 */
private Scan getVertexIndexScanWithLimit(String label, boolean isUnique, String key, Object from, int limit, boolean reversed) {
    // Row-key prefix covering every entry of this (label, key) index.
    byte[] prefix = serializeForRead(label, isUnique, key, null);
    byte[] startRow = from != null
            ? serializeForRead(label, isUnique, key, from)
            : prefix;
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    if (graph.configuration().getInstanceType() == HBaseGraphConfiguration.InstanceType.BIGTABLE) {
        if (reversed) {
            throw new UnsupportedOperationException("Reverse scans not supported by Bigtable");
        } else {
            // PrefixFilter in Bigtable does not automatically stop
            // See https://github.com/GoogleCloudPlatform/cloud-bigtable-client/issues/1087
            stopRow = HBaseGraphUtils.incrementBytes(prefix);
        }
    }
    // NOTE(review): bumping the start row past 'from' presumably makes the
    // reversed scan include the 'from' row itself — confirm against
    // serializeForRead semantics.
    if (reversed) startRow = HBaseGraphUtils.incrementBytes(startRow);
    Scan scan = new Scan(startRow, stopRow);
    FilterList filterList = new FilterList();
    filterList.addFilter(new PrefixFilter(prefix));
    filterList.addFilter(new PageFilter(limit));
    scan.setFilter(filterList);
    scan.setReversed(reversed);
    return scan;
}
 
开发者ID:rayokota,项目名称:hgraphdb,代码行数:25,代码来源:VertexIndexModel.java

示例3: testRow

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
@Override
void testRow(final int i) throws IOException {
  // Scan a single column starting from a random row.
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  // Optional filter-all plus a 120-row page cap that also ends the scan.
  FilterList filters = new FilterList();
  if (opts.filterAll) {
    filters.addFilter(new FilterAllFilter());
  }
  filters.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(filters);
  ResultScanner scanner = this.table.getScanner(scan);
  for (Result row = scanner.next(); row != null; row = scanner.next()) {
    updateValueSize(row);
  }
  scanner.close();
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:17,代码来源:PerformanceEvaluation.java

示例4: testRow

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
@Override
void testRow(final int i) throws IOException {
  // Random-start scan honoring the caching / block-cache / prefetch /
  // read-type options supplied on the command line.
  Scan scan = new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows))
      .setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
      .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType);
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  // Optional filter-all plus a 120-row page cap; WhileMatchFilter stops the
  // scan once the page filter gives up.
  FilterList filters = new FilterList();
  if (opts.filterAll) {
    filters.addFilter(new FilterAllFilter());
  }
  filters.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(filters);
  ResultScanner scanner = this.table.getScanner(scan);
  Result row;
  while ((row = scanner.next()) != null) {
    updateValueSize(row);
  }
  scanner.close();
}
 
开发者ID:apache,项目名称:hbase,代码行数:23,代码来源:PerformanceEvaluation.java

示例5: generateScan

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
/**
 * Helper method that generates a scan object for
 * use in a query
 * @param theQuery the query to generate a scan for
 * @param theTableMap the table the query is against
 * @return the generated scan
 */
private <T> Scan generateScan( Query<T> theQuery, HBaseTableMap theTableMap ) {
	Scan scan = new Scan( );
	
	for( Filter filter : theQuery.getFilters( ) ) {
		if( filter instanceof RowFilter ) {
			RowFilter<?> rowFilter = ( RowFilter<?> )filter;
			HBaseKeyMap keyMap = theTableMap.getKey();
			
			if( rowFilter.getLimit() > 0 ) {
				scan.setFilter( new PageFilter( rowFilter.getLimit( ) ) );
			}
			if( rowFilter.getEndKey() != null ) {
				scan.setStopRow( ( byte[] )keyMap.getKeyTranslator().translate( rowFilter.getEndKey( ) ) );
			} 
			if( rowFilter.getStartKey() != null ) {
				scan.setStartRow( ( byte[] )keyMap.getKeyTranslator().translate( rowFilter.getStartKey( ) ) );						
			}
		}
	}
	return scan;
}
 
开发者ID:Talvish,项目名称:Tales,代码行数:29,代码来源:HBaseDataContext.java

示例6: testRow

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
@Override
void testRow(final int i) throws IOException {
  // Open and immediately close a scanner over a random row: measures scanner
  // setup cost without consuming any results.
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  this.table.getScanner(scan).close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:9,代码来源:PerformanceEvaluation.java

示例7: getFilter

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
/**
 * 获取过滤器。
 *
 * @return 过滤器。
 */
public synchronized Filter getFilter() {
    if (pageSize > 0) {
        addFilter(new PageFilter(pageSize));
        pageSize = 0L;
    }

    return filters;
}
 
开发者ID:heisedebaise,项目名称:tephra,代码行数:14,代码来源:HbaseQuery.java

示例8: edges

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
public Iterator<Edge> edges(Object fromId, int limit) {
    // Reader that deserializes raw HBase rows into Edge instances.
    final EdgeReader edgeReader = new EdgeReader(graph);

    // Start at the salted serialization of fromId when given; otherwise scan
    // from the beginning of the table.
    Scan edgeScan;
    if (fromId != null) {
        edgeScan = new Scan(ValueUtils.serializeWithSalt(fromId));
    } else {
        edgeScan = new Scan();
    }
    // Server-side page cap; the client-side limit below enforces it overall.
    edgeScan.setFilter(new PageFilter(limit));
    try {
        ResultScanner scanner = table.getScanner(edgeScan);
        // The returned iterator closes the scanner when it is exhausted.
        return CloseableIteratorUtils.limit(HBaseGraphUtils.mapWithCloseAtEnd(scanner, edgeReader::parse), limit);
    } catch (IOException e) {
        throw new HBaseGraphException(e);
    }
}
 
开发者ID:rayokota,项目名称:hgraphdb,代码行数:14,代码来源:EdgeModel.java

示例9: vertices

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
public Iterator<Vertex> vertices(Object fromId, int limit) {
    // Reader that deserializes raw HBase rows into Vertex instances.
    final VertexReader vertexReader = new VertexReader(graph);

    // Start at the salted serialization of fromId when given; otherwise scan
    // from the beginning of the table.
    Scan vertexScan;
    if (fromId != null) {
        vertexScan = new Scan(ValueUtils.serializeWithSalt(fromId));
    } else {
        vertexScan = new Scan();
    }
    // Server-side page cap; the client-side limit below enforces it overall.
    vertexScan.setFilter(new PageFilter(limit));
    try {
        ResultScanner scanner = table.getScanner(vertexScan);
        // The returned iterator closes the scanner when it is exhausted.
        return CloseableIteratorUtils.limit(HBaseGraphUtils.mapWithCloseAtEnd(scanner, vertexReader::parse), limit);
    } catch (IOException e) {
        throw new HBaseGraphException(e);
    }
}
 
开发者ID:rayokota,项目名称:hgraphdb,代码行数:14,代码来源:VertexModel.java

示例10: testRow

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
@Override
void testRow(final int i) throws IOException {
  // Scan one column from a random start row, capped server-side at 120 rows.
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  ResultScanner scanner = this.table.getScanner(scan);
  // Drain the scanner without inspecting results; timing only.
  while (scanner.next() != null) {
    // intentionally empty
  }
  scanner.close();
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:10,代码来源:PerformanceEvaluation.java

示例11: testRow

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
@Override
void testRow(final int i) throws IOException {
  // Scan one column from a random start row, capped server-side at 120 rows.
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  ResultScanner scanner = this.table.getScanner(scan);
  // Drain every result; the rows themselves are not inspected.
  while (scanner.next() != null) {
    // timing only
  }
  scanner.close();
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:13,代码来源:PerformanceEvaluation.java

示例12: ScanRows

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
/** 
 *    Specifies a range of rows to retrieve based on a starting row key
 *    and retrieves up to limit rows.  Each row is passed to the supplied
 *    DataScanner.
 */
/**
 * Scans rows starting at the row key built from {@code startDate} and
 * {@code symbol}, passing each retrieved row to the supplied
 * {@code DataScanner} until {@code limit} rows have been processed or the
 * table is exhausted.
 *
 * @param startDate date component of the starting row key
 * @param symbol    symbol component of the starting row key
 * @param limit     maximum number of rows to process
 * @param scanner   callback invoked once per retrieved row
 * @throws IOException if the connection, table lookup, or scan fails
 */
public void ScanRows(String startDate, String symbol, 
         int limit, DataScanner scanner) throws IOException {
    ResultScanner results = null;
    try (Connection conn = ConnectionFactory.createConnection(config)){
        // Get the table
        Table table = conn.getTable(TableName.valueOf(TABLE_NAME));
        // Create the scan
        Scan scan = new Scan();
        // start at a specific rowkey. 
        scan.setStartRow(makeKey(startDate, symbol));
        // Tell the server not to cache more than limit rows 
        // since we won't need them
        scan.setCaching(limit);
        // Server-side page cap. A scan spanning several regions can still
        // return more than limit rows, hence the client-side check below.
        scan.setFilter(new PageFilter(limit));
        // Get the scan results
        results = table.getScanner(scan);
        // Process at most limit rows. (The previous check, count++ >= limit
        // AFTER processing, was off by one and handled limit + 1 rows.)
        int count = 0;
        for ( Result r : results ) {
            if ( count >= limit ) break;
            scanner.ProcessRow(r);
            count++;
        }
    }
    finally {
        // ResultScanner must be closed.
        if ( results != null ) results.close();         
    }
}
 
开发者ID:ggraham-412,项目名称:HBaseJavaExample,代码行数:35,代码来源:BarDatabase.java

示例13: setFilter

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
/**
 * Applies the given filter to the managed scan.
 *
 * <p>Page filters are validated first: a {@code PageFilter} whose page size
 * exceeds {@code MAX_DATA_PER_SCAN} is rejected.
 *
 * @param filter the filter to apply
 * @return this manager, for call chaining
 * @throws IllegalArgumentException if a PageFilter's page size exceeds the maximum
 */
public HbaseScanManager setFilter(Filter filter) {
    if (filter instanceof PageFilter && isPageLimitExceeded((PageFilter) filter)) {
        // Fixed grammar of the original message ("limit it to big").
        throw new IllegalArgumentException("Page size limit is too big, should be smaller than: "
                + MAX_DATA_PER_SCAN);
    }

    scan.setFilter(filter);
    return this;
}
 
开发者ID:enableiot,项目名称:iotanalytics-backend,代码行数:10,代码来源:HbaseScanManager.java

示例14: find

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
@Override
public List<User> find(String startRow, long pageSize) {
	// A null start row means "scan from the beginning of the table".
	Scan scan = new Scan();
	scan.setStartRow(Bytes.toBytes(startRow == null ? "" : startRow));
	// Server-side paging: stop after pageSize rows.
	scan.setFilter(new PageFilter(pageSize));
	// TODO order and sort
	return find(User.TB_NAME, scan, getRowMapper(User.CF_KEY, type));
}
 
开发者ID:geosmart,项目名称:me.demo.hadoop,代码行数:13,代码来源:UserDaoImpl.java

示例15: testRow

import org.apache.hadoop.hbase.filter.PageFilter; //导入依赖的package包/类
@Override
void testRow(final int i) throws IOException {
  // Scan a single column starting from a random row.
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  // Optional filter-all plus a 120-row page cap that also ends the scan.
  FilterList filters = new FilterList();
  if (opts.filterAll) {
    filters.addFilter(new FilterAllFilter());
  }
  filters.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(filters);
  ResultScanner scanner = this.table.getScanner(scan);
  while (scanner.next() != null) {
    // results are discarded; this test measures scan throughput only
  }
  scanner.close();
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:15,代码来源:PerformanceEvaluation.java


注:本文中的org.apache.hadoop.hbase.filter.PageFilter类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。