本文整理汇总了Java中org.apache.hadoop.hbase.filter.RandomRowFilter类的典型用法代码示例。如果您正苦于以下问题:Java RandomRowFilter类的具体用法?Java RandomRowFilter怎么用?Java RandomRowFilter使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
RandomRowFilter类属于org.apache.hadoop.hbase.filter包,在下文中一共展示了RandomRowFilter类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testNoPartialResultsWhenRowFilterPresent
import org.apache.hadoop.hbase.filter.RandomRowFilter; //导入依赖的package包/类
/**
 * Verifies that a scan whose filter reports
 * {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} == true never yields partial
 * results: the entire row must be read before the filter's include/exclude decision can be made.
 */
@Test
public void testNoPartialResultsWhenRowFilterPresent() throws Exception {
  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  // A row filter forces whole-row evaluation on the server, so partials must be suppressed
  // even though this scan explicitly allows them.
  scan.setFilter(new RandomRowFilter(1.0f));
  ResultScanner scanner = TABLE.getScanner(scan);
  for (Result result = scanner.next(); result != null; result = scanner.next()) {
    assertFalse(result.isPartial());
  }
  scanner.close();
}
示例2: testRandomRowFilter
import org.apache.hadoop.hbase.filter.RandomRowFilter; //导入依赖的package包/类
/**
 * Writes 100 rows under the "trandA" prefix, scans them back through a
 * {@link RandomRowFilter} with p = 0.5, and checks that roughly half of the rows
 * survive the sampling (within a generous 25..75 tolerance band).
 *
 * <p>Fix: the original version leaked both the {@link Table} and the
 * {@link ResultScanner}; both are now closed deterministically.
 */
@Test
public void testRandomRowFilter() throws IOException {
  byte[][] rowKeys = dataHelper.randomData("trandA", 100);
  byte[] qualifier = dataHelper.randomData("trandq-");
  byte[] value = dataHelper.randomData("value-");
  Table table = getConnection().getTable(TABLE_NAME);
  try {
    // Seed the table with 100 rows sharing the "trandA" prefix.
    List<Put> puts = new ArrayList<>();
    for (byte[] rowKey : rowKeys) {
      Put put = new Put(rowKey);
      put.addColumn(COLUMN_FAMILY, qualifier, value);
      puts.add(put);
    }
    table.put(puts);
    // Scan exactly the prefix range written above.
    Scan scan = new Scan();
    scan.setStartRow(Bytes.toBytes("trandA"));
    scan.setStopRow(Bytes.toBytes("trandB"));
    scan.setFilter(new RandomRowFilter(0.5f));
    // try-with-resources closes the scanner (previously leaked).
    try (ResultScanner scanner = table.getScanner(scan)) {
      Result[] results = scanner.next(100);
      Assert.assertTrue(
          String.format("Using p=0.5, expected half of added rows, found %s", results.length),
          25 <= results.length && results.length <= 75);
    }
  } finally {
    table.close();
  }
}
示例3: testNoPartialResultsWhenRowFilterPresent
import org.apache.hadoop.hbase.filter.RandomRowFilter; //导入依赖的package包/类
/**
 * Verifies that a scan whose filter reports
 * {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} == true never yields partial
 * results: the full row has to be read before the include/exclude decision can be made.
 */
@Test
public void testNoPartialResultsWhenRowFilterPresent() throws Exception {
  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  // A row filter requires whole-row evaluation server side, so the scanner must never
  // hand back a Result that could still grow.
  scan.setFilter(new RandomRowFilter(1.0f));
  ResultScanner scanner = TABLE.getScanner(scan);
  Result result;
  while ((result = scanner.next()) != null) {
    // mayHaveMoreCellsInRow() == true would indicate a partial row.
    assertFalse(result.mayHaveMoreCellsInRow());
  }
  scanner.close();
}
示例4: adapt
import org.apache.hadoop.hbase.filter.RandomRowFilter; //导入依赖的package包/类
/**
 * Translates an HBase {@link RandomRowFilter} into a Bigtable row-sample {@link RowFilter},
 * carrying the filter's sampling probability across unchanged.
 */
@Override
public RowFilter adapt(FilterAdapterContext context, RandomRowFilter filter) throws IOException {
  float chance = filter.getChance();
  RowFilter.Builder builder = RowFilter.newBuilder();
  builder.setRowSampleFilter(chance);
  return builder.build();
}
示例5: buildAdapter
import org.apache.hadoop.hbase.filter.RandomRowFilter; //导入依赖的package包/类
/**
 * Creates a new {@link FilterAdapter} pre-registered with an adapter for every HBase
 * filter type this client supports.
 */
public static FilterAdapter buildAdapter() {
  FilterAdapter adapter = new FilterAdapter();
  adapter.addFilterAdapter(ColumnPrefixFilter.class, new ColumnPrefixFilterAdapter());
  adapter.addFilterAdapter(ColumnRangeFilter.class, new ColumnRangeFilterAdapter());
  adapter.addFilterAdapter(KeyOnlyFilter.class, new KeyOnlyFilterAdapter());
  adapter.addFilterAdapter(
      MultipleColumnPrefixFilter.class, new MultipleColumnPrefixFilterAdapter());
  adapter.addFilterAdapter(TimestampsFilter.class, new TimestampsFilterAdapter());
  // The value-filter adapter instance is shared with the single-column-value adapters below.
  ValueFilterAdapter valueFilterAdapter = new ValueFilterAdapter();
  adapter.addFilterAdapter(ValueFilter.class, valueFilterAdapter);
  SingleColumnValueFilterAdapter scvfa = new SingleColumnValueFilterAdapter(valueFilterAdapter);
  adapter.addFilterAdapter(SingleColumnValueFilter.class, scvfa);
  adapter.addFilterAdapter(
      SingleColumnValueExcludeFilter.class, new SingleColumnValueExcludeFilterAdapter(scvfa));
  adapter.addFilterAdapter(ColumnPaginationFilter.class, new ColumnPaginationFilterAdapter());
  adapter.addFilterAdapter(FirstKeyOnlyFilter.class, new FirstKeyOnlyFilterAdapter());
  adapter.addFilterAdapter(ColumnCountGetFilter.class, new ColumnCountGetFilterAdapter());
  adapter.addFilterAdapter(RandomRowFilter.class, new RandomRowFilterAdapter());
  adapter.addFilterAdapter(PrefixFilter.class, new PrefixFilterAdapter());
  adapter.addFilterAdapter(QualifierFilter.class, new QualifierFilterAdapter());
  // Handing the FilterAdapter to FilterListAdapter is a bit unfortunate, but it makes
  // adapting a FilterList's sub-filters simpler.
  FilterListAdapter filterListAdapter = new FilterListAdapter(adapter);
  // FilterListAdapter implements UnsupportedStatusCollector, so register it as the third
  // argument to addFilterAdapter() where possible.
  adapter.addFilterAdapter(FilterList.class, filterListAdapter, filterListAdapter);
  return adapter;
}
示例6: isFilterSupported
import org.apache.hadoop.hbase.filter.RandomRowFilter; //导入依赖的package包/类
/**
 * {@link RandomRowFilter} is unconditionally supported by this adapter, so the check
 * always succeeds regardless of the filter's configuration.
 */
@Override
public FilterSupportStatus isFilterSupported(FilterAdapterContext context, RandomRowFilter filter) {
  return FilterSupportStatus.SUPPORTED;
}