This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter. If you are unsure what SingleColumnValueExcludeFilter does or how to use it, the curated class code examples below should help.
The SingleColumnValueExcludeFilter class belongs to the org.apache.hadoop.hbase.filter package. Nine code examples of the class are shown below, ordered by popularity.
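Before the examples, a minimal hedged sketch of the filter's typical role: it matches rows in which a reference column compares as specified against a given value, then excludes that reference column from the returned cells. The table and column names below ("users", "cf", "status", "active") are illustrative placeholders, not taken from the examples.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleColumnValueExcludeFilterSketch {
  // Placeholder names: "users" table, "cf" family, "status" qualifier.
  static void scanActiveRows(Connection connection) throws IOException {
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("status");
    SingleColumnValueExcludeFilter filter = new SingleColumnValueExcludeFilter(
        family, qualifier, CompareOp.EQUAL, Bytes.toBytes("active"));
    filter.setFilterIfMissing(true); // drop rows that have no "status" column at all
    Scan scan = new Scan();
    scan.addFamily(family);
    scan.setFilter(filter);
    try (Table table = connection.getTable(TableName.valueOf("users"));
         ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        // Matching rows come back, but the "status" cell itself is excluded.
        System.out.println(Bytes.toString(result.getRow()));
      }
    }
  }
}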
Example 1: makeExcludeMatchColumnFilter
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; // import the required package/class
private RowFilter makeExcludeMatchColumnFilter(
Scan scan, SingleColumnValueExcludeFilter filter) {
String family = Bytes.toString(scan.getFamilies()[0]);
ByteString qualifier = ByteString.copyFrom(filter.getQualifier());
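// Bigtable has no direct "exclude one column" primitive, so the adapter
// interleaves two exclusive column ranges over the single scanned family:
// everything strictly before the matched qualifier plus everything strictly
// after it, i.e. the whole family minus the matched column.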
return RowFilter.newBuilder()
.setInterleave(
Interleave.newBuilder()
.addFilters(
RowFilter.newBuilder()
.setColumnRangeFilter(
ColumnRange.newBuilder()
.setFamilyName(family)
.setEndQualifierExclusive(qualifier)))
.addFilters(
RowFilter.newBuilder()
.setColumnRangeFilter(
ColumnRange.newBuilder()
.setFamilyName(family)
.setStartQualifierExclusive(qualifier))))
.build();
}
Example 2: adapt
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; // import the required package/class
@Override
public RowFilter adapt(FilterAdapterContext context, SingleColumnValueExcludeFilter filter)
throws IOException {
RowFilter excludeMatchColumnFilter =
makeExcludeMatchColumnFilter(context.getScan(), filter);
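// Chain the delegate's SingleColumnValueFilter adaptation (the value-match
// predicate) with the column-exclusion filter built above, so matching rows
// are returned without the matched reference column.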
return RowFilter.newBuilder()
.setChain(
Chain.newBuilder()
.addFilters(delegateAdapter.adapt(context, filter))
.addFilters(excludeMatchColumnFilter))
.build();
}
Example 3: isFilterSupported
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; // import the required package/class
@Override
public FilterSupportStatus isFilterSupported(
FilterAdapterContext context, SingleColumnValueExcludeFilter filter) {
FilterSupportStatus delegateStatus = delegateAdapter.isFilterSupported(context, filter);
if (!delegateStatus.isSupported()) {
return delegateStatus;
}
// This filter can only be adapted when there's a single family.
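// (makeExcludeMatchColumnFilter in Example 1 reads scan.getFamilies()[0],
// so the scanned family must be unambiguous.)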
if (context.getScan().numFamilies() != 1) {
return UNSUPPORTED_STATUS;
}
return FilterSupportStatus.SUPPORTED;
}
Example 4: testSingleColumnValueExcludeFilter
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; // import the required package/class
@Test
public void testSingleColumnValueExcludeFilter() throws IOException {
byte[] rowKey1 = dataHelper.randomData("scvfrk1");
byte[] qualifier1 = dataHelper.randomData("scvfq1");
byte[] qualifier2 = dataHelper.randomData("scvfq2");
byte[] value1_1 = dataHelper.randomData("val1.1");
byte[] value2_1 = dataHelper.randomData("val2.1");
Table table = getConnection().getTable(TABLE_NAME);
Put put = new Put(rowKey1);
put.addColumn(COLUMN_FAMILY, qualifier1, value1_1);
put.addColumn(COLUMN_FAMILY, qualifier2, value2_1);
table.put(put);
Scan scan = new Scan();
scan.addFamily(COLUMN_FAMILY);
SingleColumnValueExcludeFilter excludeFilter =
new SingleColumnValueExcludeFilter(COLUMN_FAMILY, qualifier1, CompareOp.EQUAL, value1_1);
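// filterIfMissing(true) drops rows that lack qualifier1 entirely;
// latestVersionOnly(false) lets any version of qualifier1 satisfy the match.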
excludeFilter.setFilterIfMissing(true);
excludeFilter.setLatestVersionOnly(false);
scan.setFilter(excludeFilter);
ResultScanner scanner = table.getScanner(scan);
Result[] results = scanner.next(10);
// Expect 1 row with value2_1 in qualifier2:
Assert.assertEquals(1, results.length);
Result result = results[0];
Assert.assertEquals(1, result.size());
Assert.assertTrue(result.containsColumn(COLUMN_FAMILY, qualifier2));
Assert.assertFalse(result.containsColumn(COLUMN_FAMILY, qualifier1));
Assert.assertArrayEquals(
value2_1,
CellUtil.cloneValue(result.getColumnLatestCell(COLUMN_FAMILY, qualifier2)));
}
Example 5: testScanner_JoinedScanners
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; // import the required package/class
/**
* Added for HBASE-5416
*
* Here we test the scan optimization that applies when only a subset of the
* column families (CFs) is referenced in filter conditions.
*/
public void testScanner_JoinedScanners() throws IOException {
byte [] tableName = Bytes.toBytes("testTable");
byte [] cf_essential = Bytes.toBytes("essential");
byte [] cf_joined = Bytes.toBytes("joined");
byte [] cf_alpha = Bytes.toBytes("alpha");
this.region = initHRegion(tableName, getName(), conf, cf_essential, cf_joined, cf_alpha);
try {
byte [] row1 = Bytes.toBytes("row1");
byte [] row2 = Bytes.toBytes("row2");
byte [] row3 = Bytes.toBytes("row3");
byte [] col_normal = Bytes.toBytes("d");
byte [] col_alpha = Bytes.toBytes("a");
byte [] filtered_val = Bytes.toBytes(3);
Put put = new Put(row1);
put.add(cf_essential, col_normal, Bytes.toBytes(1));
put.add(cf_joined, col_alpha, Bytes.toBytes(1));
region.put(put);
put = new Put(row2);
put.add(cf_essential, col_alpha, Bytes.toBytes(2));
put.add(cf_joined, col_normal, Bytes.toBytes(2));
put.add(cf_alpha, col_alpha, Bytes.toBytes(2));
region.put(put);
put = new Put(row3);
put.add(cf_essential, col_normal, filtered_val);
put.add(cf_joined, col_normal, filtered_val);
region.put(put);
// Check two things:
// 1. result list contains expected values
// 2. result list is sorted properly
Scan scan = new Scan();
Filter filter = new SingleColumnValueExcludeFilter(cf_essential, col_normal,
CompareOp.NOT_EQUAL, filtered_val);
scan.setFilter(filter);
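// HBASE-5416: with load-column-families-on-demand enabled, only the
// "essential" family referenced by the filter is read eagerly; the other
// families are joined in lazily, and only for rows that pass the filter.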
scan.setLoadColumnFamiliesOnDemand(true);
InternalScanner s = region.getScanner(scan);
List<KeyValue> results = new ArrayList<KeyValue>();
assertTrue(s.next(results));
assertEquals(results.size(), 1);
results.clear();
assertTrue(s.next(results));
assertEquals(results.size(), 3);
assertTrue("orderCheck", results.get(0).matchingFamily(cf_alpha));
assertTrue("orderCheck", results.get(1).matchingFamily(cf_essential));
assertTrue("orderCheck", results.get(2).matchingFamily(cf_joined));
results.clear();
assertFalse(s.next(results));
assertEquals(results.size(), 0);
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
Example 6: buildAdapter
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; // import the required package/class
/**
* Create a new FilterAdapter
*/
public static FilterAdapter buildAdapter() {
FilterAdapter adapter = new FilterAdapter();
adapter.addFilterAdapter(
ColumnPrefixFilter.class, new ColumnPrefixFilterAdapter());
adapter.addFilterAdapter(
ColumnRangeFilter.class, new ColumnRangeFilterAdapter());
adapter.addFilterAdapter(
KeyOnlyFilter.class, new KeyOnlyFilterAdapter());
adapter.addFilterAdapter(
MultipleColumnPrefixFilter.class, new MultipleColumnPrefixFilterAdapter());
adapter.addFilterAdapter(
TimestampsFilter.class, new TimestampsFilterAdapter());
ValueFilterAdapter valueFilterAdapter = new ValueFilterAdapter();
adapter.addFilterAdapter(
ValueFilter.class, valueFilterAdapter);
SingleColumnValueFilterAdapter scvfa =
new SingleColumnValueFilterAdapter(valueFilterAdapter);
adapter.addFilterAdapter(
SingleColumnValueFilter.class, scvfa);
adapter.addFilterAdapter(
SingleColumnValueExcludeFilter.class,
new SingleColumnValueExcludeFilterAdapter(scvfa));
adapter.addFilterAdapter(
ColumnPaginationFilter.class, new ColumnPaginationFilterAdapter());
adapter.addFilterAdapter(
FirstKeyOnlyFilter.class, new FirstKeyOnlyFilterAdapter());
adapter.addFilterAdapter(
ColumnCountGetFilter.class, new ColumnCountGetFilterAdapter());
adapter.addFilterAdapter(
RandomRowFilter.class, new RandomRowFilterAdapter());
adapter.addFilterAdapter(
PrefixFilter.class, new PrefixFilterAdapter());
adapter.addFilterAdapter(
QualifierFilter.class, new QualifierFilterAdapter());
// Passing the FilterAdapter in to the FilterListAdapter is a bit
// unfortunate, but makes adapting the FilterList's subfilters simpler.
FilterListAdapter filterListAdapter = new FilterListAdapter(adapter);
// FilterList implements UnsupportedStatusCollector so it should
// be used when possible (third parameter to addFilterAdapter()).
adapter.addFilterAdapter(
FilterList.class, filterListAdapter, filterListAdapter);
return adapter;
}
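As a hedged sketch of consuming the composed adapter: the code below only uses what Example 6 shows (the static buildAdapter factory; FilterAdapter is the class from that example, so its import is omitted), while the commented-out adaptFilter(...) call is an assumption about an entry point this snippet does not confirm.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterAdapterUsageSketch {
  public static void main(String[] args) {
    // Build an adapter with all standard per-filter-class adapters registered.
    FilterAdapter adapter = FilterAdapter.buildAdapter();

    Scan scan = new Scan();
    scan.setFilter(new PrefixFilter(Bytes.toBytes("user-")));

    // ASSUMPTION: some entry point on FilterAdapter (the name adaptFilter and
    // its signature are hypothetical) dispatches to the adapter registered for
    // the filter's concrete class, producing a Bigtable RowFilter.
    // RowFilter rowFilter = adapter.adaptFilter(context, scan.getFilter());
  }
}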
Example 7: testScanner_JoinedScanners
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; // import the required package/class
/**
* Added for HBASE-5416
*
* Here we test the scan optimization that applies when only a subset of the
* column families (CFs) is referenced in filter conditions.
*/
@Test
public void testScanner_JoinedScanners() throws IOException {
byte[] cf_essential = Bytes.toBytes("essential");
byte[] cf_joined = Bytes.toBytes("joined");
byte[] cf_alpha = Bytes.toBytes("alpha");
this.region = initHRegion(tableName, getName(), CONF, cf_essential, cf_joined, cf_alpha);
try {
byte[] row1 = Bytes.toBytes("row1");
byte[] row2 = Bytes.toBytes("row2");
byte[] row3 = Bytes.toBytes("row3");
byte[] col_normal = Bytes.toBytes("d");
byte[] col_alpha = Bytes.toBytes("a");
byte[] filtered_val = Bytes.toBytes(3);
Put put = new Put(row1);
put.add(cf_essential, col_normal, Bytes.toBytes(1));
put.add(cf_joined, col_alpha, Bytes.toBytes(1));
region.put(put);
put = new Put(row2);
put.add(cf_essential, col_alpha, Bytes.toBytes(2));
put.add(cf_joined, col_normal, Bytes.toBytes(2));
put.add(cf_alpha, col_alpha, Bytes.toBytes(2));
region.put(put);
put = new Put(row3);
put.add(cf_essential, col_normal, filtered_val);
put.add(cf_joined, col_normal, filtered_val);
region.put(put);
// Check two things:
// 1. result list contains expected values
// 2. result list is sorted properly
Scan scan = new Scan();
Filter filter = new SingleColumnValueExcludeFilter(cf_essential, col_normal,
CompareOp.NOT_EQUAL, filtered_val);
scan.setFilter(filter);
scan.setLoadColumnFamiliesOnDemand(true);
InternalScanner s = region.getScanner(scan);
List<Cell> results = new ArrayList<Cell>();
assertTrue(s.next(results));
assertEquals(results.size(), 1);
results.clear();
assertTrue(s.next(results));
assertEquals(results.size(), 3);
assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha));
assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential));
assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined));
results.clear();
assertFalse(s.next(results));
assertEquals(results.size(), 0);
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
Example 8: testScanner_JoinedScanners
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; // import the required package/class
/**
* Added for HBASE-5416
*
* Here we test the scan optimization that applies when only a subset of the
* column families (CFs) is referenced in filter conditions.
*/
@Test
public void testScanner_JoinedScanners() throws IOException {
byte[] cf_essential = Bytes.toBytes("essential");
byte[] cf_joined = Bytes.toBytes("joined");
byte[] cf_alpha = Bytes.toBytes("alpha");
this.region = initHRegion(tableName, method, CONF, cf_essential, cf_joined, cf_alpha);
try {
byte[] row1 = Bytes.toBytes("row1");
byte[] row2 = Bytes.toBytes("row2");
byte[] row3 = Bytes.toBytes("row3");
byte[] col_normal = Bytes.toBytes("d");
byte[] col_alpha = Bytes.toBytes("a");
byte[] filtered_val = Bytes.toBytes(3);
Put put = new Put(row1);
put.addColumn(cf_essential, col_normal, Bytes.toBytes(1));
put.addColumn(cf_joined, col_alpha, Bytes.toBytes(1));
region.put(put);
put = new Put(row2);
put.addColumn(cf_essential, col_alpha, Bytes.toBytes(2));
put.addColumn(cf_joined, col_normal, Bytes.toBytes(2));
put.addColumn(cf_alpha, col_alpha, Bytes.toBytes(2));
region.put(put);
put = new Put(row3);
put.addColumn(cf_essential, col_normal, filtered_val);
put.addColumn(cf_joined, col_normal, filtered_val);
region.put(put);
// Check two things:
// 1. result list contains expected values
// 2. result list is sorted properly
Scan scan = new Scan();
Filter filter = new SingleColumnValueExcludeFilter(cf_essential, col_normal,
CompareOp.NOT_EQUAL, filtered_val);
scan.setFilter(filter);
scan.setLoadColumnFamiliesOnDemand(true);
InternalScanner s = region.getScanner(scan);
List<Cell> results = new ArrayList<>();
assertTrue(s.next(results));
assertEquals(1, results.size());
results.clear();
assertTrue(s.next(results));
assertEquals(3, results.size());
assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha));
assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential));
assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined));
results.clear();
assertFalse(s.next(results));
assertEquals(0, results.size());
} finally {
HBaseTestingUtility.closeRegionAndWAL(this.region);
this.region = null;
}
}
Example 9: testScanner_JoinedScanners
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; // import the required package/class
/**
* Added for HBASE-5416
*
* Here we test the scan optimization that applies when only a subset of the
* column families (CFs) is referenced in filter conditions.
*/
@Test
public void testScanner_JoinedScanners() throws IOException {
byte[] cf_essential = Bytes.toBytes("essential");
byte[] cf_joined = Bytes.toBytes("joined");
byte[] cf_alpha = Bytes.toBytes("alpha");
this.region = initHRegion(tableName, getName(), conf, cf_essential, cf_joined, cf_alpha);
try {
byte[] row1 = Bytes.toBytes("row1");
byte[] row2 = Bytes.toBytes("row2");
byte[] row3 = Bytes.toBytes("row3");
byte[] col_normal = Bytes.toBytes("d");
byte[] col_alpha = Bytes.toBytes("a");
byte[] filtered_val = Bytes.toBytes(3);
Put put = new Put(row1);
put.add(cf_essential, col_normal, Bytes.toBytes(1));
put.add(cf_joined, col_alpha, Bytes.toBytes(1));
region.put(put);
put = new Put(row2);
put.add(cf_essential, col_alpha, Bytes.toBytes(2));
put.add(cf_joined, col_normal, Bytes.toBytes(2));
put.add(cf_alpha, col_alpha, Bytes.toBytes(2));
region.put(put);
put = new Put(row3);
put.add(cf_essential, col_normal, filtered_val);
put.add(cf_joined, col_normal, filtered_val);
region.put(put);
// Check two things:
// 1. result list contains expected values
// 2. result list is sorted properly
Scan scan = new Scan();
Filter filter = new SingleColumnValueExcludeFilter(cf_essential, col_normal,
CompareOp.NOT_EQUAL, filtered_val);
scan.setFilter(filter);
scan.setLoadColumnFamiliesOnDemand(true);
InternalScanner s = region.getScanner(scan);
List<Cell> results = new ArrayList<Cell>();
assertTrue(s.next(results));
assertEquals(results.size(), 1);
results.clear();
assertTrue(s.next(results));
assertEquals(results.size(), 3);
assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha));
assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential));
assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined));
results.clear();
assertFalse(s.next(results));
assertEquals(results.size(), 0);
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}