This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.SubstringComparator. If you are wondering what SubstringComparator is for and how to use it, the curated examples below should help.
SubstringComparator belongs to the org.apache.hadoop.hbase.filter package. Seven code examples are shown below, ordered by popularity.
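Before the examples, a minimal sketch of the most common pattern: pairing SubstringComparator with a ValueFilter so a scan only returns cells whose value contains a given substring (the comparison is case-insensitive). The connection setup and the "users" table name here are placeholders, not taken from the examples below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;

public static void scanForSubstring() throws IOException {
  Configuration conf = HBaseConfiguration.create();
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Table table = conn.getTable(TableName.valueOf("users"))) { // placeholder table name
    Scan scan = new Scan();
    // keep only cells whose value contains "one" (case-insensitive substring match)
    scan.setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("one")));
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        System.out.println(r);
      }
    }
  }
}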
Example 1: ByteArrayComparableModel
import org.apache.hadoop.hbase.filter.SubstringComparator; // import the required package/class
public ByteArrayComparableModel(
    ByteArrayComparable comparator) {
  String typeName = comparator.getClass().getSimpleName();
  ComparatorType type = ComparatorType.valueOf(typeName);
  this.type = typeName;
  switch (type) {
    case BinaryComparator:
    case BinaryPrefixComparator:
      this.value = Base64.encodeBytes(comparator.getValue());
      break;
    case BitComparator:
      this.value = Base64.encodeBytes(comparator.getValue());
      this.op = ((BitComparator) comparator).getOperator().toString();
      break;
    case NullComparator:
      break;
    case RegexStringComparator:
    case SubstringComparator:
      this.value = Bytes.toString(comparator.getValue());
      break;
    default:
      throw new RuntimeException("unhandled filter type: " + type);
  }
}
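Example 1 comes from the HBase REST gateway's ScannerModel: it serializes a comparator for transport, Base64-encoding binary comparators but storing a SubstringComparator's substring as a plain string, since its getValue() returns the UTF-8 bytes of the substring. A hypothetical illustration of what the constructor captures (the assertions in comments are illustrative, not part of the original):

// Hypothetical illustration of the model's contents after construction.
SubstringComparator cmp = new SubstringComparator("abc");
ByteArrayComparableModel model = new ByteArrayComparableModel(cmp);
// model.type is "SubstringComparator" and model.value is "abc"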
Example 2: identify
import org.apache.hadoop.hbase.filter.SubstringComparator; // import the required package/class
@Override
public User identify(String rowKey) {
  Scan scan = new Scan();
  // substring match against the row key (SubstringComparator is case-insensitive)
  Filter f = new RowFilter(CompareOp.EQUAL, new SubstringComparator(rowKey));
  scan.setFilter(f);
  List<User> userList = find(User.TB_NAME, scan, getRowMapper(User.CF_KEY, type));
  return userList.get(0); // assumes at least one row matched
}
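Note that example 2 matches any row whose key merely contains rowKey, and the comparison is case-insensitive, so it can return more than the intended user (and get(0) throws if nothing matches). When the full key is known, an exact comparator is the safer choice; a minimal sketch with the same Scan-based structure:

// Minimal sketch: exact row-key match instead of a substring match.
Scan scan = new Scan();
scan.setFilter(new RowFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(rowKey))));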
Example 3: testFilterList
import org.apache.hadoop.hbase.filter.SubstringComparator; // import the required package/class
@Test
public void testFilterList() throws Exception {
  // Test getting a single row, single key using Row, Qualifier, and Value
  // regular expression and substring filters.
  // Use MUST_PASS_ALL.
  List<Filter> filters = new ArrayList<Filter>();
  filters.add(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2")));
  filters.add(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2")));
  filters.add(new ValueFilter(CompareOp.EQUAL, new SubstringComparator("One")));
  Filter f = new FilterList(Operator.MUST_PASS_ALL, filters);
  Scan s = new Scan();
  s.addFamily(FAMILIES[0]);
  s.setFilter(f);
  KeyValue[] kvs = {
      new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0])
  };
  verifyScanFull(s, kvs);

  // Test getting everything with a MUST_PASS_ONE filter including row, qualifier,
  // value, regular expression and substring filters.
  filters.clear();
  filters.add(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".+Two.+")));
  filters.add(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2")));
  filters.add(new ValueFilter(CompareOp.EQUAL, new SubstringComparator("One")));
  f = new FilterList(Operator.MUST_PASS_ONE, filters);
  s = new Scan();
  s.setFilter(f);
  verifyScanNoEarlyOut(s, numRows, colsPerRow);
}
Example 4: testFilterList
import org.apache.hadoop.hbase.filter.SubstringComparator; // import the required package/class
@Test
public void testFilterList() throws Exception {
  // Test getting a single row, single key using Row, Qualifier, and Value
  // regular expression and substring filters.
  // Use MUST_PASS_ALL.
  List<Filter> filters = new ArrayList<>(3);
  filters.add(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2")));
  filters.add(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2")));
  filters.add(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("One")));
  Filter f = new FilterList(Operator.MUST_PASS_ALL, filters);
  Scan s = new Scan();
  s.addFamily(FAMILIES[0]);
  s.setFilter(f);
  KeyValue[] kvs = {
      new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0])
  };
  verifyScanFull(s, kvs);

  // Test getting everything with a MUST_PASS_ONE filter including row, qualifier,
  // value, regular expression and substring filters.
  filters.clear();
  filters.add(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+Two.+")));
  filters.add(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2")));
  filters.add(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("One")));
  f = new FilterList(Operator.MUST_PASS_ONE, filters);
  s = new Scan();
  s.setFilter(f);
  verifyScanNoEarlyOut(s, numRows, colsPerRow);
}
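Examples 3 and 4 are the same test against two API generations: CompareFilter.CompareOp was deprecated in HBase 2.0 in favor of org.apache.hadoop.hbase.CompareOperator, while the comparator arguments are unchanged. The migration is mechanical, for instance:

// HBase 1.x style (deprecated since 2.0):
Filter legacy = new ValueFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("One"));
// HBase 2.x style:
Filter current = new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("One"));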
Example 5: testScanWithLimit
import org.apache.hadoop.hbase.filter.SubstringComparator; // import the required package/class
@Test
public void testScanWithLimit() {
  int kv_number = 0;
  try {
    Scan scan = new Scan();
    // set the batch size to 2, which means each Result should contain at most 2 KVs
    scan.setBatch(2);
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes("f1"), Bytes.toBytes("c5"),
        CompareFilter.CompareOp.EQUAL, new SubstringComparator("2_c5"));
    // add the filter after the batch size is defined
    scan.setFilter(filter);
    HTable table = new HTable(conf, name);
    ResultScanner scanner = table.getScanner(scan);
    // If the combination were allowed, we would expect the following rows:
    // row2 => <f1:c1, 2_c1>, <f1:c2, 2_c2>,
    // row2 => <f1:c3, 2_c3>, <f1:c4, 2_c4>,
    // row2 => <f1:c5, 2_c5>
    for (Result result : scanner) {
      for (KeyValue kv : result.list()) {
        kv_number++;
        LOG.debug(kv_number + ". kv: " + kv);
      }
    }
    scanner.close();
    table.close();
  } catch (Exception e) {
    // no correct result is expected; the filter/batch combination must throw
    assertNotNull("No IncompatibleFilterException caught", e);
  }
  LOG.debug("check the fetched kv number");
  assertEquals("We should not get result(s) returned.", 0, kv_number);
}
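Example 5 relies on a server-side check: a Scan with a positive batch size cannot be combined with a filter that operates on whole rows via filterRow(), and SingleColumnValueFilter is such a filter, so opening the scanner fails with IncompatibleFilterException and no KVs are ever counted. A sketch of a compatible variant, under the same assumed table layout, simply drops the batch limit:

Scan scan = new Scan();
// no setBatch() here, so the row-level filter is allowed
scan.setFilter(new SingleColumnValueFilter(Bytes.toBytes("f1"), Bytes.toBytes("c5"),
    CompareFilter.CompareOp.EQUAL, new SubstringComparator("2_c5")));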
Example 6: testGetWithFilter
import org.apache.hadoop.hbase.filter.SubstringComparator; // import the required package/class
@Test
public void testGetWithFilter() throws IOException, InterruptedException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] col1 = Bytes.toBytes("col1");
  byte[] value1 = Bytes.toBytes("value1");
  byte[] value2 = Bytes.toBytes("value2");

  final int maxVersions = 3;
  HColumnDescriptor hcd = new HColumnDescriptor(fam1);
  hcd.setMaxVersions(maxVersions);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testFilterAndColumnTracker"));
  htd.addFamily(hcd);
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
  final WAL wal = HBaseTestingUtility.createWal(TEST_UTIL.getConfiguration(), logDir, info);
  this.region = TEST_UTIL.createLocalHRegion(info, htd, wal);

  try {
    // Put 4 versions into the memstore
    long ts = 0;
    Put put = new Put(row1, ts);
    put.addColumn(fam1, col1, value1);
    region.put(put);
    put = new Put(row1, ts + 1);
    put.addColumn(fam1, col1, Bytes.toBytes("filter1"));
    region.put(put);
    put = new Put(row1, ts + 2);
    put.addColumn(fam1, col1, Bytes.toBytes("filter2"));
    region.put(put);
    put = new Put(row1, ts + 3);
    put.addColumn(fam1, col1, value2);
    region.put(put);

    Get get = new Get(row1);
    get.setMaxVersions();
    Result res = region.get(get);
    // Get 3 versions; the oldest version has dropped out of the user's view
    assertEquals(maxVersions, res.size());

    get.setFilter(new ValueFilter(CompareOp.EQUAL, new SubstringComparator("value")));
    res = region.get(get);
    // With the value filter, the oldest version should still be gone from the
    // user's view, and only one key value should be returned
    assertEquals(1, res.size());
    assertTrue(CellUtil.matchingValue(new KeyValue(row1, fam1, col1, value2), res.rawCells()[0]));
    assertEquals(ts + 3, res.rawCells()[0].getTimestamp());

    region.flush(true);
    region.compact(true);
    Thread.sleep(1000);
    res = region.get(get);
    // After the flush and compaction, the result should be consistent with the previous one
    assertEquals(1, res.size());
    assertTrue(CellUtil.matchingValue(new KeyValue(row1, fam1, col1, value2), res.rawCells()[0]));
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}
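The point of example 6 is that the column family's max-versions setting bounds what any filter can see: the ValueFilter runs only over the three retained versions, so the oldest put never reappears. On HBase 2.x the version request is spelled slightly differently; a sketch of the equivalent Get, assuming the same row and filter:

Get get = new Get(row1);
get.readAllVersions(); // HBase 2.x replacement for the deprecated setMaxVersions()
get.setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value")));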
Example 7: testFilterWrapper
import org.apache.hadoop.hbase.filter.SubstringComparator; // import the required package/class
@Test
public void testFilterWrapper() {
  int kv_number = 0;
  int row_number = 0;
  try {
    Scan scan = new Scan();
    List<Filter> fs = new ArrayList<Filter>();

    DependentColumnFilter f1 = new DependentColumnFilter(Bytes.toBytes("f1"),
        Bytes.toBytes("c5"), true, CompareFilter.CompareOp.EQUAL,
        new SubstringComparator("c5"));
    PageFilter f2 = new PageFilter(2);
    fs.add(f1);
    fs.add(f2);
    FilterList filter = new FilterList(fs);

    scan.setFilter(filter);
    HTable table = new HTable(conf, name);
    ResultScanner scanner = table.getScanner(scan);

    // row2 (c1-c4) and row3 (c1-c4) are returned
    for (Result result : scanner) {
      row_number++;
      for (KeyValue kv : result.list()) {
        LOG.debug(kv_number + ". kv: " + kv);
        kv_number++;
        assertEquals("Returned row is not correct",
            "row" + (row_number + 1), new String(kv.getRow()));
      }
    }

    scanner.close();
    table.close();
  } catch (Exception e) {
    // the scan is expected to succeed, so any exception fails the test
    assertNull("Exception happens in scan", e);
  }
  LOG.debug("check the fetched kv number");
  assertEquals("We should get 8 results returned.", 8, kv_number);
  assertEquals("We should get 2 rows returned", 2, row_number);
}
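For reference, the DependentColumnFilter in example 7 decomposes as below; this is a commented restatement of the constructor call above, not new behavior. With dropDependentColumn set, the reference column f1:c5 is checked but removed from the results, which is why only c1 through c4 come back for each of the two rows allowed by the PageFilter.

DependentColumnFilter f1 = new DependentColumnFilter(
    Bytes.toBytes("f1"),            // reference family
    Bytes.toBytes("c5"),            // reference qualifier
    true,                           // dropDependentColumn: strip f1:c5 itself from results
    CompareFilter.CompareOp.EQUAL,  // comparison applied to the reference cell's value...
    new SubstringComparator("c5")); // ...which must contain "c5" (case-insensitive)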