This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.FilterList.Operator. If you are unsure what the Operator class does or how to use it, the curated code examples below may help.
Operator is an enum nested inside org.apache.hadoop.hbase.filter.FilterList. Fifteen code examples of the Operator class are shown below, ordered by popularity.
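Before the collected examples, here is a minimal orientation sketch. Operator has two values: MUST_PASS_ALL (the FilterList behaves like a logical AND of its child filters) and MUST_PASS_ONE (a logical OR). The class name OperatorSketch, the table name my_table, and the row/column prefixes are placeholders, and the snippet assumes a reasonably recent HBase client API (Connection/Table); it is an illustration, not code from any of the projects quoted below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class OperatorSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("my_table"))) {
      // MUST_PASS_ALL: a cell is returned only if every child filter accepts it (logical AND).
      FilterList and = new FilterList(Operator.MUST_PASS_ALL,
          new PrefixFilter(Bytes.toBytes("user_")),          // row key prefix
          new ColumnPrefixFilter(Bytes.toBytes("addr_")));    // column qualifier prefix
      // MUST_PASS_ONE: a cell is returned if at least one child filter accepts it (logical OR).
      FilterList or = new FilterList(Operator.MUST_PASS_ONE,
          new PrefixFilter(Bytes.toBytes("user_")),
          new PrefixFilter(Bytes.toBytes("admin_")));
      Scan scan = new Scan();
      scan.setFilter(and); // swap in `or` for the OR behavior
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toString(result.getRow()));
        }
      }
    }
  }
}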
Example 1: testScanWithFilter
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
@Test
public void testScanWithFilter() throws IOException {
  prepareScanData(TRANSACTION_COLUMNS);
  writeData(COLUMN, lastTs(prewriteTs), ANOTHER_VALUE);
  ValueFilter valueFilter = new ValueFilter(CompareOp.EQUAL, new BinaryComparator(ANOTHER_VALUE));
  PrefixFilter prefixFilter = new PrefixFilter(ANOTHER_ROW);
  FilterList filterList = new FilterList(); // default operator is MUST_PASS_ALL
  filterList.addFilter(valueFilter);
  filterList.addFilter(prefixFilter);
  ThemisScanner scanner = prepareScanner(TRANSACTION_COLUMNS, filterList);
  checkAndCloseScanner(scanner);
  filterList = new FilterList(Operator.MUST_PASS_ONE);
  filterList.addFilter(valueFilter);
  filterList.addFilter(prefixFilter);
  scanner = prepareScanner(TRANSACTION_COLUMNS, filterList);
  checkScanRow(new ColumnCoordinate[]{COLUMN_WITH_ANOTHER_ROW}, scanner.next());
  Assert.assertEquals(1, scanner.next().size());
  checkAndCloseScanner(scanner);
}
Example 2: generateIRIndexCandidateRows
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
private void generateIRIndexCandidateRows(Scan scan) throws IOException {
  Set<ByteArray> indexHeap = new HashSet<ByteArray>(10000);
  printIndexTree(this.indexTree);
  indexHeap = readAndMergeIndex(indexTree, scan, indexHeap, Operator.MUST_PASS_ONE);
  if (!indexHeap.isEmpty()) {
    // here the values are already filtered!
    candidateIRIndexRows = new ArrayDeque<byte[]>(indexHeap.size() + 1);
    ByteArray[] heap = indexHeap.toArray(new ByteArray[indexHeap.size()]);
    long sortStartTime = System.currentTimeMillis();
    // sort here!
    QuickSort.sort(heap, ByteArray.BAC);
    this.indexSortTime = System.currentTimeMillis() - sortStartTime;
    long mergeStartTime = System.currentTimeMillis();
    byte[][] byteHeap = new byte[heap.length][];
    for (int i = 0; i < heap.length; i++) {
      byteHeap[i] = heap[i].getByteArray();
    }
    this.indexMergeTime = System.currentTimeMillis() - mergeStartTime;
    for (byte[] tmp : byteHeap) {
      candidateIRIndexRows.add(tmp);
    }
  } else {
    LOG.info("LCDBG, index heap is empty");
  }
}
Example 3: testAddFilter
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
@Test
public void testAddFilter() throws Exception {
  Filter filter1 = new FirstKeyOnlyFilter();
  Filter filter2 = new FirstKeyOnlyFilter();
  FilterList filterList = new FilterList(filter1, filter2);
  filterList.addFilter(new FirstKeyOnlyFilter());
  filterList = new FilterList(Arrays.asList(filter1, filter2));
  filterList.addFilter(new FirstKeyOnlyFilter());
  filterList = new FilterList(Operator.MUST_PASS_ALL, filter1, filter2);
  filterList.addFilter(new FirstKeyOnlyFilter());
  filterList = new FilterList(Operator.MUST_PASS_ALL, Arrays.asList(filter1, filter2));
  filterList.addFilter(new FirstKeyOnlyFilter());
}
Example 4: testFilterListWithInclusiveStopFilteMustPassOne
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
/**
 * When we do a "MUST_PASS_ONE" (a logical 'OR') of the two filters
 * we expect to get the same result as the inclusive stop result.
 * @throws Exception
 */
public void testFilterListWithInclusiveStopFilteMustPassOne() throws Exception {
  byte[] r1 = Bytes.toBytes("Row1");
  byte[] r11 = Bytes.toBytes("Row11");
  byte[] r2 = Bytes.toBytes("Row2");
  FilterList flist = new FilterList(FilterList.Operator.MUST_PASS_ONE);
  flist.addFilter(new AlwaysNextColFilter());
  flist.addFilter(new InclusiveStopFilter(r1));
  flist.filterRowKey(r1, 0, r1.length);
  assertEquals(flist.filterKeyValue(new KeyValue(r1, r1, r1)), ReturnCode.INCLUDE);
  assertEquals(flist.filterKeyValue(new KeyValue(r11, r11, r11)), ReturnCode.INCLUDE);
  flist.reset();
  flist.filterRowKey(r2, 0, r2.length);
  assertEquals(flist.filterKeyValue(new KeyValue(r2, r2, r2)), ReturnCode.SKIP);
}
Example 5: testSerialization
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
/**
 * Test serialization (a standalone round-trip sketch follows this example).
 * @throws Exception
 */
@Test
public void testSerialization() throws Exception {
  List<Filter> filters = new ArrayList<Filter>();
  filters.add(new PageFilter(MAX_PAGES));
  filters.add(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("yyy"))));
  Filter filterMPALL =
      new FilterList(FilterList.Operator.MUST_PASS_ALL, filters);
  // Decompose filterMPALL to bytes.
  byte[] buffer = filterMPALL.toByteArray();
  // Recompose filterMPALL.
  FilterList newFilter = FilterList.parseFrom(buffer);
  // Run tests
  mpOneTest(ProtobufUtil.toFilter(ProtobufUtil.toFilter(getFilterMPONE())));
  mpAllTest(ProtobufUtil.toFilter(ProtobufUtil.toFilter(getMPALLFilter())));
  orderingTest(ProtobufUtil.toFilter(ProtobufUtil.toFilter(getOrderingFilter())));
}
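The test above serializes the MUST_PASS_ALL list and parses it back, but never inspects the parsed copy. Below is a minimal, self-contained sketch of the same protobuf round trip; the class name FilterListRoundTrip and the literal page size stand in for the test's MAX_PAGES constant, and the values in the trailing comments are what one would expect rather than output copied from a run.

import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListRoundTrip {
  public static void main(String[] args) throws Exception {
    FilterList original = new FilterList(FilterList.Operator.MUST_PASS_ALL,
        new PageFilter(2),
        new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("yyy"))));
    // Serialize to the protobuf-backed byte representation ...
    byte[] bytes = original.toByteArray();
    // ... and parse it back into a new FilterList instance.
    FilterList restored = FilterList.parseFrom(bytes);
    // The operator and the wrapped filters survive the round trip.
    System.out.println(restored.getOperator());       // expected: MUST_PASS_ALL
    System.out.println(restored.getFilters().size()); // expected: 2
  }
}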
Example 6: testTransformMPO
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
/**
 * Tests the behavior of transform() in a hierarchical filter.
 *
 * transform() only applies after a filterKeyValue() whose return-code includes the KeyValue.
 * Lazy evaluation of AND
 */
@Test
public void testTransformMPO() throws Exception {
  // Apply the following filter:
  //     (family=fam AND qualifier=qual1 AND KeyOnlyFilter)
  //  OR (family=fam AND qualifier=qual2)
  final FilterList flist = new FilterList(Operator.MUST_PASS_ONE, Lists.<Filter>newArrayList(
      new FilterList(Operator.MUST_PASS_ALL, Lists.<Filter>newArrayList(
          new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("fam"))),
          new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("qual1"))),
          new KeyOnlyFilter())),
      new FilterList(Operator.MUST_PASS_ALL, Lists.<Filter>newArrayList(
          new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("fam"))),
          new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("qual2")))))));
  final KeyValue kvQual1 = new KeyValue(
      Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("qual1"), Bytes.toBytes("value"));
  final KeyValue kvQual2 = new KeyValue(
      Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("qual2"), Bytes.toBytes("value"));
  final KeyValue kvQual3 = new KeyValue(
      Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("qual3"), Bytes.toBytes("value"));
  // Value for fam:qual1 should be stripped:
  assertEquals(Filter.ReturnCode.INCLUDE, flist.filterKeyValue(kvQual1));
  final KeyValue transformedQual1 = KeyValueUtil.ensureKeyValue(flist.transform(kvQual1));
  assertEquals(0, transformedQual1.getValue().length);
  // Value for fam:qual2 should not be stripped:
  assertEquals(Filter.ReturnCode.INCLUDE, flist.filterKeyValue(kvQual2));
  final KeyValue transformedQual2 = KeyValueUtil.ensureKeyValue(flist.transform(kvQual2));
  assertEquals("value", Bytes.toString(transformedQual2.getValue()));
  // Other keys should be skipped:
  assertEquals(Filter.ReturnCode.SKIP, flist.filterKeyValue(kvQual3));
}
Example 7: getColumnValueFilters
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
private FilterList getColumnValueFilters(Row row) {
  FilterList filterList = new FilterList(Operator.MUST_PASS_ALL);
  Set<String> filterColumnNames = Sets.newHashSet(row.schema().fieldNames());
  for (Map.Entry<String, ColumnDef> column : columns.entrySet()) {
    if (!column.getValue().cf.equals("rowkey")) {
      if (filterColumnNames.contains(column.getKey())) {
        byte[] value = getColumnValueAsBytes(column.getValue().name, column.getValue().type, row);
        if (value != null) {
          SingleColumnValueFilter columnValueFilter = new SingleColumnValueFilter(
              Bytes.toBytes(column.getValue().cf),
              Bytes.toBytes(column.getValue().name),
              CompareFilter.CompareOp.EQUAL,
              value
          );
          filterList.addFilter(columnValueFilter);
        }
      }
    }
  }
  return filterList;
}
Example 8: generateCandidateRows
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
private void generateCandidateRows(Scan scan) throws IOException {
  Set<ByteArray> indexHeap = new HashSet<ByteArray>(10000);
  printIndexTree(this.indexTree);
  indexHeap = readAndMergeIndex(indexTree, scan, indexHeap, Operator.MUST_PASS_ONE);
  if (!indexHeap.isEmpty()) {
    // here the values are already filtered!
    candidateRows = new ArrayDeque<byte[]>(indexHeap.size() + 1);
    ByteArray[] heap = indexHeap.toArray(new ByteArray[indexHeap.size()]);
    long sortStartTime = System.currentTimeMillis();
    // winter sort here!
    QuickSort.sort(heap, ByteArray.BAC);
    this.indexSortTime = System.currentTimeMillis() - sortStartTime;
    long mergeStartTime = System.currentTimeMillis();
    byte[][] byteHeap = new byte[heap.length][];
    for (int i = 0; i < heap.length; i++) {
      byteHeap[i] = heap[i].getByteArray();
    }
    this.indexMergeTime = System.currentTimeMillis() - mergeStartTime;
    for (byte[] tmp : byteHeap) {
      candidateRows.add(tmp);
    }
  }
}
Example 9: testSerialization
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
/**
 * Test serialization (this variant uses the Writable-based read/write API of older HBase releases).
 * @throws Exception
 */
public void testSerialization() throws Exception {
  List<Filter> filters = new ArrayList<Filter>();
  filters.add(new PageFilter(MAX_PAGES));
  filters.add(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("yyy"))));
  Filter filterMPALL =
      new FilterList(FilterList.Operator.MUST_PASS_ALL, filters);
  // Decompose filterMPALL to bytes.
  ByteArrayOutputStream stream = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(stream);
  filterMPALL.write(out);
  out.close();
  byte[] buffer = stream.toByteArray();
  // Recompose filterMPALL.
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer));
  FilterList newFilter = new FilterList();
  newFilter.readFields(in);
  // TODO: Run TESTS!!!
}
Example 10: handleFilterWithinAND
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
private Filter handleFilterWithinAND(Filter filter) {
  if (filter instanceof FilterList) {
    FilterList fList = (FilterList) filter;
    if (fList.getOperator() == Operator.MUST_PASS_ONE) {
      return new FilterGroupingWorker().group(fList);
    } else {
      List<Filter> filters = fList.getFilters();
      for (Filter subFilter : filters) {
        handleFilterWithinAND(subFilter);
      }
    }
  } else if (filter instanceof SingleColumnValueFilter) {
    handleScvf((SingleColumnValueFilter) filter);
  } // TODO when we expose SingleColumnRangeFilter to handle that also here.
  return null;
}
Example 11: evalFilterForIndexSelection
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
FilterNode evalFilterForIndexSelection(Filter filter, List<IndexSpecification> indices) {
  if (filter instanceof FilterList) {
    FilterList fList = (FilterList) filter;
    GroupingCondition condition =
        (fList.getOperator() == Operator.MUST_PASS_ALL) ? GroupingCondition.AND
            : GroupingCondition.OR;
    NonLeafFilterNode nonLeafFilterNode = new NonLeafFilterNode(condition);
    List<Filter> filters = fList.getFilters();
    for (Filter fltr : filters) {
      FilterNode node = evalFilterForIndexSelection(fltr, indices);
      nonLeafFilterNode.addFilterNode(node);
    }
    return handleNonLeafFilterNode(nonLeafFilterNode);
  } else if (filter instanceof SingleColumnValueFilter) {
    // Check for the availability of index
    return selectBestFitAndPossibleIndicesForSCVF(indices, (SingleColumnValueFilter) filter);
  } else if (filter instanceof SingleColumnRangeFilter) {
    return selectBestFitAndPossibleIndicesForSCRF(indices, (SingleColumnRangeFilter) filter);
  }
  return new NoIndexFilterNode();
}
Example 12: testShouldRetrieveNegativeIntValue
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
@Test(timeout = 180000)
public void testShouldRetrieveNegativeIntValue() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  String userTableName = "testShouldRetrieveNegativeIntValue";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("cf1");
  ihtd.addFamily(hcd);
  admin.createTable(ihtd);
  HTable table = new HTable(conf, userTableName);
  rangePutForIdx2WithInteger(table);
  FilterList masterFilter = new FilterList(Operator.MUST_PASS_ALL);
  SingleColumnValueFilter scvf =
      new SingleColumnValueFilter("cf1".getBytes(), "c1".getBytes(), CompareOp.GREATER,
          new IntComparator(Bytes.toBytes(-6)));
  masterFilter.addFilter(scvf);
  Scan scan = new Scan();
  scan.setFilter(masterFilter);
  ResultScanner scanner = table.getScanner(scan);
  List<Result> testRes = new ArrayList<Result>();
  Result[] result = scanner.next(1);
  while (result != null && result.length > 0) {
    testRes.add(result[0]);
    result = scanner.next(1);
  }
  assertTrue(testRes.size() == 5);
}
Example 13: constructFilterForContain
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
private static Filter constructFilterForContain(
    HBaseColumnSchema hbaseColumnSchema, CompareOp compareOp,
    List<Object> list, Operator operator) {
  Util.checkNull(hbaseColumnSchema);
  Util.checkNull(compareOp);
  Util.checkNull(list);
  Util.checkNull(operator);
  List<Filter> filters = new ArrayList<Filter>();
  for (Object obj : list) {
    filters.add(constructFilter(hbaseColumnSchema, compareOp, obj));
  }
  FilterList filterList = new FilterList(operator, filters);
  return filterList;
}
Example 14: visitNotbetweenconstant
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
@Override
public Filter visitNotbetweenconstant(NotbetweenconstantContext ctx) {
  CidContext cidContext = ctx.cid();
  HBaseColumnSchema hbaseColumnSchema = ContextUtil
      .parseHBaseColumnSchema(hbaseTableConfig, cidContext);
  List<ConstantContext> constantContextList = ctx.constant();
  List<Object> list = ContextUtil.parseConstantList(hbaseColumnSchema,
      constantContextList, runtimeSetting);
  // NOT BETWEEN a AND b  ==  (value < a) OR (value > b), hence MUST_PASS_ONE.
  Filter startFilter = constructFilter(hbaseColumnSchema, CompareOp.LESS,
      list.get(0));
  Filter endFilter = constructFilter(hbaseColumnSchema,
      CompareOp.GREATER, list.get(1));
  FilterList filterList = new FilterList(Operator.MUST_PASS_ONE,
      Arrays.asList(startFilter, endFilter));
  return filterList;
}
Example 15: visitNotbetweenvar
import org.apache.hadoop.hbase.filter.FilterList.Operator; // import the required package/class
@Override
public Filter visitNotbetweenvar(NotbetweenvarContext ctx) {
  CidContext cidContext = ctx.cid();
  List<VarContext> varContextList = ctx.var();
  HBaseColumnSchema hbaseColumnSchema = ContextUtil
      .parseHBaseColumnSchema(hbaseTableConfig, cidContext);
  List<Object> list = ContextUtil.parseParaList(varContextList, para);
  // Same NOT BETWEEN construction as above, but the bounds come from bound variables.
  Filter startFilter = constructFilter(hbaseColumnSchema, CompareOp.LESS,
      list.get(0));
  Filter endFilter = constructFilter(hbaseColumnSchema,
      CompareOp.GREATER, list.get(1));
  FilterList filterList = new FilterList(Operator.MUST_PASS_ONE,
      Arrays.asList(startFilter, endFilter));
  return filterList;
}