本文整理匯總了Java中org.apache.hadoop.hbase.filter.SingleColumnValueFilter.setFilterIfMissing方法的典型用法代碼示例。如果您正苦於以下問題：Java SingleColumnValueFilter.setFilterIfMissing方法的具體用法？Java SingleColumnValueFilter.setFilterIfMissing怎麼用？Java SingleColumnValueFilter.setFilterIfMissing使用的例子？那麼，這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.hbase.filter.SingleColumnValueFilter
的用法示例。
在下文中一共展示了SingleColumnValueFilter.setFilterIfMissing方法的15個代碼示例，這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚，您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: buildScanner
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Builds a scanner over {@code ht} that returns only rows whose key starts
 * with {@code keyPrefix} AND whose {@code trans-tags:qual2} column equals
 * {@code value}; rows missing that column are skipped entirely
 * (setFilterIfMissing(true)).
 */
private ResultScanner buildScanner(String keyPrefix, String value, Table ht)
throws IOException {
  // Default FilterList operator is MUST_PASS_ALL: every filter must accept the row.
  FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */);
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  SingleColumnValueFilter valueMatch = new SingleColumnValueFilter(
      Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOp.EQUAL,
      Bytes.toBytes(value));
  // Exclude rows that do not contain the column at all.
  valueMatch.setFilterIfMissing(true);
  allFilters.addFilter(valueMatch);

  Scan scan = new Scan();
  for (String family : new String[] {
      "trans-blob", "trans-type", "trans-date", "trans-tags", "trans-group" }) {
    scan.addFamily(Bytes.toBytes(family));
  }
  scan.setFilter(allFilters);
  return ht.getScanner(scan);
}
示例2: buildScanner
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Opens an InternalScanner directly on region {@code r}, restricted to rows
 * whose key starts with {@code keyPrefix} and whose {@code trans-tags:qual2}
 * column equals {@code value}. Rows lacking that column are filtered out.
 */
private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
throws IOException {
  // FilterList defaults to Operator.MUST_PASS_ALL.
  FilterList filters = new FilterList();
  filters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  SingleColumnValueFilter columnMatch = new SingleColumnValueFilter(
      Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"),
      CompareOp.EQUAL, Bytes.toBytes(value));
  // Only return rows where this column value exists in the row.
  columnMatch.setFilterIfMissing(true);
  filters.addFilter(columnMatch);

  Scan scan = new Scan();
  for (String family : new String[] {
      "trans-blob", "trans-type", "trans-date", "trans-tags", "trans-group" }) {
    scan.addFamily(Bytes.toBytes(family));
  }
  scan.setFilter(filters);
  return r.getScanner(scan);
}
示例3: runScanner
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Scans {@code table} for rows whose essential column equals {@code flag_yes},
 * counts the results, and logs the elapsed time. With {@code slow == true},
 * on-demand column-family loading is disabled so the scan reads all families.
 *
 * Fix: the ResultScanner was not closed if {@code next()} threw — it is now
 * managed by try-with-resources. The unused {@code res} local was removed.
 */
private void runScanner(Table table, boolean slow) throws Exception {
  long time = System.nanoTime();
  Scan scan = new Scan();
  scan.addColumn(cf_essential, col_name);
  scan.addColumn(cf_joined, col_name);
  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      cf_essential, col_name, CompareFilter.CompareOp.EQUAL, flag_yes);
  filter.setFilterIfMissing(true);
  scan.setFilter(filter);
  scan.setLoadColumnFamiliesOnDemand(!slow);
  long rows_count = 0;
  try (ResultScanner result_scanner = table.getScanner(scan)) {
    while (result_scanner.next() != null) {
      rows_count++;
    }
    double timeSec = (System.nanoTime() - time) / 1000000000.0;
    LOG.info((slow ? "Slow" : "Joined") + " scanner finished in " + Double.toString(timeSec)
        + " seconds, got " + Long.toString(rows_count/2) + " rows");
  }
}
示例4: constructFilter
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Builds a SingleColumnValueFilter for the schema's family/qualifier that
 * compares the cell value against {@code value} using {@code compareOp}.
 *
 * @param filterIfMissing when true, rows without the column are dropped;
 *        when false, they pass the filter unchecked.
 * @throws whatever Util.checkNull throws if any argument is null
 */
private static Filter constructFilter(HBaseColumnSchema hbaseColumnSchema,
    CompareOp compareOp, byte[] value, boolean filterIfMissing) {
  Util.checkNull(hbaseColumnSchema);
  Util.checkNull(compareOp);
  Util.checkNull(value);
  SingleColumnValueFilter result = new SingleColumnValueFilter(
      hbaseColumnSchema.getFamilyBytes(),
      hbaseColumnSchema.getQualifierBytes(),
      compareOp, value);
  result.setFilterIfMissing(filterIfMissing);
  return result;
}
示例5: latestVersionOnlyComparisonsAreDone
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Adapting an EQUAL SingleColumnValueFilter with latestVersionOnly = true
 * should produce a RowFilter that compares against exactly one cell version.
 */
@Test
public void latestVersionOnlyComparisonsAreDone() throws IOException {
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("someColumn");
  byte[] filterValue = Bytes.toBytes("foobar");

  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      family, qualifier, CompareFilter.CompareOp.EQUAL,
      new BinaryComparator(filterValue));
  filter.setFilterIfMissing(false);
  filter.setLatestVersionOnly(true);

  RowFilter adaptedFilter =
      adapter.adapt(new FilterAdapterContext(new Scan()), filter);
  assertFilterIfNotMIssingMatches(
      family, qualifier, filterValue,
      1 /* latest version only = true */,
      adaptedFilter);
}
示例6: allVersionComparisonAreDone
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Adapting an EQUAL SingleColumnValueFilter with latestVersionOnly = false
 * should produce a RowFilter that compares against all cell versions
 * (Integer.MAX_VALUE).
 */
@Test
public void allVersionComparisonAreDone() throws IOException {
  byte[] family = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("someColumn");
  byte[] filterValue = Bytes.toBytes("foobar");

  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      family, qualifier, CompareFilter.CompareOp.EQUAL,
      new BinaryComparator(filterValue));
  filter.setFilterIfMissing(false);
  filter.setLatestVersionOnly(false);

  RowFilter adaptedFilter =
      adapter.adapt(new FilterAdapterContext(new Scan()), filter);
  assertFilterIfNotMIssingMatches(
      family, qualifier, filterValue,
      Integer.MAX_VALUE /* latest version only = false */,
      adaptedFilter);
}
示例7: buildScanner
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Builds a scanner over {@code ht} matching rows whose key starts with
 * {@code keyPrefix} and whose {@code trans-tags:qual2} column equals
 * {@code value}; rows without that column are excluded.
 */
private ResultScanner buildScanner(String keyPrefix, String value, HTable ht)
throws IOException {
  // Implicit FilterList.Operator.MUST_PASS_ALL — both conditions must hold.
  FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */);
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));

  SingleColumnValueFilter tagFilter = new SingleColumnValueFilter(
      Bytes.toBytes("trans-tags"),
      Bytes.toBytes("qual2"),
      CompareOp.EQUAL,
      Bytes.toBytes(value));
  tagFilter.setFilterIfMissing(true); // skip rows lacking the column
  allFilters.addFilter(tagFilter);

  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);
  return ht.getScanner(scan);
}
示例8: runScanner
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Scans {@code table} for rows whose essential column equals {@code flag_yes}
 * (rows missing the column are filtered out), counts the results, and logs
 * the elapsed time. With {@code slow == true}, on-demand column-family
 * loading is disabled.
 *
 * Fix: the ResultScanner was not closed if {@code next()} threw — it is now
 * managed by try-with-resources. The unused {@code res} local was removed.
 */
private void runScanner(HTable table, boolean slow) throws Exception {
  long time = System.nanoTime();
  Scan scan = new Scan();
  scan.addColumn(cf_essential, col_name);
  scan.addColumn(cf_joined, col_name);
  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      cf_essential, col_name, CompareFilter.CompareOp.EQUAL, flag_yes);
  filter.setFilterIfMissing(true);
  scan.setFilter(filter);
  scan.setLoadColumnFamiliesOnDemand(!slow);
  long rows_count = 0;
  try (ResultScanner result_scanner = table.getScanner(scan)) {
    while (result_scanner.next() != null) {
      rows_count++;
    }
    double timeSec = (System.nanoTime() - time) / 1000000000.0;
    LOG.info((slow ? "Slow" : "Joined") + " scanner finished in " + Double.toString(timeSec)
        + " seconds, got " + Long.toString(rows_count/2) + " rows");
  }
}
示例9: getActiveRows
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Fetches repository rows whose ENTITY_STATUS_COLUMN equals ACTIVE_STATUS.
 * Rows that do not carry the status column at all are excluded
 * (setFilterIfMissing(true)). All other arguments are passed straight
 * through to getRepositoryRows.
 */
private Result[] getActiveRows(boolean getRowIdAndStatusOnly, byte recordType,
    byte[] parentForeignKey, byte[] entityName, byte[] columnToGet)
    throws IOException {
  SingleColumnValueFilter statusFilter = new SingleColumnValueFilter(
      REPOSITORY_CF, ENTITY_STATUS_COLUMN,
      CompareFilter.CompareOp.EQUAL, ACTIVE_STATUS);
  statusFilter.setFilterIfMissing(true);
  return getRepositoryRows(getRowIdAndStatusOnly, recordType, parentForeignKey,
      entityName, columnToGet, statusFilter);
}
示例10: getPropertyScan
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Builds a Scan that matches rows whose serialized label column
 * (DEFAULT_FAMILY:LABEL_BYTES) equals {@code label}; rows without the
 * label column are filtered out.
 */
protected Scan getPropertyScan(String label) {
  SingleColumnValueFilter labelFilter = new SingleColumnValueFilter(
      Constants.DEFAULT_FAMILY_BYTES, Constants.LABEL_BYTES,
      CompareFilter.CompareOp.EQUAL,
      new BinaryComparator(ValueUtils.serialize(label)));
  labelFilter.setFilterIfMissing(true);
  Scan scan = new Scan();
  scan.setFilter(labelFilter);
  return scan;
}
示例11: constructFilterWithRegex
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Builds a regex-matching SingleColumnValueFilter for the schema's
 * family/qualifier. Regex matching is only valid for EQUAL/NOT_EQUAL on
 * String columns with a String pattern; anything else raises
 * SimpleHBaseException. Rows missing the column are always filtered out.
 *
 * NOTE: the validation order below is preserved deliberately — it determines
 * which exception is reported first.
 */
private static Filter constructFilterWithRegex(
    HBaseColumnSchema hbaseColumnSchema, CompareOp compareOp,
    Object object) {
  Util.checkNull(hbaseColumnSchema);
  Util.checkNull(compareOp);
  Util.checkNull(object);

  boolean opSupportsRegex =
      compareOp == CompareOp.EQUAL || compareOp == CompareOp.NOT_EQUAL;
  if (!opSupportsRegex) {
    throw new SimpleHBaseException(
        "only EQUAL or NOT_EQUAL can use regex match. compareOp = "
            + compareOp);
  }
  if (object.getClass() != String.class) {
    throw new SimpleHBaseException(
        "only String can use regex match. object = " + object);
  }
  if (hbaseColumnSchema.getType() != String.class) {
    throw new SimpleHBaseException(
        "only String can use regex match. hbaseColumnSchema = "
            + hbaseColumnSchema);
  }

  SingleColumnValueFilter regexFilter = new SingleColumnValueFilter(
      hbaseColumnSchema.getFamilyBytes(),
      hbaseColumnSchema.getQualifierBytes(),
      compareOp,
      new RegexStringComparator((String) object));
  regexFilter.setFilterIfMissing(true);
  return regexFilter;
}
示例12: testFilter_filterIfMissing_family
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Runs a single-row scan (rowKey4..rowKey4) over the whole family with a
 * GREATER_OR_EQUAL filter on QName1 against the long value 4, toggling
 * filterIfMissing, and checks the result via testScan.
 */
private void testFilter_filterIfMissing_family(boolean filterIfMissing,
    int expectedSize, String... rowKeys) throws Exception {
  SingleColumnValueFilter valueFilter = new SingleColumnValueFilter(
      ColumnFamilyNameBytes, QName1, CompareOp.GREATER_OR_EQUAL,
      new BinaryComparator(Bytes.toBytes(4L)));
  valueFilter.setFilterIfMissing(filterIfMissing);

  Scan scan = new Scan(rowKey4, rowKey4);
  scan.addFamily(ColumnFamilyNameBytes);
  scan.setFilter(valueFilter);
  testScan(scan, expectedSize, rowKeys);
}
示例13: testFilter_filterIfMissing
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Runs a single-row scan (rowKey4..rowKey4) restricted to one column, with a
 * GREATER_OR_EQUAL filter on QName1 against the long value 4, toggling
 * filterIfMissing, and checks the result via testScan.
 *
 * Fix: parameter was misspelled "qualifer"; renamed to "qualifier"
 * (Java call sites are positional, so callers are unaffected).
 */
private void testFilter_filterIfMissing(boolean filterIfMissing,
    byte[] qualifier, int expectedSize, String... rowKeys)
    throws Exception {
  Scan scan = new Scan(rowKey4, rowKey4);
  scan.addColumn(ColumnFamilyNameBytes, qualifier);
  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      ColumnFamilyNameBytes, QName1, CompareOp.GREATER_OR_EQUAL,
      new BinaryComparator(Bytes.toBytes(4L)));
  filter.setFilterIfMissing(filterIfMissing);
  scan.setFilter(filter);
  testScan(scan, expectedSize, rowKeys);
}
示例14: testScanMultipleIdxWithDifferentColFamilyAndCacheShouldBeSuccessful
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Scans with two SCVFs on different column families (col1:ql == "cat" AND
 * col2:ql == "dog") and caching = 4, and asserts the indexed scan path was
 * used and returned 5 rows with one seek point left in cache.
 *
 * Fix: the HTable and ResultScanner were never closed — both are now
 * managed by try-with-resources. The unused loop variable was renamed to
 * {@code ignored} to mark it as intentionally unread.
 */
@Test(timeout = 180000)
public void testScanMultipleIdxWithDifferentColFamilyAndCacheShouldBeSuccessful()
    throws Exception {
  Configuration conf = UTIL.getConfiguration();
  String userTableName = "testScanWithMultIndexedCacheDiffColFamilyColumn";
  putMulIndex(userTableName);
  int i = 0;
  Scan s = new Scan();
  FilterList filterList = new FilterList();
  // check for combination of cat in q1 and dog in q1
  SingleColumnValueFilter filter1 =
      new SingleColumnValueFilter("col1".getBytes(), "ql".getBytes(), CompareOp.EQUAL,
          "cat".getBytes());
  filter1.setFilterIfMissing(true);
  SingleColumnValueFilter filter2 =
      new SingleColumnValueFilter("col2".getBytes(), "ql".getBytes(), CompareOp.EQUAL,
          "dog".getBytes());
  filter2.setFilterIfMissing(true);
  filterList.addFilter(filter1);
  filterList.addFilter(filter2);
  s.setCaching(4);
  s.setFilter(filterList);
  try (HTable table = new HTable(conf, userTableName);
      ResultScanner scanner = table.getScanner(s)) {
    for (Result ignored : scanner) {
      i++;
    }
  }
  Assert.assertEquals(
      "Should match for 5 rows in multiple index with diff column family successfully ", 5, i);
  Assert.assertTrue("Seek points should be added ", IndexRegionObserver.getSeekpointAdded());
  Assert.assertTrue("Indexed table should be used ", IndexRegionObserver.getIndexedFlowUsed());
  Assert.assertEquals("Remaining rows in cache should be 1 ", 1, IndexRegionObserver
      .getSeekpoints().size());
}
示例15: testScanWithIndexOn2ColumnsAndFiltersOn2ColumnsInReverseWayShouldBeSuccessful
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; //導入方法依賴的package包/類
/**
 * Same two-column indexed scan as the previous test, but with the filters
 * added in reverse order (col2 first, then col1); asserts the indexed flow
 * is still used and 5 rows match.
 *
 * Fix: the HTable and ResultScanner were never closed — both are now
 * managed by try-with-resources. The unused loop variable was renamed to
 * {@code ignored} to mark it as intentionally unread.
 */
@Test(timeout = 180000)
public void testScanWithIndexOn2ColumnsAndFiltersOn2ColumnsInReverseWayShouldBeSuccessful()
    throws Exception {
  Configuration conf = UTIL.getConfiguration();
  String userTableName = "testScan2Indexed2ReversedFilters";
  putMulIndex(userTableName);
  int i = 0;
  Scan s = new Scan();
  FilterList filterList = new FilterList();
  // check for combination of cat in q1 and dog in q1
  SingleColumnValueFilter filter1 =
      new SingleColumnValueFilter("col2".getBytes(), "ql".getBytes(), CompareOp.EQUAL,
          "dog".getBytes());
  filter1.setFilterIfMissing(true);
  SingleColumnValueFilter filter2 =
      new SingleColumnValueFilter("col1".getBytes(), "ql".getBytes(), CompareOp.EQUAL,
          "cat".getBytes());
  filter2.setFilterIfMissing(true);
  filterList.addFilter(filter1);
  filterList.addFilter(filter2);
  s.setFilter(filterList);
  try (HTable table = new HTable(conf, userTableName);
      ResultScanner scanner = table.getScanner(s)) {
    for (Result ignored : scanner) {
      i++;
    }
  }
  Assert.assertEquals(
      "Should match for 5 rows in multiple index with diff column family successfully ", 5, i);
  Assert.assertTrue("Seek points should be added ", IndexRegionObserver.getSeekpointAdded());
  Assert.assertTrue("Indexed table should be used ", IndexRegionObserver.getIndexedFlowUsed());
}