This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.setFilter. If you have been wondering what exactly Scan.setFilter does, how to call it, or where to find examples of it in use, the hand-picked code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Scan.
A total of 15 code examples of Scan.setFilter are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
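Before the individual examples, here is a minimal sketch of the client-side pattern they all share: build a Filter, attach it to a Scan with setFilter, and iterate the ResultScanner. The table name "demo_table", column family "cf", qualifier "q" and value "some_value" are placeholders chosen for this sketch, not taken from any example below.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSetFilterSketch {
  public static void main(String[] args) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      // Keep only rows whose cf:q column equals "some_value".
      SingleColumnValueFilter filter = new SingleColumnValueFilter(
          Bytes.toBytes("cf"), Bytes.toBytes("q"),
          CompareOp.EQUAL, Bytes.toBytes("some_value"));
      Scan scan = new Scan();
      scan.setFilter(filter);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println("row key: " + Bytes.toString(result.getRow()));
        }
      }
    }
  }
}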
Example 1: configure
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
    TableName tableName = TableName.valueOf("exampleJobConfigurableTable");
    // mandatory
    initializeTable(connection, tableName);
    byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
        Bytes.toBytes("columnB") };
    // optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to initialize.", exception);
  }
}
Example 2: QueryByCondition2
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
public static void QueryByCondition2(String tableName) {
  try {
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    // Select rows whose column1 value is "aaa".
    Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"), null,
        CompareOp.EQUAL, Bytes.toBytes("aaa"));
    Scan s = new Scan();
    s.setFilter(filter);
    ResultScanner rs = table.getScanner(s);
    for (Result r : rs) {
      System.out.println("row key: " + new String(r.getRow()));
      for (KeyValue keyValue : r.raw()) {
        System.out.println("column: " + new String(keyValue.getFamily())
            + "====value: " + new String(keyValue.getValue()));
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
}
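Example 2 relies on HTablePool, HTable and KeyValue, which are deprecated or removed in current HBase client versions. A rough equivalent using the Connection/Table/Cell API might look like the sketch below; it mirrors the column names and output of the example above but is not taken from the original project.

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public static void queryByCondition2(String tableName) throws IOException {
  try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
       Table table = connection.getTable(TableName.valueOf(tableName))) {
    // Select rows whose column1 value is "aaa" (null qualifier, as in the example above).
    Filter filter = new SingleColumnValueFilter(
        Bytes.toBytes("column1"), null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
    Scan s = new Scan();
    s.setFilter(filter);
    try (ResultScanner rs = table.getScanner(s)) {
      for (Result r : rs) {
        System.out.println("row key: " + Bytes.toString(r.getRow()));
        for (Cell cell : r.rawCells()) {
          System.out.println("column: " + Bytes.toString(CellUtil.cloneFamily(cell))
              + "====value: " + Bytes.toString(CellUtil.cloneValue(cell)));
        }
      }
    }
  }
}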
Example 3: testWhileMatchFilterWithFilterKeyValue
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
/**
 * Tests that the {@link WhileMatchFilter} works in combination with a
 * {@link Filter} that uses the
 * {@link Filter#filterKeyValue(org.apache.hadoop.hbase.KeyValue)} method.
 *
 * See HBASE-2258.
 *
 * @throws Exception
 */
@Test
public void testWhileMatchFilterWithFilterKeyValue() throws Exception {
  Scan s = new Scan();
  WhileMatchFilter filter = new WhileMatchFilter(
      new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], CompareOp.EQUAL, Bytes.toBytes("foo"))
  );
  s.setFilter(filter);
  InternalScanner scanner = this.region.getScanner(s);
  while (true) {
    ArrayList<Cell> values = new ArrayList<Cell>();
    boolean isMoreResults = scanner.next(values);
    assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining());
    if (!isMoreResults) {
      break;
    }
  }
}
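The test above drives an InternalScanner inside the region, which is only possible from server-side or test code. For reference, a client-side scan benefits from WhileMatchFilter in the same way: once the wrapped filter rejects a row, the whole scan ends instead of merely skipping that row. A minimal sketch under that assumption (the table and stop key are placeholders, not from the test):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;

// Count rows whose key is strictly below stopKey. When a row key fails the
// inner RowFilter, WhileMatchFilter terminates the scan rather than scanning on.
public static int countRowsBelow(Table table, byte[] stopKey) throws IOException {
  Scan scan = new Scan();
  scan.setFilter(new WhileMatchFilter(
      new RowFilter(CompareOp.LESS, new BinaryComparator(stopKey))));
  int count = 0;
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      count++;
    }
  }
  return count;
}

In this particular case Scan.setStopRow would achieve the same thing; the pattern becomes more useful with inner filters that cannot be expressed as a simple stop row, such as the SingleColumnValueFilter in the test above.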
Example 4: testMultiRowRangeFilterWithoutRangeOverlap
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Test
public void testMultiRowRangeFilterWithoutRangeOverlap() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithoutRangeOverlap");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();
  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false));
  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  LOG.info("found " + resultsSize + " results");
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(20), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), ht);
  List<Cell> results3 = getScanResult(Bytes.toBytes(60), Bytes.toBytes(70), ht);
  assertEquals(results1.size() + results2.size() + results3.size(), resultsSize);
  ht.close();
}
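The helpers generateRows, getResultsSize and getScanResult above belong to the HBase test class and are not shown here. Outside the test harness, the same filter restricts an ordinary client scan to a set of row-key intervals; a minimal sketch with placeholder range boundaries:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
import org.apache.hadoop.hbase.util.Bytes;

// Scan only the row-key intervals [a, b) and [x, y); MultiRowRangeFilter seeks
// ahead between ranges, so rows outside them are skipped cheaply.
public static int countRowsInRanges(Table table) throws IOException {
  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), false));
  ranges.add(new RowRange(Bytes.toBytes("x"), true, Bytes.toBytes("y"), false));
  Scan scan = new Scan();
  scan.setFilter(new MultiRowRangeFilter(ranges));
  int count = 0;
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      count++;
    }
  }
  return count;
}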
Example 5: dependentColumnFilter
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
/**
 * Dependent (reference) column filter: returns the columns that share the reference column's timestamp.
 *
 * @param tableName    table name
 * @param columnFamily column family of the reference column
 * @param qualifier    column qualifier of the reference column
 * @param columnValue  value the reference column must equal
 * @param count        number of results to print
 */
public void dependentColumnFilter(String tableName, String columnFamily, String qualifier, String columnValue, int count) {
  HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
  Table table = hBaseConfiguration.table(tableName);
  Scan scan = new Scan();
  // Keep only cells that carry the same timestamp as the reference column
  // columnFamily:qualifier, and only in rows where that column's value equals columnValue.
  scan.setFilter(new DependentColumnFilter(Bytes.toBytes(columnFamily), Bytes.toBytes(qualifier),
      false, CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnValue))));
  scan.setCaching(10);
  scan.setBatch(10);
  try {
    ResultScanner scanner = table.getScanner(scan);
    Result[] results = scanner.next(count);
    HBaseResultUtil.print(results);
  } catch (IOException e) {
    e.printStackTrace();
  }
}
Example 6: SingleColumnValueExcludeFilter
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
/**
 * Single-column-value exclude filter (the reference column itself is not included in the returned columns).
 *
 * @param tableName    table name
 * @param columnFamily column family
 * @param qualifier    column qualifier
 * @param columnValue  column value
 * @param count        number of results to print
 */
public void SingleColumnValueExcludeFilter(String tableName, String columnFamily, String qualifier, String columnValue, int count) {
  HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
  Table table = hBaseConfiguration.table(tableName);
  Scan scan = new Scan();
  SingleColumnValueExcludeFilter singleColumnValueFilter = new SingleColumnValueExcludeFilter(Bytes.toBytes(columnFamily), Bytes.toBytes(qualifier), CompareFilter.CompareOp.EQUAL, Bytes.toBytes(columnValue));
  //singleColumnValueFilter.setFilterIfMissing(true); // by default, rows that do not contain this column are NOT filtered out
  singleColumnValueFilter.setLatestVersionOnly(true); // only test the latest version of the column
  scan.setFilter(singleColumnValueFilter);
  scan.setCaching(10);
  //scan.setBatch(10);
  try {
    ResultScanner scanner = table.getScanner(scan);
    Result[] results = scanner.next(count);
    HBaseResultUtil.print(results);
  } catch (IOException e) {
    e.printStackTrace();
  }
}
Example 7: testMultiRowRangeFilterWithEmptyStopRow
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Test
public void testMultiRowRangeFilterWithEmptyStopRow() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithEmptyStopRow");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();
  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(""), false));
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(""), ht);
  assertEquals(results1.size(), resultsSize);
  ht.close();
}
Example 8: getScan
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
public static Scan getScan(String fileName) throws IOException {
  BufferedReader br = new BufferedReader(new FileReader(fileName));
  String line;
  Scan scan = new Scan();
  System.out.println("winter for scan ******");
  FilterList filterList = new FilterList();
  while ((line = br.readLine()) != null) {
    System.out.println("winter for scan : " + line);
    if (line.startsWith("#")) continue;
    // family, qualifier, type, >=, 10, <=, 1000
    // family, qualifier, type, >=, 10
    String[] splits = line.split("\t");
    byte[] family = Bytes.toBytes(splits[0]);
    byte[] qualifier = Bytes.toBytes(splits[1]);
    DataType type = DataType.valueOf(splits[2].toUpperCase());
    CompareFilter.CompareOp firstOp = parseOp(splits[3]);
    byte[] firstValue = DataType.stringToBytes(type, splits[4]);
    filterList.addFilter(new SingleColumnValueFilter(family, qualifier, firstOp, firstValue));
    if (splits.length >= 7) {
      CompareFilter.CompareOp secondOp = parseOp(splits[5].toUpperCase());
      byte[] secondValue = DataType.stringToBytes(type, splits[6]);
      filterList.addFilter(new SingleColumnValueFilter(family, qualifier, secondOp, secondValue));
    }
  }
  br.close();
  scan.setFilter(filterList);
  ScanRangeList scanRangeList = ScanRangeList.getScanRangeList(fileName);
  if (scanRangeList.getRanges().size() > 0) {
    scan.setAttribute(ScanRange.SCAN_RANGE_ATTRIBUTE_STR, scanRangeList.toBytesAttribute());
  }
  return scan;
}
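For illustration only: given a hypothetical tab-separated spec line such as "f  price  INT  >=  10  <=  1000" (DataType, parseOp, ScanRangeList and ScanRange are project-specific helpers not shown here), the loop above builds essentially the following plain HBase filter, with Bytes.toBytes(int) standing in for the project's DataType.stringToBytes encoding:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Equivalent of one two-bound spec line: 10 <= f:price <= 1000.
// FilterList defaults to Operator.MUST_PASS_ALL, so both bounds must hold.
public static Scan buildRangeScan() {
  FilterList filterList = new FilterList();
  filterList.addFilter(new SingleColumnValueFilter(
      Bytes.toBytes("f"), Bytes.toBytes("price"),
      CompareOp.GREATER_OR_EQUAL, Bytes.toBytes(10)));
  filterList.addFilter(new SingleColumnValueFilter(
      Bytes.toBytes("f"), Bytes.toBytes("price"),
      CompareOp.LESS_OR_EQUAL, Bytes.toBytes(1000)));
  Scan scan = new Scan();
  scan.setFilter(filterList);
  return scan;
}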
Example 9: testRow
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  ResultScanner s = this.table.getScanner(scan);
  s.close();
}
Example 10: scanWithSpecificQualifiers2
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
/**
 * Test the filter by adding 1 column of family A (the one used in the filter) and 1 column of
 * family B in the scan. (OK)
 */
@Test
public void scanWithSpecificQualifiers2() throws IOException {
  /* When */
  Scan scan = new Scan();
  scan.addColumn(FAMILY_A, QUALIFIER_FOO);
  scan.addColumn(FAMILY_B, QUALIFIER_BAR);
  scan.setFilter(scanFilter);
  verify(scan);
}
Example 11: testMinWithFilter
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Test (timeout=300000)
public void testMinWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  // The prefix "foo:bar" is chosen so that no row key matches; aggregating over
  // an empty selection yields null, which the assertion below checks.
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(f);
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  BigDecimal min = null;
  min = aClient.min(TEST_TABLE, ci, scan);
  assertEquals(null, min);
}
Example 12: constructScan
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
protected Scan constructScan(byte[] valuePrefix) throws IOException {
  Filter filter = new SingleColumnValueFilter(
      FAMILY_NAME, QUALIFIER_NAME, CompareFilter.CompareOp.EQUAL,
      new BinaryComparator(valuePrefix)
  );
  Scan scan = new Scan();
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(filter);
  return scan;
}
Example 13: testAvgWithFilter
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Test (timeout=300000)
public void testAvgWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  // The prefix "foo:bar" matches no row key, so the average over the empty
  // selection comes back as Double.NaN.
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  scan.setFilter(f);
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  Double avg = null;
  avg = aClient.avg(TEST_TABLE, ci, scan);
  assertEquals(Double.NaN, avg, 0);
}
Example 14: testSumWithFilter
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Test(timeout = 300000)
public void testSumWithFilter() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  // The prefix "foo:bar" matches no row key, so no cell is aggregated and the sum is null.
  Filter f = new PrefixFilter(Bytes.toBytes("foo:bar"));
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setFilter(f);
  final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> ci =
      new DoubleColumnInterpreter();
  Double sum = null;
  sum = aClient.sum(TEST_TABLE, ci, scan);
  assertEquals(null, sum);
}
Example 15: getUserAuths
import org.apache.hadoop.hbase.client.Scan; // import for the package/class this method depends on
@Override
public List<String> getUserAuths(byte[] user, boolean systemCall)
    throws IOException {
  assert (labelsRegion != null || systemCall);
  if (systemCall || labelsRegion == null) {
    return this.labelsCache.getUserAuths(Bytes.toString(user));
  }
  Scan s = new Scan();
  if (user != null && user.length > 0) {
    s.addColumn(LABELS_TABLE_FAMILY, user);
  }
  Filter filter = VisibilityUtils.createVisibilityLabelFilter(this.labelsRegion,
      new Authorizations(SYSTEM_LABEL));
  s.setFilter(filter);
  ArrayList<String> auths = new ArrayList<String>();
  RegionScanner scanner = this.labelsRegion.getScanner(s);
  try {
    List<Cell> results = new ArrayList<Cell>(1);
    while (true) {
      scanner.next(results);
      if (results.isEmpty()) break;
      Cell cell = results.get(0);
      int ordinal = Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
      String label = this.labelsCache.getLabel(ordinal);
      if (label != null) {
        auths.add(label);
      }
      results.clear();
    }
  } finally {
    scanner.close();
  }
  return auths;
}