This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.RowFilter. If you are wondering what the RowFilter class does, how to use it, and what real usages look like, the curated class examples below may help.
The RowFilter class belongs to the org.apache.hadoop.hbase.filter package. A total of 15 code examples of the class are shown below, ordered by popularity.
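Before diving into the examples, here is a minimal sketch of the basic pattern they all share (the regular expression and surrounding setup are illustrative placeholders, not taken from any specific project):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;

// A RowFilter compares each row key against the supplied comparator and
// includes or excludes the entire row based on the compare operator.
Scan scan = new Scan();
Filter rowFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
scan.setFilter(rowFilter);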
Example 1: configure
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
    TableName tableName = TableName.valueOf("exampleJobConfigurableTable");
    // mandatory
    initializeTable(connection, tableName);
    byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
        Bytes.toBytes("columnB") };
    // optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to initialize.", exception);
  }
}
Example 2: parseTree
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
public HBaseScanSpec parseTree() {
  HBaseScanSpec parsedSpec = le.accept(this, null);
  if (parsedSpec != null) {
    parsedSpec = mergeScanSpecs("booleanAnd", this.groupScan.getHBaseScanSpec(), parsedSpec);
    /*
     * If RowFilter is THE filter attached to the scan specification,
     * remove it since its effect is also achieved through startRow and stopRow.
     */
    Filter parsedFilter = HBaseUtils.deserializeFilter(parsedSpec.filter);
    if (parsedFilter instanceof RowFilter &&
        ((RowFilter) parsedFilter).getComparator() instanceof BinaryComparator) {
      parsedSpec.filter = null;
    }
  }
  return parsedSpec;
}
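The removal above relies on the fact that a RowFilter with a BinaryComparator only constrains the row key, which the scan's startRow/stopRow can express directly. As a hedged illustration of the EQUAL case, assuming a placeholder byte[] key and the HBase 1.x API (java.util.Arrays imported):

// Both scans return exactly the row whose key is `key`:
Scan filtered = new Scan()
    .setFilter(new RowFilter(CompareOp.EQUAL, new BinaryComparator(key)));
Scan bounded = new Scan(key, Arrays.copyOf(key, key.length + 1)); // stop row key+0x00 is key's immediate successor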
Example 3: configure
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Override
public void configure(JobConf job) {
  try {
    HTable exampleTable = new HTable(HBaseConfiguration.create(job),
        Bytes.toBytes("exampleDeprecatedTable"));
    // mandatory
    setHTable(exampleTable);
    byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
        Bytes.toBytes("columnB") };
    // mandatory
    setInputColumns(inputColumns);
    Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
    // optional
    setRowFilter(exampleFilter);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to configure for job.", exception);
  }
}
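Note that this example constructs an HTable directly, which belongs to the older HBase client API (the table name "exampleDeprecatedTable" hints at this); later examples show the ConnectionFactory/Table style that replaced it.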
Example 4: testJira6912
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Test
public void testJira6912() throws Exception {
  TableName TABLE = TableName.valueOf("testJira6912");
  Table foo = TEST_UTIL.createTable(TABLE, new byte[][] { FAMILY }, 10);
  List<Put> puts = new ArrayList<Put>();
  for (int i = 0; i != 100; i++) {
    Put put = new Put(Bytes.toBytes(i));
    put.add(FAMILY, FAMILY, Bytes.toBytes(i));
    puts.add(put);
  }
  foo.put(puts);
  // If I comment this out it works
  TEST_UTIL.flush();
  Scan scan = new Scan();
  scan.setStartRow(Bytes.toBytes(1));
  scan.setStopRow(Bytes.toBytes(3));
  scan.addColumn(FAMILY, FAMILY);
  scan.setFilter(new RowFilter(CompareFilter.CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(1))));
  ResultScanner scanner = foo.getScanner(scan);
  Result[] bar = scanner.next(100);
  assertEquals(1, bar.length);
}
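Here the scan range [1, 3) covers rows 1 and 2, and the NOT_EQUAL filter drops row 1, so exactly one result (row 2) is expected. The flush is the step the in-code comment points at: the bug tracked as HBASE-6912 evidently only surfaced once the data had been flushed.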
Example 5: initialize
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Override
protected void initialize(JobContext job) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(
      job.getConfiguration()));
  TableName tableName = TableName.valueOf("exampleTable");
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
      Bytes.toBytes("columnB") };
  // optional
  Scan scan = new Scan();
  for (byte[] family : inputColumns) {
    scan.addFamily(family);
  }
  Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
  scan.setFilter(exampleFilter);
  setScan(scan);
}
Example 6: parseTree
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
public HBaseScanSpec parseTree() {
  HBaseScanSpec parsedSpec = le.accept(this, null);
  if (parsedSpec != null) {
    parsedSpec = mergeScanSpecs("booleanAnd", this.groupScan.getHBaseScanSpec(), parsedSpec);
    /*
     * If RowFilter is THE filter attached to the scan specification,
     * remove it since its effect is also achieved through startRow and stopRow.
     */
    Filter filter = parsedSpec.getFilter();
    if (filter instanceof RowFilter &&
        ((RowFilter) filter).getOperator() != CompareOp.NOT_EQUAL &&
        ((RowFilter) filter).getComparator() instanceof BinaryComparator) {
      parsedSpec = new HBaseScanSpec(parsedSpec.getTableName(), parsedSpec.getStartRow(),
          parsedSpec.getStopRow(), null);
    }
  }
  return parsedSpec;
}
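Compared with Example 2, this variant additionally requires that the operator is not NOT_EQUAL before dropping the filter: a NOT_EQUAL row filter excludes a single key rather than bounding a range, so startRow/stopRow alone cannot reproduce its effect. It also rebuilds the scan spec without the filter instead of nulling the filter field.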
Example 7: testJira6912
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Test
public void testJira6912() throws Exception {
  byte[] TABLE = Bytes.toBytes("testJira6912");
  HTable foo = TEST_UTIL.createTable(TABLE, new byte[][] { FAMILY }, 10);
  List<Put> puts = new ArrayList<Put>();
  for (int i = 0; i != 100; i++) {
    Put put = new Put(Bytes.toBytes(i));
    put.add(FAMILY, FAMILY, Bytes.toBytes(i));
    puts.add(put);
  }
  foo.put(puts);
  // If I comment this out it works
  TEST_UTIL.flush();
  Scan scan = new Scan();
  scan.setStartRow(Bytes.toBytes(1));
  scan.setStopRow(Bytes.toBytes(3));
  scan.addColumn(FAMILY, FAMILY);
  scan.setFilter(new RowFilter(CompareFilter.CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(1))));
  ResultScanner scanner = foo.getScanner(scan);
  Result[] bar = scanner.next(100);
  assertEquals(1, bar.length);
}
Example 8: configure
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Override
public void configure(JobConf job) {
  try {
    HTable exampleTable = new HTable(HBaseConfiguration.create(job),
        Bytes.toBytes("exampleDeprecatedTable"));
    // mandatory
    setHTable(exampleTable);
    byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
        Bytes.toBytes("columnB") };
    // optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to configure for job.", exception);
  }
}
Example 9: testJira6912
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Test
public void testJira6912() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Table foo = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 10);
  List<Put> puts = new ArrayList<Put>();
  for (int i = 0; i != 100; i++) {
    Put put = new Put(Bytes.toBytes(i));
    put.addColumn(FAMILY, FAMILY, Bytes.toBytes(i));
    puts.add(put);
  }
  foo.put(puts);
  // If I comment this out it works
  TEST_UTIL.flush();
  Scan scan = new Scan();
  scan.setStartRow(Bytes.toBytes(1));
  scan.setStopRow(Bytes.toBytes(3));
  scan.addColumn(FAMILY, FAMILY);
  scan.setFilter(new RowFilter(CompareOperator.NOT_EQUAL,
      new BinaryComparator(Bytes.toBytes(1))));
  ResultScanner scanner = foo.getScanner(scan);
  Result[] bar = scanner.next(100);
  assertEquals(1, bar.length);
}
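This third variant of the same test is written against the newer client API: CompareOperator replaces the deprecated CompareFilter.CompareOp, and Put.addColumn replaces the deprecated Put.add.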
Example 10: configure
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(job);
    Table exampleTable = connection.getTable(TableName.valueOf("exampleDeprecatedTable"));
    // mandatory
    initializeTable(connection, exampleTable.getName());
    byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
        Bytes.toBytes("columnB") };
    // mandatory
    setInputColumns(inputColumns);
    Filter exampleFilter =
        new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
    // optional
    setRowFilter(exampleFilter);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to configure for job.", exception);
  }
}
Example 11: configure
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(job);
    Table exampleTable = connection.getTable(TableName.valueOf("exampleDeprecatedTable"));
    // mandatory
    initializeTable(connection, exampleTable.getName());
    byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
        Bytes.toBytes("columnB") };
    // optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter =
        new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to configure for job.", exception);
  }
}
Example 12: initialize
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
@Override
protected void initialize(JobContext job) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(
      job.getConfiguration()));
  TableName tableName = TableName.valueOf("exampleTable");
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
      Bytes.toBytes("columnB") };
  // optional
  Scan scan = new Scan();
  for (byte[] family : inputColumns) {
    scan.addFamily(family);
  }
  Filter exampleFilter =
      new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
  scan.setFilter(exampleFilter);
  setScan(scan);
}
Example 13: scanWithFilter
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
private void scanWithFilter(byte[] tableName, byte[] columnFamilyName, String val, int expected) throws IOException {
  HTable table = new HTable(hbaseCluster.getConfiguration(), tableName);
  LOGGER.info("Scanning with filter from HBase table.");
  Filter filter = new RowFilter(CompareFilter.CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(val)));
  Scan scan = new Scan();
  LOGGER.info("Getting from HBase table.");
  scan.setFilter(filter);
  int resultCount = 0;
  ResultScanner resultScanner = table.getScanner(scan);
  for (Result result : resultScanner) {
    logResult(result, columnFamilyName);
    resultCount++;
  }
  resultScanner.close();
  table.close();
}
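With LESS_OR_EQUAL and a BinaryComparator, the filter keeps every row whose key is at or before Bytes.toBytes(val) in unsigned lexicographic byte order. Note that in this snippet resultCount is accumulated but never checked against the expected parameter.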
Example 14: getRecommendedUserItem
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
public List<GroupedData> getRecommendedUserItem(String cp, String collection, long user, Long from, Long startDate,
    Long endDate, int size)
{
    FilterList filters = new FilterList();
    if (from == null)
    {
        filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys.getStatRecommendedItemKey(
            collection, user))));
    }
    else
    {
        filters.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(RowKeys
            .getStatRecommendedItemKey(collection, user, from))));
    }
    setDateLimit(STATS_RECOMMENDED_USERITEM, startDate, endDate, filters);
    Scan scan = new Scan().addFamily(STATS_RECOMMENDED_USERITEM).setFilter(filters);
    return getResults(cp, scan, STATS_RECOMMENDED_USERITEM, size);
}
Example 15: getUserRated
import org.apache.hadoop.hbase.filter.RowFilter; // import the required package/class
public List<GroupedData> getUserRated(String cp, String collection, Long from, Long startDate, Long endDate, int size)
{
    // Put put = new Put(RowKeys.getStatRatingsPerItemKey(collection, item, date));
    Scan scan = new Scan();
    scan.addFamily(STATS_USER_RATINGS);
    FilterList filters = new FilterList();
    if (from == null)
    {
        filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys
            .getStatRatingsPerUserKey(collection))));
    }
    else
    {
        filters.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(RowKeys
            .getStatRatingsPerUserKey(collection, from.longValue()))));
    }
    setDateLimit(STATS_USER_RATINGS, startDate, endDate, filters);
    scan.setFilter(filters);
    return getResults(cp, scan, STATS_USER_RATINGS, size);
}
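In the last two methods, RowFilter paired with BinaryPrefixComparator effectively performs a prefix scan: with EQUAL it keeps rows whose keys start with the computed prefix (much like PrefixFilter), while with GREATER_OR_EQUAL it keeps rows at or lexicographically after that prefix. The FilterList (MUST_PASS_ALL by default) combines this with whatever date-limit filters setDateLimit adds.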