This article collects typical usage examples of the Java enum org.apache.hadoop.hbase.filter.CompareFilter.CompareOp. If you are unsure what CompareOp is for, or how and where to use it, the curated examples below may help.
CompareOp is an enum nested inside the CompareFilter class in the org.apache.hadoop.hbase.filter package. 15 code examples using CompareOp are shown below, sorted by popularity by default.
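Before the examples, a minimal orientation sketch of how CompareOp is typically wired into a scan filter (the row key here is illustrative):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

// CompareOp enumerates the comparison operators accepted by comparison filters:
// LESS, LESS_OR_EQUAL, EQUAL, NOT_EQUAL, GREATER_OR_EQUAL, GREATER, and NO_OP.
Scan scan = new Scan();
scan.setFilter(new RowFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("row-1"))));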
Example 1: configure
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
    TableName tableName = TableName.valueOf("exampleJobConfigurableTable");
    // mandatory
    initializeTable(connection, tableName);
    byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
        Bytes.toBytes("columnB") };
    // optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to initialize.", exception);
  }
}
Example 2: QueryByCondition2
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
public static void QueryByCondition2(String tableName) {
  try {
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    Filter filter = new SingleColumnValueFilter(Bytes
        .toBytes("column1"), null, CompareOp.EQUAL, Bytes
        .toBytes("aaa")); // match rows where column1 has the value "aaa"
    Scan s = new Scan();
    s.setFilter(filter);
    ResultScanner rs = table.getScanner(s);
    for (Result r : rs) {
      System.out.println("got rowkey: " + new String(r.getRow()));
      for (KeyValue keyValue : r.raw()) {
        System.out.println("column: " + new String(keyValue.getFamily())
            + " ==== value: " + new String(keyValue.getValue()));
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
}
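HTablePool, HTable, and KeyValue are deprecated in recent HBase releases. The same query on the non-deprecated Connection/Table API might look like the following sketch (assuming an existing Configuration named configuration, as in the example above):

// Sketch only: the same filter logic on the Connection/Table API.
try (Connection connection = ConnectionFactory.createConnection(configuration);
     Table table = connection.getTable(TableName.valueOf(tableName))) {
  Filter filter = new SingleColumnValueFilter(
      Bytes.toBytes("column1"), null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
  Scan s = new Scan();
  s.setFilter(filter);
  try (ResultScanner rs = table.getScanner(s)) {
    for (Result r : rs) {
      System.out.println("got rowkey: " + Bytes.toString(r.getRow()));
      for (Cell cell : r.rawCells()) {
        System.out.println("column: " + Bytes.toString(CellUtil.cloneFamily(cell))
            + " ==== value: " + Bytes.toString(CellUtil.cloneValue(cell)));
      }
    }
  }
} catch (IOException e) {
  e.printStackTrace();
}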
Example 3: parseFrom
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
/**
 * @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance
 * @return An instance of {@link SingleColumnValueExcludeFilter} made from <code>bytes</code>
 * @throws DeserializationException
 * @see #toByteArray
 */
public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes)
    throws DeserializationException {
  FilterProtos.SingleColumnValueExcludeFilter proto;
  try {
    proto = FilterProtos.SingleColumnValueExcludeFilter.parseFrom(pbBytes);
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
  FilterProtos.SingleColumnValueFilter parentProto = proto.getSingleColumnValueFilter();
  final CompareOp compareOp =
      CompareOp.valueOf(parentProto.getCompareOp().name());
  final ByteArrayComparable comparator;
  try {
    comparator = ProtobufUtil.toComparator(parentProto.getComparator());
  } catch (IOException ioe) {
    throw new DeserializationException(ioe);
  }
  return new SingleColumnValueExcludeFilter(parentProto.hasColumnFamily() ? parentProto
      .getColumnFamily().toByteArray() : null, parentProto.hasColumnQualifier() ? parentProto
      .getColumnQualifier().toByteArray() : null, compareOp, comparator, parentProto
      .getFilterIfMissing(), parentProto.getLatestVersionOnly());
}
Example 4: parseFrom
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
/**
 * @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance
 * @return An instance of {@link SingleColumnValueFilter} made from <code>bytes</code>
 * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
 * @see #toByteArray
 */
public static SingleColumnValueFilter parseFrom(final byte[] pbBytes)
    throws DeserializationException {
  FilterProtos.SingleColumnValueFilter proto;
  try {
    proto = FilterProtos.SingleColumnValueFilter.parseFrom(pbBytes);
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
  final CompareOp compareOp =
      CompareOp.valueOf(proto.getCompareOp().name());
  final ByteArrayComparable comparator;
  try {
    comparator = ProtobufUtil.toComparator(proto.getComparator());
  } catch (IOException ioe) {
    throw new DeserializationException(ioe);
  }
  return new SingleColumnValueFilter(proto.hasColumnFamily() ? proto.getColumnFamily()
      .toByteArray() : null, proto.hasColumnQualifier() ? proto.getColumnQualifier()
      .toByteArray() : null, compareOp, comparator, proto.getFilterIfMissing(), proto
      .getLatestVersionOnly());
}
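parseFrom is the inverse of toByteArray; a minimal round-trip sketch (the family, qualifier, and value here are illustrative):

// Round-trip sketch: serialize a filter to protobuf bytes, then parse it back.
SingleColumnValueFilter original = new SingleColumnValueFilter(
    Bytes.toBytes("fam"), Bytes.toBytes("qual"), CompareOp.EQUAL, Bytes.toBytes("value"));
byte[] pbBytes = original.toByteArray();
SingleColumnValueFilter restored = SingleColumnValueFilter.parseFrom(pbBytes);
// The restored filter carries the same comparison operator as the original.
assert restored.getOperator() == original.getOperator();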
Example 5: printRange
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
public static String printRange(Range r) {
  StringBuilder sb = new StringBuilder();
  sb.append("[" + Bytes.toString(r.getFamily()) + ":" + Bytes.toString(r.getQualifier())
      + "], values (");
  if (r.getStartValue() != null) {
    sb.append(LCIndexConstant.getStringOfValueAndType(r.getDataType(), r.getStartValue()));
    if (r.getStartType() == CompareOp.EQUAL || r.getStartType() == CompareOp.NOT_EQUAL) {
      sb.append(" <== ").append(r.getStartType()).append(" )");
      return sb.toString();
    }
  } else {
    sb.append("null");
  }
  sb.append(", ");
  if (r.getStopValue() != null) {
    sb.append(LCIndexConstant.getStringOfValueAndType(r.getDataType(), r.getStopValue()));
  } else {
    sb.append("MAX");
  }
  sb.append(")");
  return sb.toString();
}
Example 6: constructScan
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
protected Scan constructScan(byte[] valuePrefix) throws IOException {
  FilterList list = new FilterList();
  Filter filter = new SingleColumnValueFilter(
      FAMILY_NAME, COLUMN_ZERO, CompareFilter.CompareOp.EQUAL,
      new BinaryComparator(valuePrefix)
  );
  list.addFilter(filter);
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  Scan scan = new Scan();
  scan.setCaching(opts.caching);
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  scan.setFilter(list);
  return scan;
}
Example 7: configure
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
@Override
public void configure(JobConf job) {
  try {
    HTable exampleTable = new HTable(HBaseConfiguration.create(job),
        Bytes.toBytes("exampleDeprecatedTable"));
    // mandatory
    setHTable(exampleTable);
    byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
        Bytes.toBytes("columnB") };
    // mandatory
    setInputColumns(inputColumns);
    Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
    // optional
    setRowFilter(exampleFilter);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to configure for job.", exception);
  }
}
Example 8: testJira6912
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
@Test
public void testJira6912() throws Exception {
  TableName TABLE = TableName.valueOf("testJira6912");
  Table foo = TEST_UTIL.createTable(TABLE, new byte[][] {FAMILY}, 10);
  List<Put> puts = new ArrayList<Put>();
  for (int i = 0; i != 100; i++) {
    Put put = new Put(Bytes.toBytes(i));
    put.add(FAMILY, FAMILY, Bytes.toBytes(i));
    puts.add(put);
  }
  foo.put(puts);
  // If i comment this out it works
  TEST_UTIL.flush();
  Scan scan = new Scan();
  scan.setStartRow(Bytes.toBytes(1));
  scan.setStopRow(Bytes.toBytes(3));
  scan.addColumn(FAMILY, FAMILY);
  scan.setFilter(new RowFilter(CompareFilter.CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(1))));
  ResultScanner scanner = foo.getScanner(scan);
  Result[] bar = scanner.next(100);
  // The scan covers rows 1 and 2 (the stop row is exclusive); NOT_EQUAL drops row 1,
  // so exactly one result (row 2) remains.
  assertEquals(1, bar.length);
}
Example 9: initialize
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
@Override
protected void initialize(JobContext job) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(
      job.getConfiguration()));
  TableName tableName = TableName.valueOf("exampleTable");
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
      Bytes.toBytes("columnB") };
  // optional
  Scan scan = new Scan();
  for (byte[] family : inputColumns) {
    scan.addFamily(family);
  }
  Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
  scan.setFilter(exampleFilter);
  setScan(scan);
}
Example 10: buildScanner
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
    throws IOException {
  // Defaults FilterList.Operator.MUST_PASS_ALL.
  FilterList allFilters = new FilterList();
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  // Only return rows where this column value exists in the row.
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
      Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);
  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);
  return r.getScanner(scan);
}
Example 11: testWhileMatchFilterWithFilterKeyValue
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
/**
 * Tests that the {@link WhileMatchFilter} works in combination with a
 * {@link Filter} that uses the
 * {@link Filter#filterKeyValue(org.apache.hadoop.hbase.KeyValue)} method.
 *
 * See HBASE-2258.
 *
 * @throws Exception
 */
@Test
public void testWhileMatchFilterWithFilterKeyValue() throws Exception {
  Scan s = new Scan();
  WhileMatchFilter filter = new WhileMatchFilter(
      new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], CompareOp.EQUAL, Bytes.toBytes("foo"))
  );
  s.setFilter(filter);
  InternalScanner scanner = this.region.getScanner(s);
  while (true) {
    ArrayList<Cell> values = new ArrayList<Cell>();
    boolean isMoreResults = scanner.next(values);
    assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining());
    if (!isMoreResults) {
      break;
    }
  }
}
Example 12: testSingleColumnValueExcludeFilter
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
@Test
public void testSingleColumnValueExcludeFilter() throws Exception {
  // null family/column SingleColumnValueExcludeFilter
  SingleColumnValueExcludeFilter singleColumnValueExcludeFilter =
      new SingleColumnValueExcludeFilter(null, null,
          CompareFilter.CompareOp.GREATER_OR_EQUAL, Bytes.toBytes("value"));
  assertTrue(singleColumnValueExcludeFilter.areSerializedFieldsEqual(
      ProtobufUtil.toFilter(ProtobufUtil.toFilter(singleColumnValueExcludeFilter))));
  // non-null family/column SingleColumnValueExcludeFilter
  singleColumnValueExcludeFilter =
      new SingleColumnValueExcludeFilter(Bytes.toBytes("fam"), Bytes.toBytes("qual"),
          CompareFilter.CompareOp.LESS_OR_EQUAL, new NullComparator(), false, false);
  assertTrue(singleColumnValueExcludeFilter.areSerializedFieldsEqual(
      ProtobufUtil.toFilter(ProtobufUtil.toFilter(singleColumnValueExcludeFilter))));
}
Example 13: readFields
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
@Override
public void readFields(DataInput in) throws IOException {
  column = Bytes.readByteArray(in);
  dataType = WritableUtils.readEnum(in, DataType.class);
  if (in.readBoolean()) {
    startType = WritableUtils.readEnum(in, CompareOp.class);
    startValue = Bytes.readByteArray(in);
  } else {
    startType = CompareOp.NO_OP;
    startValue = null;
  }
  if (in.readBoolean()) {
    stopType = WritableUtils.readEnum(in, CompareOp.class);
    stopValue = Bytes.readByteArray(in);
  } else {
    stopType = CompareOp.NO_OP;
    stopValue = null;
  }
}
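The matching write side is not shown; it would have to mirror the field order above. A minimal sketch, assuming the same fields as readFields:

// Sketch of the corresponding serialization; mirrors the read order in readFields.
public void write(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, column);
  WritableUtils.writeEnum(out, dataType);
  out.writeBoolean(startValue != null);
  if (startValue != null) {
    WritableUtils.writeEnum(out, startType);
    Bytes.writeByteArray(out, startValue);
  }
  out.writeBoolean(stopValue != null);
  if (stopValue != null) {
    WritableUtils.writeEnum(out, stopType);
    Bytes.writeByteArray(out, stopValue);
  }
}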
Example 14: HbaseServiceConditonModel
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
public HbaseServiceConditonModel(String family, String col, String value,
    CompareOp op) {
  super();
  this.family = family;
  this.col = col;
  this.value = value;
  this.op = op;
}
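A condition model like this is typically translated into a filter when the query is assembled. A minimal sketch, assuming getters matching the constructor fields (the family, column, and value here are illustrative):

// Sketch: build a SingleColumnValueFilter from a condition model.
HbaseServiceConditonModel cond =
    new HbaseServiceConditonModel("info", "age", "30", CompareOp.GREATER_OR_EQUAL);
Filter filter = new SingleColumnValueFilter(
    Bytes.toBytes(cond.getFamily()), Bytes.toBytes(cond.getCol()),
    cond.getOp(), Bytes.toBytes(cond.getValue()));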
Example 15: initScans
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; // import the required package/class
/**
 * Initialize the scan collection
 *
 * @param job the job whose configuration carries the run date
 * @return the list of scans
 */
private List<Scan> initScans(Job job) {
  // timestamp + ....
  Configuration conf = job.getConfiguration();
  // get the run date: yyyy-MM-dd
  String date = conf.get(GlobalConstants.RUNNING_DATE_PARAMES);
  long startDate = TimeUtil.parseString2Long(date);
  long endDate = startDate + GlobalConstants.DAY_OF_MILLISECONDS;
  Scan scan = new Scan();
  // define the start and stop rowkeys for the HBase scan
  scan.setStartRow(Bytes.toBytes(Long.toString(startDate)));
  scan.setStopRow(Bytes.toBytes(Long.toString(endDate)));
  FilterList filterList = new FilterList();
  // filter the data: only analyze "launch" events
  filterList.addFilter(new SingleColumnValueFilter(Bytes.toBytes(EventLogConstants.EVENT_LOGS_FAMILY_NAME),
      Bytes.toBytes(EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME), CompareOp.EQUAL,
      Bytes.toBytes(EventEnum.LAUNCH.alias)));
  // column names the mapper needs to read
  String[] columns = new String[] { EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME,
      EventLogConstants.LOG_COLUMN_NAME_UUID, EventLogConstants.LOG_COLUMN_NAME_SERVER_TIME,
      EventLogConstants.LOG_COLUMN_NAME_PLATFORM, EventLogConstants.LOG_COLUMN_NAME_BROWSER_NAME,
      EventLogConstants.LOG_COLUMN_NAME_BROWSER_VERSION };
  // scan.addColumn(family, qualifier)
  filterList.addFilter(this.getColumnFilter(columns));
  scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(EventLogConstants.HBASE_NAME_EVENT_LOGS));
  scan.setFilter(filterList);
  return Lists.newArrayList(scan);
}
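The getColumnFilter helper is not shown in this example. One plausible implementation, sketched here as a hypothetical (not the author's actual code), keeps only cells whose qualifiers start with one of the requested column names:

// Hypothetical sketch of the getColumnFilter helper referenced above.
private Filter getColumnFilter(String[] columns) {
  byte[][] prefixes = new byte[columns.length][];
  for (int i = 0; i < columns.length; i++) {
    prefixes[i] = Bytes.toBytes(columns[i]);
  }
  // MultipleColumnPrefixFilter keeps cells whose qualifier matches any prefix.
  return new MultipleColumnPrefixFilter(prefixes);
}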