This article collects typical usage examples of the Java type org.apache.hadoop.hbase.filter.CompareFilter.CompareOp (a nested enum of CompareFilter, despite often being indexed as a method). If you are unsure what CompareFilter.CompareOp is for, how to use it, or where to find usage examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.filter.CompareFilter.
The following presents 15 code examples involving CompareFilter.CompareOp.
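Before the individual examples, here is a minimal, self-contained sketch of the most common use of CompareFilter.CompareOp: driving a SingleColumnValueFilter in a scan. The table name ("users"), column family ("cf"), and qualifier ("age") are illustrative assumptions, not taken from the examples below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareOpScanExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Scan rows whose cf:age value is >= 18; "users", "cf" and "age" are made-up names.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("users"))) {
            SingleColumnValueFilter filter = new SingleColumnValueFilter(
                Bytes.toBytes("cf"), Bytes.toBytes("age"),
                CompareFilter.CompareOp.GREATER_OR_EQUAL, Bytes.toBytes(18));
            Scan scan = new Scan();
            scan.setFilter(filter);
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    System.out.println(result);
                }
            }
        }
    }
}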
Example 1: checkAndDelete
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
@Override
public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException {
    if (!Arrays.equals(delete.getRow(), row)) {
        throw new UnsupportedOperationException("CheckAndDelete does not support checking one row but deleting another row");
    }
    ODelete odelete = ElementConvertor.toOtsDelete(delete, this.tablestoreColumnMapping);
    Condition condition = ElementConvertor.toOtsCondition(family, qualifier, compareOp, value, this.tablestoreColumnMapping);
    odelete.setCondition(condition);
    try {
        this.tablestoreAdaptor.delete(tableNameStr, odelete);
    } catch (IOException ex) {
        // Guard against a short cause chain to avoid a NullPointerException.
        Throwable cause = (ex.getCause() == null) ? null : ex.getCause().getCause();
        if (cause instanceof TableStoreException) {
            TableStoreException exception = (TableStoreException) cause;
            if (exception.getErrorCode().equals("OTSConditionCheckFail")) {
                return false;
            }
        }
        throw ex;
    }
    return true;
}
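For comparison, a typical call through the stock HBase 1.x Table interface looks like the following sketch; the row and column names are hypothetical, and table is assumed to be an open org.apache.hadoop.hbase.client.Table handle as in the sketch above.

// Delete row1 only if cf:status currently equals "inactive" (all names are illustrative).
byte[] row = Bytes.toBytes("row1");
Delete delete = new Delete(row);
boolean deleted = table.checkAndDelete(row, Bytes.toBytes("cf"), Bytes.toBytes("status"),
    CompareFilter.CompareOp.EQUAL, Bytes.toBytes("inactive"), delete);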
Example 2: checkAndPut
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
@Override
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException {
    if (!Arrays.equals(put.getRow(), row)) {
        throw new UnsupportedOperationException("CheckAndPut does not support checking one row but putting another row");
    }
    OPut oput = ElementConvertor.toOtsPut(put, this.tablestoreColumnMapping);
    Condition condition = ElementConvertor.toOtsCondition(family, qualifier, compareOp, value, this.tablestoreColumnMapping);
    oput.setCondition(condition);
    try {
        this.tablestoreAdaptor.put(tableNameStr, oput);
    } catch (IOException ex) {
        // Guard against a short cause chain to avoid a NullPointerException.
        Throwable cause = (ex.getCause() == null) ? null : ex.getCause().getCause();
        if (cause instanceof TableStoreException) {
            TableStoreException exception = (TableStoreException) cause;
            if (exception.getErrorCode().equals("OTSConditionCheckFail")) {
                return false;
            }
        }
        throw ex;
    }
    return true;
}
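Again for comparison, the equivalent optimistic update through the stock HBase 1.x Table interface; names and values are hypothetical.

// Overwrite cf:balance only if it still holds the previously read value "50" (illustrative names).
byte[] row = Bytes.toBytes("row1");
Put put = new Put(row);
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("balance"), Bytes.toBytes("100"));
boolean applied = table.checkAndPut(row, Bytes.toBytes("cf"), Bytes.toBytes("balance"),
    CompareFilter.CompareOp.EQUAL, Bytes.toBytes("50"), put);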
Example 3: checkAndMutate
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
@Override
public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException {
    if (!Arrays.equals(mutation.getRow(), row)) {
        throw new UnsupportedOperationException("CheckAndMutate does not support checking one row but mutating another row");
    }
    OUpdate oupdate = ElementConvertor.toOtsUpdate(mutation, this.tablestoreColumnMapping);
    Condition condition = ElementConvertor.toOtsCondition(family, qualifier, compareOp, value, this.tablestoreColumnMapping);
    oupdate.setCondition(condition);
    try {
        this.tablestoreAdaptor.update(tableNameStr, oupdate);
    } catch (IOException ex) {
        // Guard against a short cause chain to avoid a NullPointerException.
        Throwable cause = (ex.getCause() == null) ? null : ex.getCause().getCause();
        if (cause instanceof TableStoreException) {
            TableStoreException exception = (TableStoreException) cause;
            if (exception.getErrorCode().equals("OTSConditionCheckFail")) {
                return false;
            }
        }
        throw ex;
    }
    return true;
}
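A hedged sketch of how a caller might drive this method with a RowMutations batch; all names and values are made up, and table is assumed to be an open Table handle.

// Atomically bump cf:version from "1" to "2" and record cf:updated, all-or-nothing (illustrative names).
byte[] row = Bytes.toBytes("row1");
RowMutations mutations = new RowMutations(row);
Put put = new Put(row);
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("version"), Bytes.toBytes("2"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("updated"), Bytes.toBytes("true"));
mutations.add(put);
boolean applied = table.checkAndMutate(row, Bytes.toBytes("cf"), Bytes.toBytes("version"),
    CompareFilter.CompareOp.EQUAL, Bytes.toBytes("1"), mutations);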
Example 4: doRawScan
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
private void doRawScan() throws IOException {
    FilterList filterList = new FilterList();
    CompareFilter.CompareOp startOp = CompareFilter.CompareOp.GREATER_OR_EQUAL;
    CompareFilter.CompareOp stopOp = CompareFilter.CompareOp.LESS_OR_EQUAL;
    for (int i = 0; i < indexColumnNames.length && i < scanValues.length; i++) {
        filterList.addFilter(
            new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), startOp,
                Bytes.toBytes(scanValues[i][0])));
        filterList.addFilter(
            new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), stopOp,
                Bytes.toBytes(scanValues[i][1])));
    }
    Scan scan = new Scan();
    scan.setFilter(filterList);
    scan.setId("raw-scan");
    Table table = conn.getTable(tableName);
    ResultScanner scanner = table.getScanner(scan);
    Result result;
    int count = 0;
    while ((result = scanner.next()) != null) {
        ++count;
        if (PRINT_RESULT) printResult(result);
    }
    scanner.close();
    table.close(); // close the Table handle as well, so it is not leaked
    System.out.println("raw scan has " + count + " records");
}
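One detail worth calling out: the no-argument FilterList constructor defaults to MUST_PASS_ALL (logical AND), which is what makes the GREATER_OR_EQUAL / LESS_OR_EQUAL pair per column behave as an inclusive range check. Spelled out explicitly:

// Equivalent to the no-argument constructor used above: every filter must pass (AND semantics).
FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);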
Example 5: preCheckAndPutAfterRowLock
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
@Override
public boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
        final byte[] row, final byte[] family, final byte[] qualifier,
        final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
        final boolean result) throws IOException {
    if (put.getAttribute(CHECK_COVERING_PERM) != null) {
        // The table, CF and qualifier permission checks failed earlier; now give the
        // cell-level permission check a chance.
        TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
        Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
        AuthResult authResult = null;
        if (checkCoveringPermission(OpType.CHECK_AND_PUT, c.getEnvironment(), row, families,
                HConstants.LATEST_TIMESTAMP, Action.READ)) {
            authResult = AuthResult.allow(OpType.CHECK_AND_PUT.toString(), "Covering cell set",
                getActiveUser(), Action.READ, table, families);
        } else {
            authResult = AuthResult.deny(OpType.CHECK_AND_PUT.toString(), "Covering cell set",
                getActiveUser(), Action.READ, table, families);
        }
        logResult(authResult);
        if (authorizationEnabled && !authResult.isAllowed()) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
    return result;
}
Example 6: getCompare
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
protected CompareFilter.CompareOp getCompare(Where where) {
    if (where == Where.Equals)
        return CompareFilter.CompareOp.EQUAL;
    if (where == Where.NotEquals)
        return CompareFilter.CompareOp.NOT_EQUAL;
    if (where == Where.Less)
        return CompareFilter.CompareOp.LESS;
    if (where == Where.LessEquals)
        return CompareFilter.CompareOp.LESS_OR_EQUAL;
    if (where == Where.Greater)
        return CompareFilter.CompareOp.GREATER;
    if (where == Where.GreaterEquals)
        return CompareFilter.CompareOp.GREATER_OR_EQUAL;
    // Fall back to EQUAL for any unmapped Where value.
    return CompareFilter.CompareOp.EQUAL;
}
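A hypothetical call site for this mapping, assuming the Where enum from the snippet and the usual HBase filter classes:

// Map an application-level condition to an HBase CompareOp and use it in a filter (illustrative names).
CompareFilter.CompareOp op = getCompare(Where.GreaterEquals); // yields GREATER_OR_EQUAL
Filter scoreFilter = new SingleColumnValueFilter(
    Bytes.toBytes("cf"), Bytes.toBytes("score"), op, Bytes.toBytes(100L));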
Example 7: preCheckAndPutAfterRowLock
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
@Override
public boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
        final byte[] row, final byte[] family, final byte[] qualifier,
        final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
        final boolean result) throws IOException {
    if (put.getAttribute(CHECK_COVERING_PERM) != null) {
        // The table, CF and qualifier permission checks failed earlier; now give the
        // cell-level permission check a chance.
        TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
        Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
        AuthResult authResult = null;
        if (checkCoveringPermission(OpType.CHECK_AND_PUT, c.getEnvironment(), row, families,
                HConstants.LATEST_TIMESTAMP, Action.READ)) {
            authResult = AuthResult.allow(OpType.CHECK_AND_PUT.toString(), "Covering cell set",
                getActiveUser(), Action.READ, table, families);
        } else {
            authResult = AuthResult.deny(OpType.CHECK_AND_PUT.toString(), "Covering cell set",
                getActiveUser(), Action.READ, table, families);
        }
        logResult(authResult);
        if (!authResult.isAllowed()) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
    return result;
}
Example 8: checkAndPut
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
private boolean checkAndPut(MutationProto mutation, Condition condition) throws IOException {
    boolean success;
    final byte[] row = condition.getRow().array();
    final byte[] cf = condition.getFamily().array();
    final byte[] cq = condition.getQualifier().array();
    // CompareType and CompareFilter.CompareOp define the same constant names, so valueOf translates between them.
    final CompareFilter.CompareOp compareOp = CompareFilter.CompareOp.valueOf(condition.getCompareType().name());
    final ByteArrayComparable comparator = ReverseProtobufUtil.toComparator(condition.getComparator());
    success = theRegion.checkAndMutate(row,
        cf,
        cq,
        compareOp,
        comparator,
        ReverseProtobufUtil.toPut(mutation),
        true);
    return success;
}
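The valueOf call above works only because the two enums keep their constant names in sync; a minimal illustration of the name-based translation:

// Translating by name: both enums define GREATER_OR_EQUAL, LESS, EQUAL, and so on.
CompareFilter.CompareOp op = CompareFilter.CompareOp.valueOf("GREATER_OR_EQUAL");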
Example 9: checkAndDelete
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
private boolean checkAndDelete(MutationProto mutation, Condition condition) throws IOException {
    boolean success;
    final byte[] row = condition.getRow().array();
    final byte[] cf = condition.getFamily().array();
    final byte[] cq = condition.getQualifier().array();
    final CompareFilter.CompareOp compareOp = CompareFilter.CompareOp.valueOf(condition.getCompareType().name());
    final ByteArrayComparable comparator = ReverseProtobufUtil.toComparator(condition.getComparator());
    success = theRegion.checkAndMutate(row,
        cf,
        cq,
        compareOp,
        comparator,
        ReverseProtobufUtil.toDelete(mutation),
        true);
    return success;
}
Example 10: SingleFieldEntityFilter
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
public SingleFieldEntityFilter(EntitySchema entitySchema,
        EntitySerDe<?> entitySerDe, String fieldName, Object filterValue,
        CompareFilter.CompareOp equalityOperator) {
    FieldMapping fieldMapping = entitySchema.getFieldMapping(fieldName);
    if (fieldMapping.getMappingType() != MappingType.COLUMN) {
        throw new DatasetException(
            "SingleColumnValueFilter only compatible with COLUMN mapping types.");
    }
    byte[] family = fieldMapping.getFamily();
    byte[] qualifier = fieldMapping.getQualifier();
    byte[] comparisonBytes = entitySerDe.serializeColumnValueToBytes(fieldName,
        filterValue);
    this.filter = new SingleColumnValueFilter(family, qualifier,
        equalityOperator, comparisonBytes);
}
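A hypothetical construction of this filter, assuming an EntitySchema and EntitySerDe are already at hand (for instance from a Kite-style entity mapper); the field name and value are illustrative:

// Match entities whose "age" field is at least 21; entitySchema and entitySerDe are assumed to exist.
SingleFieldEntityFilter ageFilter = new SingleFieldEntityFilter(
    entitySchema, entitySerDe, "age", 21, CompareFilter.CompareOp.GREATER_OR_EQUAL);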
Example 11: doIndexScan
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
private void doIndexScan() throws IOException {
    ScanRange.ScanRangeList rangeList = new ScanRange.ScanRangeList();
    FilterList filterList = new FilterList();
    CompareFilter.CompareOp startOp = CompareFilter.CompareOp.GREATER_OR_EQUAL;
    CompareFilter.CompareOp stopOp = CompareFilter.CompareOp.LESS_OR_EQUAL;
    for (int i = 0; i < indexColumnNames.length && i < scanValues.length; i++) {
        rangeList.addScanRange(new ScanRange(familyName, Bytes.toBytes(indexColumnNames[i]),
            Bytes.toBytes(scanValues[i][0]), Bytes.toBytes(scanValues[i][1]), startOp, stopOp,
            DataType.INT));
        filterList.addFilter(
            new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), startOp,
                Bytes.toBytes(scanValues[i][0])));
        filterList.addFilter(
            new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), stopOp,
                Bytes.toBytes(scanValues[i][1])));
    }
    Scan scan = new Scan();
    scan.setFilter(filterList);
    if (rangeList.getRanges().size() > 0) {
        scan.setAttribute(ScanRange.SCAN_RANGE_ATTRIBUTE_STR, rangeList.toBytesAttribute());
    }
    scan.setId("LMD-scan");
    scan.setCaching(1);
    ResultScanner scanner = BaseIndexScanner.getIndexScanner(conn, relation, scan);
    Result result;
    int count = 0;
    while ((result = scanner.next()) != null) {
        count++;
        if (PRINT_RESULT) printResult(result);
    }
    scanner.close();
    System.out.println("LMDIndex scan has " + count + " records");
}
Example 12: preCheckAndPut
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
@Override
public boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
        final byte[] row, final byte[] family, final byte[] qualifier,
        final CompareFilter.CompareOp compareOp,
        final ByteArrayComparable comparator, final Put put,
        final boolean result) throws IOException {
    User user = getActiveUser();
    checkForReservedTagPresence(user, put);
    // Require READ and WRITE permissions on the table, CF, and KV to update
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
    AuthResult authResult = permissionGranted(OpType.CHECK_AND_PUT, user, env, families,
        Action.READ, Action.WRITE);
    logResult(authResult);
    if (!authResult.isAllowed()) {
        if (cellFeaturesEnabled && !compatibleEarlyTermination) {
            put.setAttribute(CHECK_COVERING_PERM, TRUE);
        } else if (authorizationEnabled) {
            throw new AccessDeniedException("Insufficient permissions " +
                authResult.toContextString());
        }
    }
    byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
    if (bytes != null) {
        if (cellFeaturesEnabled) {
            addCellPermissions(bytes, put.getFamilyCellMap());
        } else {
            throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
        }
    }
    return result;
}
Example 13: preCheckAndDelete
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
@Override
public boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
        final byte[] row, final byte[] family, final byte[] qualifier,
        final CompareFilter.CompareOp compareOp,
        final ByteArrayComparable comparator, final Delete delete,
        final boolean result) throws IOException {
    // An ACL on a delete is useless; we shouldn't allow it
    if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) {
        throw new DoNotRetryIOException("ACL on checkAndDelete has no effect: " +
            delete.toString());
    }
    // Require READ and WRITE permissions on the table, CF, and the KV covered
    // by the delete
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
    User user = getActiveUser();
    AuthResult authResult = permissionGranted(OpType.CHECK_AND_DELETE, user, env, families,
        Action.READ, Action.WRITE);
    logResult(authResult);
    if (!authResult.isAllowed()) {
        if (cellFeaturesEnabled && !compatibleEarlyTermination) {
            delete.setAttribute(CHECK_COVERING_PERM, TRUE);
        } else if (authorizationEnabled) {
            throw new AccessDeniedException("Insufficient permissions " +
                authResult.toContextString());
        }
    }
    return result;
}
Example 14: preCheckAndDeleteAfterRowLock
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
@Override
public boolean preCheckAndDeleteAfterRowLock(
        final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
        final byte[] qualifier, final CompareFilter.CompareOp compareOp,
        final ByteArrayComparable comparator, final Delete delete, final boolean result)
        throws IOException {
    if (delete.getAttribute(CHECK_COVERING_PERM) != null) {
        // The table, CF and qualifier permission checks failed earlier; now give the
        // cell-level permission check a chance.
        TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
        Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
        AuthResult authResult = null;
        if (checkCoveringPermission(OpType.CHECK_AND_DELETE, c.getEnvironment(), row, families,
                HConstants.LATEST_TIMESTAMP, Action.READ)) {
            authResult = AuthResult.allow(OpType.CHECK_AND_DELETE.toString(), "Covering cell set",
                getActiveUser(), Action.READ, table, families);
        } else {
            authResult = AuthResult.deny(OpType.CHECK_AND_DELETE.toString(), "Covering cell set",
                getActiveUser(), Action.READ, table, families);
        }
        logResult(authResult);
        if (authorizationEnabled && !authResult.isAllowed()) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
    return result;
}
Example 15: ScanRange
import org.apache.hadoop.hbase.filter.CompareFilter; // import the package/class this method depends on
public ScanRange(byte[] family, byte[] qualifier, byte[] start, byte[] stop,
        CompareFilter.CompareOp startOp, CompareFilter.CompareOp stopOp, long startTs, long stopTs,
        DataType dataType) {
    this.family = family;
    this.qualifier = qualifier;
    this.start = start;
    this.stop = stop;
    this.startOp = startOp;
    this.stopOp = stopOp;
    this.startTs = startTs;
    this.stopTs = stopTs;
    this.dataType = dataType;
}