This page compiles typical usage examples of the Java method org.apache.hadoop.hbase.client.Get.setFilter. If you have been wondering what exactly Get.setFilter does, how to call it, or what real-world usage looks like, the curated method samples below may help. You can also read further about the enclosing class, org.apache.hadoop.hbase.client.Get.
The following presents 8 code examples of Get.setFilter, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
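Before the compiled examples, here is a minimal sketch of the basic pattern: attach a Filter to a Get, then hand the Get to Table.get. The table name, row key, and qualifier prefix below are placeholders chosen only for illustration, and the connection setup assumes the standard ConnectionFactory API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class GetSetFilterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("my_table"))) { // placeholder table
      Get get = new Get(Bytes.toBytes("row1"));                              // placeholder row key
      // Only return cells of this row whose qualifier starts with "col".
      get.setFilter(new ColumnPrefixFilter(Bytes.toBytes("col")));
      Result result = table.get(get);
      for (Cell cell : result.rawCells()) {
        System.out.println(Bytes.toString(CellUtil.cloneQualifier(cell)));
      }
    }
  }
}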
Example 1: run
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
public void run() {
  try {
    // Build the Get with the pre-configured filter list and requested columns, then execute it.
    Get get = new Get(rowkey);
    get.setFilter(ftlist);
    get.setCacheBlocks(false);
    if (resultColumns != null && resultColumns.length != 0) {
      for (byte[] column : resultColumns) {
        byte[][] tmp = KeyValue.parseColumn(column);
        if (tmp.length == 1) {
          get.addFamily(tmp[0]);
        } else {
          get.addColumn(tmp[0], tmp[1]);
        }
      }
    }
    rsnew = table.get(get);
    table.close();
  } catch (Exception e) {
    rsnew = null;
    exception = e;
  }
}
Example 2: preGetOp
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
    List<Cell> results) throws IOException {
  if (!initialized) {
    throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized");
  }
  // Nothing useful to do if authorization is not enabled
  if (!authorizationEnabled) {
    return;
  }
  Region region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  try {
    authorizations = get.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  if (authorizations == null) {
    // No Authorizations present for this scan/Get!
    // In case of system tables other than "labels", just scan without visibility checks and
    // filtering. Checking visibility labels for the META and NAMESPACE tables is not needed.
    TableName table = region.getRegionInfo().getTable();
    if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
      return;
    }
  }
  Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(e.getEnvironment()
      .getRegion(), authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = get.getFilter();
    if (filter != null) {
      // Preserve any user-supplied filter by combining it with the visibility label filter.
      get.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      get.setFilter(visibilityLabelFilter);
    }
  }
}
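The combining pattern above, wrapping an existing filter together with a new one in a FilterList so neither is lost, works the same way in plain client code. A minimal sketch with a hypothetical helper method (andFilter is not part of the HBase API):

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;

public final class GetFilterUtil {
  private GetFilterUtil() {}

  // Hypothetical helper: AND an additional filter with whatever filter the Get already carries.
  public static void andFilter(Get get, Filter extra) {
    Filter existing = get.getFilter();
    if (existing != null) {
      // FilterList defaults to MUST_PASS_ALL, so both filters must accept each cell.
      get.setFilter(new FilterList(existing, extra));
    } else {
      get.setFilter(extra);
    }
  }
}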
Example 3: testRow
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Override
void testRow(final int i) throws IOException, InterruptedException {
  if (opts.randomSleep > 0) {
    Thread.sleep(rd.nextInt(opts.randomSleep));
  }
  Get get = new Get(getRandomRow(this.rand, opts.totalRows));
  if (opts.addColumns) {
    get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    get.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    // FilterAllFilter drops every cell, so this exercises pure read-path overhead.
    get.setFilter(new FilterAllFilter());
  }
  get.setConsistency(consistency);
  if (LOG.isTraceEnabled()) LOG.trace(get.toString());
  if (opts.multiGet > 0) {
    this.gets.add(get);
    if (this.gets.size() == opts.multiGet) {
      Result[] rs = this.table.get(this.gets);
      updateValueSize(rs);
      this.gets.clear();
    }
  } else {
    updateValueSize(this.table.get(get));
  }
}
Example 4: verifyInvocationResults
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
public void verifyInvocationResults(Integer[] selectQualifiers,
    Integer[] expectedQualifiers) throws Exception {
  Get get = new Get(ROW_BYTES);
  for (int i = 0; i < selectQualifiers.length; i++) {
    get.addColumn(FAMILY_NAME_BYTES,
        Bytes.toBytes(QUALIFIER_PREFIX + selectQualifiers[i]));
  }
  get.setFilter(new InvocationRecordFilter());
  List<KeyValue> expectedValues = new ArrayList<KeyValue>();
  for (int i = 0; i < expectedQualifiers.length; i++) {
    expectedValues.add(new KeyValue(ROW_BYTES, FAMILY_NAME_BYTES,
        Bytes.toBytes(QUALIFIER_PREFIX + expectedQualifiers[i]),
        expectedQualifiers[i],
        Bytes.toBytes(VALUE_PREFIX + expectedQualifiers[i])));
  }
  Scan scan = new Scan(get);
  List<Cell> actualValues = new ArrayList<Cell>();
  List<Cell> temp = new ArrayList<Cell>();
  InternalScanner scanner = this.region.getScanner(scan);
  while (scanner.next(temp)) {
    actualValues.addAll(temp);
    temp.clear();
  }
  actualValues.addAll(temp);
  Assert.assertTrue("Actual values " + actualValues
      + " differ from the expected values:" + expectedValues,
      expectedValues.equals(actualValues));
}
Example 5: RowResultGenerator
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter, final boolean cacheBlocks)
    throws IllegalArgumentException, IOException {
  Table table = RESTServlet.getInstance().getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col : rowspec.getColumns()) {
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 1) {
          get.addFamily(split[0]);
        } else if (split.length == 2) {
          get.addColumn(split[0], split[1]);
        } else {
          throw new IllegalArgumentException("Invalid column specifier.");
        }
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    get.setCacheBlocks(cacheBlocks);
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.listCells().iterator();
    }
  } catch (DoNotRetryIOException | NeedUnmanagedConnectionException e) {
    // Warn here because Stargate will return 404 in the case if multiple
    // column families were specified but one did not exist -- currently
    // HBase will fail the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage but
    // help to avoid confusion by leaving a record of what happened here in
    // the log.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    table.close();
  }
}
Example 6: getFromThrift
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
/**
 * Creates a {@link Get} (HBase) from a {@link TGet} (Thrift).
 *
 * This ignores any timestamps set on {@link TColumn} objects.
 *
 * @param in the <code>TGet</code> to convert
 *
 * @return <code>Get</code> object
 *
 * @throws IOException if an invalid time range or max version parameter is given
 */
public static Get getFromThrift(TGet in) throws IOException {
  Get out = new Get(in.getRow());
  // Timestamp overwrites time range if both are set
  if (in.isSetTimestamp()) {
    out.setTimeStamp(in.getTimestamp());
  } else if (in.isSetTimeRange()) {
    out.setTimeRange(in.getTimeRange().getMinStamp(), in.getTimeRange().getMaxStamp());
  }
  if (in.isSetMaxVersions()) {
    out.setMaxVersions(in.getMaxVersions());
  }
  if (in.isSetFilterString()) {
    ParseFilter parseFilter = new ParseFilter();
    out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
  }
  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  if (in.isSetAuthorizations()) {
    out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
  }
  if (!in.isSetColumns()) {
    return out;
  }
  for (TColumn column : in.getColumns()) {
    if (column.isSetQualifier()) {
      out.addColumn(column.getFamily(), column.getQualifier());
    } else {
      out.addFamily(column.getFamily());
    }
  }
  return out;
}
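Example 6 sets the filter from a textual filter expression via ParseFilter. The same parser can be used directly in client code. A minimal sketch, with a placeholder row key and a filter expression chosen only for illustration (the filter-language syntax shown is an assumption about a parameterless KeyOnlyFilter):

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterStringSketch {
  public static Get buildGet() throws Exception {
    Get get = new Get(Bytes.toBytes("row1"));                    // placeholder row key
    // Parse an expression in HBase's filter language into a Filter instance.
    Filter filter = new ParseFilter().parseFilterString("KeyOnlyFilter ()");
    get.setFilter(filter);
    return get;
  }
}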
Example 7: prePrepareTimeStampForDeleteVersion
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Override
public void prePrepareTimeStampForDeleteVersion(
    ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation delete, Cell cell,
    byte[] byteNow, Get get) throws IOException {
  // Nothing to do if we are not filtering by visibility
  if (!authorizationEnabled) {
    return;
  }
  CellVisibility cellVisibility = null;
  try {
    cellVisibility = delete.getCellVisibility();
  } catch (DeserializationException de) {
    throw new IOException("Invalid cell visibility specified " + delete, de);
  }
  // The check for reserved visibility tags (checkForReservedVisibilityTagPresence) happens in
  // preBatchMutate. It happens for every mutation and that is enough.
  List<Tag> visibilityTags = new ArrayList<Tag>();
  if (cellVisibility != null) {
    String labelsExp = cellVisibility.getExpression();
    try {
      visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, false,
          false);
    } catch (InvalidLabelException e) {
      throw new IOException("Invalid cell visibility specified " + labelsExp, e);
    }
  }
  get.setFilter(new DeleteVersionVisibilityExpressionFilter(visibilityTags,
      VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT));
  List<Cell> result = ctx.getEnvironment().getRegion().get(get, false);
  if (result.size() < get.getMaxVersions()) {
    // Nothing to delete
    CellUtil.updateLatestStamp(cell, Long.MIN_VALUE);
    return;
  }
  if (result.size() > get.getMaxVersions()) {
    throw new RuntimeException("Unexpected size: " + result.size()
        + ". Results more than the max versions obtained.");
  }
  Cell getCell = result.get(get.getMaxVersions() - 1);
  CellUtil.setTimestamp(cell, getCell.getTimestamp());
  // We are bypassing here because in HRegion.updateDeleteLatestVersionTimeStamp we would
  // update with the current timestamp after doing the get again. As the hook has already
  // determined the needed timestamp, we need to bypass here.
  // TODO: See if HRegion.updateDeleteLatestVersionTimeStamp() could be
  // called only if the hook is not called.
  ctx.bypass();
}
Example 8: testGet_Basic
import org.apache.hadoop.hbase.client.Get; // import the package/class this method depends on
@Test
public void testGet_Basic() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] col1 = Bytes.toBytes("col1");
  byte[] col2 = Bytes.toBytes("col2");
  byte[] col3 = Bytes.toBytes("col3");
  byte[] col4 = Bytes.toBytes("col4");
  byte[] col5 = Bytes.toBytes("col5");
  // Setting up region
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    // Add to memstore
    Put put = new Put(row1);
    put.add(fam1, col1, null);
    put.add(fam1, col2, null);
    put.add(fam1, col3, null);
    put.add(fam1, col4, null);
    put.add(fam1, col5, null);
    region.put(put);
    Get get = new Get(row1);
    get.addColumn(fam1, col2);
    get.addColumn(fam1, col4);
    // Expected result
    KeyValue kv1 = new KeyValue(row1, fam1, col2);
    KeyValue kv2 = new KeyValue(row1, fam1, col4);
    KeyValue[] expected = { kv1, kv2 };
    // Test
    Result res = region.get(get);
    assertEquals(expected.length, res.size());
    for (int i = 0; i < res.size(); i++) {
      assertTrue(CellUtil.matchingRow(expected[i], res.rawCells()[i]));
      assertTrue(CellUtil.matchingFamily(expected[i], res.rawCells()[i]));
      assertTrue(CellUtil.matchingQualifier(expected[i], res.rawCells()[i]));
    }
    // Test using a filter on a Get
    Get g = new Get(row1);
    final int count = 2;
    g.setFilter(new ColumnCountGetFilter(count));
    res = region.get(g);
    assertEquals(count, res.size());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}