This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.ColumnPrefixFilter. If you are wondering what ColumnPrefixFilter is for, or how to use it in practice, the curated examples below may help.
The ColumnPrefixFilter class belongs to the org.apache.hadoop.hbase.filter package. Fifteen code examples are shown below, ordered by popularity.
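Before turning to the collected examples, here is a minimal, self-contained sketch of the filter's most common use: attaching a ColumnPrefixFilter to a Scan so that only cells whose qualifiers start with a given prefix are returned. The table name ("my_table") and the qualifier prefix ("attr_") are illustrative assumptions, not values taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnPrefixFilterExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("my_table"))) { // hypothetical table
      Scan scan = new Scan();
      // Keep only cells whose qualifier starts with "attr_" (prefix is an assumption)
      scan.setFilter(new ColumnPrefixFilter(Bytes.toBytes("attr_")));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(result);
        }
      }
    }
  }
}

Like other HBase filters, a ColumnPrefixFilter is evaluated on the region servers, so non-matching cells are dropped before results are sent back to the client.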
Example 1: adapt
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
@Override
public RowFilter adapt(FilterAdapterContext context, ColumnPrefixFilter filter)
    throws IOException {
  byte[] prefix = filter.getPrefix();

  // Quoting for RE2 can result in at most length * 2 characters written. Pre-allocate
  // that much space in the ByteArrayOutputStream to prevent reallocation later.
  ByteArrayOutputStream outputStream =
      new ByteArrayOutputStream(prefix.length * 2);
  readerExpressionHelper.writeQuotedRegularExpression(prefix, outputStream);
  outputStream.write(ReaderExpressionHelper.ALL_QUALIFIERS_BYTES);

  return RowFilter.newBuilder()
      .setColumnQualifierRegexFilter(
          ByteString.copyFrom(
              outputStream.toByteArray()))
      .build();
}
Example 2: createScanForSpaceSnapshotSizes
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
static Scan createScanForSpaceSnapshotSizes(TableName table) {
  Scan s = new Scan();
  if (null == table) {
    // Read all tables, just look at the row prefix
    s.setRowPrefixFilter(QUOTA_TABLE_ROW_KEY_PREFIX);
  } else {
    // Fetch the exact row for the table
    byte[] rowkey = getTableRowKey(table);
    // Fetch just this one row
    s.withStartRow(rowkey).withStopRow(rowkey, true);
  }
  // Just the usage family and only the snapshot size qualifiers
  return s.addFamily(QUOTA_FAMILY_USAGE).setFilter(
      new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER));
}
Example 3: testPartialResultsWithColumnFilter
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
/**
 * Test partial Result re-assembly in the presence of different filters. The Results from the
 * partial scanner should match the Results returned from a scanner that receives all of the
 * results in one RPC to the server. The partial scanner is tested with a variety of different
 * result sizes (all of which are less than the size necessary to fetch an entire row)
 * @throws Exception
 */
@Test
public void testPartialResultsWithColumnFilter() throws Exception {
  testPartialResultsWithColumnFilter(new FirstKeyOnlyFilter());
  testPartialResultsWithColumnFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier5")));
  testPartialResultsWithColumnFilter(new ColumnRangeFilter(Bytes.toBytes("testQualifer1"), true,
      Bytes.toBytes("testQualifier7"), true));

  Set<byte[]> qualifiers = new LinkedHashSet<>();
  qualifiers.add(Bytes.toBytes("testQualifier5"));
  testPartialResultsWithColumnFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
}
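The test above exercises partial Results, which a client sees when it caps how much data a single RPC may return. As a rough illustration of the scan setup being tested, here is a minimal sketch that allows partial Results on a Scan carrying a ColumnPrefixFilter; the prefix, the size limit, and the previously obtained Table instance (table) are assumptions, and mayHaveMoreCellsInRow() is the method name used by recent HBase client versions (older releases expose isPartial() instead).

// Minimal sketch: scanning with a ColumnPrefixFilter while allowing partial Results.
Scan scan = new Scan();
scan.setFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier")));
scan.setMaxResultSize(128);        // deliberately tiny, forces rows to be split across RPCs
scan.setAllowPartialResults(true); // opt in to receiving partial Results
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result partial : scanner) {
    // mayHaveMoreCellsInRow() tells the caller whether this row continues in a
    // subsequent Result and therefore still needs re-assembly.
    System.out.println(Bytes.toString(partial.getRow())
        + " partial=" + partial.mayHaveMoreCellsInRow());
  }
}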
Example 4: operationScan
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
@Override
public Scan operationScan()
{
  Scan scan = new Scan();
  Filter f = new ColumnPrefixFilter(Bytes.toBytes("col-0"));
  scan.setFilter(f);
  return scan;
}
Example 5: columnPrefixFiltersAreAdapted
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
@Test
public void columnPrefixFiltersAreAdapted() throws IOException {
  ColumnPrefixFilter filter = new ColumnPrefixFilter(Bytes.toBytes("prefix"));
  RowFilter rowFilter = adapter.adapt(emptyScanContext, filter);
  Assert.assertEquals(
      "prefix\\C*",
      Bytes.toString(rowFilter.getColumnQualifierRegexFilter().toByteArray()));
}
Example 6: testColumnPrefixFilter
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
/**
 * Requirement 9.3 - ColumnPrefixFilter - select keys with columns that match a particular prefix
 */
@Test
public void testColumnPrefixFilter() throws Exception {
  // Initialize
  int numGoodCols = 5;
  int numBadCols = 20;
  String goodColPrefix = "bueno";
  Table table = getConnection().getTable(TABLE_NAME);
  byte[] rowKey = dataHelper.randomData("testRow-");
  Put put = new Put(rowKey);
  for (int i = 0; i < numBadCols; ++i) {
    put.addColumn(COLUMN_FAMILY, dataHelper.randomData(""), Bytes.toBytes("someval"));
  }
  for (int i = 0; i < numGoodCols; ++i) {
    put.addColumn(COLUMN_FAMILY, dataHelper.randomData(goodColPrefix), Bytes.toBytes("someval"));
  }
  table.put(put);

  // Filter for results
  Filter filter = new ColumnPrefixFilter(Bytes.toBytes("bueno"));
  Get get = new Get(rowKey).setFilter(filter);
  Result result = table.get(get);

  Assert.assertEquals("Should only return good columns", numGoodCols, result.size());
  Cell[] cells = result.rawCells();
  for (Cell cell : cells) {
    Assert.assertTrue("Should have good column prefix",
        Bytes.toString(CellUtil.cloneQualifier(cell)).startsWith(goodColPrefix));
  }

  table.close();
}
Example 7: scanForAttributeNames
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
@Override
public String[] scanForAttributeNames(String accountId, String componentId, long start, long stop) throws IOException {
  logger.debug("Scanning HBase: acc: {} cid: {} start: {} stop: {}", accountId, componentId, start, stop);

  Scan scan = new HbaseScanManager(accountId, componentId)
      .create(start, stop)
      .setFilter(new ColumnPrefixFilter(Columns.BYTES_ATTRIBUTE_COLUMN_PREFIX))
      .getScan();

  Set<String> attributeNames = retrieveAttributeNames(scan);
  return attributeNames.toArray(new String[attributeNames.size()]);
}
Example 8: testGetWithColumnPrefixFilter
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
@Test(timeOut = 60_000)
public void testGetWithColumnPrefixFilter(ITestContext context) throws Exception {
  testGet(context, new ColumnPrefixFilter(prefix));
}
Example 9: testScanWithColumnPrefixFilter
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
@Test(timeOut = 60_000)
public void testScanWithColumnPrefixFilter(ITestContext context) throws Exception {
  testScan(context, new ColumnPrefixFilter(prefix));
}
Example 10: deleteMapFamily
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
/**
 * Deletes all cells from a map-type family with a timestamp less than or
 * equal to a specified timestamp.
 *
 * <p>This call requires an HBase row lock, so it should be used with care.</p>
 *
 * @param entityId The entity (row) to delete from.
 * @param familyLayout A family layout.
 * @param upToTimestamp A timestamp.
 * @throws IOException If there is an IO error.
 */
private void deleteMapFamily(EntityId entityId, FamilyLayout familyLayout,
    long upToTimestamp) throws IOException {
  // Since multiple Kiji column families are mapped into a single HBase column family,
  // we have to do this delete in a two-step transaction:
  //
  // 1. Send a get() to retrieve the names of all HBase qualifiers within the HBase
  //    family that belong to the Kiji column family.
  // 2. Send a delete() for each of the HBase qualifiers found in the previous step.
  final String familyName = familyLayout.getName();
  final HBaseColumnName hbaseColumnName = mWriterLayoutCapsule
      .getColumnNameTranslator().toHBaseColumnName(new KijiColumnName(familyName));
  final byte[] hbaseRow = entityId.getHBaseRowKey();

  // Lock the row.
  // final RowLock rowLock = mHTable.lockRow(hbaseRow);
  try {
    // Step 1.
    final Get get = new Get(hbaseRow);
    get.addFamily(hbaseColumnName.getFamily());

    final FilterList filter = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    filter.addFilter(new KeyOnlyFilter());
    filter.addFilter(new ColumnPrefixFilter(hbaseColumnName.getQualifier()));

    get.setFilter(filter);
    final Result result = mHTable.get(get);

    // Step 2.
    if (result.isEmpty()) {
      LOG.debug("No qualifiers to delete in map family: " + familyName);
    } else {
      final Delete delete = SchemaPlatformBridge.get().createDelete(
          hbaseRow, HConstants.LATEST_TIMESTAMP);
      for (byte[] hbaseQualifier
          : result.getFamilyMap(hbaseColumnName.getFamily()).keySet()) {
        LOG.debug("Deleting HBase column " + hbaseColumnName.getFamilyAsString()
            + ":" + Bytes.toString(hbaseQualifier));
        delete.deleteColumns(hbaseColumnName.getFamily(), hbaseQualifier, upToTimestamp);
      }
      updateBuffer(delete);
    }
  } finally {
    // Make sure to unlock the row!
    // mHTable.unlockRow(rowLock);
  }
}
Example 11: isFilterSupported
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
@Override
public FilterSupportStatus isFilterSupported(
    FilterAdapterContext context,
    ColumnPrefixFilter filter) {
  return FilterSupportStatus.SUPPORTED;
}
Example 12: buildAdapter
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
/**
 * Create a new FilterAdapter
 */
public static FilterAdapter buildAdapter() {
  FilterAdapter adapter = new FilterAdapter();
  adapter.addFilterAdapter(
      ColumnPrefixFilter.class, new ColumnPrefixFilterAdapter());
  adapter.addFilterAdapter(
      ColumnRangeFilter.class, new ColumnRangeFilterAdapter());
  adapter.addFilterAdapter(
      KeyOnlyFilter.class, new KeyOnlyFilterAdapter());
  adapter.addFilterAdapter(
      MultipleColumnPrefixFilter.class, new MultipleColumnPrefixFilterAdapter());
  adapter.addFilterAdapter(
      TimestampsFilter.class, new TimestampsFilterAdapter());
  ValueFilterAdapter valueFilterAdapter = new ValueFilterAdapter();
  adapter.addFilterAdapter(
      ValueFilter.class, valueFilterAdapter);
  SingleColumnValueFilterAdapter scvfa =
      new SingleColumnValueFilterAdapter(valueFilterAdapter);
  adapter.addFilterAdapter(
      SingleColumnValueFilter.class, scvfa);
  adapter.addFilterAdapter(
      SingleColumnValueExcludeFilter.class,
      new SingleColumnValueExcludeFilterAdapter(scvfa));
  adapter.addFilterAdapter(
      ColumnPaginationFilter.class, new ColumnPaginationFilterAdapter());
  adapter.addFilterAdapter(
      FirstKeyOnlyFilter.class, new FirstKeyOnlyFilterAdapter());
  adapter.addFilterAdapter(
      ColumnCountGetFilter.class, new ColumnCountGetFilterAdapter());
  adapter.addFilterAdapter(
      RandomRowFilter.class, new RandomRowFilterAdapter());
  adapter.addFilterAdapter(
      PrefixFilter.class, new PrefixFilterAdapter());
  adapter.addFilterAdapter(
      QualifierFilter.class, new QualifierFilterAdapter());

  // Passing the FilterAdapter in to the FilterListAdapter is a bit
  // unfortunate, but makes adapting the FilterList's subfilters simpler.
  FilterListAdapter filterListAdapter = new FilterListAdapter(adapter);
  // FilterList implements UnsupportedStatusCollector so it should
  // be used when possible (third parameter to addFilterAdapter()).
  adapter.addFilterAdapter(
      FilterList.class, filterListAdapter, filterListAdapter);

  return adapter;
}
Example 13: addFiltersWithColumnPrefix
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
/**
 * If we have a qualifier with a prefix and a wildcard (i.e. cf:foo*), we need a filter on every
 * possible column to be returned as shown below. This will become very inefficient for long
 * lists of columns mixed with a prefixed wildcard.
 *
 * FilterList - must pass ALL of
 *   - FamilyFilter
 *   - AND a must pass ONE FilterList of
 *     - either Qualifier
 *     - or ColumnPrefixFilter
 *
 * If we have only column family filters (i.e. cf:*) or explicit column descriptors
 * (i.e., cf:foo) or a mix of both then we don't need filters, since the scan will take
 * care of that.
 */
private void addFiltersWithColumnPrefix(List<ColumnInfo> columnInfos) {
  // we need to apply a CF AND column list filter for each family
  FilterList allColumnFilters = null;
  Map<String, List<ColumnInfo>> groupedMap = groupByFamily(columnInfos);
  for (String cfString : groupedMap.keySet()) {
    List<ColumnInfo> columnInfoList = groupedMap.get(cfString);
    byte[] cf = Bytes.toBytes(cfString);

    // all filters roll up to one parent OR filter
    if (allColumnFilters == null) {
      allColumnFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    }

    // each group contains a column family filter AND (all) and an OR (one of) of
    // the column filters
    FilterList thisColumnGroupFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    thisColumnGroupFilter.addFilter(new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(cf)));

    FilterList columnFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    for (ColumnInfo colInfo : columnInfoList) {
      if (colInfo.isColumnMap()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Adding family:prefix filters with values " +
              Bytes.toString(colInfo.getColumnFamily()) + COLON +
              Bytes.toString(colInfo.getColumnPrefix()));
        }
        // add a ColumnPrefixFilter to the list of column filters
        if (colInfo.getColumnPrefix() != null) {
          columnFilters.addFilter(new ColumnPrefixFilter(colInfo.getColumnPrefix()));
        }
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Adding family:descriptor filters with values " +
              Bytes.toString(colInfo.getColumnFamily()) + COLON +
              Bytes.toString(colInfo.getColumnName()));
        }
        // add a QualifierFilter to the list of column filters
        columnFilters.addFilter(new QualifierFilter(CompareOp.EQUAL,
            new BinaryComparator(colInfo.getColumnName())));
      }
    }

    thisColumnGroupFilter.addFilter(columnFilters);
    allColumnFilters.addFilter(thisColumnGroupFilter);
  }

  if (allColumnFilters != null) {
    addFilter(allColumnFilters);
  }
}
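The javadoc above describes a nested filter structure: a top-level MUST_PASS_ONE list whose entries each pair a FamilyFilter with a MUST_PASS_ONE list of QualifierFilters and ColumnPrefixFilters. As a standalone sketch of that shape built directly against the HBase filter API, here is a minimal illustration; the family name ("cf"), qualifier ("bar"), and prefix ("foo") are assumptions, and the real code derives them from ColumnInfo objects as shown above.

// Hypothetical values standing in for what ColumnInfo would supply.
byte[] cf = Bytes.toBytes("cf");

// Inner OR list: match an explicit qualifier, or any qualifier starting with "foo".
FilterList columnFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
columnFilters.addFilter(new QualifierFilter(CompareOp.EQUAL,
    new BinaryComparator(Bytes.toBytes("bar"))));
columnFilters.addFilter(new ColumnPrefixFilter(Bytes.toBytes("foo")));

// Per-family AND list: the family must match, and one of the column filters must match.
FilterList familyGroup = new FilterList(FilterList.Operator.MUST_PASS_ALL);
familyGroup.addFilter(new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(cf)));
familyGroup.addFilter(columnFilters);

// Top-level OR list: one group per column family.
FilterList allColumnFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
allColumnFilters.addFilter(familyGroup);

Scan scan = new Scan();
scan.setFilter(allColumnFilters);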
Example 14: assertPrefixFilter
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
private void assertPrefixFilter(Filter filter, String prefix) {
  assertTrue("Filter is not a ColumnPrefixFilter: " + filter.getClass().getSimpleName(),
      filter instanceof ColumnPrefixFilter);
  ColumnPrefixFilter familyFilter = (ColumnPrefixFilter) filter;
  assertEquals("Unexpected prefix", prefix, Bytes.toString(familyFilter.getPrefix()));
}
Example 15: initScan
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; // import the required package/class
private void initScan() {
  scan = new Scan();
  // Map-reduce jobs should not run with cacheBlocks
  scan.setCacheBlocks(false);

  // Set filters, if any.
  if (configuredOptions_.hasOption("gt")) {
    gt_ = Bytes.toBytesBinary(Utils.slashisize(configuredOptions_.getOptionValue("gt")));
    addRowFilter(CompareOp.GREATER, gt_);
  }
  if (configuredOptions_.hasOption("lt")) {
    lt_ = Bytes.toBytesBinary(Utils.slashisize(configuredOptions_.getOptionValue("lt")));
    addRowFilter(CompareOp.LESS, lt_);
  }
  if (configuredOptions_.hasOption("gte")) {
    gte_ = Bytes.toBytesBinary(Utils.slashisize(configuredOptions_.getOptionValue("gte")));
    addRowFilter(CompareOp.GREATER_OR_EQUAL, gte_);
  }
  if (configuredOptions_.hasOption("lte")) {
    lte_ = Bytes.toBytesBinary(Utils.slashisize(configuredOptions_.getOptionValue("lte")));
    addRowFilter(CompareOp.LESS_OR_EQUAL, lte_);
  }

  // apply any column filters
  FilterList allColumnFilters = null;
  for (ColumnInfo colInfo : columnInfo_) {
    // all column family filters roll up to one parent OR filter
    if (allColumnFilters == null) {
      allColumnFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    }

    // and each filter contains a column family filter
    FilterList thisColumnFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    thisColumnFilter.addFilter(new FamilyFilter(CompareOp.EQUAL,
        new BinaryComparator(colInfo.getColumnFamily())));

    if (colInfo.isColumnMap()) {
      if (LOG.isInfoEnabled()) {
        LOG.info("Adding family:prefix filters with values " +
            Bytes.toString(colInfo.getColumnFamily()) + COLON +
            Bytes.toString(colInfo.getColumnPrefix()));
      }
      // each column map filter consists of a FamilyFilter AND
      // optionally a ColumnPrefixFilter
      if (colInfo.getColumnPrefix() != null) {
        thisColumnFilter.addFilter(new ColumnPrefixFilter(colInfo.getColumnPrefix()));
      }
    } else {
      if (LOG.isInfoEnabled()) {
        LOG.info("Adding family:descriptor filters with values " +
            Bytes.toString(colInfo.getColumnFamily()) + COLON +
            Bytes.toString(colInfo.getColumnName()));
      }
      // each column value filter consists of a FamilyFilter AND
      // a QualifierFilter
      thisColumnFilter.addFilter(new QualifierFilter(CompareOp.EQUAL,
          new BinaryComparator(colInfo.getColumnName())));
    }

    allColumnFilters.addFilter(thisColumnFilter);
  }

  if (allColumnFilters != null) {
    addFilter(allColumnFilters);
  }
}