This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.BinaryPrefixComparator. If you have been wondering what BinaryPrefixComparator is for and how to use it, the examples selected here may help.
The BinaryPrefixComparator class belongs to the org.apache.hadoop.hbase.filter package. Fifteen code examples are shown below, ordered by popularity.
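Before the examples, a quick orientation: BinaryPrefixComparator compares byte arrays only up to the length of its own prefix, which makes it the usual tool for matching row keys or column qualifiers by prefix. A minimal sketch, assuming the pre-2.0 HBase client API used throughout these examples (the row-key prefix is a placeholder):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

// keep only rows whose key starts with the given prefix
Filter prefixFilter = new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(Bytes.toBytes("user-42/")));
Scan scan = new Scan().setFilter(prefixFilter);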
Example 1: ByteArrayComparableModel
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
public ByteArrayComparableModel(
ByteArrayComparable comparator) {
String typeName = comparator.getClass().getSimpleName();
ComparatorType type = ComparatorType.valueOf(typeName);
this.type = typeName;
switch (type) {
case BinaryComparator:
case BinaryPrefixComparator:
this.value = Base64.encodeBytes(comparator.getValue());
break;
case BitComparator:
this.value = Base64.encodeBytes(comparator.getValue());
this.op = ((BitComparator)comparator).getOperator().toString();
break;
case NullComparator:
break;
case RegexStringComparator:
case SubstringComparator:
this.value = Bytes.toString(comparator.getValue());
break;
default:
throw new RuntimeException("unhandled filter type: " + type);
}
}
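A hedged usage note for Example 1: for a BinaryPrefixComparator the model stores the simple class name as the type and the Base64-encoded prefix bytes (its getValue()) as the value. A one-line sketch with a placeholder prefix:

// type becomes "BinaryPrefixComparator", value the Base64-encoded prefix
ByteArrayComparableModel model =
    new ByteArrayComparableModel(new BinaryPrefixComparator(Bytes.toBytes("row-")));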
Example 2: makeResponseTimeFilter
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
/**
 * Makes the HBase filter that selects y-axis values (response time) when picking transactions from the scatter chart.
 * To use this filter, the column qualifier must be prefixed with 4 bytes encoding the elapsed time.
 *
 * @param area selected scatter-chart area
 * @param offsetTransactionId transaction id to resume after, or null for the first page
 * @param offsetTransactionElapsed elapsed time of the offset transaction
 * @return filter list selecting qualifiers in the response-time range
 */
private Filter makeResponseTimeFilter(final SelectedScatterArea area, final TransactionId offsetTransactionId, int offsetTransactionElapsed) {
// filter by response time
ResponseTimeRange responseTimeRange = area.getResponseTimeRange();
byte[] responseFrom = Bytes.toBytes(responseTimeRange.getFrom());
byte[] responseTo = Bytes.toBytes(responseTimeRange.getTo());
FilterList filterList = new FilterList(Operator.MUST_PASS_ALL);
filterList.addFilter(new QualifierFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(responseFrom)));
filterList.addFilter(new QualifierFilter(CompareOp.LESS_OR_EQUAL, new BinaryPrefixComparator(responseTo)));
// add offset
if (offsetTransactionId != null) {
final Buffer buffer = new AutomaticBuffer(32);
buffer.putInt(offsetTransactionElapsed);
buffer.putPrefixedString(offsetTransactionId.getAgentId());
buffer.putSVLong(offsetTransactionId.getAgentStartTime());
buffer.putVLong(offsetTransactionId.getTransactionSequence());
byte[] qualifierOffset = buffer.getBuffer();
filterList.addFilter(new QualifierFilter(CompareOp.GREATER, new BinaryPrefixComparator(qualifierOffset)));
}
return filterList;
}
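The range filters in Example 2 work because BinaryPrefixComparator truncates the comparison to the length of its own prefix: only the leading 4 elapsed-time bytes of each qualifier are compared, and the transaction-id bytes that follow are ignored. Since Bytes.toBytes(int) is big-endian, lexicographic order matches numeric order for non-negative values. A small illustration with placeholder values:

BinaryPrefixComparator from = new BinaryPrefixComparator(Bytes.toBytes(100));
// qualifier = 4-byte elapsed time (250) followed by transaction-id bytes
byte[] qualifier = Bytes.add(Bytes.toBytes(250), Bytes.toBytes("agent/seq"));
// only the first 4 bytes are compared: 100 < 250 gives a negative result,
// so a QualifierFilter with CompareOp.GREATER_OR_EQUAL keeps this qualifier
int cmp = from.compareTo(qualifier, 0, qualifier.length);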
Example 3: testAddColumnFilterToScanCompareOpNull
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
@Test
public void testAddColumnFilterToScanCompareOpNull() throws Exception {
ColumnFilter cf = new ColumnFilter( "Family" );
cf.setConstant( "123" );
cf.setSignedComparison( true );
HBaseValueMeta meta = new HBaseValueMeta( "colFamly,colname,Family", 1, 20, 1 );
meta.setIsLongOrDouble( true );
VariableSpace space = mockVariableSpace();
connectionSpy.m_sourceScan = new Scan();
doReturn( null ).when( connectionSpy ).getCompareOpByComparisonType( any( ColumnFilter.ComparisonType.class ) );
connectionSpy.addColumnFilterToScan( cf, meta, space, true );
FilterList filter = (FilterList) connectionSpy.m_sourceScan.getFilter();
assertFalse( filter.getFilters().isEmpty() );
Assert.assertEquals( 1, filter.getFilters().size() );
Assert.assertEquals( BinaryPrefixComparator.class,
( (CompareFilter) filter.getFilters().get( 0 ) ).getComparator().getClass() );
}
Example 4: getRecommendedUserItem
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
public List<GroupedData> getRecommendedUserItem(String cp, String collection, long user, Long from, Long startDate,
Long endDate, int size)
{
FilterList filters = new FilterList();
if (from == null)
{
filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys.getStatRecommendedItemKey(
collection, user))));
}
else
{
filters.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(RowKeys
.getStatRecommendedItemKey(collection, user, from))));
}
setDateLimit(STATS_RECOMMENDED_USERITEM, startDate, endDate, filters);
Scan scan = new Scan().addFamily(STATS_RECOMMENDED_USERITEM).setFilter(filters);
return getResults(cp, scan, STATS_RECOMMENDED_USERITEM, size);
}
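The from == null branch in Example 4 is a paging idiom that recurs in the following examples: CompareOp.EQUAL with a prefix comparator keeps only rows that start with the group key, while CompareOp.GREATER_OR_EQUAL resumes the scan at an offset key, with the upper bound supplied by the date limit and the size cap. A sketch with hypothetical row keys:

byte[] groupKey = Bytes.toBytes("collection/42/"); // first page: prefix match
byte[] offsetKey = Bytes.toBytes("collection/42/user-100"); // resume point
Filter firstPage = new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(groupKey));
Filter nextPage = new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(offsetKey));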
Example 5: getUserRated
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
public List<GroupedData> getUserRated(String cp, String collection, Long from, Long startDate, Long endDate, int size)
{
// Put put = new Put(RowKeys.getStatRatingsPerItemKey(collection, item, date));
Scan scan = new Scan();
scan.addFamily(STATS_USER_RATINGS);
FilterList filters = new FilterList();
if (from == null)
{
filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys
.getStatRatingsPerUserKey(collection))));
}
else
{
filters.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(RowKeys
.getStatRatingsPerUserKey(collection, from.longValue()))));
}
setDateLimit(STATS_USER_RATINGS, startDate, endDate, filters);
scan.setFilter(filters);
return getResults(cp, scan, STATS_USER_RATINGS, size);
}
Example 6: getItemRated
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
public List<GroupedData> getItemRated(String cp, String collection, Long from, Long startDate, Long endDate, int size)
{
// Put put = new Put(RowKeys.getStatRatingsPerItemKey(collection, item, date));
Scan scan = new Scan();
scan.addFamily(STATS_ITEM_RATINGS);
FilterList filters = new FilterList();
if (from == null)
{
filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys
.getStatRatingsPerItemKey(collection))));
}
else
{
filters.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(RowKeys
.getStatRatingsPerItemKey(collection, from.longValue()))));
}
setDateLimit(STATS_ITEM_RATINGS, startDate, endDate, filters);
scan.setFilter(filters);
return getResults(cp, scan, STATS_ITEM_RATINGS, size);
}
Example 7: getSources
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
/**
 * Return the list of sources (content providers) for the given recommender.
 * @param cp content provider
 * @param recommender recommender name
 * @param from source to resume from, or null for the first page
 * @param startDate start date, or null
 * @param endDate end date, or null
 * @param size maximum number of results
 * @return list of grouped source data
 * @see nl.gridline.zieook.statistics.SourcesByRecommenderMap
 * @see nl.gridline.zieook.statistics.SourcesByRecommenderReduce
 */
public List<GroupedData> getSources(String cp, String recommender, String from, Long startDate, Long endDate,
int size)
{
Scan scan = new Scan();
scan.addFamily(STATS_VIEWED_SOURCE);
FilterList filters = new FilterList();
if (from == null)
{
filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys
.getStatSourcesKey(recommender))));
}
else
{
filters.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(RowKeys
.getStatSourcesKey(recommender, from))));
}
setDateLimit(STATS_VIEWED_SOURCE, startDate, endDate, filters);
scan.setFilter(filters);
return getResults(cp, scan, STATS_VIEWED_SOURCE, size);
}
Example 8: getViewed
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
public List<GroupedData> getViewed(String cp, String recommender, Long from, Long startDate, Long endDate, int size)
{
Scan scan = new Scan();
scan.addFamily(STATS_VIEWED_ITEM);
FilterList filters = new FilterList();
if (from == null)
{
filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys
.getStatViewedKey(recommender))));
}
else
{
filters.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(RowKeys.getStatViewedKey(
recommender, from.longValue()))));
}
setDateLimit(STATS_VIEWED_ITEM, startDate, endDate, filters);
scan.setFilter(filters);
return getResults(cp, scan, STATS_VIEWED_ITEM, size);
}
Example 9: deleteViews
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
public long deleteViews(String cp, String recommender)
{
FilterList filters = new FilterList();
// filter column-family & recommender name
filters.addFilter(new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(EVENTLOG_COLUMN_USERVIEW)));
filters
.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys.getUserViewKey(recommender))));
Scan scan = new Scan().addFamily(EVENTLOG_COLUMN_USERVIEW).setFilter(filters);
// TODO: run this as a background map-reduce delete; we only need to pass
// the table and a scan object, so it should be quite easy.
long count = deleteAll(scan, cp, EVENTLOG_COLUMN_USERVIEW);
return count;
}
Example 10: getEventLogRecommendedScanner
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
/**
 * Return a scanner over recommended-item events, optionally bounded by a start date and an end date.
 * @param startDate start date (inclusive), or null for no lower bound
 * @param endDate end date (exclusive), or null for no upper bound
 * @return scan with the row-prefix and timestamp filters applied
 */
public Scan getEventLogRecommendedScanner(Long startDate, Long endDate)
{
Scan scan = new Scan().addFamily(EVENTLOG_COLUMN_RECOMMENDED);
FilterList filters = new FilterList();
filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys.getRecommendedItemKey())));
// timestamp filter:
if (startDate != null)
{
SingleColumnValueFilter startFilter = new SingleColumnValueFilter(EVENTLOG_COLUMN_RECOMMENDED,
ModelConstants.TIMESTAMP, CompareOp.GREATER_OR_EQUAL, Bytes.toBytes(startDate.longValue()));
startFilter.setFilterIfMissing(true);
filters.addFilter(startFilter);
}
if (endDate != null)
{
SingleColumnValueFilter endFilter = new SingleColumnValueFilter(EVENTLOG_COLUMN_RECOMMENDED,
ModelConstants.TIMESTAMP, CompareOp.LESS, Bytes.toBytes(endDate.longValue()));
endFilter.setFilterIfMissing(true);
filters.addFilter(endFilter);
}
return scan.setFilter(filters);
}
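A hedged sketch of consuming the scanner from Example 10, assuming the usual client imports (Result, ResultScanner); the table handle and the process method are placeholders for whatever the caller does with each row:

Scan scan = getEventLogRecommendedScanner(startDate, endDate);
try (ResultScanner results = table.getScanner(scan))
{
	for (Result row : results)
	{
		// each row key starts with the recommended-item prefix, and each
		// row satisfied the optional timestamp bounds
		process(row);
	}
}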
Example 11: getTopRecommended
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
public List<GroupedData> getTopRecommended(String cp, String collection, long user, Long startDate, Long endDate,
int size)
{
FilterList filters = new FilterList();
filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys.getStatRecommendedItemKey(
collection, user))));
setDateLimit(STATS_RECOMMENDED_USERITEM, startDate, endDate, filters);
Scan scan = new Scan().addFamily(STATS_RECOMMENDED_USERITEM).setFilter(filters);
return getSortedResults(cp, scan, STATS_RECOMMENDED_USERITEM, size);
}
Example 12: getRecommendedUser
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
public List<GroupedData> getRecommendedUser(String cp, String collection, long user, Long startDate, Long endDate,
int size)
{
FilterList filters = new FilterList();
filters.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(RowKeys
.getStatRecommendedKey(collection, user))));
setDateLimit(STATS_RECOMMENDED_USER, startDate, endDate, filters);
Scan scan = new Scan().addFamily(STATS_RECOMMENDED_USER).setFilter(filters);
return getResults(cp, scan, STATS_RECOMMENDED_USER, size);
}
Example 13: getCollectionSources
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
/**
 * @param cp content provider
 * @param collection collection name
 * @param from source to resume from, or null for the first page
 * @param startDate start date, or null
 * @param endDate end date, or null
 * @param size maximum number of results
 * @return list of grouped source data
 */
public List<GroupedData> getCollectionSources(String cp, String collection, String from, Long startDate,
Long endDate, Integer size)
{
Scan scan = new Scan().addFamily(STATS_COLLECTION_SOURCE);
FilterList filters = new FilterList();
if (from == null)
{
filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(RowKeys
.getStatSourcesCollectionKey(collection))));
}
else
{
filters.addFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(RowKeys
.getStatSourcesCollectionKey(collection, from))));
}
setDateLimit(STATS_COLLECTION_SOURCE, startDate, endDate, filters);
scan.setFilter(filters);
return getResults(cp, scan, STATS_COLLECTION_SOURCE, size);
}
Example 14: getItemFilter
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
private FilterList getItemFilter(String collection, String regexp, Set<Long> items)
{
// Filter on the given items (this is an OR filter on the rows)
FilterList itemFilter = new FilterList(Operator.MUST_PASS_ONE);
for (long i : items)
{
// a row filter is probably faster (not verified); a single column-value
// filter would also work.
itemFilter.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryComparator(RowKeys.getCollectionKey(collection,
i))));
}
FilterList filters = new FilterList();
// filter on collection:
filters.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(Bytes.toBytes(collection))));
// filter on items:
filters.addFilter(itemFilter);
SingleColumnValueFilter value = new SingleColumnValueFilter(COLUMN_INTR, ModelConstants.TITLE, CompareOp.EQUAL,
new RegexStringComparator(regexp));
value.setFilterIfMissing(true);
filters.addFilter(value);
return filters;
}
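Example 14 nests a MUST_PASS_ONE list (the OR over item row keys) inside the default MUST_PASS_ALL outer list, so a row survives only if it matches the collection prefix AND one of the item keys AND the title regex. The nesting shape, reduced to a sketch with a placeholder collection name:

FilterList anyItem = new FilterList(Operator.MUST_PASS_ONE); // OR over item row keys
FilterList all = new FilterList(); // default operator is MUST_PASS_ALL (AND)
all.addFilter(new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(Bytes.toBytes("myCollection"))));
all.addFilter(anyItem);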
Example 15: getItems
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; // import the required package/class
/**
 * Return the first {@code size} collection items for the given content provider and collection.
 * @param cp content provider name
 * @param collection collection name
 * @param size maximum number of items to return
 * @return list of collection items
 */
public List<CollectionItem> getItems(String cp, String collection, int size)
{
Filter filter = new RowFilter(CompareOp.EQUAL, new BinaryPrefixComparator(Bytes.toBytes(collection)));
Scan scan = new Scan().addFamily(COLUMN_INTR).setFilter(filter);
return getItems(cp, scan, size);
}