This article collects typical usage examples of the Java class org.apache.cassandra.db.filter.IDiskAtomFilter. If you are wondering what IDiskAtomFilter is for and how to use it, the curated class code examples below may help.
The IDiskAtomFilter class belongs to the org.apache.cassandra.db.filter package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
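Before the individual examples, here is a minimal sketch of how an IDiskAtomFilter is typically created and handed to a single-partition read. It is not taken from the examples below; the keyspace, table, and key names are illustrative assumptions, imports are elided as in the snippets on this page, and the SliceQueryFilter constructor and ReadCommand.create call are the ones that appear in Examples 4 and 8.
// Illustrative sketch only: build an unrestricted slice filter and use it in a read command.
ByteBuffer key = ByteBufferUtil.bytes("some_partition_key");           // hypothetical partition key
IDiskAtomFilter filter = new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY,
                                              false,                   // not reversed
                                              Integer.MAX_VALUE);      // no cell limit
ReadCommand command = ReadCommand.create("my_keyspace",                // hypothetical keyspace
                                         key,
                                         "my_table",                   // hypothetical table
                                         System.currentTimeMillis(),
                                         filter.cloneShallow());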
Example 1: AbstractQueryPager
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
protected AbstractQueryPager(ConsistencyLevel consistencyLevel,
                             int toFetch,
                             boolean localQuery,
                             CFMetaData cfm,
                             IDiskAtomFilter columnFilter,
                             long timestamp)
{
    this.consistencyLevel = consistencyLevel;
    this.localQuery = localQuery;
    this.cfm = cfm;
    this.columnFilter = columnFilter;
    this.timestamp = timestamp;
    this.remaining = toFetch;
}
Example 2: deserialize
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
public RangeSliceCommand deserialize(DataInput in, int version) throws IOException
{
    String keyspace = in.readUTF();
    String columnFamily = in.readUTF();
    long timestamp = in.readLong();
    CFMetaData metadata = Schema.instance.getCFMetaData(keyspace, columnFamily);
    IDiskAtomFilter predicate = metadata.comparator.diskAtomFilterSerializer().deserialize(in, version);
    List<IndexExpression> rowFilter;
    int filterCount = in.readInt();
    rowFilter = new ArrayList<>(filterCount);
    for (int i = 0; i < filterCount; i++)
    {
        rowFilter.add(IndexExpression.readFrom(in));
    }
    AbstractBounds<RowPosition> range = AbstractBounds.serializer.deserialize(in, version).toRowBounds();
    int maxResults = in.readInt();
    boolean countCQL3Rows = in.readBoolean();
    boolean isPaging = in.readBoolean();
    return new RangeSliceCommand(keyspace, columnFamily, timestamp, predicate, range, rowFilter, maxResults, countCQL3Rows, isPaging);
}
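The matching serialize method is not part of this listing. A rough sketch of what it would have to write, inferred only from the read order above and the field sizes accounted for in Example 14 (serializedSize), could look like the following. Treat it as an assumption rather than the actual Cassandra source: the output-stream parameter type varies between versions (DataOutput vs. DataOutputPlus), and the per-expression encoding via ByteBufferUtil.writeWithShortLength is inferred from the sizeofWithShortLength calls in Example 14.
// Hypothetical counterpart to deserialize above; the field order mirrors the reads
// above and the sizes in Example 14. Not copied from the Cassandra sources.
public void serialize(RangeSliceCommand rsc, DataOutput out, int version) throws IOException
{
    out.writeUTF(rsc.keyspace);
    out.writeUTF(rsc.columnFamily);
    out.writeLong(rsc.timestamp);
    CFMetaData metadata = Schema.instance.getCFMetaData(rsc.keyspace, rsc.columnFamily);
    metadata.comparator.diskAtomFilterSerializer().serialize(rsc.predicate, out, version);
    if (rsc.rowFilter == null)
    {
        out.writeInt(0);
    }
    else
    {
        out.writeInt(rsc.rowFilter.size());
        for (IndexExpression expr : rsc.rowFilter)
        {
            // assumed wire format, matching sizeofWithShortLength / sizeof in Example 14
            ByteBufferUtil.writeWithShortLength(expr.column, out);
            out.writeInt(expr.operator.ordinal());
            ByteBufferUtil.writeWithShortLength(expr.value, out);
        }
    }
    AbstractBounds.serializer.serialize(rsc.keyRange, out, version);
    out.writeInt(rsc.maxResults);
    out.writeBoolean(rsc.countCQL3Rows);
    out.writeBoolean(rsc.isPaging);
}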
Example 3: isFilterFullyCoveredBy
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
public boolean isFilterFullyCoveredBy(IDiskAtomFilter filter, ColumnFamily cachedCf, long now)
{
    // We can use the cached value only if we know that no data it doesn't contain could be covered
    // by the query filter, that is if:
    //   1) either the whole partition is cached, or
    //   2) we can ensure that any data the filter selects is in the cached partition.
    // When counting rows to decide if the whole row is cached, we should be careful with expiring
    // columns: if we use a timestamp newer than the one that was used when populating the cache, we might
    // end up deciding the whole partition is cached when it's really not (just some rows expired since the
    // cf was cached). This is the reason for Integer.MIN_VALUE below.
    boolean wholePartitionCached = cachedCf.liveCQL3RowCount(Integer.MIN_VALUE) < metadata.getCaching().rowCache.rowsToCache;
    // Contrary to the "wholePartitionCached" check above, we do want isFullyCoveredBy to take the
    // timestamp of the query into account when dealing with expired columns. Otherwise, we could think
    // the cached partition has enough live rows to satisfy the filter when it doesn't because some
    // are now expired.
    return wholePartitionCached || filter.isFullyCoveredBy(cachedCf, now);
}
Example 4: makeExtendedFilter
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
public ExtendedFilter makeExtendedFilter(AbstractBounds<RowPosition> range,
                                         IDiskAtomFilter columnFilter,
                                         List<IndexExpression> rowFilter,
                                         int maxResults,
                                         boolean countCQL3Rows,
                                         boolean isPaging,
                                         long timestamp)
{
    DataRange dataRange;
    if (isPaging)
    {
        assert columnFilter instanceof SliceQueryFilter;
        SliceQueryFilter sfilter = (SliceQueryFilter)columnFilter;
        assert sfilter.slices.length == 1;
        // create a new SliceQueryFilter that selects all cells, but pass the original slice start and finish
        // through to DataRange.Paging to be used on the first and last partitions
        SliceQueryFilter newFilter = new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, sfilter.isReversed(), sfilter.count);
        dataRange = new DataRange.Paging(range, newFilter, sfilter.start(), sfilter.finish(), metadata.comparator);
    }
    else
    {
        dataRange = new DataRange(range, columnFilter);
    }
    return ExtendedFilter.create(this, dataRange, rowFilter, maxResults, countCQL3Rows, timestamp);
}
Example 5: makePrefix
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
private Composite makePrefix(CompositesIndex index, ByteBuffer key, ExtendedFilter filter, boolean isStart)
{
    if (key.remaining() == 0)
        return Composites.EMPTY;
    Composite prefix;
    IDiskAtomFilter columnFilter = filter.columnFilter(key);
    if (columnFilter instanceof SliceQueryFilter)
    {
        SliceQueryFilter sqf = (SliceQueryFilter)columnFilter;
        Composite columnName = isStart ? sqf.start() : sqf.finish();
        prefix = columnName.isEmpty() ? index.getIndexComparator().make(key) : index.makeIndexColumnPrefix(key, columnName);
    }
    else
    {
        prefix = index.getIndexComparator().make(key);
    }
    return isStart ? prefix.start() : prefix.end();
}
Example 6: AbstractQueryPager
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
protected AbstractQueryPager(ConsistencyLevel consistencyLevel,
                             int toFetch,
                             boolean localQuery,
                             String keyspace,
                             String columnFamily,
                             IDiskAtomFilter columnFilter,
                             long timestamp)
{
    this.consistencyLevel = consistencyLevel;
    this.localQuery = localQuery;
    this.cfm = Schema.instance.getCFMetaData(keyspace, columnFamily);
    this.columnFilter = columnFilter;
    this.timestamp = timestamp;
    this.remaining = toFetch;
}
Example 7: getIndexed
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
private static List<Row> getIndexed(ColumnFamilyStore store, IDiskAtomFilter columnFilter, DecoratedKey startKey, int maxResults, IndexExpression... expressions)
{
    IPartitioner p = StorageService.getPartitioner();
    AbstractBounds<RowPosition> bounds;
    if (startKey == null)
    {
        bounds = new Range<>(p.getMinimumToken(), p.getMinimumToken()).toRowBounds();
    }
    else
    {
        bounds = new Bounds<>(startKey, p.getMinimumToken().maxKeyBound(p));
    }
    return store.indexManager.search(ExtendedFilter.create(store,
                                                           new DataRange(bounds, columnFilter),
                                                           Arrays.asList(expressions),
                                                           maxResults,
                                                           false,
                                                           System.currentTimeMillis()));
}
Example 8: getSliceTravelCommands
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
private List<ReadCommand> getSliceTravelCommands(QueryOptions options, long now, int step) throws RequestValidationException
{
    List<ReadCommand> commands = new ArrayList<>();
    Collection<ByteBuffer> keys = getKeys(options, step);
    //if (keys.isEmpty()) // in case of IN () for (the last column of) the partition key.
    IDiskAtomFilter filter = makeFilter(options, step);
    if (filter == null)
        return null;
    if (keys.isEmpty())
    {
        // if we do not have keys yet, just create a ReadCommand with an empty placeholder key.
        ByteBuffer empty = ByteBuffer.wrap("EMPTY".getBytes());
        //logger.info("@daidong debug: create read command with empty key ");
        commands.add(ReadCommand.create(keyspace(), ByteBufferUtil.clone(empty), columnFamily(), now, filter.cloneShallow()));
    }
    else
    {
        for (ByteBuffer key : keys)
        {
            QueryProcessor.validateKey(key);
            //logger.info("@daidong debug: create read command with keys " + key.toString());
            commands.add(ReadCommand.create(keyspace(), ByteBufferUtil.clone(key), columnFamily(), now, filter.cloneShallow()));
        }
    }
    return commands;
}
Example 9: makeFilter
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
private IDiskAtomFilter makeFilter(QueryOptions options, int step) throws InvalidRequestException {
    int toGroup = cfm.comparator.isDense() ? -1 : cfm.clusteringColumns().size();
    //logger.info("@daidong debug: " + "in makeFilter. toGroup: " + toGroup);
    List<Composite> startBounds = getRequestedBound(Bound.START, options, step);
    List<Composite> endBounds = getRequestedBound(Bound.END, options, step);
    assert startBounds.size() == endBounds.size();
    List<ColumnSlice> l = new ArrayList<ColumnSlice>(startBounds.size());
    for (int i = 0; i < startBounds.size(); i++) {
        ColumnSlice slice = new ColumnSlice(startBounds.get(i), endBounds.get(i));
        if (!slice.isAlwaysEmpty(cfm.comparator, isReversed))
            l.add(slice);
    }
    if (l.isEmpty())
        return null;
    return sliceFilter(l.toArray(new ColumnSlice[l.size()]), limit, toGroup);
}
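The sliceFilter helper called on the last line is not included in this listing. A plausible sketch is shown below; it assumes the helper simply wraps a SliceQueryFilter constructor that also takes a compositesToGroup argument, which is an inference from the toGroup value computed above, not code from this page.
// Hypothetical helper: wraps the slices in a SliceQueryFilter. The four-argument
// constructor (slices, reversed, count, compositesToGroup) is assumed here.
private SliceQueryFilter sliceFilter(ColumnSlice[] slices, int limit, int toGroup)
{
    return new SliceQueryFilter(slices, isReversed, limit, toGroup);
}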
Example 10: makeExtendedFilter
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
public ExtendedFilter makeExtendedFilter(AbstractBounds<RowPosition> range,
                                         IDiskAtomFilter columnFilter,
                                         List<IndexExpression> rowFilter,
                                         int maxResults,
                                         boolean countCQL3Rows,
                                         boolean isPaging,
                                         long timestamp)
{
    DataRange dataRange;
    if (isPaging)
    {
        assert columnFilter instanceof SliceQueryFilter;
        SliceQueryFilter sfilter = (SliceQueryFilter)columnFilter;
        assert sfilter.slices.length == 1;
        SliceQueryFilter newFilter = new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, sfilter.isReversed(), sfilter.count);
        dataRange = new DataRange.Paging(range, newFilter, sfilter.start(), sfilter.finish(), metadata.comparator);
    }
    else
    {
        dataRange = new DataRange(range, columnFilter);
    }
    return ExtendedFilter.create(this, dataRange, rowFilter, maxResults, countCQL3Rows, timestamp);
}
Example 11: RangeSliceCommand
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
public RangeSliceCommand(String keyspace,
                         String columnFamily,
                         long timestamp,
                         IDiskAtomFilter predicate,
                         AbstractBounds<RowPosition> range,
                         List<IndexExpression> rowFilter,
                         int maxResults,
                         boolean countCQL3Rows,
                         boolean isPaging)
{
    super(keyspace, columnFamily, timestamp, range, predicate, rowFilter);
    this.maxResults = maxResults;
    this.countCQL3Rows = countCQL3Rows;
    this.isPaging = isPaging;
    logger.info(">>> STRATIO >>> RangeSliceCommand constructor 9");
}
Example 12: makePrefix
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
private Composite makePrefix(CompositesIndex index, ByteBuffer key, ExtendedFilter filter, boolean isStart)
{
    if (key.remaining() == 0)
        return Composites.EMPTY;
    Composite prefix;
    IDiskAtomFilter columnFilter = filter.columnFilter(key);
    if (columnFilter instanceof SliceQueryFilter)
    {
        SliceQueryFilter sqf = (SliceQueryFilter)columnFilter;
        prefix = index.makeIndexColumnPrefix(key, isStart ? sqf.start() : sqf.finish());
    }
    else
    {
        prefix = index.getIndexComparator().make(key);
    }
    return isStart ? prefix.start() : prefix.end();
}
Example 13: RangeSliceCommand
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
public RangeSliceCommand(String keyspace,
                         String columnFamily,
                         long timestamp,
                         IDiskAtomFilter predicate,
                         AbstractBounds<RowPosition> range,
                         int maxResults)
{
    this(keyspace, columnFamily, timestamp, predicate, range, null, maxResults, false, false);
}
Example 14: serializedSize
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
public long serializedSize(RangeSliceCommand rsc, int version)
{
    long size = TypeSizes.NATIVE.sizeof(rsc.keyspace);
    size += TypeSizes.NATIVE.sizeof(rsc.columnFamily);
    size += TypeSizes.NATIVE.sizeof(rsc.timestamp);
    CFMetaData metadata = Schema.instance.getCFMetaData(rsc.keyspace, rsc.columnFamily);
    IDiskAtomFilter filter = rsc.predicate;
    size += metadata.comparator.diskAtomFilterSerializer().serializedSize(filter, version);
    if (rsc.rowFilter == null)
    {
        size += TypeSizes.NATIVE.sizeof(0);
    }
    else
    {
        size += TypeSizes.NATIVE.sizeof(rsc.rowFilter.size());
        for (IndexExpression expr : rsc.rowFilter)
        {
            size += TypeSizes.NATIVE.sizeofWithShortLength(expr.column);
            size += TypeSizes.NATIVE.sizeof(expr.operator.ordinal());
            size += TypeSizes.NATIVE.sizeofWithShortLength(expr.value);
        }
    }
    size += AbstractBounds.serializer.serializedSize(rsc.keyRange, version);
    size += TypeSizes.NATIVE.sizeof(rsc.maxResults);
    size += TypeSizes.NATIVE.sizeof(rsc.countCQL3Rows);
    size += TypeSizes.NATIVE.sizeof(rsc.isPaging);
    return size;
}
Example 15: getRangeSlice
import org.apache.cassandra.db.filter.IDiskAtomFilter; // import the required package/class
@VisibleForTesting
public List<Row> getRangeSlice(final AbstractBounds<RowPosition> range,
                               List<IndexExpression> rowFilter,
                               IDiskAtomFilter columnFilter,
                               int maxResults)
{
    return getRangeSlice(range, rowFilter, columnFilter, maxResults, System.currentTimeMillis());
}
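A short usage sketch for this overload, assembled only from types already shown on this page: the full-ring bounds follow the pattern in Example 7, and the all-columns SliceQueryFilter follows Example 4. The variable names, the row limit, and the ColumnFamilyStore variable store are illustrative assumptions, not code from the page.
// Illustrative only: scan up to 100 rows over the full token range with an
// unrestricted slice filter; 'store' is assumed to be an open ColumnFamilyStore.
IPartitioner p = StorageService.getPartitioner();
AbstractBounds<RowPosition> fullRing =
        new Range<>(p.getMinimumToken(), p.getMinimumToken()).toRowBounds();
IDiskAtomFilter allColumns =
        new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false, Integer.MAX_VALUE);
List<Row> rows = store.getRangeSlice(fullRing, null, allColumns, 100);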