This article collects typical usage examples of the Java class org.apache.cassandra.db.filter.SliceQueryFilter. If you are wondering what SliceQueryFilter is for, how to use it, or what real-world code that uses it looks like, the curated examples below should help.
The SliceQueryFilter class belongs to the org.apache.cassandra.db.filter package. A total of 15 code examples of the class are shown below, ordered by popularity by default.
Example 1: queryNextPage
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)
throws RequestExecutionException
{
    SliceQueryFilter sf = (SliceQueryFilter)columnFilter;
    AbstractBounds<RowPosition> keyRange = lastReturnedKey == null ? command.keyRange : makeIncludingKeyBounds(lastReturnedKey);
    Composite start = lastReturnedName == null ? sf.start() : lastReturnedName;
    PagedRangeCommand pageCmd = new PagedRangeCommand(command.keyspace,
                                                      command.columnFamily,
                                                      command.timestamp,
                                                      keyRange,
                                                      sf,
                                                      start,
                                                      sf.finish(),
                                                      command.rowFilter,
                                                      pageSize,
                                                      command.countCQL3Rows);
    return localQuery
         ? pageCmd.executeLocally()
         : StorageProxy.getRangeSlice(pageCmd, consistencyLevel);
}
Example 2: countPaged
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
/**
 * Convenience method that counts (live) cells/rows for a given slice of a row, but pages underneath.
 */
public static int countPaged(String keyspace,
                             String columnFamily,
                             ByteBuffer key,
                             SliceQueryFilter filter,
                             ConsistencyLevel consistencyLevel,
                             ClientState cState,
                             final int pageSize,
                             long now) throws RequestValidationException, RequestExecutionException
{
    SliceFromReadCommand command = new SliceFromReadCommand(keyspace, key, columnFamily, now, filter);
    final SliceQueryPager pager = new SliceQueryPager(command, consistencyLevel, cState, false);
    ColumnCounter counter = filter.columnCounter(Schema.instance.getCFMetaData(keyspace, columnFamily).comparator, now);
    while (!pager.isExhausted())
    {
        List<Row> next = pager.fetchPage(pageSize);
        if (!next.isEmpty())
            counter.countAll(next.get(0).cf);
    }
    return counter.live();
}
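The pattern above is worth noting: the Javadoc promises a total count while paging underneath, so the method simply drains the pager page by page and folds every page into a running counter. Below is a minimal, self-contained sketch of that control flow; the Pager class and the 0/1 "liveness" encoding are hypothetical stand-ins for Cassandra's SliceQueryPager and ColumnCounter, and only the loop shape matches the example.

import java.util.ArrayList;
import java.util.List;

public class PagedCountSketch
{
    /** Hypothetical pager that hands out fixed-size pages of ints (1 = live cell, 0 = deleted). */
    static class Pager
    {
        private final int[] cells;
        private int pos = 0;

        Pager(int[] cells) { this.cells = cells; }

        boolean isExhausted() { return pos >= cells.length; }

        List<Integer> fetchPage(int pageSize)
        {
            List<Integer> page = new ArrayList<>();
            for (int i = 0; i < pageSize && pos < cells.length; i++, pos++)
                page.add(cells[pos]);
            return page;
        }
    }

    public static void main(String[] args)
    {
        Pager pager = new Pager(new int[]{ 1, 0, 1, 1, 0, 1, 1 });
        int live = 0;
        // Same loop shape as countPaged: keep fetching pages until the pager is exhausted
        // and fold each page into the running counter.
        while (!pager.isExhausted())
        {
            for (int cell : pager.fetchPage(3))
                if (cell == 1)
                    live++;
        }
        System.out.println("live cells: " + live); // prints 5
    }
}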
Example 3: queryNextPage
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)
throws RequestValidationException, RequestExecutionException
{
    // For some queries, such as a DISTINCT query on static columns, the limit for slice queries will be lower
    // than the page size (in the static example, it will be 1). We use the min here to ensure we don't fetch
    // more rows than we're supposed to. See CASSANDRA-8108 for more details.
    SliceQueryFilter filter = command.filter.withUpdatedCount(Math.min(command.filter.count, pageSize));
    if (lastReturned != null)
        filter = filter.withUpdatedStart(lastReturned, cfm.comparator);
    logger.debug("Querying next page of slice query; new filter: {}", filter);
    ReadCommand pageCmd = command.withUpdatedFilter(filter);
    return localQuery
         ? Collections.singletonList(pageCmd.getRow(Keyspace.open(command.ksName)))
         : StorageProxy.read(Collections.singletonList(pageCmd), consistencyLevel, cstate);
}
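The CASSANDRA-8108 comment above is the whole point of the Math.min call: a DISTINCT-style query may already limit each slice to a single cell, and widening that limit to the page size would over-fetch. A tiny illustration with made-up numbers:

public class SliceCountCapSketch
{
    public static void main(String[] args)
    {
        int sliceLimit = 1;   // e.g. DISTINCT over static columns: at most one cell per partition
        int pageSize = 100;   // page size requested by the pager
        int effectiveCount = Math.min(sliceLimit, pageSize);
        System.out.println(effectiveCount); // prints 1, not 100
    }
}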
Example 4: maybeGenerateRetryCommand
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
@Override
public ReadCommand maybeGenerateRetryCommand(RowDataResolver resolver, Row row)
{
    int maxLiveColumns = resolver.getMaxLiveCount();
    int count = filter.count;
    // We generate a retry if at least one node replied with count live columns, but after the merge we have fewer
    // than the total number of columns we are interested in (which may be < count on a retry).
    // In particular, if no host returned count live columns, we know it is not a short read.
    if (maxLiveColumns < count)
        return null;
    int liveCountInRow = row == null || row.cf == null ? 0 : filter.getLiveCount(row.cf, timestamp);
    if (liveCountInRow < getOriginalRequestedCount())
    {
        // We asked for t (= count) live columns and got l (= liveCountInRow) of them.
        // From that, we can estimate that on this row, for x requested
        // columns, only x * (l/t) end up live after reconciliation. So for the next
        // round we want to ask for x columns such that x * (l/t) == t, i.e. x = t^2/l.
        int retryCount = liveCountInRow == 0 ? count + 1 : ((count * count) / liveCountInRow) + 1;
        SliceQueryFilter newFilter = filter.withUpdatedCount(retryCount);
        return new RetriedSliceFromReadCommand(ksName, key, cfName, timestamp, newFilter, getOriginalRequestedCount());
    }
    return null;
}
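The comment in the example derives the retry size: having asked for t live columns and received only l of them after reconciliation, the next round requests roughly t^2 / l columns so that the expected number of survivors is again t. A standalone sketch of just that arithmetic (the class and method names here are hypothetical):

public class RetryCountSketch
{
    static int retryCount(int requested, int liveAfterMerge)
    {
        // Mirror the example: bump by one when nothing survived, otherwise scale by t/l
        // and add one so that integer division never undershoots.
        return liveAfterMerge == 0
             ? requested + 1
             : (requested * requested) / liveAfterMerge + 1;
    }

    public static void main(String[] args)
    {
        // Asked for 100 live columns, only 40 survived reconciliation:
        // the retry asks for 100 * 100 / 40 + 1 = 251 columns.
        System.out.println(retryCount(100, 40)); // prints 251
        System.out.println(retryCount(100, 0));  // prints 101
    }
}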
Example 5: makePrefix
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
private Composite makePrefix(CompositesIndex index, ByteBuffer key, ExtendedFilter filter, boolean isStart)
{
    if (key.remaining() == 0)
        return Composites.EMPTY;

    Composite prefix;
    IDiskAtomFilter columnFilter = filter.columnFilter(key);
    if (columnFilter instanceof SliceQueryFilter)
    {
        SliceQueryFilter sqf = (SliceQueryFilter)columnFilter;
        Composite columnName = isStart ? sqf.start() : sqf.finish();
        prefix = columnName.isEmpty() ? index.getIndexComparator().make(key) : index.makeIndexColumnPrefix(key, columnName);
    }
    else
    {
        prefix = index.getIndexComparator().make(key);
    }
    return isStart ? prefix.start() : prefix.end();
}
Example 6: queryNextPage
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)
throws RequestExecutionException
{
    SliceQueryFilter sf = (SliceQueryFilter)columnFilter;
    AbstractBounds<RowPosition> keyRange = lastReturnedKey == null ? command.keyRange : makeIncludingKeyBounds(lastReturnedKey);
    ByteBuffer start = lastReturnedName == null ? sf.start() : lastReturnedName;
    PagedRangeCommand pageCmd = new PagedRangeCommand(command.keyspace,
                                                      command.columnFamily,
                                                      command.timestamp,
                                                      keyRange,
                                                      sf,
                                                      start,
                                                      sf.finish(),
                                                      command.rowFilter,
                                                      pageSize);
    return localQuery
         ? pageCmd.executeLocally()
         : StorageProxy.getRangeSlice(pageCmd, consistencyLevel);
}
Example 7: countPaged
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
/**
 * Convenience method that counts (live) cells/rows for a given slice of a row, but pages underneath.
 */
public static int countPaged(String keyspace,
                             String columnFamily,
                             ByteBuffer key,
                             SliceQueryFilter filter,
                             ConsistencyLevel consistencyLevel,
                             final int pageSize,
                             long now) throws RequestValidationException, RequestExecutionException
{
    SliceFromReadCommand command = new SliceFromReadCommand(keyspace, key, columnFamily, now, filter);
    final SliceQueryPager pager = new SliceQueryPager(command, consistencyLevel, false);
    ColumnCounter counter = filter.columnCounter(Schema.instance.getComparator(keyspace, columnFamily), now);
    while (!pager.isExhausted())
    {
        List<Row> next = pager.fetchPage(pageSize);
        if (!next.isEmpty())
            counter.countAll(next.get(0).cf);
    }
    return counter.live();
}
Example 8: intersects
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
@Override
public boolean intersects(List<ByteBuffer> minColumnNames, List<ByteBuffer> maxColumnNames, SliceQueryFilter filter)
{
    assert minColumnNames.size() == maxColumnNames.size();

    for (ColumnSlice slice : filter.slices)
    {
        ByteBuffer[] start = split(filter.isReversed() ? slice.finish : slice.start);
        ByteBuffer[] finish = split(filter.isReversed() ? slice.start : slice.finish);

        for (int i = 0; i < minColumnNames.size(); i++)
        {
            AbstractType<?> t = types.get(i);
            ByteBuffer s = i < start.length ? start[i] : ByteBufferUtil.EMPTY_BYTE_BUFFER;
            ByteBuffer f = i < finish.length ? finish[i] : ByteBufferUtil.EMPTY_BYTE_BUFFER;
            if (!t.intersects(minColumnNames.get(i), maxColumnNames.get(i), s, f))
                return false;
        }
    }
    return true;
}
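Conceptually, the check above rejects a candidate only when some component of the slice falls completely outside that component's min/max range, and it treats missing slice components as unbounded (the EMPTY_BYTE_BUFFER fallback). A self-contained sketch of that per-component test, using int components instead of composite ByteBuffer names; all names here are hypothetical:

public class SliceIntersectsSketch
{
    static boolean intersects(int[] min, int[] max, int[] start, int[] finish)
    {
        for (int i = 0; i < min.length; i++)
        {
            // Missing slice components are treated as unbounded on that side.
            Integer s = i < start.length ? start[i] : null;
            Integer f = i < finish.length ? finish[i] : null;
            boolean overlaps = (s == null || s <= max[i]) && (f == null || f >= min[i]);
            if (!overlaps)
                return false;
        }
        return true;
    }

    public static void main(String[] args)
    {
        int[] min = { 1, 10 };
        int[] max = { 5, 50 };
        System.out.println(intersects(min, max, new int[]{ 2 }, new int[]{ 4 }));       // true
        System.out.println(intersects(min, max, new int[]{ 6, 0 }, new int[]{ 9, 0 })); // false
    }
}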
Example 9: countPaged
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
/**
 * Convenience method that counts (live) cells/rows for a given slice of a row, but pages underneath.
 */
public static int countPaged(String keyspace,
                             String columnFamily,
                             ByteBuffer key,
                             SliceQueryFilter filter,
                             ConsistencyLevel consistencyLevel,
                             final int pageSize,
                             long now) throws RequestValidationException, RequestExecutionException
{
    SliceFromReadCommand command = new SliceFromReadCommand(keyspace, key, columnFamily, now, filter);
    final SliceQueryPager pager = new SliceQueryPager(command, consistencyLevel, false);
    ColumnCounter counter = filter.columnCounter(Schema.instance.getCFMetaData(keyspace, columnFamily).comparator, now);
    while (!pager.isExhausted())
    {
        List<Row> next = pager.fetchPage(pageSize);
        if (!next.isEmpty())
            counter.countAll(next.get(0).cf);
    }
    return counter.live();
}
Example 10: maybeGenerateRetryCommand
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
@Override
public ReadCommand maybeGenerateRetryCommand(RowDataResolver resolver, Row row)
{
    int maxLiveColumns = resolver.getMaxLiveCount();
    int count = filter.count;
    // We generate a retry if at least one node replied with count live columns, but after the merge we have fewer
    // than the total number of columns we are interested in (which may be < count on a retry).
    // In particular, if no host returned count live columns, we know it is not a short read.
    if (maxLiveColumns < count)
        return null;
    int liveCountInRow = row == null || row.cf == null ? 0 : filter.getLiveCount(row.cf);
    if (liveCountInRow < getOriginalRequestedCount())
    {
        // We asked for t (= count) live columns and got l (= liveCountInRow) of them.
        // From that, we can estimate that on this row, for x requested
        // columns, only x * (l/t) end up live after reconciliation. So for the next
        // round we want to ask for x columns such that x * (l/t) == t, i.e. x = t^2/l.
        int retryCount = liveCountInRow == 0 ? count + 1 : ((count * count) / liveCountInRow) + 1;
        SliceQueryFilter newFilter = filter.withUpdatedCount(retryCount);
        return new RetriedSliceFromReadCommand(table, key, queryPath, newFilter, getOriginalRequestedCount());
    }
    return null;
}
Example 11: queryNextPage
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)
throws RequestExecutionException
{
    SliceQueryFilter sf = (SliceQueryFilter)columnFilter;
    AbstractBounds<RowPosition> keyRange = lastReturnedKey == null ? command.keyRange : makeIncludingKeyBounds(lastReturnedKey);
    Composite start = lastReturnedName == null ? sf.start() : lastReturnedName;
    PagedRangeCommand pageCmd = new PagedRangeCommand(command.keyspace,
                                                      command.columnFamily,
                                                      command.timestamp,
                                                      keyRange,
                                                      sf,
                                                      start,
                                                      sf.finish(),
                                                      command.rowFilter,
                                                      pageSize);
    return localQuery
         ? pageCmd.executeLocally()
         : StorageProxy.getRangeSlice(pageCmd, consistencyLevel);
}
Example 12: makePrefix
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
private Composite makePrefix(CompositesIndex index, ByteBuffer key, ExtendedFilter filter, boolean isStart)
{
    if (key.remaining() == 0)
        return Composites.EMPTY;

    Composite prefix;
    IDiskAtomFilter columnFilter = filter.columnFilter(key);
    if (columnFilter instanceof SliceQueryFilter)
    {
        SliceQueryFilter sqf = (SliceQueryFilter)columnFilter;
        prefix = index.makeIndexColumnPrefix(key, isStart ? sqf.start() : sqf.finish());
    }
    else
    {
        prefix = index.getIndexComparator().make(key);
    }
    return isStart ? prefix.start() : prefix.end();
}
Example 13: makeExtendedFilter
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
/**
 * Allows generic range paging with the slice column filter.
 * Typically, suppose we have rows A, B, C ... Z, each having some columns in [1, 100].
 * And suppose we want to page through a query that, for every row, returns the columns
 * within [25, 75]. For that, we need to be able to do a range slice starting at (row r, column c)
 * and ending at (row Z, column 75), *but* that only returns columns in [25, 75].
 * That is what this method allows. The columnRange is the "window" of columns we are interested
 * in for each row, and columnStart (resp. columnStop) is the start (resp. end) for the first
 * (resp. last) requested row.
 */
public ExtendedFilter makeExtendedFilter(AbstractBounds<RowPosition> keyRange,
                                         SliceQueryFilter columnRange,
                                         Composite columnStart,
                                         Composite columnStop,
                                         List<IndexExpression> rowFilter,
                                         int maxResults,
                                         boolean countCQL3Rows,
                                         long now)
{
    DataRange dataRange = new DataRange.Paging(keyRange, columnRange, columnStart, columnStop, metadata.comparator);
    return ExtendedFilter.create(this, dataRange, rowFilter, maxResults, countCQL3Rows, now);
}
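To make the Javadoc concrete: every row is restricted to the slice window, but the first row of the page resumes at columnStart and the last row stops at columnStop. A plain-Java sketch of how effective per-row bounds could be derived under that reading (rows and columns are modeled as ints; nothing here is Cassandra API):

public class RangePagingWindowSketch
{
    static int[] effectiveBounds(boolean isFirstRow, boolean isLastRow,
                                 int windowStart, int windowEnd,
                                 int pageColumnStart, int pageColumnStop)
    {
        // Middle rows see the full window; the first row starts at the resume point,
        // and the last row ends at the page stop.
        int start = isFirstRow ? Math.max(windowStart, pageColumnStart) : windowStart;
        int end = isLastRow ? Math.min(windowEnd, pageColumnStop) : windowEnd;
        return new int[]{ start, end };
    }

    public static void main(String[] args)
    {
        // Window is [25, 75]; the previous page stopped at column 40 of row "r".
        int[] firstRow = effectiveBounds(true, false, 25, 75, 40, 75);
        int[] middleRow = effectiveBounds(false, false, 25, 75, 40, 75);
        System.out.println(firstRow[0] + ".." + firstRow[1]);   // prints 40..75
        System.out.println(middleRow[0] + ".." + middleRow[1]); // prints 25..75
    }
}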
Example 14: AbstractCType
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
protected AbstractCType(boolean isByteOrderComparable)
{
    reverseComparator = new Comparator<Composite>()
    {
        public int compare(Composite c1, Composite c2)
        {
            return AbstractCType.this.compare(c2, c1);
        }
    };
    indexComparator = new Comparator<IndexInfo>()
    {
        public int compare(IndexInfo o1, IndexInfo o2)
        {
            return AbstractCType.this.compare(o1.lastName, o2.lastName);
        }
    };
    indexReverseComparator = new Comparator<IndexInfo>()
    {
        public int compare(IndexInfo o1, IndexInfo o2)
        {
            return AbstractCType.this.compare(o1.firstName, o2.firstName);
        }
    };
    serializer = new Serializer(this);
    indexSerializer = new IndexInfo.Serializer(this);
    sliceSerializer = new ColumnSlice.Serializer(this);
    sliceQueryFilterSerializer = new SliceQueryFilter.Serializer(this);
    deletionInfoSerializer = new DeletionInfo.Serializer(this);
    rangeTombstoneSerializer = new RangeTombstone.Serializer(this);
    rowIndexEntrySerializer = new RowIndexEntry.Serializer(this);
    this.isByteOrderComparable = isByteOrderComparable;
}
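The reverseComparator above uses the classic delegate-with-swapped-arguments idiom. A minimal sketch of the same idiom on plain Strings:

import java.util.Comparator;

public class ReverseComparatorSketch
{
    public static void main(String[] args)
    {
        Comparator<String> forward = Comparator.naturalOrder();
        // Delegate to the forward comparator with the arguments swapped, as AbstractCType does.
        Comparator<String> reverse = (a, b) -> forward.compare(b, a);
        System.out.println(forward.compare("a", "b")); // negative
        System.out.println(reverse.compare("a", "b")); // positive
    }
}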
Example 15: create
import org.apache.cassandra.db.filter.SliceQueryFilter; // import the required package/class
public static ReadCommand create(String ksName, ByteBuffer key, String cfName, long timestamp, IDiskAtomFilter filter)
{
    if (filter instanceof SliceQueryFilter)
        return new SliceFromReadCommand(ksName, key, cfName, timestamp, (SliceQueryFilter)filter);
    else
        return new SliceByNamesReadCommand(ksName, key, cfName, timestamp, (NamesQueryFilter)filter);
}