本文整理汇总了Java中org.apache.cassandra.db.Row类的典型用法代码示例。如果您正苦于以下问题:Java Row类的具体用法?Java Row怎么用?Java Row使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Row类属于org.apache.cassandra.db包,在下文中一共展示了Row类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: resolve
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Merges the rows carried by all range-slice replies into a single resolved view,
 * combining the versions seen for each row and repairing incomplete ones.
 *
 * @return the resolved rows, one per distinct row seen across the replies
 */
public Iterable<Row> resolve()
{
    ArrayList<RowIterator> rowIterators = new ArrayList<RowIterator>(responses.size());
    int maxRows = 0;
    for (MessageIn<RangeSliceReply> message : responses)
    {
        RangeSliceReply payload = message.payload;
        maxRows = Math.max(maxRows, payload.rows.size());
        rowIterators.add(new RowIterator(payload.rows.iterator(), message.from));
    }

    // For each row, compute the combination of all different versions seen,
    // and repair incomplete versions.
    // TODO do we need to call close?
    CloseableIterator<Row> merged = MergeIterator.get(rowIterators, pairComparator, new Reducer());

    List<Row> resolvedRows = new ArrayList<Row>(maxRows);
    while (merged.hasNext())
        resolvedRows.add(merged.next());
    return resolvedRows;
}
示例2: getReduced
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Reduces all replica versions accumulated for the current row into one
 * resolved Row, and schedules read-repair for replicas whose data was
 * stale or missing.
 *
 * NOTE(review): relies on the accumulators {@code versions} /
 * {@code versionSources} being populated for the current key before this
 * is called; both are cleared here for the next row.
 *
 * @return the resolved row for the current key (its cf may be null)
 */
protected Row getReduced()
{
    // Merge the replica versions into a superset; a single version needs no merge.
    ColumnFamily resolved = versions.size() > 1
                          ? RowDataResolver.resolveSuperset(versions, timestamp)
                          : versions.get(0);
    if (versions.size() < sources.size())
    {
        // add placeholder rows for sources that didn't have any data, so maybeScheduleRepairs sees them
        for (InetAddress source : sources)
        {
            if (!versionSources.contains(source))
            {
                versions.add(null);
                versionSources.add(source);
            }
        }
    }
    // resolved can be null even if versions doesn't have all nulls because of the call to removeDeleted in resolveSuperSet
    if (resolved != null)
        repairResults.addAll(RowDataResolver.scheduleRepairs(resolved, keyspaceName, key, versions, versionSources));
    // Reset the per-row accumulators for the next key.
    versions.clear();
    versionSources.clear();
    return new Row(key, resolved);
}
示例3: search
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Performs a search across a number of column indexes.
 *
 * @param filter the column range to restrict to
 * @return found indexed rows
 */
public List<Row> search(ExtendedFilter filter)
{
    List<SecondaryIndexSearcher> searchers = getIndexSearchersForQuery(filter.getClause());
    if (searchers.isEmpty())
        return Collections.emptyList();

    // Pick the searcher whose most selective index predicts the fewest result rows.
    SecondaryIndexSearcher best = null;
    long lowestEstimate = Long.MAX_VALUE;
    for (SecondaryIndexSearcher candidate : searchers)
    {
        SecondaryIndex index = candidate.highestSelectivityIndex(filter.getClause());
        long rowEstimate = index.estimateResultRows();
        if (rowEstimate <= lowestEstimate)
        {
            lowestEstimate = rowEstimate;
            best = candidate;
        }
    }
    return best.search(filter);
}
示例4: reload
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Re-reads this table's definition row from the system schema keyspace and
 * applies it to this metadata instance.
 *
 * @throws RuntimeException if the definition row is missing/empty, or if the
 *         deserialized schema fails validation (wrapping ConfigurationException)
 */
public void reload()
{
    Row schemaRow = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_COLUMNFAMILIES_CF, ksName, cfName);

    boolean missing = schemaRow.cf == null || !schemaRow.cf.hasColumns();
    if (missing)
        throw new RuntimeException(String.format("%s not found in the schema definitions keyspace.", ksName + ":" + cfName));

    try
    {
        apply(fromSchema(schemaRow));
    }
    catch (ConfigurationException e)
    {
        // Preserve the cause so callers can see why the schema was rejected.
        throw new RuntimeException(e);
    }
}
示例5: createSecondaryIndexSearcher
import org.apache.cassandra.db.Row; //导入依赖的package包/类
@Override
protected SecondaryIndexSearcher createSecondaryIndexSearcher(Set<ByteBuffer> columns)
{
    // Stub searcher for tests: every search returns the last indexed row,
    // and validation rejects the literal value "invalid".
    return new SecondaryIndexSearcher(baseCfs.indexManager, columns)
    {
        @Override
        public void validate(IndexExpression indexExpression) throws InvalidRequestException
        {
            if (indexExpression.value.equals(ByteBufferUtil.bytes("invalid")))
                throw new InvalidRequestException("Invalid search!");
        }

        @Override
        public List<Row> search(ExtendedFilter filter)
        {
            return Arrays.asList(new Row(LAST_INDEXED_KEY, LAST_INDEXED_ROW));
        }
    };
}
示例6: remoteStorageQuery
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Reads the metadata-log column family for the given target key via a
 * remote slice query over the full timestamp range of the data tag.
 *
 * @param target  row key to read
 * @param dataTag tag component of the column names to slice
 * @return the fetched ColumnFamily, or null if the read fails or returns no rows
 */
public static ColumnFamily remoteStorageQuery(String target, String dataTag){
    try {
        ReadCommand slice = new SliceFromReadCommand(
                Metadata.MetaData_KS,
                ByteBufferUtil.bytes(target),
                new QueryPath(Metadata.MetadataLog_CF),
                // Slice from timestamp 0 up to Long.MAX_VALUE for this tag.
                Column.decomposeName(String.valueOf(0) , "", dataTag, "value"),
                Column.decomposeName(String.valueOf(Long.MAX_VALUE) , "", dataTag, "value"),
                false,
                Integer.MAX_VALUE);
        List<ReadCommand> commands = new ArrayList<ReadCommand>();
        commands.add(slice);
        List<Row> rows = StorageProxy.read(commands, ConsistencyLevel.ANY);
        return rows.get(0).cf;
    } catch (Exception e) {
        // Best-effort read: any failure (including an empty result set) yields null.
        return null;
    }
}
示例7: remoteStorageQuery
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Reads the admin_tag column of the metadata registry for the given target
 * key via a remote single-column slice query.
 *
 * @param target  row key to read
 * @param dataTag tag whose admin_tag column is fetched
 * @return the fetched ColumnFamily, or null if the read fails or returns no rows
 */
private ColumnFamily remoteStorageQuery(String target, String dataTag){
    try {
        // Start and finish bounds are identical: this slices exactly one column.
        ReadCommand slice = new SliceFromReadCommand(
                Metadata.MetaData_KS,
                ByteBufferUtil.bytes(target),
                new QueryPath(Metadata.MetadataRegistry_CF),
                Column.decomposeName(dataTag, "admin_tag"),
                Column.decomposeName(dataTag, "admin_tag"),
                false,
                Integer.MAX_VALUE);
        List<ReadCommand> commands = new ArrayList<ReadCommand>();
        commands.add(slice);
        List<Row> rows = StorageProxy.read(commands, ConsistencyLevel.ANY);
        return rows.get(0).cf;
    } catch (Exception e) {
        // Best-effort read: any failure (including an empty result set) yields null.
        return null;
    }
}
示例8: getData
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Special case of resolve() so that CL.ONE reads never throw
 * DigestMismatchException in the foreground.
 *
 * @return the first full-data (non-digest) reply's row, or null if only
 *         digest replies were received
 */
public Row getData()
{
    for (MessageIn<ReadResponse> reply : replies)
    {
        ReadResponse response = reply.payload;
        if (response.isDigestQuery())
            continue;
        return response.row();
    }
    return null;
}
示例9: resolve
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Cross-checks the digests of all replies against each other and extracts
 * the data reply, failing fast on the first disagreement.
 *
 * @return a Row built from the key and the (possibly null) data reply's cf
 * @throws DigestMismatchException if any two replies' digests differ
 */
public Row resolve() throws DigestMismatchException
{
    if (logger.isDebugEnabled())
        logger.debug("resolving {} responses", replies.size());
    long startNanos = System.nanoTime();

    // Validate digests against each other; throw immediately on mismatch.
    // Also extract the data reply, if any.
    ColumnFamily data = null;
    ByteBuffer referenceDigest = null;
    for (MessageIn<ReadResponse> message : replies)
    {
        ReadResponse response = message.payload;
        ByteBuffer candidateDigest;
        if (!response.isDigestQuery())
        {
            // note that this allows for multiple data replies, post-CASSANDRA-5932
            data = response.row().cf;
            candidateDigest = ColumnFamily.digest(data);
        }
        else
        {
            candidateDigest = response.digest();
        }

        if (referenceDigest == null)
            referenceDigest = candidateDigest;
        else if (!referenceDigest.equals(candidateDigest))
            throw new DigestMismatchException(key, referenceDigest, candidateDigest);
    }

    if (logger.isDebugEnabled())
        logger.debug("resolve: {} ms.", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
    return new Row(key, data);
}
示例10: search
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Searches using the most selective composites index for the filter's clause.
 *
 * NOTE(review): the three OpOrder groups are opened in a specific order
 * (write order, then base-CF read order, then index-CF read order) and closed
 * in reverse by try-with-resources — do not reorder them.
 *
 * @param filter the extended filter; its clause must be non-null and non-empty
 * @return the matching rows from the base column family
 */
@Override
public List<Row> search(ExtendedFilter filter)
{
    assert filter.getClause() != null && !filter.getClause().isEmpty();
    // Drive the scan from the predicate expected to match the fewest rows.
    final IndexExpression primary = highestSelectivityPredicate(filter.getClause());
    final CompositesIndex index = (CompositesIndex)indexManager.getIndexForColumn(primary.column);
    // TODO: this should perhaps not open and maintain a writeOp for the full duration, but instead only *try* to delete stale entries, without blocking if there's no room
    // as it stands, we open a writeOp and keep it open for the duration to ensure that should this CF get flushed to make room we don't block the reclamation of any room being made
    try (OpOrder.Group writeOp = baseCfs.keyspace.writeOrder.start(); OpOrder.Group baseOp = baseCfs.readOrdering.start(); OpOrder.Group indexOp = index.getIndexCfs().readOrdering.start())
    {
        return baseCfs.filter(getIndexedIterator(writeOp, filter, primary, index), filter);
    }
}
示例11: execute
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Executes the travel command (if any) and converts the resulting rows
 * into a CQL result message.
 *
 * @param command the pageable travel command, or null for an empty result
 * @param options query options (consistency, paging, values)
 * @param limit   maximum number of rows to return
 * @param now     query timestamp
 * @return the processed CQL rows
 */
private ResultMessage.Rows execute(Pageable command, QueryOptions options, int limit, long now) throws RequestValidationException, RequestExecutionException
{
    List<Row> rows = command == null
                   ? Collections.<Row>emptyList()
                   : StorageProxy.travel(((TravelCommand) command), options.getConsistency());
    return processResults(rows, options, limit, now);
}
示例12: processResults
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Converts raw storage rows into a CQL Rows result message.
 *
 * @param rows    raw rows read from storage
 * @param options query options
 * @param limit   maximum number of rows
 * @param now     query timestamp
 * @return the result message wrapping the processed ResultSet
 */
public ResultMessage.Rows processResults(List<Row> rows, QueryOptions options, int limit, long now) throws RequestValidationException
{
    // Even for count, we need to process the result as it'll group some column together in sparse column families
    ResultSet resultSet = process(rows, options, limit, now);
    return new ResultMessage.Rows(resultSet);
}
示例13: process
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Builds a CQL ResultSet from raw storage rows, skipping rows that carry
 * no column data.
 *
 * @param rows    raw rows read from storage
 * @param options query options
 * @param limit   maximum number of rows
 * @param now     query timestamp
 * @return the assembled ResultSet
 */
private ResultSet process(List<Row> rows, QueryOptions options, int limit, long now) throws InvalidRequestException
{
    Selection.ResultSetBuilder result = selection.resultSetBuilder(now);
    for (org.apache.cassandra.db.Row row : rows)
    {
        // No columns match the query, skip
        if (row.cf == null)
            continue;
        processColumnFamily(row.key.getKey(), row.cf, options, now, result);
    }
    ResultSet cqlRows = result.build();
    return cqlRows;
}
示例14: fromThriftCqlRow
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Create CFMetaData from thrift rows read from schema_columnfamilies /
 * schema_columns.
 *
 * @param cf         thrift {@link CqlRow} holding the column-family level schema row
 * @param columnsRes thrift result whose rows come from schema_columns
 * @return CFMetaData derived from the thrift rows
 */
public static CFMetaData fromThriftCqlRow(CqlRow cf, CqlResult columnsRes)
{
    UntypedResultSet.Row cfRow = new UntypedResultSet.Row(convertThriftCqlRow(cf));

    List<Map<String, ByteBuffer>> columnRows = new ArrayList<>(columnsRes.rows.size());
    for (CqlRow thriftRow : columnsRes.rows)
        columnRows.add(convertThriftCqlRow(thriftRow));

    return fromSchemaNoTriggers(cfRow, UntypedResultSet.create(columnRows));
}
示例15: fromSchema
import org.apache.cassandra.db.Row; //导入依赖的package包/类
/**
 * Deserialize CF metadata from its low-level schema representation,
 * including column definitions and trigger definitions.
 *
 * @param result the schema_columnfamilies row for the table
 * @return Thrift-based metadata deserialized from schema
 */
public static CFMetaData fromSchema(UntypedResultSet.Row result)
{
    String ksName = result.getString("keyspace_name");
    String cfName = result.getString("columnfamily_name");

    // Column definitions live in their own schema table.
    Row columnsRow = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_COLUMNS_CF, ksName, cfName);
    CFMetaData cfm = fromSchemaNoTriggers(result, ColumnDefinition.resultify(columnsRow));

    // Triggers are attached afterwards from their own schema table.
    Row triggersRow = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_TRIGGERS_CF, ksName, cfName);
    addTriggerDefinitionsFromSchema(cfm, triggersRow);

    return cfm;
}