This article collects typical usage examples of the Java class com.netflix.astyanax.model.Rows. If you have been wondering what the Rows class does, how to use it, or what real-world usages look like, the curated code samples below should help.
The Rows class belongs to the com.netflix.astyanax.model package. The sections below show 15 code examples of the Rows class, ordered by popularity by default.
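Before getting to the examples, here is a minimal, self-contained sketch of the pattern most of them follow: a key-slice query produces a Rows container, which is simply an Iterable of Row objects keyed by row key. The example_cf column family, the key names, and the already-configured Keyspace are illustrative assumptions, not part of any example below.

import java.util.Arrays;

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.serializers.StringSerializer;

public class RowsBasicUsage {

    // Hypothetical column family: String row keys, String column names.
    private static final ColumnFamily<String, String> CF_EXAMPLE =
            ColumnFamily.newColumnFamily("example_cf", StringSerializer.get(), StringSerializer.get());

    // Fetch a few rows by key and walk the resulting Rows container.
    public static void printRows(Keyspace keyspace) throws ConnectionException {
        Rows<String, String> rows = keyspace.prepareQuery(CF_EXAMPLE)
                .getKeySlice(Arrays.asList("key1", "key2"))
                .execute()
                .getResult();
        for (Row<String, String> row : rows) { // Rows is Iterable<Row<K, C>>
            for (Column<String> column : row.getColumns()) {
                System.out.println(row.getKey() + " -> " + column.getName()
                        + " = " + column.getStringValue());
            }
        }
    }
}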
Example 1: findMaxRecords
import com.netflix.astyanax.model.Rows; // import the required package/class
@Override
public Map<UUID, ByteBuffer> findMaxRecords(Collection<UUID> dataIds) {
// Finding the max using a reversed column range shouldn't have to worry about skipping tombstones since
// we always delete smaller column values before deleting larger column values--scanning will hit the max
// before needing to skip over tombstones.
Map<UUID, ByteBuffer> resultMap = Maps.newHashMap();
for (List<UUID> batch : Iterables.partition(dataIds, 10)) {
Rows<UUID, ByteBuffer> rows = execute(
_keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
.getKeySlice(batch)
.withColumnRange(new RangeBuilder()
.setReversed(true)
.setLimit(1)
.build()));
for (Row<UUID, ByteBuffer> row : rows) {
UUID dataId = row.getKey();
for (Column<ByteBuffer> column : row.getColumns()) {
resultMap.put(dataId, column.getName());
}
}
}
return resultMap;
}
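Example 1 depends on two project-local pieces that are not shown: the CF_DEDUP_DATA constant (a ColumnFamily<UUID, ByteBuffer>) and an execute(...) helper; _keyspace also appears to be a project wrapper whose prepareQuery accepts a consistency level. A minimal, hypothetical stand-in for such a helper, assuming the query object is a standard Astyanax Execution, might look like this:

import com.netflix.astyanax.Execution;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;

// Hypothetical stand-in for the project-local execute(...) helper: run any
// Astyanax query and convert the checked ConnectionException into an
// unchecked exception so callers stay uncluttered.
private static <T> T execute(Execution<T> query) {
    try {
        return query.execute().getResult();
    } catch (ConnectionException e) {
        throw new RuntimeException(e);
    }
}

The reversed column range with a limit of 1 means each returned row carries at most one column: the largest column name for that key.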
Example 2: rowQuery
import com.netflix.astyanax.model.Rows; // import the required package/class
/**
* Queries for rows given an enumerated list of Cassandra row keys.
*/
private Iterator<Record> rowQuery(DeltaPlacement placement,
List<Map.Entry<ByteBuffer, Key>> keys,
ReadConsistency consistency) {
// Build the list of row IDs to query for.
List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction());
// Query for Delta & Compaction info, just the first 50 columns for now.
final Rows<ByteBuffer, DeltaKey> rows = execute(placement.getKeyspace()
.prepareQuery(placement.getBlockedDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency))
.getKeySlice(rowIds)
.withColumnRange(_maxColumnsRange),
"query %d keys from placement %s", rowIds.size(), placement.getName());
// Track metrics
_randomReadMeter.mark(rowIds.size());
// Return an iterator that decodes the row results, avoiding pinning multiple decoded rows into memory at once.
return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency);
}
Example 3: decodeRows
import com.netflix.astyanax.model.Rows; // import the required package/class
/**
* Decodes rows returned by querying for a specific set of rows.
*/
private Iterator<Record> decodeRows(List<Map.Entry<ByteBuffer, Key>> keys, final Rows<ByteBuffer, DeltaKey> rows,
final int largeRowThreshold, final ReadConsistency consistency) {
// Avoiding pinning multiple decoded rows into memory at once.
return Iterators.transform(keys.iterator(), new Function<Map.Entry<ByteBuffer, Key>, Record>() {
@Override
public Record apply(Map.Entry<ByteBuffer, Key> entry) {
Row<ByteBuffer, DeltaKey> row = rows.getRow(entry.getKey());
if (row == null) {
return emptyRecord(entry.getValue());
}
// Convert the results into a Record object, lazily fetching the rest of the columns as necessary.
return newRecord(entry.getValue(), row.getRawKey(), row.getColumns(), largeRowThreshold, consistency, null);
}
});
}
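Examples 2 and 3 (and the nearly identical examples 4 and 5 below) rely on Rows.getRow(key) returning null for keys the query did not find, which is what lets the decoder substitute an empty record while keeping the output aligned with the requested key order. A generic, hedged sketch of that lookup pattern, stripped of the project's Record and Key types:

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;

// For every requested key, look its row up in the Rows result; keys that were
// not found yield a null entry, so the output list stays aligned with the input.
static <C> List<ColumnList<C>> columnsInRequestOrder(List<ByteBuffer> requestedKeys,
                                                     Rows<ByteBuffer, C> rows) {
    List<ColumnList<C>> result = new ArrayList<>(requestedKeys.size());
    for (ByteBuffer key : requestedKeys) {
        Row<ByteBuffer, C> row = rows.getRow(key);
        result.add(row != null ? row.getColumns() : null); // null marks a missing row
    }
    return result;
}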
Example 4: rowQuery
import com.netflix.astyanax.model.Rows; // import the required package/class
/**
* Queries for rows given an enumerated list of Cassandra row keys.
*/
private Iterator<Record> rowQuery(DeltaPlacement placement,
List<Map.Entry<ByteBuffer, Key>> keys,
ReadConsistency consistency) {
// Build the list of row IDs to query for.
List<ByteBuffer> rowIds = Lists.transform(keys, entryKeyFunction());
// Query for Delta & Compaction info, just the first 50 columns for now.
final Rows<ByteBuffer, UUID> rows = execute(placement.getKeyspace()
.prepareQuery(placement.getDeltaColumnFamily(), SorConsistencies.toAstyanax(consistency))
.getKeySlice(rowIds)
.withColumnRange(_maxColumnsRange),
"query %d keys from placement %s", rowIds.size(), placement.getName());
// Track metrics
_randomReadMeter.mark(rowIds.size());
// Return an iterator that decodes the row results, avoiding pinning multiple decoded rows into memory at once.
return decodeRows(keys, rows, _maxColumnsRange.getLimit(), consistency);
}
Example 5: decodeRows
import com.netflix.astyanax.model.Rows; // import the required package/class
/**
* Decodes rows returned by querying for a specific set of rows.
*/
private Iterator<Record> decodeRows(List<Map.Entry<ByteBuffer, Key>> keys, final Rows<ByteBuffer, UUID> rows,
final int largeRowThreshold, final ReadConsistency consistency) {
// Avoiding pinning multiple decoded rows into memory at once.
return Iterators.transform(keys.iterator(), new Function<Map.Entry<ByteBuffer, Key>, Record>() {
@Override
public Record apply(Map.Entry<ByteBuffer, Key> entry) {
Row<ByteBuffer, UUID> row = rows.getRow(entry.getKey());
if (row == null) {
return emptyRecord(entry.getValue());
}
// Convert the results into a Record object, lazily fetching the rest of the columns as necessary.
return newRecord(entry.getValue(), row.getRawKey(), row.getColumns(), largeRowThreshold, consistency, null);
}
});
}
Example 6: initialize
import com.netflix.astyanax.model.Rows; // import the required package/class
@SuppressWarnings("unchecked")
private void initialize() {
try {
ByteBufferRange range = new RangeBuilder().setReversed(reversed).build();
ColumnFamily<byte[], byte[]> cf = cfInfo.getColumnFamilyObj();
ColumnFamilyQuery<byte[], byte[]> cfQuery = keyspace.prepareQuery(cf);
AllRowsQuery<byte[], byte[]> query = cfQuery.getAllRows();
query.withColumnRange(range)
.setExceptionCallback(new ExcCallback());
if(batchSize != null) {
if(batchSize < 10)
throw new RuntimeException("batchSize must be 10 or greater and preferably around 500 is good.");
query.setRowLimit(batchSize);
}
OperationResult<Rows<byte[], byte[]>> opResult = query.execute();
iterator = opResult.getResult().iterator();
} catch (ConnectionException e) {
throw new RuntimeException(e);
}
}
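Example 6 only constructs the iterator; the hedged sketch below carries the same getAllRows pattern through to a complete scan. The raw_cf column family, the batch size of 500, and the counting logic are illustrative assumptions. Note that range-tombstoned rows can still surface with zero columns, which is why the column count is checked before counting a row.

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.serializers.BytesArraySerializer;
import com.netflix.astyanax.util.RangeBuilder;

// Hypothetical column family with byte[] row keys and byte[] column names,
// mirroring the types used in example 6.
static final ColumnFamily<byte[], byte[]> CF_RAW =
        ColumnFamily.newColumnFamily("raw_cf", BytesArraySerializer.get(), BytesArraySerializer.get());

// Scan the whole column family in batches of 500 rows and count non-empty rows.
static long countAllRows(Keyspace keyspace) throws ConnectionException {
    Rows<byte[], byte[]> rows = keyspace.prepareQuery(CF_RAW)
            .getAllRows()
            .setRowLimit(500)                                        // rows fetched per batch
            .withColumnRange(new RangeBuilder().setLimit(1).build()) // at most one column per row
            .execute()
            .getResult();
    long count = 0;
    for (Row<byte[], byte[]> row : rows) { // iteration pages through the table transparently
        if (row.getColumns().size() > 0) {
            count++;
        }
    }
    return count;
}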
Example 7: doFindAll
import com.netflix.astyanax.model.Rows; // import the required package/class
protected List<T> doFindAll(ID restrict, Sort sort) {
try {
Map<String, ByteBuffer> serializedKeyValues = spec.getSerializedKeyValues(restrict);
List<String> keysSet = EntitySpecificationUtils.getKeysSet(serializedKeyValues);
String keysCql = cqlGen.buildLimitedFindAllKeysStatement(keysSet, sort, 0);
PreparedCqlQuery<String, String> preparedStatement = doPreparedCqlRead(keysCql);
for (String column : keysSet) {
preparedStatement = preparedStatement.withValue(serializedKeyValues.get(column));
}
OperationResult<CqlResult<String, String>> keysResult = preparedStatement.execute();
LOGGER.debug("attempts: {}, latency: {}ms",
keysResult.getAttemptsCount(),
keysResult.getLatency(TimeUnit.MILLISECONDS));
CqlResult<String, String> cqlKeysResult = keysResult.getResult();
Rows<String, String> keysSetRows = cqlKeysResult.getRows();
List<T> keysAsEnts = spec.map(keysSetRows);
List<ID> keys = spec.getKey(keysAsEnts);
return findAll(keys);
} catch (ConnectionException e) {
throw new DataRetrievalFailureException("Error while executing CQL3 query", e);
}
}
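In example 7 the Rows obtained from CqlResult.getRows() is handed to the project-specific spec.map(...). The hedged sketch below shows how such a CQL result can be unpacked directly; the name=value formatting is purely illustrative and not the project's mapping logic.

import java.util.ArrayList;
import java.util.List;

import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.CqlResult;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;

// Flatten each CQL result row into a "name=value ..." string.
static List<String> describeRows(CqlResult<String, String> cqlResult) {
    List<String> out = new ArrayList<>();
    Rows<String, String> rows = cqlResult.getRows();
    for (Row<String, String> row : rows) {
        StringBuilder sb = new StringBuilder();
        for (Column<String> column : row.getColumns()) {
            sb.append(column.getName()).append('=').append(column.getStringValue()).append(' ');
        }
        out.add(sb.toString().trim());
    }
    return out;
}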
Example 8: containsSingleRowOnly
import com.netflix.astyanax.model.Rows; // import the required package/class
/**
* Return true if we have < 2 rows with columns, false otherwise
*/
private boolean containsSingleRowOnly( final Rows<R, C> result ) {
int count = 0;
for ( R key : result.getKeys() ) {
if ( result.getRow( key ).getColumns().size() > 0 ) {
count++;
//we have more than 1 row with values, return them
if ( count > 1 ) {
return false;
}
}
}
return true;
}
Example 9: readAliases
import com.netflix.astyanax.model.Rows; // import the required package/class
public Set<Long> readAliases(Publisher source, Iterable<Alias> aliases)
throws ConnectionException {
ImmutableSet<Alias> uniqueAliases = ImmutableSet.copyOf(aliases);
String columnName = checkNotNull(source).key();
RowSliceQuery<String, String> aliasQuery = keyspace.prepareQuery(columnFamily)
.getRowSlice(serialize(uniqueAliases.asList()))
.withColumnSlice(columnName);
Rows<String, String> rows = aliasQuery.execute().getResult();
List<Long> ids = Lists.newArrayListWithCapacity(rows.size());
for (Row<String, String> row : rows) {
Column<String> idCell = row.getColumns().getColumnByName(columnName);
if (idCell != null) {
ids.add(idCell.getLongValue());
}
}
return ImmutableSet.copyOf(ids);
}
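Example 9 assumes a pre-built columnFamily field with String row keys and String column names; a plausible, hypothetical declaration is shown below. It also relies on getColumnByName returning null when a row exists but lacks the requested column, which is why the getLongValue call is guarded.

import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.StringSerializer;

// Hypothetical declaration of the alias-index column family: row key = serialized
// alias, column name = publisher key, column value = the resolved long id.
private static final ColumnFamily<String, String> ALIAS_INDEX_CF =
        ColumnFamily.newColumnFamily("alias_index", StringSerializer.get(), StringSerializer.get());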
Example 10: resolveCurrentScheduleBlocks
import com.netflix.astyanax.model.Rows; // import the required package/class
@Override
protected List<ChannelSchedule> resolveCurrentScheduleBlocks(Publisher source, Channel channel,
Interval interval) throws WriteException {
Rows<String, String> rows = fetchRows(source, channel, interval);
List<ChannelSchedule> channelSchedules = Lists.newArrayList();
for (LocalDate date : new ScheduleIntervalDates(interval)) {
DateTime start = date.toDateTimeAtStartOfDay(DateTimeZones.UTC);
Interval dayInterval = new Interval(start, start.plusDays(1));
channelSchedules.add(schedule(
channel,
dayInterval,
rows.getRow(keyFor(source, channel.getId().longValue(), date))
));
}
return channelSchedules;
}
Example 11: resolveStaleScheduleBlocks
import com.netflix.astyanax.model.Rows; // import the required package/class
@Override
protected List<ChannelSchedule> resolveStaleScheduleBlocks(Publisher source, Channel channel,
Interval interval) throws WriteException {
Rows<String, String> rows = fetchRows(source, channel, interval);
List<ChannelSchedule> channelSchedules = Lists.newArrayList();
for (LocalDate date : new ScheduleIntervalDates(interval)) {
channelSchedules.add(
new ChannelSchedule(
channel,
interval,
pastSchedule(
rows.getRow(
keyFor(source, channel.getId().longValue(), date)
)
)
)
);
}
return channelSchedules;
}
Example 12: resolveAliases
import com.netflix.astyanax.model.Rows; // import the required package/class
@Override
public OptionalMap<Alias, Topic> resolveAliases(Iterable<Alias> aliases, Publisher source) {
try {
Set<Alias> uniqueAliases = ImmutableSet.copyOf(aliases);
Set<Long> ids = aliasIndex.readAliases(source, uniqueAliases);
// TODO: move timeout to config
Rows<Long, String> resolved = resolveLongs(ids).get(1, TimeUnit.MINUTES);
Iterable<Topic> topics = Iterables.transform(resolved, rowToTopic);
ImmutableMap.Builder<Alias, Optional<Topic>> aliasMap = ImmutableMap.builder();
for (Topic topic : topics) {
topic.getAliases()
.stream()
.filter(uniqueAliases::contains)
.forEach(alias -> aliasMap.put(alias, Optional.of(topic)));
}
return ImmutableOptionalMap.copyOf(aliasMap.build());
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
Example 13: listAuthorsByEvents
import com.netflix.astyanax.model.Rows; // import the required package/class
public static HashMap<String, ArrayList<String>> listAuthorsByEvents() throws ConnectionException {
HashMap<String, ArrayList<String>> result = new HashMap<String, ArrayList<String>>();
Rows<String, String> rows = DARDB.get().getKeyspace().prepareQuery(DARDB.CF_DAR).getAllRows().withColumnSlice("account_user_key", "event_name").execute().getResult();
for (Row<String, String> row : rows) {
String account_user_key = row.getColumns().getStringValue("account_user_key", "");
String event_name = row.getColumns().getStringValue("event_name", "");
if (event_name.equals("") | account_user_key.equals("")) {
Loggers.DAReport.warn("Empty values for event_name or account_user_key in [" + row.getKey() + "]");
continue;
}
if (result.containsKey(event_name) == false) {
result.put(event_name, new ArrayList<>(5));
}
result.get(event_name).add(account_user_key);
}
return result;
}
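A simple illustrative caller for example 13, assuming it sits in the same class as listAuthorsByEvents, might print each event followed by its authors:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;

// Hypothetical usage of example 13: one line per event, authors comma-separated.
public static void printAuthorsByEvents() throws ConnectionException {
    HashMap<String, ArrayList<String>> byEvent = listAuthorsByEvents();
    for (Map.Entry<String, ArrayList<String>> entry : byEvent.entrySet()) {
        System.out.println(entry.getKey() + ": " + String.join(", ", entry.getValue()));
    }
}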
Example 14: isRequireIsDone
import com.netflix.astyanax.model.Rows; // import the required package/class
boolean isRequireIsDone() throws ConnectionException {
if (required_keys == null) {
return true;
}
if (required_keys.isEmpty()) {
return true;
}
Rows<String, String> rows = keyspace.prepareQuery(CF_QUEUE).getKeySlice(required_keys).withColumnSlice("status").execute().getResult();
if (rows == null) {
return false;
}
if (rows.isEmpty()) {
return false;
}
for (Row<String, String> row : rows) {
if (row.getColumns().getStringValue("status", JobStatus.WAITING.name()).equals(JobStatus.DONE.name()) == false) {
return false;
}
}
return true;
}
Example 15: removeMaxDateForPostponedJobs
import com.netflix.astyanax.model.Rows; // import the required package/class
static void removeMaxDateForPostponedJobs(MutationBatch mutator, String creator_hostname) throws ConnectionException {
if (Loggers.Job.isDebugEnabled()) {
Loggers.Job.debug("Search for remove max date for postponed jobs");
}
IndexQuery<String, String> index_query = keyspace.prepareQuery(CF_QUEUE).searchWithIndex();
index_query.addExpression().whereColumn("status").equals().value(JobStatus.POSTPONED.name());
index_query.addExpression().whereColumn("creator_hostname").equals().value(creator_hostname);
index_query.addExpression().whereColumn("expiration_date").lessThan().value(System.currentTimeMillis() + default_max_execution_time);
index_query.withColumnSlice("source", "context_class");
OperationResult<Rows<String, String>> rows = index_query.execute();
for (Row<String, String> row : rows.getResult()) {
if (MyDMAM.factory.isClassExists(row.getColumns().getStringValue("context_class", "null")) == false) {
continue;
}
JobNG job = JobNG.Utility.importFromDatabase(row.getColumns());
job.expiration_date = System.currentTimeMillis() + (default_max_execution_time * 7l);
job.update_date = System.currentTimeMillis();
if (Loggers.Job.isDebugEnabled()) {
Loggers.Job.info("Remove max date for this postponed job:\t" + job);
}
job.exportToDatabase(mutator.withRow(CF_QUEUE, job.key));
}
}
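Example 15 receives a MutationBatch from its caller and only stages updates on it; nothing is written until the batch is executed. A hypothetical caller, assuming it lives next to removeMaxDateForPostponedJobs, could look like this:

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;

// Hypothetical caller for example 15: collect the staged column updates and
// send them to Cassandra in a single round trip.
static void refreshPostponedJobs(Keyspace keyspace, String creatorHostname) throws ConnectionException {
    MutationBatch mutator = keyspace.prepareMutationBatch();
    removeMaxDateForPostponedJobs(mutator, creatorHostname);
    if (!mutator.isEmpty()) {
        mutator.execute(); // nothing reaches Cassandra until execute() is called
    }
}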