This article collects typical usage examples of the Java method com.netflix.astyanax.model.Row.getColumns. If you are wondering what Row.getColumns does, how to call it, or where to find it used in real code, the curated examples below should help. You can also explore further usage of the enclosing class, com.netflix.astyanax.model.Row.
The sections below present 14 code examples of the Row.getColumns method, sorted by popularity by default.
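All of the examples share the same basic pattern: execute a query that returns one or more rows, call Row.getColumns on each row, and iterate the resulting ColumnList. The snippet below is a minimal sketch of that pattern; the method name, keyspace, and column family here are placeholder assumptions for illustration and are not taken from any of the examples that follow.

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;

// Hypothetical helper: print every column of every row in a column family.
public void printAllColumns(Keyspace keyspace, ColumnFamily<String, String> columnFamily) throws ConnectionException {
    OperationResult<Rows<String, String>> result = keyspace
            .prepareQuery(columnFamily)
            .getAllRows()
            .setRowLimit(100) // page size used while iterating over all rows
            .execute();
    for (Row<String, String> row : result.getResult()) {
        ColumnList<String> columns = row.getColumns(); // the columns fetched for this row
        for (Column<String> column : columns) {
            System.out.println(row.getKey() + " / " + column.getName() + " = " + column.getStringValue());
        }
    }
}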
Example 1: decodeMetadataRows
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
private Iterator<Map.Entry<String, StorageSummary>> decodeMetadataRows(
final Iterator<Row<ByteBuffer, Composite>> rowIter, final AstyanaxTable table) {
return new AbstractIterator<Map.Entry<String, StorageSummary>>() {
@Override
protected Map.Entry<String, StorageSummary> computeNext() {
while (rowIter.hasNext()) {
Row<ByteBuffer, Composite> row = rowIter.next();
ByteBuffer key = row.getKey();
ColumnList<Composite> columns = row.getColumns();
String blobId = AstyanaxStorage.getContentKey(key);
StorageSummary summary = toStorageSummary(columns);
if (summary == null) {
continue; // Partial blob, parts may still be replicating.
}
// Cleanup older versions of the blob, if any (unlikely).
deleteOldColumns(table, blobId, columns, summary.getTimestamp());
return Maps.immutableEntry(blobId, summary);
}
return endOfData();
}
};
}
Example 2: findMaxRecords
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
@Override
public Map<UUID, ByteBuffer> findMaxRecords(Collection<UUID> dataIds) {
// Finding the max using a reversed column range shouldn't have to worry about skipping tombstones since
// we always delete smaller column values before deleting larger column values--scanning will hit the max
// before needing to skip over tombstones.
Map<UUID, ByteBuffer> resultMap = Maps.newHashMap();
for (List<UUID> batch : Iterables.partition(dataIds, 10)) {
Rows<UUID, ByteBuffer> rows = execute(
_keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
.getKeySlice(batch)
.withColumnRange(new RangeBuilder()
.setReversed(true)
.setLimit(1)
.build()));
for (Row<UUID, ByteBuffer> row : rows) {
UUID dataId = row.getKey();
for (Column<ByteBuffer> column : row.getColumns()) {
resultMap.put(dataId, column.getName());
}
}
}
return resultMap;
}
Example 3: getArchiveSummaries
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
/**
* Get a summary of all archives in this Repository
* @return List of summaries
*/
@Override
public List<ArchiveSummary> getArchiveSummaries() throws IOException {
List<ArchiveSummary> summaries = new LinkedList<ArchiveSummary>();
Iterable<Row<String, String>> rows;
try {
rows = getRows((EnumSet<?>)EnumSet.of(Columns.module_id, Columns.last_update, Columns.module_spec));
} catch (Exception e) {
throw new IOException(e);
}
for (Row<String, String> row : rows) {
String moduleId = row.getKey();
ColumnList<String> columns = row.getColumns();
Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
long updateTime = lastUpdateColumn != null ? lastUpdateColumn.getLongValue() : 0;
ScriptModuleSpec moduleSpec = getModuleSpec(columns);
ArchiveSummary summary = new ArchiveSummary(ModuleId.fromString(moduleId), moduleSpec, updateTime, null);
summaries.add(summary);
}
return summaries;
}
Example 4: AstyanaxResultSet
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
public AstyanaxResultSet(OperationResult<Rows<K, String>> result) {
this.result = result;
rows = result.getResult();
this.rowIterator = rows.iterator();
if (rowIterator.hasNext()) {
Row<K, String> row = rowIterator.next();
this.columns = row.getColumns();
this.key = row.getKey();
} else {
this.columns = new EmptyColumnList<String>();
}
}
Example 5: findOne
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
/**
* {@inheritDoc}
*/
@Override
public synchronized T findOne(ID id) {
try {
String cql = cqlGen.buildFindOneStatement();
PreparedCqlQuery<String, String> preparedStatement = doPreparedCqlRead(cql);
Map<String, ByteBuffer> serializedKeyValues = spec.getSerializedKeyValues(id);
for (String column : spec.getKeyColumns()) {
preparedStatement = preparedStatement.withValue(serializedKeyValues.get(column));
}
OperationResult<CqlResult<String, String>> opResult = preparedStatement.execute();
LOGGER.debug("attempts: {}, latency: {}ms", opResult.getAttemptsCount(),
opResult.getLatency(TimeUnit.MILLISECONDS));
CqlResult<String, String> resultSet = opResult.getResult();
Rows<String, String> resultSetRows = resultSet.getRows();
if (resultSetRows.isEmpty()) {
return null;
} else if (resultSetRows.size() > 1) {
throw new DataRetrievalFailureException("Got several rows for single key");
} else {
Row<String, String> row = resultSetRows.getRowByIndex(0);
ColumnList<String> columns = row.getColumns();
return spec.map(columns);
}
} catch (ConnectionException e) {
throw new DataRetrievalFailureException("Error while executing CQL3 query", e);
}
}
Example 6: runQuery
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
public Map<String, JsonObject> runQuery(String key, String col) {
OperationResult<CqlStatementResult> rs;
Map<String, JsonObject> resultMap = new HashMap<String, JsonObject>();
try {
String queryStr = "";
if (col != null && !col.equals("*")) {
queryStr = "select column1, value from "+MetaConstants.META_KEY_SPACE + "." + MetaConstants.META_COLUMN_FAMILY +" where key='"
+ key + "' and column1='" + col + "';";
} else {
queryStr = "select column1, value from "+MetaConstants.META_KEY_SPACE + "." + MetaConstants.META_COLUMN_FAMILY +" where key='"
+ key + "';";
}
rs = keyspace.prepareCqlStatement().withCql(queryStr).execute();
for (Row<String, String> row : rs.getResult().getRows(METACF)) {
ColumnList<String> columns = row.getColumns();
String key1 = columns.getStringValue("column1", null);
String val1 = columns.getStringValue("value", null);
resultMap.put(key1, new JsonObject(val1));
}
} catch (ConnectionException e) {
e.printStackTrace();
throw new RuntimeException(e.getMessage());
}
return resultMap;
}
Example 7: listRows
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
@Override
public QueryResult listRows(String cursor, Integer rowLimit, Integer columnLimit) throws PaasException {
try {
invariant();
// Execute the query
Partitioner partitioner = keyspace.getPartitioner();
Rows<ByteBuffer, ByteBuffer> result = keyspace
.prepareQuery(columnFamily)
.getKeyRange(null, null, cursor != null ? cursor : partitioner.getMinToken(), partitioner.getMaxToken(), rowLimit)
.execute()
.getResult();
// Convert raw data into a simple sparse tree
SchemalessRows.Builder builder = SchemalessRows.builder();
for (Row<ByteBuffer, ByteBuffer> row : result) {
Map<String, String> columns = Maps.newHashMap();
for (Column<ByteBuffer> column : row.getColumns()) {
columns.put(serializers.columnAsString(column.getRawName()), serializers.valueAsString(column.getRawName(), column.getByteBufferValue()));
}
builder.addRow(serializers.keyAsString(row.getKey()), columns);
}
QueryResult dr = new QueryResult();
dr.setSrows(builder.build());
if (!result.isEmpty()) {
dr.setCursor(partitioner.getTokenForKey(Iterables.getLast(result).getKey()));
}
return dr;
} catch (ConnectionException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return null;
}
Example 8: deserializePastBlocks
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
private Iterable<ItemAndBroadcast> deserializePastBlocks(Row<String, String> row) {
if (row == null) {
return ImmutableList.of();
}
ColumnList<String> columns = row.getColumns();
Column<String> idColumn = columns.getColumnByName(IDS_COL);
Set<String> ids;
if (idColumn == null) {
ids = ImmutableSet.of();
} else {
ids = ImmutableSet.copyOf(Splitter.on(',')
.omitEmptyStrings()
.split(idColumn.getStringValue()));
}
ArrayList<ItemAndBroadcast> iabs = Lists.newArrayListWithCapacity(columns.size());
for (Column<String> column : columns) {
if (IDS_COL.equals(column.getName())
|| UPDATED_COL.equals(column.getName())
|| ids.contains(column.getName())) {
continue;
}
iabs.add(serializer.deserialize(column.getByteArrayValue()));
}
return iabs;
}
Example 9: onFoundRow
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
public void onFoundRow(Row<String, String> row) throws Exception {
AttributesImpl atts = new AttributesImpl();
atts.addAttribute("", "", "name", "CDATA", row.getKey());
content.startElement("", "", "key", atts);
columnlist = row.getColumns();
String columnvalue = null;
for (int poscol = 0; poscol < columnlist.size(); poscol++) {
column = columnlist.getColumnByIndex(poscol);
atts.clear();
atts.addAttribute("", "", "name", "CDATA", column.getName());
atts.addAttribute("", "", "at", "CDATA", String.valueOf(column.getTimestamp() / 1000));
atts.addAttribute("", "", "ttl", "CDATA", String.valueOf(column.getTtl()));
columnvalue = new String(quotedprintablecodec.encode(column.getByteArrayValue()));
if (BackupDb.mode_debug) {
atts.addAttribute("", "", "at_date", "CDATA", (new Date(column.getTimestamp() / 1000)).toString());
if (column.getStringValue().equals(columnvalue) == false) {
atts.addAttribute("", "", "hex_value", "CDATA", MyDMAM.byteToString(column.getByteArrayValue()));
}
}
content.startElement("", "", "col", atts);
content.characters(columnvalue.toCharArray(), 0, columnvalue.length());
content.endElement("", "", column.getName());
}
content.endElement("", "", "key");
count++;
}
Example 10: copyRange
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
private <C> void copyRange(DeltaPlacement sourcePlacement, ColumnFamily<ByteBuffer, C> sourceCf,
AstyanaxStorage dest, DeltaPlacement destPlacement, ColumnFamily<ByteBuffer, C> destCf, ColumnInc<C> columnInc,
ByteBufferRange keyRange, Runnable progress) {
ConsistencyLevel writeConsistency = SorConsistencies.toAstyanax(WriteConsistency.STRONG);
Iterator<List<Row<ByteBuffer, C>>> rowsIter = Iterators.partition(
rowScan(sourcePlacement, sourceCf, keyRange, _maxColumnsRange, LimitCounter.max(), ReadConsistency.STRONG),
MAX_SCAN_ROWS_BATCH);
int largeRowThreshold = _maxColumnsRange.getLimit();
while (rowsIter.hasNext()) {
List<Row<ByteBuffer, C>> rows = rowsIter.next();
MutationBatch mutation = destPlacement.getKeyspace().prepareMutationBatch(writeConsistency);
for (Row<ByteBuffer, C> row : rows) {
ColumnList<C> columns = row.getColumns();
// Map the source row key to the destination row key. Its table uuid and shard key will be different.
ByteBuffer newRowKey = dest.getRowKey(AstyanaxStorage.getContentKey(row.getRawKey()));
// Copy the first N columns to the multi-row mutation.
putAll(mutation.withRow(destCf, newRowKey), columns);
// If this is a wide row, copy the remaining columns w/separate mutation objects.
// This is especially common with the audit column family.
if (columns.size() >= largeRowThreshold) {
C lastColumn = columns.getColumnByIndex(columns.size() - 1).getName();
Iterator<List<Column<C>>> columnsIter = Iterators.partition(
columnScan(row.getRawKey(), sourcePlacement, sourceCf, lastColumn, null,
false, columnInc, Long.MAX_VALUE, 1, ReadConsistency.STRONG),
MAX_COLUMN_SCAN_BATCH);
while (columnsIter.hasNext()) {
List<Column<C>> moreColumns = columnsIter.next();
MutationBatch wideRowMutation = destPlacement.getKeyspace().prepareMutationBatch(writeConsistency);
putAll(wideRowMutation.withRow(destCf, newRowKey), moreColumns);
progress.run();
execute(wideRowMutation,
"copy key range %s to %s from placement %s, column family %s to placement %s, column family %s",
keyRange.getStart(), keyRange.getEnd(), sourcePlacement.getName(), sourceCf.getName(),
destPlacement.getName(), destCf.getName());
}
}
}
progress.run();
execute(mutation,
"copy key range %s to %s from placement %s, column family %s to placement %s, column family %s",
keyRange.getStart(), keyRange.getEnd(), sourcePlacement.getName(), sourceCf.getName(),
destPlacement.getName(), destCf.getName());
_copyMeter.mark(rows.size());
}
}
Example 11: copyRange
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
private void copyRange(DeltaPlacement sourcePlacement, ColumnFamily<ByteBuffer, UUID> sourceCf,
AstyanaxStorage dest, DeltaPlacement destPlacement, ColumnFamily<ByteBuffer, UUID> destCf,
ByteBufferRange keyRange, Runnable progress) {
ConsistencyLevel writeConsistency = SorConsistencies.toAstyanax(WriteConsistency.STRONG);
Iterator<List<Row<ByteBuffer, UUID>>> rowsIter = Iterators.partition(
rowScan(sourcePlacement, sourceCf, keyRange, _maxColumnsRange, LimitCounter.max(), ReadConsistency.STRONG),
MAX_SCAN_ROWS_BATCH);
int largeRowThreshold = _maxColumnsRange.getLimit();
while (rowsIter.hasNext()) {
List<Row<ByteBuffer, UUID>> rows = rowsIter.next();
MutationBatch mutation = destPlacement.getKeyspace().prepareMutationBatch(writeConsistency);
for (Row<ByteBuffer, UUID> row : rows) {
ColumnList<UUID> columns = row.getColumns();
// Map the source row key to the destination row key. Its table uuid and shard key will be different.
ByteBuffer newRowKey = dest.getRowKey(AstyanaxStorage.getContentKey(row.getRawKey()));
// Copy the first N columns to the multi-row mutation.
putAll(mutation.withRow(destCf, newRowKey), columns);
// If this is a wide row, copy the remaining columns w/separate mutation objects.
// This is especially common with the audit column family.
if (columns.size() >= largeRowThreshold) {
UUID lastColumn = columns.getColumnByIndex(columns.size() - 1).getName();
Iterator<List<Column<UUID>>> columnsIter = Iterators.partition(
columnScan(row.getRawKey(), sourcePlacement, sourceCf, lastColumn, null,
false, Long.MAX_VALUE, 1, ReadConsistency.STRONG),
MAX_COLUMN_SCAN_BATCH);
while (columnsIter.hasNext()) {
List<Column<UUID>> moreColumns = columnsIter.next();
MutationBatch wideRowMutation = destPlacement.getKeyspace().prepareMutationBatch(writeConsistency);
putAll(wideRowMutation.withRow(destCf, newRowKey), moreColumns);
progress.run();
execute(wideRowMutation,
"copy key range %s to %s from placement %s, column family %s to placement %s, column family %s",
keyRange.getStart(), keyRange.getEnd(), sourcePlacement.getName(), sourceCf.getName(),
destPlacement.getName(), destCf.getName());
}
}
}
progress.run();
execute(mutation,
"copy key range %s to %s from placement %s, column family %s to placement %s, column family %s",
keyRange.getStart(), keyRange.getEnd(), sourcePlacement.getName(), sourceCf.getName(),
destPlacement.getName(), destCf.getName());
_copyMeter.mark(rows.size());
}
}
Example 12: run
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
@Override
public void run() {
if (families == null || families.length < 1) {
deferred.callback(new UnsupportedOperationException(
"Can't scan cassandra without a column family: " + this));
return;
}
if (iterator == null) {
try {
final OperationResult<Rows<byte[], byte[]>> results =
keyspace.prepareQuery(client.getColumnFamilySchemas().get(families[0]))
.withCaching(populate_blockcache)
.getRowRange(start_key, stop_key, null, null, Integer.MAX_VALUE).execute();
iterator = results.getResult().iterator();
} catch (ConnectionException e) {
deferred.callback(e);
return;
}
}
if (!iterator.hasNext()) {
deferred.callback(null);
//return Deferred.fromResult(null);
return;
}
// dunno how to size this since we don't have the low level deets
final ArrayList<ArrayList<KeyValue>> rows =
new ArrayList<ArrayList<KeyValue>>();
int kv_count = 0;
while (rows.size() < max_num_rows && iterator.hasNext()) {
final Row<byte[], byte[]> result = iterator.next();
if (filter != null) {
// TODO - post filtering SUCKS!!!!!
final KeyRegexpFilter regex = (KeyRegexpFilter)filter;
if (!regex.matches(result.getKey())) {
continue;
}
}
final ArrayList<KeyValue> row = new ArrayList<KeyValue>(result.getColumns().size());
// TODO - iterator on the columns too so we can satisfy max kvs
for (final Column<byte[]> column : result.getColumns()) {
final KeyValue kv = new KeyValue(result.getKey(), families[0],
column.getName(), column.getTimestamp() / 1000, // micro to ms
column.getByteArrayValue());
row.add(kv);
}
rows.add(row);
kv_count += row.size();
}
deferred.callback(rows);
}
Example 13: nextResult
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
@Override
public void nextResult() {
Row<K, String> row = rowIterator.next();
this.columns = row.getColumns();
this.key = row.getKey();
}
Example 14: getScriptArchives
import com.netflix.astyanax.model.Row; // import the package/class this method depends on
/**
* Get all of the {@link ScriptArchive}s for the given set of moduleIds. Will perform the operation in batches
* as specified by {@link CassandraArchiveRepositoryConfig#getArchiveFetchBatchSize()} and outputs the jar files in
* the path specified by {@link CassandraArchiveRepositoryConfig#getArchiveOutputDirectory()}.
*
* @param moduleIds keys to search for
* @return set of ScriptArchives retrieved from the database
*/
@Override
public Set<ScriptArchive> getScriptArchives(Set<ModuleId> moduleIds) throws IOException {
Set<ScriptArchive> archives = new LinkedHashSet<ScriptArchive>(moduleIds.size()*2);
Path archiveOuputDir = getConfig().getArchiveOutputDirectory();
List<ModuleId> moduleIdList = new LinkedList<ModuleId>(moduleIds);
int batchSize = getConfig().getArchiveFetchBatchSize();
int start = 0;
try {
while (start < moduleIdList.size()) {
int end = Math.min(moduleIdList.size(), start + batchSize);
List<ModuleId> batchModuleIds = moduleIdList.subList(start, end);
List<String> rowKeys = new ArrayList<String>(batchModuleIds.size());
for (ModuleId batchModuleId:batchModuleIds) {
rowKeys.add(batchModuleId.toString());
}
Rows<String, String> rows = cassandra.getRows(rowKeys.toArray(new String[0]));
for (Row<String, String> row : rows) {
String moduleId = row.getKey();
ColumnList<String> columns = row.getColumns();
Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
Column<String> hashColumn = columns.getColumnByName(Columns.archive_content_hash.name());
Column<String> contentColumn = columns.getColumnByName(Columns.archive_content.name());
if (lastUpdateColumn == null || hashColumn == null || contentColumn == null) {
continue;
}
ScriptModuleSpec moduleSpec = getModuleSpec(columns);
long lastUpdateTime = lastUpdateColumn.getLongValue();
byte[] hash = hashColumn.getByteArrayValue();
byte[] content = contentColumn.getByteArrayValue();
// verify the hash
if (hash != null && hash.length > 0 && !verifyHash(hash, content)) {
logger.warn("Content hash validation failed for moduleId {}. size: {}", moduleId, content.length);
continue;
}
String fileName = new StringBuilder().append(moduleId).append("-").append(lastUpdateTime).append(".jar").toString();
Path jarFile = archiveOuputDir.resolve(fileName);
Files.write(jarFile, content);
JarScriptArchive scriptArchive = new JarScriptArchive.Builder(jarFile)
.setModuleSpec(moduleSpec)
.setCreateTime(lastUpdateTime)
.build();
archives.add(scriptArchive);
}
start = end;
}
} catch (Exception e) {
throw new IOException(e);
}
return archives;
}