This article collects typical usage examples of the org.apache.cassandra.service.StorageProxy class in Java. If you are wondering what the StorageProxy class is for, or how to use it in practice, the curated code examples below may help.
The StorageProxy class belongs to the org.apache.cassandra.service package. A total of 15 code examples of the StorageProxy class are shown below, sorted by popularity by default.
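Before diving into the examples, here is a minimal sketch, not taken from any of them, of the basic write path that several of the snippets below wrap: build a list of mutations, pick a consistency level, and hand them to StorageProxy. It assumes Cassandra 2.1-era internal APIs; the StorageProxyWriteSketch class and demoWrite helper are illustrative names only, and populating the Mutation (which is version-specific) is left out.

import java.util.Arrays;

import org.apache.cassandra.db.ConsistencyLevel;
import org.apache.cassandra.db.Mutation;
import org.apache.cassandra.exceptions.RequestExecutionException;
import org.apache.cassandra.service.StorageProxy;

public class StorageProxyWriteSketch
{
    // Hypothetical helper: applies an already-populated Mutation at QUORUM,
    // once through the plain write path and once through the batchlog.
    public static void demoWrite(Mutation mutation) throws RequestExecutionException
    {
        // Non-atomic write: each mutation in the list is applied independently.
        StorageProxy.mutate(Arrays.asList(mutation), ConsistencyLevel.QUORUM);

        // Atomic variant (what Example 1 and Example 14 use when atomicBatch is true):
        // the batch is persisted to the batchlog first, so it applies as a unit.
        StorageProxy.mutateAtomically(Arrays.asList(mutation), ConsistencyLevel.QUORUM);
    }
}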
Example 1: mutate
import org.apache.cassandra.service.StorageProxy; // import the required package/class
private void mutate(List<org.apache.cassandra.db.Mutation> cmds, org.apache.cassandra.db.ConsistencyLevel clvl) throws BackendException {
    try {
        schedule(DatabaseDescriptor.getRpcTimeout());
        try {
            if (atomicBatch) {
                // Atomic batches go through the batchlog, so all mutations apply together or not at all.
                StorageProxy.mutateAtomically(cmds, clvl);
            } else {
                StorageProxy.mutate(cmds, clvl);
            }
        } catch (RequestExecutionException e) {
            throw new TemporaryBackendException(e);
        } finally {
            release();
        }
    } catch (TimeoutException ex) {
        log.debug("Cassandra TimeoutException", ex);
        throw new TemporaryBackendException(ex);
    }
}
Example 2: executeWithCondition
import org.apache.cassandra.service.StorageProxy; // import the required package/class
public ResultMessage executeWithCondition(QueryState queryState, QueryOptions options)
throws RequestExecutionException, RequestValidationException
{
    List<ByteBuffer> keys = buildPartitionKeyNames(options);
    // We don't support IN for CAS operation so far
    if (keys.size() > 1)
        throw new InvalidRequestException("IN on the partition key is not supported with conditional updates");
    ByteBuffer key = keys.get(0);
    long now = options.getTimestamp(queryState);
    Composite prefix = createClusteringPrefix(options);
    CQL3CasRequest request = new CQL3CasRequest(cfm, key, false);
    addConditions(prefix, request, options);
    request.addRowUpdate(prefix, this, options, now);
    ColumnFamily result = StorageProxy.cas(keyspace(),
                                           columnFamily(),
                                           key,
                                           request,
                                           options.getSerialConsistency(),
                                           options.getConsistency(),
                                           queryState.getClientState());
    return new ResultMessage.Rows(buildCasResultSet(key, result, options));
}
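A note on Example 2: StorageProxy.cas is the lightweight-transaction entry point and takes two consistency levels. options.getSerialConsistency() (SERIAL or LOCAL_SERIAL) governs the Paxos round that evaluates the conditions, while options.getConsistency() governs how the accepted update is committed to the replicas.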
Example 3: execute
import org.apache.cassandra.service.StorageProxy; // import the required package/class
private ResultMessage.Rows execute(Pageable command, QueryOptions options, int limit, long now, QueryState state) throws RequestValidationException, RequestExecutionException
{
    List<Row> rows;
    if (command == null)
    {
        rows = Collections.<Row>emptyList();
    }
    else
    {
        rows = command instanceof Pageable.ReadCommands
             ? StorageProxy.read(((Pageable.ReadCommands)command).commands, options.getConsistency(), state.getClientState())
             : StorageProxy.getRangeSlice((RangeSliceCommand)command, options.getConsistency());
    }
    return processResults(rows, options, limit, now);
}
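A note on Example 3: the read path branches on the command type. Lists of single-partition reads (Pageable.ReadCommands) go through StorageProxy.read, while partition-range scans (RangeSliceCommand) go through StorageProxy.getRangeSlice. Example 8 below appears to be the same method from an older Cassandra version, where StorageProxy.read did not yet take the client state.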
Example 4: queryNextPage
import org.apache.cassandra.service.StorageProxy; // import the required package/class
protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)
throws RequestExecutionException
{
    SliceQueryFilter sf = (SliceQueryFilter)columnFilter;
    AbstractBounds<RowPosition> keyRange = lastReturnedKey == null ? command.keyRange : makeIncludingKeyBounds(lastReturnedKey);
    Composite start = lastReturnedName == null ? sf.start() : lastReturnedName;
    PagedRangeCommand pageCmd = new PagedRangeCommand(command.keyspace,
                                                      command.columnFamily,
                                                      command.timestamp,
                                                      keyRange,
                                                      sf,
                                                      start,
                                                      sf.finish(),
                                                      command.rowFilter,
                                                      pageSize,
                                                      command.countCQL3Rows);
    return localQuery
         ? pageCmd.executeLocally()
         : StorageProxy.getRangeSlice(pageCmd, consistencyLevel);
}
Example 5: queryNextPage
import org.apache.cassandra.service.StorageProxy; // import the required package/class
protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)
throws RequestValidationException, RequestExecutionException
{
    // For some queries, such as a DISTINCT query on static columns, the limit for slice queries will be lower
    // than the page size (in the static example, it will be 1). We use the min here to ensure we don't fetch
    // more rows than we're supposed to. See CASSANDRA-8108 for more details.
    SliceQueryFilter filter = command.filter.withUpdatedCount(Math.min(command.filter.count, pageSize));
    if (lastReturned != null)
        filter = filter.withUpdatedStart(lastReturned, cfm.comparator);
    logger.debug("Querying next page of slice query; new filter: {}", filter);
    ReadCommand pageCmd = command.withUpdatedFilter(filter);
    return localQuery
         ? Collections.singletonList(pageCmd.getRow(Keyspace.open(command.ksName)))
         : StorageProxy.read(Collections.singletonList(pageCmd), consistencyLevel, cstate);
}
Example 6: writeHintsForUndeliveredEndpoints
import org.apache.cassandra.service.StorageProxy; // import the required package/class
private void writeHintsForUndeliveredEndpoints(int startFrom)
{
    try
    {
        // Here we deserialize mutations 2nd time from byte buffer.
        // but this is ok, because timeout on batch direct delivery is rare
        // (it can happen only several seconds until node is marked dead)
        // so trading some cpu to keep less objects
        List<Mutation> replayingMutations = replayingMutations();
        for (int i = startFrom; i < replayHandlers.size(); i++)
        {
            Mutation undeliveredMutation = replayingMutations.get(i);
            int ttl = calculateHintTTL(replayingMutations);
            ReplayWriteResponseHandler handler = replayHandlers.get(i);
            if (ttl > 0 && handler != null)
                for (InetAddress endpoint : handler.undelivered)
                    StorageProxy.writeHintForMutation(undeliveredMutation, writtenAt, ttl, endpoint);
        }
    }
    catch (IOException e)
    {
        logger.error("Cannot schedule hints for undelivered batch", e);
    }
}
Example 7: doInsert
import org.apache.cassandra.service.StorageProxy; // import the required package/class
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations, boolean mutateAtomically)
throws UnavailableException, TimedOutException, org.apache.cassandra.exceptions.InvalidRequestException
{
    org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
    consistencyLevel.validateForWrite(state().getKeyspace());
    if (mutations.isEmpty())
        return;
    long timeout = Long.MAX_VALUE;
    for (IMutation m : mutations)
        timeout = Longs.min(timeout, m.getTimeout());
    schedule(timeout);
    try
    {
        // Runs any configured triggers, then applies the mutations
        // (through the batchlog when mutateAtomically is true).
        StorageProxy.mutateWithTriggers(mutations, consistencyLevel, mutateAtomically);
    }
    catch (RequestExecutionException e)
    {
        ThriftConversion.rethrow(e);
    }
    finally
    {
        release();
    }
}
Example 8: execute
import org.apache.cassandra.service.StorageProxy; // import the required package/class
private ResultMessage.Rows execute(Pageable command, ConsistencyLevel cl, List<ByteBuffer> variables, int limit, long now) throws RequestValidationException, RequestExecutionException
{
    List<Row> rows;
    if (command == null)
    {
        rows = Collections.<Row>emptyList();
    }
    else
    {
        rows = command instanceof Pageable.ReadCommands
             ? StorageProxy.read(((Pageable.ReadCommands)command).commands, cl)
             : StorageProxy.getRangeSlice((RangeSliceCommand)command, cl);
    }
    return processResults(rows, variables, limit, now);
}
Example 9: queryNextPage
import org.apache.cassandra.service.StorageProxy; // import the required package/class
protected List<Row> queryNextPage(int pageSize, ConsistencyLevel consistencyLevel, boolean localQuery)
throws RequestExecutionException
{
    SliceQueryFilter sf = (SliceQueryFilter)columnFilter;
    AbstractBounds<RowPosition> keyRange = lastReturnedKey == null ? command.keyRange : makeIncludingKeyBounds(lastReturnedKey);
    ByteBuffer start = lastReturnedName == null ? sf.start() : lastReturnedName;
    PagedRangeCommand pageCmd = new PagedRangeCommand(command.keyspace,
                                                      command.columnFamily,
                                                      command.timestamp,
                                                      keyRange,
                                                      sf,
                                                      start,
                                                      sf.finish(),
                                                      command.rowFilter,
                                                      pageSize);
    return localQuery
         ? pageCmd.executeLocally()
         : StorageProxy.getRangeSlice(pageCmd, consistencyLevel);
}
Example 10: replaySerializedMutation
import org.apache.cassandra.service.StorageProxy; // import the required package/class
private void replaySerializedMutation(RowMutation mutation, long writtenAt)
{
    int ttl = calculateHintTTL(mutation, writtenAt);
    if (ttl <= 0)
        return; // the mutation isn't safe to replay.
    Set<InetAddress> liveEndpoints = new HashSet<InetAddress>();
    String ks = mutation.getKeyspaceName();
    Token<?> tk = StorageService.getPartitioner().getToken(mutation.key());
    for (InetAddress endpoint : Iterables.concat(StorageService.instance.getNaturalEndpoints(ks, tk),
                                                 StorageService.instance.getTokenMetadata().pendingEndpointsFor(tk, ks)))
    {
        if (endpoint.equals(FBUtilities.getBroadcastAddress()))
            mutation.apply();
        else if (FailureDetector.instance.isAlive(endpoint))
            liveEndpoints.add(endpoint); // will try delivering directly instead of writing a hint.
        else
            StorageProxy.writeHintForMutation(mutation, ttl, endpoint);
    }
    if (!liveEndpoints.isEmpty())
        attemptDirectDelivery(mutation, writtenAt, liveEndpoints);
}
Example 11: doInsert
import org.apache.cassandra.service.StorageProxy; // import the required package/class
private void doInsert(ConsistencyLevel consistency_level, List<? extends IMutation> mutations, boolean mutateAtomically)
throws UnavailableException, TimedOutException, org.apache.cassandra.exceptions.InvalidRequestException
{
    org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
    consistencyLevel.validateForWrite(state().getKeyspace());
    if (mutations.isEmpty())
        return;
    schedule(DatabaseDescriptor.getWriteRpcTimeout());
    try
    {
        StorageProxy.mutateWithTriggers(mutations, consistencyLevel, mutateAtomically);
    }
    catch (RequestExecutionException e)
    {
        ThriftConversion.rethrow(e);
    }
    finally
    {
        release();
    }
}
Example 12: trace
import org.apache.cassandra.service.StorageProxy; // import the required package/class
public static void trace(final ByteBuffer sessionIdBytes, final String message, final int elapsed)
{
    final ByteBuffer eventId = ByteBuffer.wrap(UUIDGen.getTimeUUIDBytes());
    final String threadName = Thread.currentThread().getName();
    StageManager.getStage(Stage.TRACING).execute(new WrappedRunnable()
    {
        public void runMayThrow() throws Exception
        {
            CFMetaData cfMeta = CFMetaData.TraceEventsCf;
            ColumnFamily cf = ArrayBackedSortedColumns.factory.create(cfMeta);
            Tracing.addColumn(cf, Tracing.buildName(cfMeta, eventId, ByteBufferUtil.bytes("activity")), message);
            Tracing.addColumn(cf, Tracing.buildName(cfMeta, eventId, ByteBufferUtil.bytes("source")), FBUtilities.getBroadcastAddress());
            if (elapsed >= 0)
                Tracing.addColumn(cf, Tracing.buildName(cfMeta, eventId, ByteBufferUtil.bytes("source_elapsed")), elapsed);
            Tracing.addColumn(cf, Tracing.buildName(cfMeta, eventId, ByteBufferUtil.bytes("thread")), threadName);
            RowMutation mutation = new RowMutation(Tracing.TRACE_KS, sessionIdBytes, cf);
            // ConsistencyLevel.ANY: the trace write succeeds as long as any node (or even just a hint) accepts it.
            StorageProxy.mutate(Arrays.asList(mutation), ConsistencyLevel.ANY);
        }
    });
}
Example 13: execute
import org.apache.cassandra.service.StorageProxy; // import the required package/class
@Override
public void execute() throws MojoExecutionException, MojoFailureException
{
    try
    {
        createCassandraHome();
        getLog().debug("Truncating Column Family \"" + columnFamily + "\" in Keyspace \"" + keyspace + "\"...");
        StorageProxy.truncateBlocking(keyspace, columnFamily);
        getLog().info("Truncated Column Family \"" + columnFamily + "\" in Keyspace \"" + keyspace + "\"...");
    } catch (UnavailableException ue)
    {
        throw new MojoExecutionException("Host(s) must be up in order for a truncate operation to be successful.", ue);
    } catch (TimeoutException te)
    {
        throw new MojoExecutionException("Host did not reply for truncate operation.", te);
    } catch (IOException ioe)
    {
        // unlikely in our case
        throw new MojoExecutionException("Could not construct truncate message", ioe);
    }
}
Example 14: mutate
import org.apache.cassandra.service.StorageProxy; // import the required package/class
private void mutate(List<RowMutation> cmds, org.apache.cassandra.db.ConsistencyLevel clvl) throws BackendException {
    try {
        schedule(DatabaseDescriptor.getRpcTimeout());
        try {
            if (atomicBatch) {
                StorageProxy.mutateAtomically(cmds, clvl);
            } else {
                StorageProxy.mutate(cmds, clvl);
            }
        } catch (RequestExecutionException e) {
            throw new TemporaryBackendException(e);
        } finally {
            release();
        }
    } catch (TimeoutException ex) {
        log.debug("Cassandra TimeoutException", ex);
        throw new TemporaryBackendException(ex);
    }
}
Example 15: retryDummyRead
import org.apache.cassandra.service.StorageProxy; // import the required package/class
private void retryDummyRead(String ks, String cf) throws PermanentBackendException {
    final long limit = System.currentTimeMillis() + (60L * 1000L);
    while (System.currentTimeMillis() < limit) {
        try {
            SortedSet<ByteBuffer> ss = new TreeSet<ByteBuffer>();
            ss.add(ByteBufferUtil.zeroByteBuffer(1));
            NamesQueryFilter nqf = new NamesQueryFilter(ss);
            SliceByNamesReadCommand cmd = new SliceByNamesReadCommand(ks, ByteBufferUtil.zeroByteBuffer(1), cf, 1L, nqf);
            StorageProxy.read(ImmutableList.<ReadCommand>of(cmd), ConsistencyLevel.QUORUM);
            log.info("Read on CF {} in KS {} succeeded", cf, ks);
            return;
        } catch (Throwable t) {
            log.warn("Failed to read CF {} in KS {} following creation", cf, ks, t);
        }
        try {
            Thread.sleep(1000L);
        } catch (InterruptedException e) {
            throw new PermanentBackendException(e);
        }
    }
    throw new PermanentBackendException("Timed out while attempting to read CF " + cf + " in KS " + ks + " following creation");
}