This article collects typical usage examples of the Java method org.apache.cassandra.db.ConsistencyLevel.blockFor. If you are wondering what ConsistencyLevel.blockFor does, how to call it, or what real uses look like, the curated method examples below should help. You can also explore further usage examples of the enclosing class org.apache.cassandra.db.ConsistencyLevel.
Four code examples of the ConsistencyLevel.blockFor method are shown below, sorted by popularity by default.
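Before the full examples, here is a minimal sketch of the basic call pattern: blockFor(Keyspace) returns the number of replica responses the given consistency level must wait for in that keyspace. The keyspace name "demo_ks" and the choice of QUORUM are illustrative assumptions, not taken from the examples below.

import org.apache.cassandra.db.ConsistencyLevel;
import org.apache.cassandra.db.Keyspace;

// Minimal sketch (assumes an initialized Cassandra node with a keyspace named "demo_ks").
Keyspace keyspace = Keyspace.open("demo_ks");

// For QUORUM with replication factor 3 this returns 2 (RF/2 + 1).
int blockFor = ConsistencyLevel.QUORUM.blockFor(keyspace);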
Example 1: ReadCallback
import org.apache.cassandra.db.ConsistencyLevel; // import the package/class the method depends on
/**
 * Constructor when response count has to be calculated and blocked for.
 */
public ReadCallback(IResponseResolver<TMessage, TResolved> resolver, ConsistencyLevel consistencyLevel, IReadCommand command, List<InetAddress> filteredEndpoints)
{
    this(resolver, consistencyLevel, consistencyLevel.blockFor(Keyspace.open(command.getKeyspace())), command, Keyspace.open(command.getKeyspace()), filteredEndpoints);
    if (logger.isTraceEnabled())
        logger.trace(String.format("Blockfor is %s; setting up requests to %s", blockfor, StringUtils.join(this.endpoints, ",")));
}
Example 2: ReadCallback
import org.apache.cassandra.db.ConsistencyLevel; // import the package/class the method depends on
/**
 * Constructor when response count has to be calculated and blocked for.
 */
public ReadCallback(ResponseResolver resolver, ConsistencyLevel consistencyLevel, ReadCommand command, List<InetAddress> filteredEndpoints)
{
    this(resolver,
         consistencyLevel,
         consistencyLevel.blockFor(Keyspace.open(command.metadata().ksName)),
         command,
         Keyspace.open(command.metadata().ksName),
         filteredEndpoints);
}
Example 3: getReadExecutor
import org.apache.cassandra.db.ConsistencyLevel; // import the package/class the method depends on
/**
 * @return an executor appropriate for the configured speculative read policy
 */
public static AbstractReadExecutor getReadExecutor(ReadCommand command, ConsistencyLevel consistencyLevel) throws UnavailableException
{
    Keyspace keyspace = Keyspace.open(command.ksName);
    List<InetAddress> allReplicas = StorageProxy.getLiveSortedEndpoints(keyspace, command.key);
    ReadRepairDecision repairDecision = Schema.instance.getCFMetaData(command.ksName, command.cfName).newReadRepairDecision();
    List<InetAddress> targetReplicas = consistencyLevel.filterForQuery(keyspace, allReplicas, repairDecision);

    // Throw UAE early if we don't have enough replicas.
    consistencyLevel.assureSufficientLiveNodes(keyspace, targetReplicas);

    // Fat client. Speculating read executors need access to cfs metrics and sampled latency, and fat clients
    // can't provide that. So, for now, fat clients will always use NeverSpeculatingReadExecutor.
    if (StorageService.instance.isClientMode())
        return new NeverSpeculatingReadExecutor(command, consistencyLevel, targetReplicas);

    if (repairDecision != ReadRepairDecision.NONE)
        ReadRepairMetrics.attempted.mark();

    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(command.cfName);
    RetryType retryType = cfs.metadata.getSpeculativeRetry().type;

    // Speculative retry is disabled *OR* there are simply no extra replicas to speculate.
    if (retryType == RetryType.NONE || consistencyLevel.blockFor(keyspace) == allReplicas.size())
        return new NeverSpeculatingReadExecutor(command, consistencyLevel, targetReplicas);

    if (targetReplicas.size() == allReplicas.size())
    {
        // CL.ALL, RRD.GLOBAL or RRD.DC_LOCAL and a single-DC.
        // We are going to contact every node anyway, so ask for 2 full data requests instead of 1, for redundancy
        // (same amount of requests in total, but we turn 1 digest request into a full blown data request).
        return new AlwaysSpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
    }

    // RRD.NONE or RRD.DC_LOCAL w/ multiple DCs.
    InetAddress extraReplica = allReplicas.get(targetReplicas.size());

    // With repair decision DC_LOCAL all replicas/target replicas may be in different order, so
    // we might have to find a replacement that's not already in targetReplicas.
    if (repairDecision == ReadRepairDecision.DC_LOCAL && targetReplicas.contains(extraReplica))
    {
        for (InetAddress address : allReplicas)
        {
            if (!targetReplicas.contains(address))
            {
                extraReplica = address;
                break;
            }
        }
    }
    targetReplicas.add(extraReplica);

    if (retryType == RetryType.ALWAYS)
        return new AlwaysSpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
    else // PERCENTILE or CUSTOM.
        return new SpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
}
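In Example 3, the test consistencyLevel.blockFor(keyspace) == allReplicas.size() is what decides that there is nothing to speculate on: when the consistency level already blocks for as many responses as there are live replicas, no spare replica is left for an extra request. The self-contained sketch below illustrates that decision with hypothetical numbers; the constants and the quorum arithmetic are assumptions for illustration, not calls into Cassandra.

// Hypothetical illustration of the "no extra replicas to speculate" check from Example 3.
public class SpeculationCheckSketch
{
    public static void main(String[] args)
    {
        int replicationFactor = 3;
        int quorumBlockFor = replicationFactor / 2 + 1; // what blockFor(keyspace) returns for QUORUM: 2

        int liveReplicas = 2;                           // assume one replica is currently down

        // Mirrors: consistencyLevel.blockFor(keyspace) == allReplicas.size()
        boolean noSpareReplica = quorumBlockFor == liveReplicas;
        System.out.println(noSpareReplica
                           ? "use NeverSpeculatingReadExecutor (every live replica is already queried)"
                           : "speculative retry is possible");
    }
}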
Example 4: getReadExecutor
import org.apache.cassandra.db.ConsistencyLevel; // import the package/class the method depends on
/**
 * @return an executor appropriate for the configured speculative read policy
 */
public static AbstractReadExecutor getReadExecutor(SinglePartitionReadCommand command, ConsistencyLevel consistencyLevel) throws UnavailableException
{
    Keyspace keyspace = Keyspace.open(command.metadata().ksName);
    List<InetAddress> allReplicas = StorageProxy.getLiveSortedEndpoints(keyspace, command.partitionKey());

    // 11980: Excluding EACH_QUORUM reads from potential RR, so that we do not miscount DC responses
    ReadRepairDecision repairDecision = consistencyLevel == ConsistencyLevel.EACH_QUORUM
                                      ? ReadRepairDecision.NONE
                                      : command.metadata().newReadRepairDecision();
    List<InetAddress> targetReplicas = consistencyLevel.filterForQuery(keyspace, allReplicas, repairDecision);

    // Throw UAE early if we don't have enough replicas.
    consistencyLevel.assureSufficientLiveNodes(keyspace, targetReplicas);

    if (repairDecision != ReadRepairDecision.NONE)
    {
        Tracing.trace("Read-repair {}", repairDecision);
        ReadRepairMetrics.attempted.mark();
    }

    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(command.metadata().cfId);
    SpeculativeRetryParam retry = cfs.metadata.params.speculativeRetry;

    // Speculative retry is disabled *OR* there are simply no extra replicas to speculate.
    // 11980: Disable speculative retry if using EACH_QUORUM in order to prevent miscounting DC responses
    if (retry.equals(SpeculativeRetryParam.NONE)
        || consistencyLevel == ConsistencyLevel.EACH_QUORUM
        || consistencyLevel.blockFor(keyspace) == allReplicas.size())
        return new NeverSpeculatingReadExecutor(keyspace, command, consistencyLevel, targetReplicas);

    if (targetReplicas.size() == allReplicas.size())
    {
        // CL.ALL, RRD.GLOBAL or RRD.DC_LOCAL and a single-DC.
        // We are going to contact every node anyway, so ask for 2 full data requests instead of 1, for redundancy
        // (same amount of requests in total, but we turn 1 digest request into a full blown data request).
        return new AlwaysSpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas);
    }

    // RRD.NONE or RRD.DC_LOCAL w/ multiple DCs.
    InetAddress extraReplica = allReplicas.get(targetReplicas.size());

    // With repair decision DC_LOCAL all replicas/target replicas may be in different order, so
    // we might have to find a replacement that's not already in targetReplicas.
    if (repairDecision == ReadRepairDecision.DC_LOCAL && targetReplicas.contains(extraReplica))
    {
        for (InetAddress address : allReplicas)
        {
            if (!targetReplicas.contains(address))
            {
                extraReplica = address;
                break;
            }
        }
    }
    targetReplicas.add(extraReplica);

    if (retry.equals(SpeculativeRetryParam.ALWAYS))
        return new AlwaysSpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas);
    else // PERCENTILE or CUSTOM.
        return new SpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas);
}