This article collects typical usage examples of the Java class org.apache.cassandra.exceptions.UnavailableException. If you have been wondering what UnavailableException is for and how it is used, the curated class code examples below should help.
The UnavailableException class belongs to the org.apache.cassandra.exceptions package. Eight code examples of the class are shown below, ordered by popularity.
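Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: call a StorageProxy operation and translate the server-side UnavailableException into a message the caller can act on. The truncateBlocking call is taken from Examples 1-3 below; the helper class name is hypothetical, and the consistency/required/alive fields read in the catch block are assumptions about the exception's public fields in the Cassandra versions these examples appear to target.

import java.io.IOException;
import java.util.concurrent.TimeoutException;

import org.apache.cassandra.exceptions.UnavailableException;
import org.apache.cassandra.service.StorageProxy;

public final class TruncateHelper
{
    /**
     * Truncate a table, converting UnavailableException into an IOException
     * whose message reports how many replicas were missing.
     */
    public static void truncateOrFail(String keyspace, String columnFamily) throws TimeoutException, IOException
    {
        try
        {
            StorageProxy.truncateBlocking(keyspace, columnFamily);
        }
        catch (UnavailableException e)
        {
            // Assumption: the exception exposes the consistency level and the
            // required/alive replica counts as public fields.
            throw new IOException("Truncate needs all hosts up: required " + e.required
                                  + " live replicas, only " + e.alive + " alive at " + e.consistency, e);
        }
    }
}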
Example 1: execute

import org.apache.cassandra.exceptions.UnavailableException; // import the required package/class

@Override
public void execute() throws MojoExecutionException, MojoFailureException
{
    try
    {
        createCassandraHome();
        getLog().debug("Truncating Column Family \"" + columnFamily + "\" in Keyspace \"" + keyspace + "\"...");
        StorageProxy.truncateBlocking(keyspace, columnFamily);
        getLog().info("Truncated Column Family \"" + columnFamily + "\" in Keyspace \"" + keyspace + "\"...");
    } catch (UnavailableException ue)
    {
        throw new MojoExecutionException("Host(s) must be up in order for a truncate operation to be successful.", ue);
    } catch (TimeoutException te)
    {
        throw new MojoExecutionException("Host did not reply for truncate operation.", te);
    } catch (IOException ioe)
    {
        // unlikely in our case
        throw new MojoExecutionException("Could not construct truncate message", ioe);
    }
}
Example 2: truncate

import org.apache.cassandra.exceptions.UnavailableException; // import the required package/class

public void truncate(String keyspace, String columnFamily) throws TimeoutException, IOException
{
    try
    {
        StorageProxy.truncateBlocking(keyspace, columnFamily);
    }
    catch (UnavailableException e)
    {
        throw new IOException(e.getMessage());
    }
}
Example 3: truncate

import org.apache.cassandra.exceptions.UnavailableException; // import the required package/class

public void truncate(String keyspace, String columnFamily, String client) throws TimeoutException, IOException
{
    try
    {
        StorageProxy.truncateBlocking(keyspace, columnFamily, client);
    }
    catch (UnavailableException e)
    {
        throw new IOException(e.getMessage());
    }
}
Example 4: assureSufficientLiveNodes

import org.apache.cassandra.exceptions.UnavailableException; // import the required package/class

public void assureSufficientLiveNodes() throws UnavailableException
{
    consistencyLevel.assureSufficientLiveNodes(keyspace, endpoints);
}
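For context, a caller can use the same ConsistencyLevel check directly to probe whether a request would fail before sending anything. The sketch below assumes the (Keyspace, live endpoints) overload that Examples 5-7 call; the class and method names are illustrative only, not from the original.

import java.net.InetAddress;
import java.util.List;

import org.apache.cassandra.db.ConsistencyLevel;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.exceptions.UnavailableException;

public final class LiveNodeCheck
{
    /**
     * Returns true if the consistency level can currently be met by the given
     * live endpoints, false if assureSufficientLiveNodes would reject the request.
     */
    public static boolean canSatisfy(ConsistencyLevel cl, Keyspace keyspace, List<InetAddress> liveEndpoints)
    {
        try
        {
            cl.assureSufficientLiveNodes(keyspace, liveEndpoints);
            return true;
        }
        catch (UnavailableException e)
        {
            return false;
        }
    }
}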
Example 5: getReadExecutor

import org.apache.cassandra.exceptions.UnavailableException; // import the required package/class

/**
 * @return an executor appropriate for the configured speculative read policy
 */
public static AbstractReadExecutor getReadExecutor(ReadCommand command, ConsistencyLevel consistencyLevel) throws UnavailableException
{
    Keyspace keyspace = Keyspace.open(command.ksName);
    List<InetAddress> allReplicas = StorageProxy.getLiveSortedEndpoints(keyspace, command.key);
    ReadRepairDecision repairDecision = Schema.instance.getCFMetaData(command.ksName, command.cfName).newReadRepairDecision();
    List<InetAddress> targetReplicas = consistencyLevel.filterForQuery(keyspace, allReplicas, repairDecision);

    // Throw UAE early if we don't have enough replicas.
    consistencyLevel.assureSufficientLiveNodes(keyspace, targetReplicas);

    // Fat client. Speculating read executors need access to cfs metrics and sampled latency, and fat clients
    // can't provide that. So, for now, fat clients will always use NeverSpeculatingReadExecutor.
    if (StorageService.instance.isClientMode())
        return new NeverSpeculatingReadExecutor(command, consistencyLevel, targetReplicas);

    if (repairDecision != ReadRepairDecision.NONE)
        ReadRepairMetrics.attempted.mark();

    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(command.cfName);
    RetryType retryType = cfs.metadata.getSpeculativeRetry().type;

    // Speculative retry is disabled *OR* there are simply no extra replicas to speculate.
    if (retryType == RetryType.NONE || consistencyLevel.blockFor(keyspace) == allReplicas.size())
        return new NeverSpeculatingReadExecutor(command, consistencyLevel, targetReplicas);

    if (targetReplicas.size() == allReplicas.size())
    {
        // CL.ALL, RRD.GLOBAL or RRD.DC_LOCAL and a single-DC.
        // We are going to contact every node anyway, so ask for 2 full data requests instead of 1, for redundancy
        // (same amount of requests in total, but we turn 1 digest request into a full blown data request).
        return new AlwaysSpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
    }

    // RRD.NONE or RRD.DC_LOCAL w/ multiple DCs.
    InetAddress extraReplica = allReplicas.get(targetReplicas.size());
    // With repair decision DC_LOCAL all replicas/target replicas may be in different order, so
    // we might have to find a replacement that's not already in targetReplicas.
    if (repairDecision == ReadRepairDecision.DC_LOCAL && targetReplicas.contains(extraReplica))
    {
        for (InetAddress address : allReplicas)
        {
            if (!targetReplicas.contains(address))
            {
                extraReplica = address;
                break;
            }
        }
    }
    targetReplicas.add(extraReplica);

    if (retryType == RetryType.ALWAYS)
        return new AlwaysSpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
    else // PERCENTILE or CUSTOM.
        return new SpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
}
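A short call-site sketch for the factory above: the executor is chosen once per read, and UnavailableException propagates to the coordinator's caller before any replica is contacted. Only the getReadExecutor signature is taken from the example; the wrapper class and the fixed QUORUM level are assumptions for illustration.

import org.apache.cassandra.db.ConsistencyLevel;
import org.apache.cassandra.db.ReadCommand;
import org.apache.cassandra.exceptions.UnavailableException;
import org.apache.cassandra.service.AbstractReadExecutor;

public final class ReadExecutorExample
{
    /**
     * Pick the executor dictated by the table's speculative_retry setting.
     * If too few target replicas are alive for QUORUM, this throws
     * UnavailableException before any request is sent.
     */
    public static AbstractReadExecutor chooseExecutor(ReadCommand command) throws UnavailableException
    {
        return AbstractReadExecutor.getReadExecutor(command, ConsistencyLevel.QUORUM);
    }
}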
Example 6: getReadExecutor

import org.apache.cassandra.exceptions.UnavailableException; // import the required package/class

/**
 * @return an executor appropriate for the configured speculative read policy
 */
public static AbstractReadExecutor getReadExecutor(SinglePartitionReadCommand command, ConsistencyLevel consistencyLevel) throws UnavailableException
{
    Keyspace keyspace = Keyspace.open(command.metadata().ksName);
    List<InetAddress> allReplicas = StorageProxy.getLiveSortedEndpoints(keyspace, command.partitionKey());
    // 11980: Excluding EACH_QUORUM reads from potential RR, so that we do not miscount DC responses
    ReadRepairDecision repairDecision = consistencyLevel == ConsistencyLevel.EACH_QUORUM
                                        ? ReadRepairDecision.NONE
                                        : command.metadata().newReadRepairDecision();
    List<InetAddress> targetReplicas = consistencyLevel.filterForQuery(keyspace, allReplicas, repairDecision);

    // Throw UAE early if we don't have enough replicas.
    consistencyLevel.assureSufficientLiveNodes(keyspace, targetReplicas);

    if (repairDecision != ReadRepairDecision.NONE)
    {
        Tracing.trace("Read-repair {}", repairDecision);
        ReadRepairMetrics.attempted.mark();
    }

    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(command.metadata().cfId);
    SpeculativeRetryParam retry = cfs.metadata.params.speculativeRetry;

    // Speculative retry is disabled *OR* there are simply no extra replicas to speculate.
    // 11980: Disable speculative retry if using EACH_QUORUM in order to prevent miscounting DC responses
    if (retry.equals(SpeculativeRetryParam.NONE)
        || consistencyLevel == ConsistencyLevel.EACH_QUORUM
        || consistencyLevel.blockFor(keyspace) == allReplicas.size())
        return new NeverSpeculatingReadExecutor(keyspace, command, consistencyLevel, targetReplicas);

    if (targetReplicas.size() == allReplicas.size())
    {
        // CL.ALL, RRD.GLOBAL or RRD.DC_LOCAL and a single-DC.
        // We are going to contact every node anyway, so ask for 2 full data requests instead of 1, for redundancy
        // (same amount of requests in total, but we turn 1 digest request into a full blown data request).
        return new AlwaysSpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas);
    }

    // RRD.NONE or RRD.DC_LOCAL w/ multiple DCs.
    InetAddress extraReplica = allReplicas.get(targetReplicas.size());
    // With repair decision DC_LOCAL all replicas/target replicas may be in different order, so
    // we might have to find a replacement that's not already in targetReplicas.
    if (repairDecision == ReadRepairDecision.DC_LOCAL && targetReplicas.contains(extraReplica))
    {
        for (InetAddress address : allReplicas)
        {
            if (!targetReplicas.contains(address))
            {
                extraReplica = address;
                break;
            }
        }
    }
    targetReplicas.add(extraReplica);

    if (retry.equals(SpeculativeRetryParam.ALWAYS))
        return new AlwaysSpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas);
    else // PERCENTILE or CUSTOM.
        return new SpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas);
}
Example 7: getReadExecutor

import org.apache.cassandra.exceptions.UnavailableException; // import the required package/class

/**
 * @return an executor appropriate for the configured speculative read policy
 */
public static AbstractReadExecutor getReadExecutor(ReadCommand command, ConsistencyLevel consistencyLevel) throws UnavailableException
{
    Keyspace keyspace = Keyspace.open(command.ksName);
    List<InetAddress> allReplicas = StorageProxy.getLiveSortedEndpoints(keyspace, command.key);
    ReadRepairDecision repairDecision = Schema.instance.getCFMetaData(command.ksName, command.cfName).newReadRepairDecision();
    List<InetAddress> targetReplicas = consistencyLevel.filterForQuery(keyspace, allReplicas, repairDecision);

    // Throw UAE early if we don't have enough replicas.
    consistencyLevel.assureSufficientLiveNodes(keyspace, targetReplicas);

    // Fat client. Speculating read executors need access to cfs metrics and sampled latency, and fat clients
    // can't provide that. So, for now, fat clients will always use NeverSpeculatingReadExecutor.
    if (StorageService.instance.isClientMode())
        return new NeverSpeculatingReadExecutor(command, consistencyLevel, targetReplicas);

    if (repairDecision != ReadRepairDecision.NONE)
        ReadRepairMetrics.attempted.mark();

    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(command.cfName);
    RetryType retryType = cfs.metadata.getSpeculativeRetry().type;
    //@daidong For a single cluster, it will fall into PERCENTILE category.
    //logger.info("@daidong debug: " + "RetryType: " + retryType.name());

    // Speculative retry is disabled *OR* there are simply no extra replicas to speculate.
    if (retryType == RetryType.NONE || consistencyLevel.blockFor(keyspace) == allReplicas.size())
        return new NeverSpeculatingReadExecutor(command, consistencyLevel, targetReplicas);

    if (targetReplicas.size() == allReplicas.size())
    {
        // CL.ALL, RRD.GLOBAL or RRD.DC_LOCAL and a single-DC.
        // We are going to contact every node anyway, so ask for 2 full data requests instead of 1, for redundancy
        // (same amount of requests in total, but we turn 1 digest request into a full blown data request).
        return new AlwaysSpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
    }

    // RRD.NONE or RRD.DC_LOCAL w/ multiple DCs.
    InetAddress extraReplica = allReplicas.get(targetReplicas.size());
    // With repair decision DC_LOCAL all replicas/target replicas may be in different order, so
    // we might have to find a replacement that's not already in targetReplicas.
    if (repairDecision == ReadRepairDecision.DC_LOCAL && targetReplicas.contains(extraReplica))
    {
        for (InetAddress address : allReplicas)
        {
            if (!targetReplicas.contains(address))
            {
                extraReplica = address;
                break;
            }
        }
    }
    targetReplicas.add(extraReplica);

    if (retryType == RetryType.ALWAYS)
        return new AlwaysSpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
    else // PERCENTILE or CUSTOM. @daidong: return a speculatingReadExecutor.
        return new SpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
}
Example 8: assureSufficientLiveNodes

import org.apache.cassandra.exceptions.UnavailableException; // import the required package/class

public void assureSufficientLiveNodes() throws UnavailableException
{
    consistencyLevel.assureSufficientLiveNodes(table, endpoints);
}