This article collects typical usage examples of the Java method org.apache.cassandra.config.DatabaseDescriptor.getWriteRpcTimeout. If you are wondering what exactly DatabaseDescriptor.getWriteRpcTimeout does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.cassandra.config.DatabaseDescriptor.
Nine code examples of the DatabaseDescriptor.getWriteRpcTimeout method are shown below, sorted by popularity by default.
Example 1: get
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class this method depends on
public void get() throws WriteTimeoutException
{
    long requestTimeout = writeType == WriteType.COUNTER
                        ? DatabaseDescriptor.getCounterWriteRpcTimeout()
                        : DatabaseDescriptor.getWriteRpcTimeout();

    long timeout = TimeUnit.MILLISECONDS.toNanos(requestTimeout) - (System.nanoTime() - start);

    boolean success;
    try
    {
        success = condition.await(timeout, TimeUnit.NANOSECONDS);
    }
    catch (InterruptedException ex)
    {
        throw new AssertionError(ex);
    }

    if (!success)
    {
        int acks = ackCount();
        int blockedFor = totalBlockFor();
        // It's pretty unlikely, but we can race between exiting await above and here, so
        // that we could now have enough acks. In that case, we "lie" on the acks count to
        // avoid sending confusing info to the user (see CASSANDRA-6491).
        if (acks >= blockedFor)
            acks = blockedFor - 1;
        throw new WriteTimeoutException(writeType, consistencyLevel, acks, blockedFor);
    }
}
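The get() methods in Examples 1 and 2 share the same timeout arithmetic: the millisecond value returned by DatabaseDescriptor.getWriteRpcTimeout() (or getCounterWriteRpcTimeout() for counter writes) is converted to nanoseconds, and the time already spent since the request started is subtracted to get the remaining wait budget. The standalone sketch below is not part of the Cassandra source; the class and method names are made up purely to illustrate that calculation.

import java.util.concurrent.TimeUnit;

public final class RemainingTimeoutSketch
{
    // requestTimeoutMillis would come from DatabaseDescriptor.getWriteRpcTimeout();
    // startNanos is the System.nanoTime() captured when the write was issued.
    static long remainingNanos(long requestTimeoutMillis, long startNanos)
    {
        return TimeUnit.MILLISECONDS.toNanos(requestTimeoutMillis) - (System.nanoTime() - startNanos);
    }

    public static void main(String[] args) throws InterruptedException
    {
        long start = System.nanoTime();
        Thread.sleep(5); // simulate work that consumes part of the timeout budget
        // 2000 ms is the default write_request_timeout_in_ms in cassandra.yaml
        System.out.println("remaining nanos: " + remainingNanos(2000L, start));
    }
}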
Example 2: get
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class this method depends on
public void get() throws WriteTimeoutException, WriteFailureException
{
    long requestTimeout = writeType == WriteType.COUNTER
                        ? DatabaseDescriptor.getCounterWriteRpcTimeout()
                        : DatabaseDescriptor.getWriteRpcTimeout();

    long timeout = TimeUnit.MILLISECONDS.toNanos(requestTimeout) - (System.nanoTime() - start);

    boolean success;
    try
    {
        success = condition.await(timeout, TimeUnit.NANOSECONDS);
    }
    catch (InterruptedException ex)
    {
        throw new AssertionError(ex);
    }

    if (!success)
    {
        int blockedFor = totalBlockFor();
        int acks = ackCount();
        // It's pretty unlikely, but we can race between exiting await above and here, so
        // that we could now have enough acks. In that case, we "lie" on the acks count to
        // avoid sending confusing info to the user (see CASSANDRA-6491).
        if (acks >= blockedFor)
            acks = blockedFor - 1;
        throw new WriteTimeoutException(writeType, consistencyLevel, acks, blockedFor);
    }

    if (totalBlockFor() + failures > totalEndpoints())
    {
        throw new WriteFailureException(consistencyLevel, ackCount(), failures, totalBlockFor(), writeType);
    }
}
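Example 2 differs from Example 1 only in its final check: once the number of failed replicas plus the required block-for count exceeds the total number of endpoints, the consistency level can no longer be satisfied and a WriteFailureException is thrown instead of silently waiting. A minimal sketch of that predicate, with hypothetical parameter names standing in for the handler's fields:

final class WriteFailureCheckSketch
{
    // Once blockFor + failures exceeds the total number of endpoints, enough replicas
    // have failed that the required consistency level can no longer be met.
    static boolean writeHasFailed(int blockFor, int failures, int totalEndpoints)
    {
        return blockFor + failures > totalEndpoints;
    }

    public static void main(String[] args)
    {
        // true: 2 of 3 replicas failed, so only 1 can still answer while 2 acks are required
        System.out.println(writeHasFailed(2, 2, 3));
    }
}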
Example 3: testAddBatch
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class this method depends on
@Test
public void testAddBatch() throws IOException
{
    long initialAllBatches = BatchlogManager.instance.countAllBatches();
    CFMetaData cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD5).metadata;

    long timestamp = (System.currentTimeMillis() - DatabaseDescriptor.getWriteRpcTimeout() * 2) * 1000;
    UUID uuid = UUIDGen.getTimeUUID();

    // Add a batch with 10 mutations
    List<Mutation> mutations = new ArrayList<>(10);
    for (int j = 0; j < 10; j++)
    {
        mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(j))
                      .clustering("name" + j)
                      .add("val", "val" + j)
                      .build());
    }
    BatchlogManager.store(Batch.createLocal(uuid, timestamp, mutations));
    Assert.assertEquals(initialAllBatches + 1, BatchlogManager.instance.countAllBatches());

    String query = String.format("SELECT count(*) FROM %s.%s where id = %s",
                                 SystemKeyspace.NAME,
                                 SystemKeyspace.BATCHES,
                                 uuid);
    UntypedResultSet result = executeInternal(query);
    assertNotNull(result);
    assertEquals(1L, result.one().getLong("count"));
}
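The timestamp in this and the following BatchlogManager tests is deliberately placed in the past: subtracting twice the write RPC timeout from the current wall-clock time (milliseconds), then multiplying by 1000 to get microseconds, yields a batch that is already older than the batchlog timeout shown in Examples 7 and 8, so replay logic will consider it. A rough standalone sketch of that arithmetic, assuming the 2000 ms default for write_request_timeout_in_ms:

public class BatchTimestampSketch
{
    public static void main(String[] args)
    {
        long writeRpcTimeoutMs = 2000L; // would come from DatabaseDescriptor.getWriteRpcTimeout()
        // Milliseconds, shifted back by the batchlog timeout (2x the write timeout) ...
        long pastMillis = System.currentTimeMillis() - writeRpcTimeoutMs * 2;
        // ... then converted to microseconds, the resolution used for batch timestamps.
        long timestampMicros = pastMillis * 1000;
        System.out.println("batch timestamp (us): " + timestampMicros);
    }
}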
Example 4: testRemoveBatch
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class this method depends on
@Test
public void testRemoveBatch()
{
    long initialAllBatches = BatchlogManager.instance.countAllBatches();
    CFMetaData cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD5).metadata;

    long timestamp = (System.currentTimeMillis() - DatabaseDescriptor.getWriteRpcTimeout() * 2) * 1000;
    UUID uuid = UUIDGen.getTimeUUID();

    // Add a batch with 10 mutations
    List<Mutation> mutations = new ArrayList<>(10);
    for (int j = 0; j < 10; j++)
    {
        mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(j))
                      .clustering("name" + j)
                      .add("val", "val" + j)
                      .build());
    }

    // Store the batch
    BatchlogManager.store(Batch.createLocal(uuid, timestamp, mutations));
    Assert.assertEquals(initialAllBatches + 1, BatchlogManager.instance.countAllBatches());

    // Remove the batch
    BatchlogManager.remove(uuid);
    assertEquals(initialAllBatches, BatchlogManager.instance.countAllBatches());

    String query = String.format("SELECT count(*) FROM %s.%s where id = %s",
                                 SystemKeyspace.NAME,
                                 SystemKeyspace.BATCHES,
                                 uuid);
    UntypedResultSet result = executeInternal(query);
    assertNotNull(result);
    assertEquals(0L, result.one().getLong("count"));
}
Example 5: testReplayWithNoPeers
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class this method depends on
@Test
public void testReplayWithNoPeers() throws Exception
{
    StorageService.instance.getTokenMetadata().removeEndpoint(InetAddress.getByName("127.0.0.1"));

    long initialAllBatches = BatchlogManager.instance.countAllBatches();
    long initialReplayedBatches = BatchlogManager.instance.getTotalBatchesReplayed();

    CFMetaData cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata;

    long timestamp = (System.currentTimeMillis() - DatabaseDescriptor.getWriteRpcTimeout() * 2) * 1000;
    UUID uuid = UUIDGen.getTimeUUID();

    // Add a batch with 10 mutations
    List<Mutation> mutations = new ArrayList<>(10);
    for (int j = 0; j < 10; j++)
    {
        mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(j))
                      .clustering("name" + j)
                      .add("val", "val" + j)
                      .build());
    }
    BatchlogManager.store(Batch.createLocal(uuid, timestamp, mutations));
    assertEquals(1, BatchlogManager.instance.countAllBatches() - initialAllBatches);

    // Flush the batchlog to disk (see CASSANDRA-6822).
    Keyspace.open(SystemKeyspace.NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
    assertEquals(1, BatchlogManager.instance.countAllBatches() - initialAllBatches);
    assertEquals(0, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);

    // Force batchlog replay and wait for it to complete.
    BatchlogManager.instance.startBatchlogReplay().get();

    // Replay should be cancelled as there are no peers in the ring.
    assertEquals(1, BatchlogManager.instance.countAllBatches() - initialAllBatches);
}
Example 6: getTimeout
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class this method depends on
public long getTimeout()
{
    return DatabaseDescriptor.getWriteRpcTimeout();
}
Example 7: getBatchlogTimeout
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class this method depends on
public long getBatchlogTimeout()
{
    return DatabaseDescriptor.getWriteRpcTimeout() * 2; // enough time for the actual write + BM removal mutation
}
Example 8: getBatchlogTimeout
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class this method depends on
public static long getBatchlogTimeout()
{
    return DatabaseDescriptor.getWriteRpcTimeout() * 2; // enough time for the actual write + BM removal mutation
}
Example 9: getWriteRpcTimeout
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class this method depends on
public Long getWriteRpcTimeout() { return DatabaseDescriptor.getWriteRpcTimeout(); }
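To call getWriteRpcTimeout() outside the server process, DatabaseDescriptor first has to be initialised so that cassandra.yaml is loaded. The sketch below is only an assumption-laden illustration: it presumes a Cassandra 3.x classpath where daemonInitialization() exists and getWriteRpcTimeout() returns milliseconds, and a config reachable via the usual -Dcassandra.config property; other versions expose a different API.

import org.apache.cassandra.config.DatabaseDescriptor;

public class WriteTimeoutProbe
{
    public static void main(String[] args)
    {
        DatabaseDescriptor.daemonInitialization(); // loads and applies cassandra.yaml (3.x-era API; an assumption here)
        long writeTimeoutMs = DatabaseDescriptor.getWriteRpcTimeout();
        System.out.println("write_request_timeout_in_ms = " + writeTimeoutMs);
        // The batchlog timeout used in Examples 7 and 8 is simply twice this value.
        System.out.println("batchlog timeout (ms)       = " + writeTimeoutMs * 2);
    }
}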