本文整理汇总了Java中com.datastax.driver.core.Token类的典型用法代码示例。如果您正苦于以下问题：Java Token类的具体用法？Java Token怎么用？Java Token使用的例子？那么恭喜您，这里精选的类代码示例或许可以为您提供帮助。
Token类属于com.datastax.driver.core包，在下文中一共展示了Token类的9个代码示例。这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞，您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testVnodeSupport
import com.datastax.driver.core.Token; //导入依赖的package包/类
@Test
public void testVnodeSupport() throws Exception {
  // Verify that every peer is discovered when connecting to a simulated
  // multi-DC cluster with vnodes enabled, and that token assignments are
  // unique across hosts.
  ClusterSpec spec = ClusterSpec.builder().withNumberOfTokens(256).withNodes(3, 3, 3);
  try (BoundCluster simulated = server.register(spec);
      Cluster driverCluster = defaultBuilder(simulated).build()) {
    driverCluster.init();
    // Three data centers of three nodes each.
    assertThat(driverCluster.getMetadata().getAllHosts()).hasSize(9);
    Set<Token> collected = new HashSet<>();
    for (Host host : driverCluster.getMetadata().getAllHosts()) {
      assertThat(host.getTokens()).hasSize(256);
      collected.addAll(host.getTokens());
    }
    // No token may be shared between hosts: 256 vnodes per host * 9 hosts.
    assertThat(collected).hasSize(256 * 9);
  }
}
示例2: logTokenBatchMap
import com.datastax.driver.core.Token; //导入依赖的package包/类
/**
 * Logs, at DEBUG level, a compact summary of a token-to-batches map: the map
 * size followed by each token and the sizes of its queued batch statements.
 *
 * @param name label prepended to the log line to identify which map is dumped
 * @param map  pending batch statements keyed by the token they route to
 */
private void logTokenBatchMap(String name, Map<Token, Deque<BatchStatement>> map) {
  // Skip the string building entirely unless DEBUG is actually enabled.
  if (!logger.isDebugEnabled()) {
    return;
  }
  StringBuilder out = new StringBuilder(name);
  out.append(": Size: ").append(map.size());
  out.append("; Tokens: |");
  map.forEach((token, batches) -> {
    out.append(token.toString()).append(":");
    for (BatchStatement batch : batches) {
      out.append(batch.size()).append(",");
    }
    out.append("|.");
  });
  logger.debug(out.toString());
}
示例3: applyMicroBatching
import com.datastax.driver.core.Token; //导入依赖的package包/类
/**
 * Returns an Rx transformer that groups bound statements by the token range
 * owning their routing key, batches each group, executes the batches, and
 * emits the size of every executed batch.
 */
private Observable.Transformer<BoundStatement, Integer> applyMicroBatching() {
    return tObservable -> tObservable
            .groupBy(b -> {
                // Derive the token for this statement's partition key so that
                // statements landing on the same token range are batched together.
                ByteBuffer routingKey = b.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED,
                        codecRegistry);
                Token token = metadata.newToken(routingKey);
                // Linear scan of the ring's token ranges for the owner of this token.
                for (TokenRange tokenRange : session.getCluster().getMetadata().getTokenRanges()) {
                    if (tokenRange.contains(token)) {
                        return tokenRange;
                    }
                }
                // Should not happen on a healthy ring; fall back to an arbitrary
                // range rather than dropping the statement.
                log.warn("Unable to find any Cassandra node to insert token " + token.toString());
                return session.getCluster().getMetadata().getTokenRanges().iterator().next();
            })
            // Fold each per-token-range group into batch statements.
            .flatMap(g -> g.compose(new BoundBatchStatementTransformer()))
            .flatMap(batch -> rxSession
                    .execute(batch)
                    // Retry failed inserts per the configured policy, then report batch size.
                    .compose(applyInsertRetryPolicy())
                    .map(resultSet -> batch.size())
            );
}
示例4: buildQuery
import com.datastax.driver.core.Token; //导入依赖的package包/类
/**
 * Builds a SELECT statement covering the given token range.
 *
 * <p>The projection always starts with {@code token(<partition key columns>)}
 * so callers can count distinct partitions from the result rows. The WHERE
 * clause is half-open: {@code token(pk) > start AND token(pk) <= end}, with
 * either bound omitted when the range leaves it null (full-ring scan when both
 * are null).
 *
 * @param tokenRange the range to restrict the scan to; its start/end may be null
 * @return the CQL query text
 */
private String buildQuery(TokenRange tokenRange)
{
    Token start = tokenRange.getStart();
    Token end = tokenRange.getEnd();
    // token(pk1, pk2, ...) over the table's partition key columns.
    String tokenStatement = String.format("token(%s)",
            tableMetadata.getPartitionKey().stream()
                         .map(ColumnMetadata::getName)
                         .collect(Collectors.joining(", ")));
    StringBuilder query = new StringBuilder("SELECT ")
            .append(tokenStatement) // add the token(pk) statement so that we can count partitions
            .append(", ")
            .append(columns)
            .append(" FROM ")
            .append(tableMetadata.getName());
    if (start != null || end != null)
        query.append(" WHERE ");
    if (start != null)
        query.append(tokenStatement).append(" > ").append(start.toString());
    if (start != null && end != null)
        query.append(" AND ");
    if (end != null)
        query.append(tokenStatement).append(" <= ").append(end.toString());
    return query.toString();
}
示例5: getToken
import com.datastax.driver.core.Token; //导入依赖的package包/类
/**
 * {@inheritDoc}
 *
 * <p>Delegates token extraction to the wrapped driver row.
 */
@Override
public Token getToken(int index)
{
    return row.getToken(index);
}
示例6: getPartitionKeyToken
import com.datastax.driver.core.Token; //导入依赖的package包/类
/**
 * {@inheritDoc}
 *
 * <p>Delegates partition-key token extraction to the wrapped driver row.
 */
@Override
public Token getPartitionKeyToken()
{
    return row.getPartitionKeyToken();
}
示例7: run
import com.datastax.driver.core.Token; //导入依赖的package包/类
public boolean run() throws Exception
{
State state = currentState.get();
if (state == null)
{ // start processing a new token range
TokenRange range = tokenRangeIterator.next();
if (range == null)
return true; // no more token ranges to process
state = new State(range, buildQuery(range));
currentState.set(state);
}
ResultSet results;
Statement statement = new SimpleStatement(state.query);
statement.setFetchSize(pageSize);
if (state.pagingState != null)
statement.setPagingState(state.pagingState);
results = client.getSession().execute(statement);
state.pagingState = results.getExecutionInfo().getPagingState();
int remaining = results.getAvailableWithoutFetching();
rowCount += remaining;
for (Row row : results)
{
// this call will only succeed if we've added token(partition keys) to the query
Token partition = row.getPartitionKeyToken();
if (!state.partitions.contains(partition))
{
partitionCount += 1;
state.partitions.add(partition);
}
if (--remaining == 0)
break;
}
if (results.isExhausted() || isWarmup)
{ // no more pages to fetch or just warming up, ready to move on to another token range
currentState.set(null);
}
return true;
}
示例8: getToken
import com.datastax.driver.core.Token; //导入依赖的package包/类
/**
 * Not supported by this row implementation.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Token getToken(int index) {
  throw new UnsupportedOperationException();
}
示例9: getPartitionKeyToken
import com.datastax.driver.core.Token; //导入依赖的package包/类
/**
 * Not supported by this row implementation.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Token getPartitionKeyToken() {
  throw new UnsupportedOperationException();
}