本文整理匯總了Java中com.datastax.driver.core.BatchStatement.setConsistencyLevel方法的典型用法代碼示例。如果您正苦於以下問題:Java BatchStatement.setConsistencyLevel方法的具體用法?Java BatchStatement.setConsistencyLevel怎麽用?Java BatchStatement.setConsistencyLevel使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類com.datastax.driver.core.BatchStatement
的用法示例。
在下文中一共展示了BatchStatement.setConsistencyLevel方法的10個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: batchInsert
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
/**
 * Inserts every property map in {@code propsList} as one Cassandra batch.
 *
 * @param targetClass the entity class mapped to the target table
 * @param propsList   one map of column name -> value per row to insert; must not be null or empty
 * @param type        the batch type; {@code null} falls back to {@link BatchStatement.Type#LOGGED}
 * @return the driver {@code ResultSet} produced by executing the batch
 * @throws IllegalArgumentException if {@code propsList} is null or empty
 */
public ResultSet batchInsert(final Class<?> targetClass, final Collection<? extends Map<String, Object>> propsList, final BatchStatement.Type type) {
    N.checkArgument(N.notNullOrEmpty(propsList), "'propsList' can't be null or empty.");

    final BatchStatement batchStatement = new BatchStatement(type == null ? BatchStatement.Type.LOGGED : type);

    // Apply configured overrides (consistency, serial consistency, retry policy, tracing), if any.
    if (settings != null) {
        batchStatement.setConsistencyLevel(settings.getConsistency());
        batchStatement.setSerialConsistencyLevel(settings.getSerialConsistency());
        batchStatement.setRetryPolicy(settings.getRetryPolicy());

        if (settings.traceQuery) {
            batchStatement.enableTracing();
        } else {
            batchStatement.disableTracing();
        }
    }

    for (Map<String, Object> props : propsList) {
        // Scope the CQL/parameters pair to each iteration instead of reusing a null-initialized local.
        final CP pair = prepareAdd(targetClass, props);
        batchStatement.add(prepareStatement(pair.cql, pair.parameters.toArray()));
    }

    return session.execute(batchStatement);
}
示例2: doStore
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
@Override
protected void doStore(ImmutableSet<EquivalenceGraph> graphs) {
    // One batch covers the graph rows plus every index row, executed atomically at the write CL.
    BatchStatement batch = new BatchStatement();
    batch.setConsistencyLevel(write);

    for (EquivalenceGraph graph : graphs) {
        Long canonicalId = lowestId(graph);
        ByteBuffer payload = serializer.serialize(graph);
        batch.add(graphInsert(canonicalId, payload));

        // Index every member of the adjacency list back to the canonical graph id.
        for (Entry<Id, Adjacents> entry : graph.getAdjacencyList().entrySet()) {
            batch.add(indexInsert(entry.getKey().longValue(), canonicalId));
        }
    }

    session.execute(batch);
}
示例3: batchUpdate
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
/**
 * Updates every row described by {@code propsList} in a single batch. The primary-key
 * columns are pulled out of each map to build the WHERE clause; the remaining entries
 * become the SET clause.
 */
private ResultSet batchUpdate(final Class<?> targetClass, final Collection<? extends Map<String, Object>> propsList,
        final Collection<String> primaryKeyNames, final BatchStatement.Type type, boolean isFromEntity) {
    N.checkArgument(N.notNullOrEmpty(propsList), "'propsList' can't be null or empty.");

    final BatchStatement batchStatement = new BatchStatement(type == null ? BatchStatement.Type.LOGGED : type);

    // Honor configured consistency/retry/tracing overrides when present.
    if (settings != null) {
        batchStatement.setConsistencyLevel(settings.getConsistency());
        batchStatement.setSerialConsistencyLevel(settings.getSerialConsistency());
        batchStatement.setRetryPolicy(settings.getRetryPolicy());

        if (settings.traceQuery) {
            batchStatement.enableTracing();
        } else {
            batchStatement.disableTracing();
        }
    }

    for (Map<String, Object> props : propsList) {
        // Entity-derived maps are private to this call and may be mutated in place;
        // caller-supplied maps are copied first so removing key columns is not observable.
        final Map<String, Object> updatable = isFromEntity ? props : new HashMap<>(props);

        final And whereClause = new And();
        for (String keyName : primaryKeyNames) {
            whereClause.add(L.eq(keyName, updatable.remove(keyName)));
        }

        final CP pair = prepareUpdate(targetClass, updatable, whereClause);
        batchStatement.add(prepareStatement(pair.cql, pair.parameters.toArray()));
    }

    return session.execute(batchStatement);
}
示例4: run
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
/**
 * Binds every row from all partition iterators and executes them, splitting into
 * chunks of at most 65535 statements (the protocol limit per batch).
 *
 * @return {@code true} on completion
 * @throws Exception if binding or execution fails
 */
public boolean run() throws Exception
{
    List<BoundStatement> stmts = new ArrayList<>();
    partitionCount = partitions.size();
    for (PartitionIterator iterator : partitions)
        while (iterator.hasNext())
            stmts.add(bindRow(iterator.next()));
    rowCount += stmts.size();

    // 65535 is the max number of stmts per batch, so larger workloads are chunked manually.
    for (int j = 0 ; j < stmts.size() ; j += 65535)
    {
        // BUG FIX: the upper bound must be clamped to stmts.size(), not j + stmts.size();
        // the old bound exceeded the list size for every chunk after the first and threw
        // IndexOutOfBoundsException from subList.
        List<BoundStatement> substmts = stmts.subList(j, Math.min(stmts.size(), j + 65535));
        Statement stmt;
        if (substmts.size() == 1)
        {
            // A single statement needs no batch wrapper (also fixes the old check against
            // the full list size, which ignored the current chunk).
            stmt = substmts.get(0);
        }
        else
        {
            BatchStatement batch = new BatchStatement(batchType);
            batch.setConsistencyLevel(JavaDriverClient.from(cl));
            batch.addAll(substmts);
            stmt = batch;
        }

        try
        {
            validate(client.getSession().execute(stmt));
        }
        catch (ClassCastException e)
        {
            e.printStackTrace();
        }
    }
    return true;
}
示例5: run
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
/**
 * Binds every row from all partition iterators and executes them, splitting into
 * chunks of at most 65535 statements (the protocol limit per batch).
 *
 * @return {@code true} on completion
 * @throws Exception if binding or execution fails
 */
public boolean run() throws Exception
{
    List<BoundStatement> stmts = new ArrayList<>();
    partitionCount = partitions.size();
    for (PartitionIterator iterator : partitions)
        while (iterator.hasNext())
            stmts.add(bindRow(iterator.next()));
    rowCount += stmts.size();

    // 65535 is the max number of stmts per batch, so larger workloads are chunked manually.
    for (int j = 0 ; j < stmts.size() ; j += 65535)
    {
        // BUG FIX: the upper bound must be clamped to stmts.size(), not j + stmts.size();
        // the old bound exceeded the list size for every chunk after the first and threw
        // IndexOutOfBoundsException from subList.
        List<BoundStatement> substmts = stmts.subList(j, Math.min(stmts.size(), j + 65535));
        Statement stmt;
        if (substmts.size() == 1)
        {
            // A single statement needs no batch wrapper (also fixes the old check against
            // the full list size, which ignored the current chunk).
            stmt = substmts.get(0);
        }
        else
        {
            BatchStatement batch = new BatchStatement(batchType);
            batch.setConsistencyLevel(JavaDriverClient.from(cl));
            batch.addAll(substmts);
            stmt = batch;
        }

        client.getSession().execute(stmt);
    }
    return true;
}
示例6: save
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
/**
 * Persists the given events as a single batch of INSERT statements.
 * Events that cannot be mapped are skipped; a missing primary key aborts
 * collection of further events. An empty batch is never executed.
 */
public void save(final List<Event> events) {
    final BatchStatement batch = new BatchStatement();

    for (final Event event : events) {
        final Map<String, Object> columns = parse(event);

        // Unmappable event: warn and move on to the next one.
        if (columns.isEmpty()) {
            log.warn("Event {} could not be mapped. Suggestion: Cassandra is case sensitive, so maybe you can check field names.", event);
            continue;
        }

        // NOTE: a missing primary key stops processing the remaining events entirely.
        if (!hasPrimaryKey(columns)) {
            break;
        }

        final Insert insert = QueryBuilder.insertInto(table);
        columns.forEach(insert::value);

        if (log.isTraceEnabled()) {
            log.trace("Preparing insert for table {}: {}", table.getName(), insert.getQueryString());
        }
        batch.add(insert);
    }

    if (batch.getStatements().isEmpty()) {
        log.warn("No event produced insert query for table {}", table.getName());
        return;
    }

    batch.setConsistencyLevel(consistencyLevel);
    session.execute(batch);
}
示例7: doWrite
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
@Override
protected void doWrite(Publisher source, List<ChannelSchedule> blocks) {
    try {
        if (blocks.isEmpty()) {
            return;
        }

        // Write every schedule block in one batch at the configured write consistency.
        BatchStatement batch = new BatchStatement();
        batch.setConsistencyLevel(writeCl);

        for (ChannelSchedule block : blocks) {
            Long channelId = block.getChannel().getId().longValue();

            // Serialize each entry keyed by its broadcast source id.
            Map<String, ByteBuffer> serializedBySourceId = block.getEntries().stream()
                    .collect(
                            Collectors.toMap(
                                    entry -> entry.getBroadcast().getSourceId(),
                                    entry -> ByteBuffer.wrap(serializer.serialize(entry))
                            )
                    );

            batch.add(scheduleUpdate.bind()
                    .setString("source", source.key())
                    .setLong("channel", channelId)
                    .setTimestamp(
                            "day",
                            block.getInterval().getStart().toDate()
                    )
                    .setMap("broadcastsData", serializedBySourceId)
                    .setSet("broadcastsIdsData", serializedBySourceId.keySet())
                    .setTimestamp("updatedData", clock.now().toDate()));
        }

        session.execute(batch);
    } catch (Exception e) {
        Throwables.propagate(e);
    }
}
示例8: update
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
@Override
protected void update(EquivalenceGraph graph, Content content) {
    // Update the graph row, the content data row and the id index together in one batch.
    BatchStatement batch = new BatchStatement();
    batch.setConsistencyLevel(writeConsistency);

    batch.add(getGraphUpdateRow(graph));
    batch.add(getUpdateDataRow(graph, content));
    batch.add(index.insertStatement(
            content.getId().longValue(), graph.getId().longValue()
    ));

    session.execute(batch);
}
示例9: write
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
@Override
public void write(Event event, Event previous) {
    try {
        // Marshal the event into a batch and update the alias index before executing.
        BatchStatement eventBatch = new BatchStatement();
        eventBatch.setConsistencyLevel(writeConsistency);

        marshaller.marshallInto(event.getId(), eventBatch, event);
        aliasIndex.mutateAliasesAndExecute(event, previous);

        session.execute(eventBatch);
    } catch (Exception e) {
        // Wrap any failure so callers see a persistence-layer exception with the event context.
        throw new CassandraPersistenceException(event.toString(), e);
    }
}
示例10: executeBatch
import com.datastax.driver.core.BatchStatement; //導入方法依賴的package包/類
/**
 * Executes the given batch at the configured write consistency level,
 * logging the statement before and the result after execution.
 */
protected void executeBatch(BatchStatement batch) {
    LOG.debug("Execute cassandra batch {}", batch);
    batch.setConsistencyLevel(getWriteConsistencyLevel());

    ResultSet result = getSession().execute(batch);
    LOG.debug("Executed batch {}", result);
}