本文整理汇总了Java中org.apache.beam.sdk.util.BackOffUtils类的典型用法代码示例。如果您正苦于以下问题:Java BackOffUtils类的具体用法?Java BackOffUtils怎么用?Java BackOffUtils使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
BackOffUtils类属于org.apache.beam.sdk.util包,在下文中一共展示了BackOffUtils类的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: runQueryWithRetries
import org.apache.beam.sdk.util.BackOffUtils; //导入依赖的package包/类
/**
 * Executes {@code datastore.runQuery}, retrying transient failures with the
 * {@code RUNQUERY_BACKOFF} policy. Non-retryable error codes and exhausted
 * backoff both rethrow the last {@code DatastoreException}.
 */
private RunQueryResponse runQueryWithRetries(RunQueryRequest request) throws Exception {
  final Sleeper sleeper = Sleeper.DEFAULT;
  final BackOff backoff = RUNQUERY_BACKOFF.backoff();
  for (;;) {
    try {
      RunQueryResponse result = datastore.runQuery(request);
      rpcSuccesses.inc();
      return result;
    } catch (DatastoreException e) {
      rpcErrors.inc();
      // Permanent errors are surfaced to the caller immediately.
      if (NON_RETRYABLE_ERRORS.contains(e.getCode())) {
        throw e;
      }
      // Sleep per the backoff policy; once it is exhausted, give up.
      if (!BackOffUtils.next(sleeper, backoff)) {
        LOG.error("Aborting after {} retries.", MAX_RETRIES);
        throw e;
      }
    }
  }
}
示例2: processElement
import org.apache.beam.sdk.util.BackOffUtils; //导入依赖的package包/类
@ProcessElement
public void processElement(ProcessContext c) throws Exception {
  // Write the element's mutation batch to Spanner, retrying aborted commits
  // under the BUNDLE_WRITE_BACKOFF policy.
  final Sleeper sleeper = Sleeper.DEFAULT;
  final BackOff backoff = BUNDLE_WRITE_BACKOFF.backoff();
  Iterable<Mutation> batch = c.element();
  boolean committed = false;
  while (!committed) {
    try {
      // At-least-once write: a retry may re-apply the whole batch.
      spannerAccessor.getDatabaseClient().writeAtLeastOnce(batch);
      committed = true;
    } catch (AbortedException e) {
      // Only log the code and message for potentially-transient errors. The entire exception
      // will be propagated upon the last retry.
      if (!BackOffUtils.next(sleeper, backoff)) {
        throw e;
      }
    }
  }
}
示例3: flushBatch
import org.apache.beam.sdk.util.BackOffUtils; //导入依赖的package包/类
/**
 * Commits every buffered entity to Cloud Datastore in one non-transactional
 * batch, retrying failures up to {@code MAX_RETRIES} times with backoff.
 * On success the entity buffer is cleared.
 */
private void flushBatch() throws DatastoreException, IOException, InterruptedException {
  LOG.info("Writing batch of {} entities", entities.size());
  final Sleeper sleeper = Sleeper.DEFAULT;
  final BackOff backoff =
      FluentBackoff.DEFAULT
          .withMaxRetries(MAX_RETRIES).withInitialBackoff(INITIAL_BACKOFF).backoff();
  boolean committed = false;
  while (!committed) {
    try {
      // Rebuild the full request each attempt; a retry re-sends every mutation.
      CommitRequest.Builder request = CommitRequest.newBuilder();
      for (Entity entity : entities) {
        request.addMutations(mutationBuilder.apply(entity));
      }
      request.setMode(CommitRequest.Mode.NON_TRANSACTIONAL);
      datastore.commit(request.build());
      committed = true;
    } catch (DatastoreException ex) {
      LOG.error("Error writing to the Datastore ({}): {}", ex.getCode(),
          ex.getMessage());
      // Back off before retrying; when the policy is exhausted, rethrow.
      if (!BackOffUtils.next(sleeper, backoff)) {
        LOG.error("Aborting after {} retries.", MAX_RETRIES);
        throw ex;
      }
    }
  }
  LOG.info("Successfully wrote {} entities", entities.size());
  entities.clear();
}
示例4: flushBatch
import org.apache.beam.sdk.util.BackOffUtils; //导入依赖的package包/类
/**
 * Writes a batch of mutations to Cloud Datastore.
 *
 * <p>If a commit fails, it will be retried up to {@link #MAX_RETRIES} times. All
 * mutations in the batch will be committed again, even if the commit was partially
 * successful. If the retry limit is exceeded, the last exception from Cloud Datastore will be
 * thrown.
 *
 * @throws DatastoreException if the commit fails or IOException or InterruptedException if
 * backing off between retries fails.
 */
private void flushBatch() throws DatastoreException, IOException, InterruptedException {
LOG.debug("Writing batch of {} mutations", mutations.size());
Sleeper sleeper = Sleeper.DEFAULT;
BackOff backoff = BUNDLE_WRITE_BACKOFF.backoff();
while (true) {
// Batch upsert entities.
// The request is rebuilt on every iteration, so a retry re-sends the full
// mutation list (see the Javadoc above).
CommitRequest.Builder commitRequest = CommitRequest.newBuilder();
commitRequest.addAllMutations(mutations);
commitRequest.setMode(CommitRequest.Mode.NON_TRANSACTIONAL);
long startTime = System.currentTimeMillis(), endTime;
// Adaptive throttling: if the throttler says to hold off (based on previous
// failures), sleep for the target latency, record the throttled time, and
// re-evaluate from the top of the loop without sending a request.
if (throttler.throttleRequest(startTime)) {
LOG.info("Delaying request due to previous failures");
throttledSeconds.inc(WriteBatcherImpl.DATASTORE_BATCH_TARGET_LATENCY_MS / 1000);
sleeper.sleep(WriteBatcherImpl.DATASTORE_BATCH_TARGET_LATENCY_MS);
continue;
}
try {
datastore.commit(commitRequest.build());
endTime = System.currentTimeMillis();
// Feed observed latency back into the batch sizer and throttler so future
// batches adapt to current Datastore performance.
writeBatcher.addRequestLatency(endTime, endTime - startTime, mutations.size());
throttler.successfulRequest(startTime);
rpcSuccesses.inc();
// Break if the commit threw no exception.
break;
} catch (DatastoreException exception) {
if (exception.getCode() == Code.DEADLINE_EXCEEDED) {
/* Most errors are not related to request size, and should not change our expectation of
* the latency of successful requests. DEADLINE_EXCEEDED can be taken into
* consideration, though. */
endTime = System.currentTimeMillis();
writeBatcher.addRequestLatency(endTime, endTime - startTime, mutations.size());
}
// Only log the code and message for potentially-transient errors. The entire exception
// will be propagated upon the last retry.
LOG.error("Error writing batch of {} mutations to Datastore ({}): {}", mutations.size(),
exception.getCode(), exception.getMessage());
rpcErrors.inc();
// Permanent error codes are not worth retrying; rethrow immediately.
if (NON_RETRYABLE_ERRORS.contains(exception.getCode())) {
throw exception;
}
// Back off before the next attempt; when the policy is exhausted, rethrow.
if (!BackOffUtils.next(sleeper, backoff)) {
LOG.error("Aborting after {} retries.", MAX_RETRIES);
throw exception;
}
}
}
LOG.debug("Successfully wrote {} mutations", mutations.size());
mutations.clear();
mutationsSize = 0;
}