本文整理匯總了Java中org.apache.beam.sdk.util.FluentBackoff類的典型用法代碼示例。如果您正苦於以下問題:Java FluentBackoff類的具體用法?Java FluentBackoff怎麼用?Java FluentBackoff使用的例子?那麼,這裏精選的類代碼示例或許可以為您提供幫助。
FluentBackoff類屬於org.apache.beam.sdk.util包,在下文中一共展示了FluentBackoff類的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: testStartLoadJobSucceeds
import org.apache.beam.sdk.util.FluentBackoff; //導入依賴的package包/類
/**
 * Verifies the happy path of {@link BigQueryServicesImpl.JobServiceImpl#startLoadJob}:
 * a single 200 response means the job starts on the first attempt and the start is logged.
 */
@Test
public void testStartLoadJobSucceeds() throws IOException, InterruptedException {
  // Build the job that the stubbed transport will hand back.
  JobReference ref = new JobReference();
  ref.setJobId("jobId");
  ref.setProjectId("projectId");
  Job job = new Job();
  job.setJobReference(ref);

  // Stub exactly one successful HTTP response carrying the job payload.
  when(response.getStatusCode()).thenReturn(200);
  when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
  when(response.getContent()).thenReturn(toStream(job));

  JobServiceImpl.startJob(
      job,
      new ApiErrorExtractor(),
      bigquery,
      new FastNanoClockAndSleeper(),
      BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));

  // A single request/response round trip should have happened, and been logged.
  verify(response, times(1)).getStatusCode();
  verify(response, times(1)).getContent();
  verify(response, times(1)).getContentType();
  expectedLogs.verifyInfo(String.format("Started BigQuery job: %s", ref));
}
示例2: testStartLoadJobSucceedsAlreadyExists
import org.apache.beam.sdk.util.FluentBackoff; //導入依賴的package包/類
/**
 * Verifies that {@link BigQueryServicesImpl.JobServiceImpl#startLoadJob} treats an
 * HTTP 409 ("already exists") as success, without logging a job start.
 */
@Test
public void testStartLoadJobSucceedsAlreadyExists() throws IOException, InterruptedException {
  JobReference ref = new JobReference();
  ref.setJobId("jobId");
  ref.setProjectId("projectId");
  Job job = new Job();
  job.setJobReference(ref);

  // A 409 status signals the job already exists; no payload is needed.
  when(response.getStatusCode()).thenReturn(409);

  JobServiceImpl.startJob(
      job,
      new ApiErrorExtractor(),
      bigquery,
      new FastNanoClockAndSleeper(),
      BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));

  // One round trip, and the "started" message must NOT have been logged.
  verify(response, times(1)).getStatusCode();
  verify(response, times(1)).getContent();
  verify(response, times(1)).getContentType();
  expectedLogs.verifyNotLogged("Started BigQuery job");
}
示例3: testStartLoadJobRetry
import org.apache.beam.sdk.util.FluentBackoff; //導入依賴的package包/類
/**
 * Verifies that {@link BigQueryServicesImpl.JobServiceImpl#startLoadJob} retries after a
 * rate-limit error: first response is a 403 rateLimitExceeded, the second succeeds.
 */
@Test
public void testStartLoadJobRetry() throws IOException, InterruptedException {
  JobReference ref = new JobReference();
  ref.setJobId("jobId");
  ref.setProjectId("projectId");
  Job job = new Job();
  job.setJobReference(ref);

  // Attempt 1: 403 with a retryable error body. Attempt 2: 200 with the job payload.
  when(response.getStatusCode()).thenReturn(403).thenReturn(200);
  when(response.getContentType()).thenReturn(Json.MEDIA_TYPE);
  when(response.getContent())
      .thenReturn(toStream(errorWithReasonAndStatus("rateLimitExceeded", 403)))
      .thenReturn(toStream(job));

  JobServiceImpl.startJob(
      job,
      new ApiErrorExtractor(),
      bigquery,
      new FastNanoClockAndSleeper(),
      BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff()));

  // Exactly two round trips: the failed attempt plus the successful retry.
  verify(response, times(2)).getStatusCode();
  verify(response, times(2)).getContent();
  verify(response, times(2)).getContentType();
}
示例4: pollJob
import org.apache.beam.sdk.util.FluentBackoff; //導入依賴的package包/類
@Override
public Job pollJob(JobReference jobRef, int maxAttempts) throws InterruptedException {
BackOff backoff =
BackOffAdapter.toGcpBackOff(
FluentBackoff.DEFAULT
.withMaxRetries(maxAttempts)
.withInitialBackoff(INITIAL_JOB_STATUS_POLL_BACKOFF)
.withMaxBackoff(Duration.standardMinutes(1))
.backoff());
return pollJob(jobRef, Sleeper.DEFAULT, backoff);
}
示例5: pollJob
import org.apache.beam.sdk.util.FluentBackoff; //導入依賴的package包/類
@Override
public Job pollJob(JobReference jobRef, int maxAttempts)
throws InterruptedException {
BackOff backoff =
BackOffAdapter.toGcpBackOff(
FluentBackoff.DEFAULT
.withMaxRetries(maxAttempts)
.withInitialBackoff(Duration.millis(10))
.withMaxBackoff(Duration.standardSeconds(1))
.backoff());
Sleeper sleeper = Sleeper.DEFAULT;
try {
do {
Job job = getJob(jobRef);
if (job != null) {
JobStatus status = job.getStatus();
if (status != null && status.getState() != null
&& (status.getState().equals("DONE") || status.getState().equals("FAILED"))) {
return job;
}
}
} while (BackOffUtils.next(sleeper, backoff));
} catch (IOException e) {
return null;
}
return null;
}
示例6: flushBatch
import org.apache.beam.sdk.util.FluentBackoff; //導入依賴的package包/類
private void flushBatch() throws DatastoreException, IOException, InterruptedException {
LOG.info("Writing batch of {} entities", entities.size());
Sleeper sleeper = Sleeper.DEFAULT;
BackOff backoff =
FluentBackoff.DEFAULT
.withMaxRetries(MAX_RETRIES).withInitialBackoff(INITIAL_BACKOFF).backoff();
while (true) {
// Batch mutate entities.
try {
CommitRequest.Builder commitRequest = CommitRequest.newBuilder();
for (Entity entity: entities) {
commitRequest.addMutations(mutationBuilder.apply(entity));
}
commitRequest.setMode(CommitRequest.Mode.NON_TRANSACTIONAL);
datastore.commit(commitRequest.build());
// Break if the commit threw no exception.
break;
} catch (DatastoreException exception) {
LOG.error("Error writing to the Datastore ({}): {}", exception.getCode(),
exception.getMessage());
if (!BackOffUtils.next(sleeper, backoff)) {
LOG.error("Aborting after {} retries.", MAX_RETRIES);
throw exception;
}
}
}
LOG.info("Successfully wrote {} entities", entities.size());
entities.clear();
}
示例7: Reader
import org.apache.beam.sdk.util.FluentBackoff; //導入依賴的package包/類
private Reader(final UnboundedSource.UnboundedReader<T> unboundedReader) {
this.unboundedReader = unboundedReader;
backoffFactory =
FluentBackoff.DEFAULT
.withInitialBackoff(Duration.millis(10))
.withMaxBackoff(maxReadTime.minus(1))
.withMaxCumulativeBackoff(maxReadTime.minus(1));
}