本文整理汇总了Java中com.amazonaws.services.s3.model.UploadPartResult类的典型用法代码示例。如果您正苦于以下问题:Java UploadPartResult类的具体用法?Java UploadPartResult怎么用?Java UploadPartResult使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
UploadPartResult类属于com.amazonaws.services.s3.model包,在下文中一共展示了UploadPartResult类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: call
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
@Override
public UploadPartResult call() throws Exception {
    try {
        // Assemble the part-upload request from the state captured by this callable.
        UploadPartRequest partRequest = new UploadPartRequest()
                .withBucketName(this.bucketName)
                .withKey(this.key)
                .withUploadId(this.uploadId)
                .withInputStream(new ByteArrayInputStream(this.content))
                .withPartNumber(this.partNumber)
                .withLastPart(this.last)
                .withPartSize(this.contentLength);
        return this.amazonS3.uploadPart(partRequest);
    } finally {
        // Drop the buffered payload: this callable may linger inside the
        // CompletionService, and keeping the bytes alive would exhaust memory.
        this.content = null;
    }
}
示例2: doUploadMultipart
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Uploads one part of an in-progress multipart upload and returns the
 * ETag S3 assigned to it (needed later to complete the upload).
 *
 * @param blobStore provides the S3 client used for the call
 * @param bucketName target bucket
 * @param blobName target object key
 * @param uploadId id of the multipart upload this part belongs to
 * @param is stream supplying the part's bytes (not closed here)
 * @param length number of bytes to send from the stream
 * @param lastPart whether this is the final part of the upload
 * @return the ETag of the uploaded part
 * @throws AmazonS3Exception if the upload fails
 */
protected PartETag doUploadMultipart(S3BlobStore blobStore, String bucketName, String blobName, String uploadId, InputStream is,
                                     int length, boolean lastPart) throws AmazonS3Exception {
    UploadPartRequest partRequest = new UploadPartRequest();
    partRequest.setBucketName(bucketName);
    partRequest.setKey(blobName);
    partRequest.setUploadId(uploadId);
    // multipartChunks is the running part counter maintained by the enclosing class.
    partRequest.setPartNumber(multipartChunks);
    partRequest.setInputStream(is);
    partRequest.setPartSize(length);
    partRequest.setLastPart(lastPart);
    return blobStore.client().uploadPart(partRequest).getPartETag();
}
示例3: uploadPart
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Part uploads are not supported by this client implementation.
 *
 * @param uploadPartRequest ignored
 * @throws UnsupportedOperationException always
 */
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
示例4: uploadObject
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Uploads an object via a concurrent encrypt-and-upload multipart pipeline,
 * blocking until all parts have finished and the upload is completed.
 * Used for performance testing purposes only. Hence package private.
 * This method is subject to removal anytime without notice.
 *
 * @param req describes the upload; may carry a caller-supplied executor
 *            service, observer and multi-file output stream — defaults are
 *            created here for any of them that are null
 * @return the result of completing the multipart upload
 * @throws IOException if staging parts to disk fails
 * @throws InterruptedException if interrupted while waiting on part uploads
 * @throws ExecutionException if any asynchronous part upload fails
 */
CompleteMultipartUploadResult uploadObject(final UploadObjectRequest req)
        throws IOException, InterruptedException, ExecutionException {
    // Set up the pipeline for concurrent encrypt and upload.
    // Set up a thread pool for this pipeline; remember whether we created it
    // ourselves so we know to shut it down in the finally block.
    ExecutorService es = req.getExecutorService();
    final boolean defaultExecutorService = es == null;
    if (es == null)
        es = Executors.newFixedThreadPool(clientConfiguration.getMaxConnections());
    UploadObjectObserver observer = req.getUploadObjectObserver();
    if (observer == null)
        observer = new UploadObjectObserver();
    // initialize the observer
    observer.init(req, this, this, es);
    // Initiate upload
    observer.onUploadInitiation(req);
    final List<PartETag> partETags = new ArrayList<PartETag>();
    MultiFileOutputStream mfos = req.getMultiFileOutputStream();
    if (mfos == null)
        mfos = new MultiFileOutputStream();
    try {
        // initialize the multi-file output stream
        mfos.init(observer, req.getPartSize(), req.getDiskLimit());
        // Kicks off the encryption-upload pipeline;
        // Note mfos is automatically closed upon method completion.
        putLocalObject(req, mfos);
        // Block till all parts have been uploaded, collecting the ETags
        // required to complete the multipart upload.
        for (Future<UploadPartResult> future: observer.getFutures()) {
            UploadPartResult partResult = future.get();
            partETags.add(new PartETag(partResult.getPartNumber(), partResult.getETag()));
        }
    } finally {
        if (defaultExecutorService)
            es.shutdownNow(); // shut down the locally created thread pool
        mfos.cleanup(); // delete left-over temp files
    }
    // Complete upload
    return observer.onCompletion(partETags);
}
示例5: onPartCreate
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Notified from {@link MultiFileOutputStream#fos()} when a part ready for
 * upload has been successfully created on disk. By default, this method
 * performs the following:
 * <ol>
 * <li>calls {@link #newUploadPartRequest(PartCreationEvent, File)} to
 * create an upload-part request for the newly created ciphertext file</li>
 * <li>call {@link #appendUserAgent(AmazonWebServiceRequest, String)} to
 * append the necessary user agent string to the request</li>
 * <li>and finally submit a concurrent task, which calls the method
 * {@link #uploadPart(UploadPartRequest)}, to be performed</li>
 * </ol>
 * <p>
 * To enable parallel uploads, implementation of this method should never
 * block.
 *
 * @param event
 *            to represent the completion of a ciphertext file creation
 *            which is ready for multipart upload to S3.
 */
public void onPartCreate(PartCreationEvent event) {
    final File part = event.getPart();
    final UploadPartRequest reqUploadPart =
        newUploadPartRequest(event, part);
    final OnFileDelete fileDeleteObserver = event.getFileDeleteObserver();
    appendUserAgent(reqUploadPart, AmazonS3EncryptionClient.USER_AGENT);
    // Submit the upload asynchronously so this callback never blocks; the
    // Future is recorded so the caller can later wait for all parts to finish.
    futures.add(es.submit(new Callable<UploadPartResult>() {
        @Override public UploadPartResult call() {
            // Upload the ciphertext directly via the non-encrypting
            // s3 client
            try {
                return uploadPart(reqUploadPart);
            } finally {
                // clean up part already uploaded; a failed delete is only
                // logged since the upload itself has already succeeded
                if (!part.delete()) {
                    LogFactory.getLog(getClass()).debug(
                        "Ignoring failure to delete file " + part
                        + " which has already been uploaded");
                } else {
                    if (fileDeleteObserver != null)
                        fileDeleteObserver.onFileDelete(null);
                }
            }
        }
    }));
}
示例6: uploadPart
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Upload part of a multi-partition file.
 * <i>Important: this call does not close any input stream in the request.</i>
 *
 * @param request request
 * @return the result of the operation
 * @throws AmazonClientException on problems
 */
public UploadPartResult uploadPart(UploadPartRequest request)
    throws AmazonClientException {
  // The previous try/catch only caught AmazonClientException and rethrew it
  // unchanged, so the call can be delegated directly with identical behavior.
  return mClient.uploadPart(request);
}
示例7: getMockClient
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Builds a spied {@link AmazonS3Client} with the multipart-upload entry
 * points stubbed out: initiation always reports upload id "123" and every
 * part upload succeeds with ETag "foo".
 */
private AmazonS3Client getMockClient() {
    AmazonS3Client client = spy(AmazonS3Client.class);

    // Any multipart initiation yields a fixed upload id.
    InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId("123");
    doReturn(initResult).when(client)
        .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

    // Any part upload succeeds with a fixed ETag.
    UploadPartResult partResult = new UploadPartResult();
    partResult.setETag("foo");
    doReturn(partResult).when(client).uploadPart(any(UploadPartRequest.class));

    return client;
}
示例8: uploadPart
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Schedules a part upload on the configured executor service and returns a
 * future for its result. The input stream is not closed by this method.
 */
@Override
public CompletableFuture<UploadPartResult> uploadPart(String bucketName, String key, String uploadId,
                                                      int partNumber, InputStream inputStream, int length) {
    // Build the request eagerly; only the S3 call itself runs on the pool.
    final UploadPartRequest partRequest = new UploadPartRequest();
    partRequest.setBucketName(bucketName);
    partRequest.setKey(key);
    partRequest.setUploadId(uploadId);
    partRequest.setPartNumber(partNumber);
    partRequest.setInputStream(inputStream);
    partRequest.setPartSize(length);
    return CompletableFuture.supplyAsync(() -> s3Client.uploadPart(partRequest), executorService);
}
示例9: testWriteRecordsSpanningMultiplePartsWithRetry
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
@Test
public void testWriteRecordsSpanningMultiplePartsWithRetry() throws Exception {
    // Use a large flush size so one flush spans several S3 parts, and allow
    // up to 3 retries per failed part upload.
    localProps.put(S3SinkConnectorConfig.FLUSH_SIZE_CONFIG, "10000");
    localProps.put(S3SinkConnectorConfig.S3_PART_RETRIES_CONFIG, "3");
    setUp();
    List<SinkRecord> sinkRecords = createRecords(11000);
    int totalBytes = calcByteSize(sinkRecords);
    final int parts = totalBytes / connectorConfig.getPartSize();
    // From time to time fail S3 upload part method
    final AtomicInteger count = new AtomicInteger();
    PowerMockito.doAnswer(new Answer<UploadPartResult>() {
        @Override
        public UploadPartResult answer(InvocationOnMock invocationOnMock) throws Throwable {
            // Fail the very first call and then every 'parts'-th call; the
            // connector's retry logic is expected to absorb these failures.
            if(count.getAndIncrement() % parts == 0){
                throw new SdkClientException("Boom!");
            } else {
                return (UploadPartResult)invocationOnMock.callRealMethod();
            }
        }
    }).when(s3).uploadPart(Mockito.isA(UploadPartRequest.class));
    replayAll();
    task = new S3SinkTask();
    task.initialize(context);
    task.start(properties);
    verifyAll();
    task.put(sinkRecords);
    task.close(context.assignment());
    task.stop();
    // Despite the injected failures, records must be committed exactly once.
    long[] validOffsets = {0, 10000};
    verify(sinkRecords, validOffsets);
}
示例10: getMultiPartsUploadResults
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Waits for every submitted part upload to finish (in completion order)
 * and collects the ETags needed to complete the multipart upload.
 *
 * @return one {@link PartETag} per uploaded part
 * @throws ExecutionException if any part upload failed
 * @throws InterruptedException if interrupted while waiting
 */
private List<PartETag> getMultiPartsUploadResults() throws ExecutionException, InterruptedException {
    final int expectedParts = this.partNumberCounter;
    List<PartETag> etags = new ArrayList<>(expectedParts);
    int collected = 0;
    while (collected < expectedParts) {
        // take() blocks until the next part upload completes, in any order.
        Future<UploadPartResult> completed = this.completionService.take();
        etags.add(completed.get().getPartETag());
        collected++;
    }
    return etags;
}
示例11: uploadPartAndFlushBuffer
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Uploads the in-memory write buffer as the next part of the multipart
 * upload and resets the buffer. Lazily initiates the multipart upload on
 * first use, and aborts the whole upload if this part fails for any reason.
 *
 * @throws IOException if the maximum part count is reached or S3 rejects
 *         the part
 */
private void uploadPartAndFlushBuffer() throws IOException {
    boolean operationSuccessful = false;
    if (this.uploadId == null) {
        // First part: start the multipart upload and remember its id.
        this.uploadId = initiateMultipartUpload();
    }
    try {
        if (this.partNumber >= MAX_PART_NUMBER) {
            throw new IOException("Cannot upload any more data: maximum part number reached");
        }
        final InputStream inputStream = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final UploadPartRequest request = new UploadPartRequest();
        request.setBucketName(this.bucket);
        request.setKey(this.object);
        request.setInputStream(inputStream);
        request.setUploadId(this.uploadId);
        request.setPartSize(this.bytesWritten);
        // Advance the part counter for the next part after using the current one.
        request.setPartNumber(this.partNumber++);
        final UploadPartResult result = this.s3Client.uploadPart(request);
        // The ETag is required later to complete the multipart upload.
        this.partETags.add(result.getPartETag());
        this.bytesWritten = 0;
        operationSuccessful = true;
    } catch (AmazonServiceException e) {
        throw new IOException(StringUtils.stringifyException(e));
    } finally {
        if (!operationSuccessful) {
            // Abort so S3 does not retain (and bill for) orphaned parts.
            abortUpload();
        }
    }
}
示例12: call
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Uploads this part's spill file to S3 and returns its ETag. The input
 * stream is always closed and the local part file always deleted, whether
 * or not the upload succeeds.
 *
 * @return the ETag of the uploaded part
 * @throws Exception if the upload fails
 */
public PartETag call()
    throws Exception
{
    InputStream partStream = new ProgressableResettableBufferedFileInputStream(
        this.partFile, MultipartUploadOutputStream.this.progressable);

    UploadPartRequest uploadRequest = new UploadPartRequest()
        .withBucketName(MultipartUploadOutputStream.this.bucketName)
        .withKey(MultipartUploadOutputStream.this.key)
        .withUploadId(MultipartUploadOutputStream.this.uploadId)
        .withInputStream(partStream)
        .withPartNumber(this.partNumber)
        .withPartSize(this.partFile.length())
        .withMD5Digest(this.md5sum);

    UploadPartResult uploadResult;
    try {
        String message = String.format("S3 uploadPart bucket:%s key:%s part:%d size:%d", new Object[] {
            MultipartUploadOutputStream.this.bucketName,
            MultipartUploadOutputStream.this.key,
            Integer.valueOf(this.partNumber),
            Long.valueOf(this.partFile.length()) });
        MultipartUploadOutputStream.LOG.info(message);
        uploadResult = MultipartUploadOutputStream.this.s3.uploadPart(uploadRequest);
    } finally {
        // Close the stream first, then delete the temp file even if close throws.
        try {
            if (partStream != null) {
                partStream.close();
            }
        } finally {
            this.partFile.delete();
        }
    }
    return uploadResult.getPartETag();
}
示例13: GetETags
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Converts the per-part upload responses into the {@link PartETag} list
 * required to complete a multipart upload.
 *
 * @param responses results returned by each {@code uploadPart} call
 * @return one ETag entry per uploaded part, in the same order as the input
 */
static List<PartETag> GetETags(List<UploadPartResult> responses)
{
    // Pre-size to the known result count to avoid intermediate resizing.
    List<PartETag> etags = new ArrayList<PartETag>(responses.size());
    for (UploadPartResult response : responses)
    {
        etags.add(new PartETag(response.getPartNumber(), response.getETag()));
    }
    return etags;
}
示例14: uploadPart
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Forwards the part upload to the wrapped client unchanged.
 *
 * @param request the part-upload request to forward
 * @return whatever the delegate client returns
 */
@Override
public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
    UploadPartResult result = delegate.uploadPart(request);
    return result;
}
示例15: doUploadPart
import com.amazonaws.services.s3.model.UploadPartResult; //导入依赖的package包/类
/**
 * Executes an upload-part request: sends the content, optionally verifies
 * the client-side MD5 against the ETag returned by S3, publishes a progress
 * event, and converts the response metadata into an {@link UploadPartResult}.
 *
 * @param bucketName      target bucket (used for error reporting)
 * @param key             target object key
 * @param uploadId        multipart upload id (error reporting only)
 * @param partNumber      1-based part number copied into the result
 * @param partSize        size of the part (error reporting only)
 * @param request         the marshalled request to send
 * @param inputStream     the part content; set as the request body
 * @param md5DigestStream digest-computing wrapper of the content, or null
 *                        to skip client-side integrity validation
 * @param listener        receives part-completed / part-failed events
 * @return the populated result for this part
 */
private UploadPartResult doUploadPart(final String bucketName,
        final String key, final String uploadId, final int partNumber,
        final long partSize, Request<UploadPartRequest> request,
        InputStream inputStream,
        MD5DigestCalculatingInputStream md5DigestStream,
        final ProgressListener listener) {
    try {
        request.setContent(inputStream);
        ObjectMetadata metadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
        final String etag = metadata.getETag();
        // Compare the locally computed MD5 with the server's ETag (decoded
        // from hex), unless the skip strategy says validation is unnecessary.
        if (md5DigestStream != null
                && !skipMd5CheckStrategy.skipClientSideValidationPerUploadPartResponse(metadata)) {
            byte[] clientSideHash = md5DigestStream.getMd5Digest();
            byte[] serverSideHash = BinaryUtils.fromHex(etag);
            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                final String info = "bucketName: " + bucketName + ", key: "
                        + key + ", uploadId: " + uploadId
                        + ", partNumber: " + partNumber + ", partSize: "
                        + partSize;
                throw new SdkClientException(
                        "Unable to verify integrity of data upload. "
                        + "Client calculated content hash (contentMD5: "
                        + Base16.encodeAsString(clientSideHash)
                        + " in hex) didn't match hash (etag: "
                        + etag
                        + " in hex) calculated by Amazon S3. "
                        + "You may need to delete the data stored in Amazon S3. "
                        + "(" + info + ")");
            }
        }
        publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
        UploadPartResult result = new UploadPartResult();
        result.setETag(etag);
        result.setPartNumber(partNumber);
        result.setSSEAlgorithm(metadata.getSSEAlgorithm());
        result.setSSECustomerAlgorithm(metadata.getSSECustomerAlgorithm());
        result.setSSECustomerKeyMd5(metadata.getSSECustomerKeyMd5());
        result.setRequesterCharged(metadata.isRequesterCharged());
        return result;
    } catch (Throwable t) {
        publishProgress(listener, ProgressEventType.TRANSFER_PART_FAILED_EVENT);
        // Leaving this here in case anyone is depending on it, but it's
        // inconsistent with other methods which only generate one of
        // COMPLETED_EVENT_CODE or FAILED_EVENT_CODE.
        publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
        throw failure(t);
    }
}