本文整理汇总了Java中com.amazonaws.services.s3.model.InitiateMultipartUploadResult.setUploadId方法的典型用法代码示例。如果您正苦于以下问题:Java InitiateMultipartUploadResult.setUploadId方法的具体用法?Java InitiateMultipartUploadResult.setUploadId怎么用?Java InitiateMultipartUploadResult.setUploadId使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.amazonaws.services.s3.model.InitiateMultipartUploadResult的用法示例。
在下文中一共展示了InitiateMultipartUploadResult.setUploadId方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testGetters
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; //导入方法依赖的package包/类
@Test
void testGetters() {
    // Builder starts at the S3 minimum part size; the fluent setter must round-trip.
    assertEquals(S3WritableObjectChannel.MIN_PART_SIZE, builder.getPartSize());
    assertEquals(123, builder.partSize(123).getPartSize());

    // Collaborators and scalar settings are returned exactly as configured.
    assertEquals(amazonS3, builder.amazonS3());
    assertEquals(executorService, builder.executorService());
    assertEquals("upldId", builder.uploadId());
    assertEquals("bucket", builder.bucket());
    assertEquals("key", builder.key());
    assertEquals(2, builder.failedPartUploadRetries());
    assertFalse(builder.hasDelayedHeader());
    assertFalse(builder.closeExecutorOnChannelClose());

    // Switching to the default cached pool implies the channel owns (and closes) it.
    assertTrue(builder.defaultCachedThreadPoolExecutor().closeExecutorOnChannelClose());

    // Applying an InitiateMultipartUploadResult overwrites uploadId/bucket/key in one shot.
    InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setBucketName("bucket1");
    initResult.setKey("key1");
    initResult.setUploadId("upldId1");
    builder.initiateMultipartUploadResult(initResult);
    assertEquals("upldId1", builder.uploadId());
    assertEquals("bucket1", builder.bucket());
    assertEquals("key1", builder.key());

    builder.executorService().shutdown();
}
示例2: testInitiateUpload
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; //导入方法依赖的package包/类
@Test
public void testInitiateUpload()
{
    // Canned reply: the mocked S3 client will hand back this fixed upload id.
    InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId(uploadId);

    // Wire up the @Mock fields, then stub client and file-metadata behavior.
    MockitoAnnotations.initMocks(this);
    when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
    when(fileMetadata.getFilePath()).thenReturn("/tmp/file1.txt");
    when(fileMetadata.getNumberOfBlocks()).thenReturn(4);

    S3InitiateFileUploadTest operator = new S3InitiateFileUploadTest();
    operator.setBucketName("testbucket");
    operator.setup(context);

    // Collect whatever the operator emits on its metadata port.
    CollectorTestSink<S3InitiateFileUploadOperator.UploadFileMetadata> metadataSink = new CollectorTestSink<>();
    CollectorTestSink<Object> sink = (CollectorTestSink)metadataSink;
    operator.fileMetadataOutput.setSink(sink);

    // Drive one window through the operator and verify the emitted upload id.
    operator.beginWindow(0);
    operator.processTuple(fileMetadata);
    operator.endWindow();
    S3InitiateFileUploadOperator.UploadFileMetadata emittedMetadata =
        (S3InitiateFileUploadOperator.UploadFileMetadata)sink.collectedTuples.get(0);
    Assert.assertEquals("Upload ID :", uploadId, emittedMetadata.getUploadId());
}
示例3: getMockClient
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; //导入方法依赖的package包/类
// Builds a spied AmazonS3Client whose multipart calls return canned successes:
// every uploadPart yields ETag "foo" and every initiation yields upload id "123".
private AmazonS3Client getMockClient() {
    AmazonS3Client client = spy(AmazonS3Client.class);

    UploadPartResult partResult = new UploadPartResult();
    partResult.setETag("foo");
    doReturn(partResult).when(client).uploadPart(any(UploadPartRequest.class));

    InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId("123");
    doReturn(initResult).when(client)
        .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

    return client;
}
示例4: testAmazonClientException
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; //导入方法依赖的package包/类
/**
 * Verifies that when the S3 client throws {@link AmazonClientException} during
 * a part upload, {@code sendBatch} surfaces it as a {@link TransportException}
 * whose cause is the original client exception.
 */
@Test(expected = TransportException.class)
public void testAmazonClientException()
    throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client and replies: initiation succeeds, part upload fails.
   */
  AmazonS3Client mockClient = mock(AmazonS3Client.class);
  doThrow(new AmazonClientException("expected")).when(mockClient)
      .uploadPart(any(UploadPartRequest.class));
  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();
  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath", false, multiPartUploads);
  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");
  try {
    transport.sendBatch(buffer, partitions, new TestContext());
  } catch (Exception e) {
    // JUnit's assertEquals takes (expected, actual); the original had them swapped,
    // which would produce a misleading failure message.
    assertEquals(AmazonClientException.class, e.getCause().getClass());
    throw e;
  }
}
示例5: testS3OutputModule
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; //导入方法依赖的package包/类
@Test
public void testS3OutputModule() throws Exception
{
    // Canned S3 replies so the module's multipart and single-put paths both succeed.
    InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId(uploadId);
    PutObjectResult putResult = new PutObjectResult();
    putResult.setETag("SuccessFullyUploaded");
    UploadPartResult partResult = new UploadPartResult();
    partResult.setPartNumber(1);
    partResult.setETag("SuccessFullyPartUploaded");

    MockitoAnnotations.initMocks(this);
    when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(initResult);
    when(client.putObject(any(PutObjectRequest.class))).thenReturn(putResult);
    when(client.uploadPart(any(UploadPartRequest.class))).thenReturn(partResult);
    when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn(completeMultiPart());

    // DAG configuration: small blocks force the multipart path through the module.
    Application app = new S3OutputModuleMockTest.Application();
    Configuration conf = new Configuration();
    conf.set("dt.operator.HDFSInputModule.prop.files", inputDir);
    conf.set("dt.operator.HDFSInputModule.prop.blockSize", "10");
    conf.set("dt.operator.HDFSInputModule.prop.blocksThreshold", "1");
    conf.set("dt.attr.CHECKPOINT_WINDOW_COUNT","20");
    conf.set("dt.operator.S3OutputModule.prop.accessKey", "accessKey");
    conf.set("dt.operator.S3OutputModule.prop.secretAccessKey", "secretKey");
    conf.set("dt.operator.S3OutputModule.prop.bucketName", "bucketKey");
    conf.set("dt.operator.S3OutputModule.prop.outputDirectoryPath", outputDir);

    Path outDir = new Path("file://" + new File(outputDir).getAbsolutePath());
    final Path outputFilePath = new Path(outDir.toString() + File.separator + FILE);
    final FileSystem fs = FileSystem.newInstance(outDir.toUri(), new Configuration());

    // Run a local cluster until the expected output file appears (or 10s elapses).
    LocalMode lma = LocalMode.newInstance();
    lma.prepareDAG(app, conf);
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(true);
    ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
    {
        @Override
        public Boolean call() throws Exception
        {
            return fs.exists(outputFilePath);
        }
    });
    lc.run(10000);

    Assert.assertTrue("output file exist", fs.exists(outputFilePath));
}