本文整理汇总了Java中com.amazonaws.services.s3.transfer.TransferManagerBuilder类的典型用法代码示例。如果您正苦于以下问题:Java TransferManagerBuilder类的具体用法?Java TransferManagerBuilder怎么用?Java TransferManagerBuilder使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
TransferManagerBuilder类属于com.amazonaws.services.s3.transfer包,在下文中一共展示了TransferManagerBuilder类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: uploadToS3
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Uploads a multipart file to S3 and returns the Base64-encoded MD5 of its content.
 *
 * @param bucket target S3 bucket name.
 * @param key object key to store the file under.
 * @param file the uploaded file; its whole content is read into memory for the MD5.
 * @return the Base64 MD5 of the content on success, or {@code null} on failure.
 */
private String uploadToS3(String bucket, String key, MultipartFile file) {
    final AmazonS3 s3 = s3ClientFactory.createClient();
    final TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
    // try-with-resources: the original leaked the MultipartFile input stream.
    try (InputStream content = file.getInputStream()) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(file.getSize());
        metadata.setContentType(file.getContentType());
        // S3 verifies the upload against this Content-MD5 header.
        byte[] resultByte = DigestUtils.md5(file.getBytes());
        String streamMD5 = new String(Base64.encodeBase64(resultByte));
        metadata.setContentMD5(streamMD5);
        Upload upload = transferManager.upload(bucket, key, content, metadata);
        upload.waitForCompletion();
        return streamMD5;
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        logger.error("Error uploading file: {}", e.toString(), e);
        return null;
    } catch (AmazonServiceException | IOException e) {
        // Pass the throwable as the last argument so the stack trace is logged too.
        logger.error("Error uploading file: {}", e.toString(), e);
        return null;
    } finally {
        // Release the transfer manager's internal thread pool.
        transferManager.shutdownNow();
    }
}
示例2: getTransferManager
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Gets an Amazon transfer manager, lazily creating it on first use.
 *
 * <p>NOTE(review): initialization is not synchronized — assumes single-threaded
 * access; confirm with callers.
 *
 * @return a transfer manager
 */
public TransferManager getTransferManager() {
    if (transferManager != null) {
        return transferManager;
    }
    transferManager = TransferManagerBuilder.standard().withS3Client(getAmazonS3Client()).build();
    return transferManager;
}
示例3: setup
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Implementation of the Mapper::setup() method. This extracts the S3MapReduceCp options specified in the Job's
 * configuration, to set up the Job.
 *
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
    conf = new S3MapReduceCpConfiguration(context.getConfiguration());
    ignoreFailures = conf.getBoolean(ConfigurationVariable.IGNORE_FAILURES);
    targetFinalPath = new Path(conf.get(S3MapReduceCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
    AwsS3ClientFactory clientFactory = new AwsS3ClientFactory();
    // Build the transfer manager from job configuration; the executor factory
    // lambda is equivalent to an anonymous ExecutorFactory implementation.
    transferManager = TransferManagerBuilder
        .standard()
        .withMinimumUploadPartSize(conf.getLong(ConfigurationVariable.MINIMUM_UPLOAD_PART_SIZE))
        .withMultipartUploadThreshold(conf.getLong(ConfigurationVariable.MULTIPART_UPLOAD_THRESHOLD))
        .withS3Client(clientFactory.newInstance(conf))
        .withShutDownThreadPools(true)
        .withExecutorFactory(
            () -> Executors.newFixedThreadPool(conf.getInt(ConfigurationVariable.NUMBER_OF_UPLOAD_WORKERS)))
        .build();
}
示例4: createTransferManager
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Creates a TransferManager backed by a fixed-size thread pool with named threads.
 *
 * @param multipartUploadThreshold size in bytes above which uploads go multipart.
 * @param multipartUploadPartSize minimum part size for multipart uploads.
 * @param multipartCopyThreshold size in bytes above which copies go multipart.
 * @param multipartCopyPartSize part size for multipart copies.
 * @return a configured TransferManager using the shared s3Client.
 */
private TransferManager createTransferManager(final long multipartUploadThreshold,
        final long multipartUploadPartSize,
        final long multipartCopyThreshold,
        final long multipartCopyPartSize) {
    final ThreadFactory threadFactory = new ThreadFactory() {
        // AtomicInteger: the pool may call newThread from multiple threads, so a
        // plain int increment could race and produce duplicate thread names.
        private final java.util.concurrent.atomic.AtomicInteger threadCount =
                new java.util.concurrent.atomic.AtomicInteger(1);

        @Override
        public Thread newThread(final Runnable r) {
            final Thread thread = new Thread(r);
            thread.setName("s3-transfer-" + threadCount.getAndIncrement());
            return thread;
        }
    };
    return TransferManagerBuilder.standard()
        .withS3Client(s3Client)
        .withExecutorFactory(() -> Executors.newFixedThreadPool(THREAD_COUNT, threadFactory))
        .withMultipartUploadThreshold(multipartUploadThreshold)
        .withMinimumUploadPartSize(multipartUploadPartSize)
        .withMultipartCopyPartSize(multipartCopyPartSize)
        .withMultipartCopyThreshold(multipartCopyThreshold)
        .build();
}
示例5: create
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Creates a blob by streaming the given data to the configured S3 bucket.
 *
 * @param blobData content to upload; closed by this method.
 * @param headers blob headers passed through to the delegate create().
 * @return the created Blob.
 * @throws BlobStoreException if the upload is interrupted.
 */
@Override
@Guarded(by = STARTED)
public Blob create(final InputStream blobData, final Map<String, String> headers) {
    checkNotNull(blobData);
    return create(headers, destination -> {
        try (InputStream data = blobData) {
            MetricsInputStream input = new MetricsInputStream(data);
            TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
            try {
                transferManager.upload(getConfiguredBucket(), destination, input, new ObjectMetadata())
                    .waitForCompletion();
                return input.getMetrics();
            } finally {
                // The original leaked a TransferManager (and its thread pool) per
                // call; shut it down but keep the shared s3 client alive (false).
                transferManager.shutdownNow(false);
            }
        } catch (InterruptedException e) {
            // Preserve the interrupt status for upstream callers.
            Thread.currentThread().interrupt();
            throw new BlobStoreException("error uploading blob", e, null);
        }
    });
}
示例6: newInstance
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Builds a TransferManager around the given target S3 client, applying the
 * multipart copy threshold and part size taken from the copier options.
 *
 * @param targetS3Client client the transfer manager will use.
 * @param s3s3CopierOptions source of the multipart copy settings.
 * @return a configured TransferManager.
 */
public TransferManager newInstance(AmazonS3 targetS3Client, S3S3CopierOptions s3s3CopierOptions) {
    TransferManagerBuilder builder = TransferManagerBuilder.standard()
        .withS3Client(targetS3Client)
        .withMultipartCopyThreshold(s3s3CopierOptions.getMultipartCopyThreshold())
        .withMultipartCopyPartSize(s3s3CopierOptions.getMultipartCopyPartSize());
    return builder.build();
}
示例7: getS3TransferManager
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Builds an S3 TransferManager for the given credentials, region, and executor.
 *
 * @param credentials auth state holding the access key id and encrypted secret.
 * @param region AWS region name; falls back to the SDK default region when null.
 * @param executorService executor used for transfers; the caller retains
 *        ownership (thread pools are not shut down by the transfer manager).
 * @return a configured TransferManager.
 */
public static TransferManager getS3TransferManager(AuthCredentialsServiceState credentials,
        String region, ExecutorService executorService) {
    // Decrypt the stored secret key before handing it to the AWS SDK.
    BasicAWSCredentials basicCredentials = new BasicAWSCredentials(credentials.privateKeyId,
            EncryptionUtils.decrypt(credentials.privateKey));
    AmazonS3ClientBuilder clientBuilder = AmazonS3ClientBuilder.standard()
            .withCredentials(new AWSStaticCredentialsProvider(basicCredentials))
            .withForceGlobalBucketAccessEnabled(true);
    if (region == null) {
        region = Regions.DEFAULT_REGION.getName();
    }
    if (isAwsS3Proxy()) {
        // A proxy is configured: point the client at the proxy endpoint
        // instead of the regional AWS endpoint.
        clientBuilder.setEndpointConfiguration(
                new AwsClientBuilder.EndpointConfiguration(getAwsS3ProxyHost(), region));
    } else {
        clientBuilder.setRegion(region);
    }
    return TransferManagerBuilder.standard()
            .withS3Client(clientBuilder.build())
            .withExecutorFactory(() -> executorService)
            .withShutDownThreadPools(false)
            .build();
}
示例8: deleteAngularjsApp
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Deletes every version of every AngularjsApp object (keys under "app") from
 * the versioned website bucket.
 *
 * @param websiteBucket the versioned S3 bucket to clean.
 * @param logger a CloudwatchLogs logger.
 */
void deleteAngularjsApp(String websiteBucket, LambdaLogger logger) {
    logger.log("Removing AngularjsApp content from website versioned S3 bucket");
    // We need to delete every version of every key
    ListVersionsRequest listVersionsRequest = new ListVersionsRequest()
        .withBucketName(websiteBucket);
    VersionListing versionListing;
    // defaultTransferManager() builds a new manager (with its own client and
    // thread pools) each call; keep a reference so it can be shut down.
    TransferManager transferManager = TransferManagerBuilder.defaultTransferManager();
    AmazonS3 client = transferManager.getAmazonS3Client();
    try {
        do {
            versionListing = client.listVersions(listVersionsRequest);
            versionListing
                .getVersionSummaries()
                .stream()
                .filter(k -> (k.getKey().startsWith("app")))
                .forEach(
                    k -> {
                        logger.log("About to delete version: " + k.getVersionId()
                            + " of AngularjsApp page: " + k.getKey());
                        DeleteVersionRequest deleteVersionRequest = new DeleteVersionRequest(websiteBucket,
                            k.getKey(), k.getVersionId());
                        client.deleteVersion(deleteVersionRequest);
                        logger.log("Successfully deleted version: " + k.getVersionId()
                            + " of AngularjsApp page: " + k.getKey());
                    });
            // Version listings paginate on BOTH markers; the original set only the
            // key marker, which can repeat or skip pages when a key has many versions.
            listVersionsRequest.setKeyMarker(versionListing.getNextKeyMarker());
            listVersionsRequest.setVersionIdMarker(versionListing.getNextVersionIdMarker());
        } while (versionListing.isTruncated());
    } finally {
        transferManager.shutdownNow();
    }
    logger.log("Finished removing AngularjsApp content from website S3 bucket");
}
示例9: setPublicReadPermissionsOnBucket
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Sets public read permissions on content within an S3 bucket.
 *
 * <p>Web content served from an S3 bucket must have public read permissions.
 *
 * @param bucketName the bucket to apply the permissions to.
 * @param prefix prefix within the bucket, beneath which to apply the permissions.
 * @param logger a CloudwatchLogs logger.
 */
public static void setPublicReadPermissionsOnBucket(String bucketName, Optional<String> prefix,
    LambdaLogger logger) {
    // Ensure newly uploaded content has public read permission
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting public read permission on bucket: " + bucketName + " and prefix: "
            + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(
            prefix.get());
    } else {
        logger.log("Setting public read permission on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }
    ObjectListing objectListing;
    // defaultTransferManager() constructs a brand-new manager (own S3 client and
    // thread pools) every call; the original leaked it. Keep it to shut it down.
    TransferManager transferManager = TransferManagerBuilder.defaultTransferManager();
    AmazonS3 client = transferManager.getAmazonS3Client();
    try {
        do {
            objectListing = client.listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                logger.log("Setting permissions for S3 object: " + objectSummary.getKey());
                client.setObjectAcl(bucketName, objectSummary.getKey(), CannedAccessControlList.PublicRead);
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());
    } finally {
        transferManager.shutdownNow();
    }
    logger.log("Finished setting public read permissions");
}
示例10: addGzipContentEncodingMetadata
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Adds gzip content-encoding metadata to S3 objects.
 *
 * <p>Adds gzip content-encoding metadata to S3 objects. All objects
 * beneath the specified prefix (i.e. folder) will have the
 * metadata added. When the bucket serves objects it will then
 * add a suitable Content-Encoding header.
 *
 * @param bucketName the bucket to apply the metadata to.
 * @param prefix prefix within the bucket, beneath which to apply the metadata.
 * @param logger a CloudwatchLogs logger.
 */
public static void addGzipContentEncodingMetadata(String bucketName, Optional<String> prefix,
    LambdaLogger logger) {
    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName
            + " and prefix: " + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(
            prefix.get());
    } else {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }
    ObjectListing objectListing;
    // defaultTransferManager() constructs a new manager (own S3 client and thread
    // pools) every call; the original leaked it. Keep it so it can be shut down.
    TransferManager transferManager = TransferManagerBuilder.defaultTransferManager();
    AmazonS3 client = transferManager.getAmazonS3Client();
    try {
        do {
            objectListing = client.listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                String key = objectSummary.getKey();
                logger.log("Setting metadata for S3 object: " + key);
                // We must specify ALL metadata - not just the one we're adding.
                ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
                objectMetadata.setContentEncoding("gzip");
                CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName,
                    key).withNewObjectMetadata(objectMetadata).withCannedAccessControlList(
                    CannedAccessControlList.PublicRead);
                client.copyObject(copyObjectRequest);
                logger.log("Set metadata for S3 object: " + key);
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());
    } finally {
        transferManager.shutdownNow();
    }
    logger.log("Set gzip content encoding metadata on bucket");
}
示例11: addCacheControlHeader
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Adds cache-control header to S3 objects.
 *
 * <p>Adds cache-control header to S3 objects. All objects
 * beneath the specified prefix (i.e. folder), and with the
 * specified extension will have the header added. When the
 * bucket serves objects it will then add a suitable
 * Cache-Control header.
 *
 * @param headerValue value of the cache-control header
 * @param bucketName the bucket to apply the header to.
 * @param prefix prefix within the bucket, beneath which to apply the header.
 * @param extension file extension to apply header to
 * @param logger a CloudwatchLogs logger.
 */
public static void addCacheControlHeader(String headerValue, String bucketName,
    Optional<String> prefix, String extension, LambdaLogger logger) {
    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
            + " and prefix: " + prefix.get() + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(
            prefix.get());
    } else {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
            + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }
    ObjectListing objectListing;
    // defaultTransferManager() constructs a new manager (own S3 client and thread
    // pools) every call; the original leaked it. Keep it so it can be shut down.
    TransferManager transferManager = TransferManagerBuilder.defaultTransferManager();
    AmazonS3 client = transferManager.getAmazonS3Client();
    try {
        do {
            objectListing = client.listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                String key = objectSummary.getKey();
                if (!key.endsWith(extension)) {
                    continue;
                }
                logger.log("Setting metadata for S3 object: " + key);
                // We must specify ALL metadata - not just the one we're adding.
                ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
                objectMetadata.setCacheControl(headerValue);
                CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName,
                    key).withNewObjectMetadata(objectMetadata).withCannedAccessControlList(
                    CannedAccessControlList.PublicRead);
                client.copyObject(copyObjectRequest);
                logger.log("Set metadata for S3 object: " + key);
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());
    } finally {
        transferManager.shutdownNow();
    }
    logger.log("Set cache-control metadata on bucket");
}
示例12: run
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
// Service entry point: wires the S3 client, the Netty TCP server, and a
// TransferManager that runs its transfers on the Netty event loop.
// NOTE(review): the Netty server is built before the TransferManager because
// NettyExecutorFactory wraps the server's ChannelFuture — confirm ordering
// matters if refactoring.
@Override
public void run(@Nonnull final UploaderConfiguration configuration,
@Nonnull final Environment environment) throws Exception {
// set up S3 client
final AwsConfiguration awsConfig = configuration.getAws();
final AmazonS3 s3 = awsConfig.buildS3(environment);
final Uploader uploader = new Uploader(awsConfig);
// Configure the Netty TCP server
final ChannelFuture future = configuration.getNetty().build(environment,
uploader, awsConfig.getMaxUploadSize());
// Configure the transfer manager to use the Netty event loop
// withShutDownThreadPools(false): the executor belongs to Netty, so the
// transfer manager must not shut it down.
final TransferManager transfer = TransferManagerBuilder.standard()
.withS3Client(s3)
.withMinimumUploadPartSize(
awsConfig.getMinimumUploadPartSize().toBytes())
.withShutDownThreadPools(false)
.withExecutorFactory(new NettyExecutorFactory(future)).build();
uploader.setTransferManager(transfer);
// Health check so /healthcheck reports S3 connectivity.
environment.healthChecks().register("s3", new AmazonS3HealthCheck(s3));
// Resources
environment.jersey().register(new BatchResource(uploader));
environment.jersey().register(new PingResource());
environment.jersey().register(new VersionResource());
}
示例13: main
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Downloads a (potentially large) S3 object to a temp file via TransferManager,
 * printing wall-clock start and end times around the transfer.
 *
 * @param args unused.
 * @throws Exception on any console, S3, or transfer failure.
 */
public static void main(String[] args) throws Exception {
    // create the AWS S3 Client
    AmazonS3 s3 = AWSS3Factory.getS3Client();
    // retrieve the key value from user
    System.out.println( "Enter the object key:" );
    String key = new BufferedReader( new InputStreamReader( System.in ) ).readLine();
    // print start time
    Date start_date = new Date();
    System.out.println(start_date.toString());
    // file will be placed in temp dir with .tmp extension
    File file = File.createTempFile("read-large-object-tm", null);
    TransferManager tm = TransferManagerBuilder.standard()
        .withS3Client(s3)
        .build();
    try {
        // download the object to file
        Download download = tm.download(AWSS3Factory.S3_BUCKET, key, file);
        // block until download finished
        download.waitForCompletion();
    } finally {
        // Always release the transfer manager's threads, even when the
        // download fails or is interrupted (original skipped this on error).
        tm.shutdownNow();
    }
    // print end time
    Date end_date = new Date();
    System.out.println(end_date.toString());
}
示例14: S3Persister
import com.amazonaws.services.s3.transfer.TransferManagerBuilder; //导入依赖的package包/类
/**
 * Creates a persister that writes to the given S3 bucket using the AWS SDK's
 * default TransferManager.
 *
 * @param s3Bucket name of the target bucket.
 */
public S3Persister(String s3Bucket) {
    this.transferManager = Optional.of(TransferManagerBuilder.defaultTransferManager());
    this.s3Bucket = s3Bucket;
}
示例15: prepareTransferManager
/**
 * Lazily initializes the transfer manager with the SDK default when no
 * instance has been supplied yet; a no-op otherwise.
 */
private void prepareTransferManager() {
    if (transferManager.isPresent()) {
        return;
    }
    this.transferManager = Optional.of(TransferManagerBuilder.defaultTransferManager());
}