本文整理汇总了Java中com.amazonaws.services.s3.model.StorageClass类的典型用法代码示例。如果您正苦于以下问题:Java StorageClass类的具体用法?Java StorageClass怎么用?Java StorageClass使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
StorageClass类属于com.amazonaws.services.s3.model包,在下文中一共展示了StorageClass类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: initStorageClass
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
/**
 * Parses a user-supplied storage class name into an S3 {@link StorageClass}.
 *
 * @param storageClass the configured storage class name (case-insensitive), may be
 *        {@code null} or empty, in which case {@link StorageClass#Standard} is returned
 * @return the parsed storage class
 * @throws BlobStoreException if the value is not a valid S3 storage class, or if it is
 *         {@code GLACIER} (Glacier objects cannot be read back synchronously, so Glacier
 *         cannot back a blob store)
 */
public static StorageClass initStorageClass(String storageClass) {
    // Default to STANDARD when no storage class was configured.
    if (storageClass == null || storageClass.isEmpty()) {
        return StorageClass.Standard;
    }
    try {
        // S3 storage class values are upper case (e.g. "STANDARD_IA"); use a fixed
        // locale so parsing is not affected by the JVM's default locale.
        final StorageClass parsedStorageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH));
        if (parsedStorageClass.equals(StorageClass.Glacier)) {
            throw new BlobStoreException("Glacier storage class is not supported");
        }
        return parsedStorageClass;
    } catch (IllegalArgumentException illegalArgumentException) {
        // Chain the original exception so the root cause is not lost.
        throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class.", illegalArgumentException);
    }
}
示例2: store
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
@Override
public void store(BuildCacheKey key, BuildCacheEntryWriter writer) {
logger.info("Start storing cache entry '{}' in S3 bucket", key.getHashCode());
ObjectMetadata meta = new ObjectMetadata();
meta.setContentType(BUILD_CACHE_CONTENT_TYPE);
try (ByteArrayOutputStream os = new ByteArrayOutputStream()) {
writer.writeTo(os);
meta.setContentLength(os.size());
try (InputStream is = new ByteArrayInputStream(os.toByteArray())) {
PutObjectRequest request = getPutObjectRequest(key, meta, is);
if(this.reducedRedundancy) {
request.withStorageClass(StorageClass.ReducedRedundancy);
}
s3.putObject(request);
}
} catch (IOException e) {
throw new BuildCacheException("Error while storing cache object in S3 bucket", e);
}
}
示例3: init
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Seeds every copier option with a non-default value so the parser tests can verify
// that each option is read correctly (the matching checks live in assertDefaults).
@Before
public void init() {
copierOptions.put(CREDENTIAL_PROVIDER, URI.create("localjceks://file/foo/bar.jceks"));
copierOptions.put(MULTIPART_UPLOAD_CHUNK_SIZE, 4096);
copierOptions.put(S3_SERVER_SIDE_ENCRYPTION, true);
// Note: Glacier is used here purely as a distinctive test value.
copierOptions.put(STORAGE_CLASS, StorageClass.Glacier.toString());
copierOptions.put(TASK_BANDWIDTH, 1024);
copierOptions.put(NUMBER_OF_WORKERS_PER_MAP, 12);
copierOptions.put(MULTIPART_UPLOAD_THRESHOLD, 2048L);
copierOptions.put(MAX_MAPS, 5);
copierOptions.put(COPY_STRATEGY, "mycopystrategy");
copierOptions.put(LOG_PATH, new Path("hdfs:///tmp/logs"));
copierOptions.put(REGION, Regions.EU_WEST_1.getName());
copierOptions.put(IGNORE_FAILURES, false);
copierOptions.put(S3_ENDPOINT_URI, "http://s3.endpoint/");
copierOptions.put(UPLOAD_RETRY_COUNT, 5);
copierOptions.put(UPLOAD_RETRY_DELAY_MS, 520);
copierOptions.put(UPLOAD_BUFFER_SIZE, 1024);
parser = new S3MapReduceCpOptionsParser(SOURCES, TARGET, DEFAULT_CREDS_PROVIDER);
}
示例4: assertDefaults
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Asserts that the parsed options carry exactly the values seeded in init(), one
// assertion per option; any mismatch points at the option whose parsing regressed.
private void assertDefaults(S3MapReduceCpOptions options) {
assertThat(options.getCredentialsProvider(), is(URI.create("localjceks://file/foo/bar.jceks")));
assertThat(options.getMultipartUploadPartSize(), is(4096L));
assertThat(options.isS3ServerSideEncryption(), is(true));
assertThat(options.getStorageClass(), is(StorageClass.Glacier.toString()));
assertThat(options.getMaxBandwidth(), is(1024L));
assertThat(options.getNumberOfUploadWorkers(), is(12));
assertThat(options.getMultipartUploadThreshold(), is(2048L));
assertThat(options.getMaxMaps(), is(5));
assertThat(options.getCopyStrategy(), is("mycopystrategy"));
assertThat(options.getLogPath(), is(new Path("hdfs:///tmp/logs")));
assertThat(options.getRegion(), is(Regions.EU_WEST_1.getName()));
assertThat(options.isIgnoreFailures(), is(false));
assertThat(options.getS3EndpointUri(), is(URI.create("http://s3.endpoint/")));
assertThat(options.getUploadRetryCount(), is(5));
assertThat(options.getUploadRetryDelayMs(), is(520L));
assertThat(options.getUploadBufferSize(), is(1024));
}
示例5: defaultValues
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Verifies the built-in defaults of a freshly constructed S3MapReduceCpOptions,
// before any parsing: unset options are null/false, numeric options carry their
// documented defaults (e.g. 5 MB part size, 16 MB multipart threshold).
@Test
public void defaultValues() {
S3MapReduceCpOptions options = new S3MapReduceCpOptions();
assertThat(options.isHelp(), is(false));
assertThat(options.isBlocking(), is(true));
assertThat(options.getSources(), is(nullValue()));
assertThat(options.getTarget(), is(nullValue()));
assertThat(options.getCredentialsProvider(), is(nullValue()));
assertThat(options.getMultipartUploadPartSize(), is(5L * 1024 * 1024));
assertThat(options.isS3ServerSideEncryption(), is(false));
// Default storage class is STANDARD.
assertThat(options.getStorageClass(), is(StorageClass.Standard.toString()));
assertThat(options.getMaxBandwidth(), is(100L));
assertThat(options.getNumberOfUploadWorkers(), is(20));
assertThat(options.getMultipartUploadThreshold(), is(16L * 1024 * 1024));
assertThat(options.getMaxMaps(), is(20));
assertThat(options.getCopyStrategy(), is("uniformsize"));
assertThat(options.getLogPath(), is(nullValue()));
assertThat(options.getRegion(), is(nullValue()));
assertThat(options.isIgnoreFailures(), is(false));
assertThat(options.getS3EndpointUri(), is(nullValue()));
assertThat(options.getUploadRetryCount(), is(3));
assertThat(options.getUploadRetryDelayMs(), is(300L));
assertThat(options.getUploadBufferSize(), is(0));
}
示例6: testRestoreObjects
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Verifies that restoreObjects initiates a Glacier restore: after the call, the
// object's metadata reports an ongoing restore request.
@Test
public void testRestoreObjects()
{
// Put a 1 byte Glacier storage class file in S3.
ObjectMetadata metadata = new ObjectMetadata();
metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
// No restore in progress yet.
metadata.setOngoingRestore(false);
s3Operations
.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
null);
// Initiate a restore request for the test S3 file.
S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS);
// Validate that there is an ongoing restore request for this object.
ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
assertTrue(objectMetadata.getOngoingRestore());
}
示例7: testRestoreObjectsGlacierObjectAlreadyBeingRestored
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Verifies that restoreObjects is a no-op (does not fail) when a restore request
// is already in progress for the Glacier object.
@Test
public void testRestoreObjectsGlacierObjectAlreadyBeingRestored()
{
// Put a 1 byte Glacier storage class file in S3 flagged as already being restored.
ObjectMetadata metadata = new ObjectMetadata();
metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
metadata.setOngoingRestore(true);
s3Operations
.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
null);
// Initiate a restore request for the test S3 file.
S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS);
// Validate that there is still an ongoing restore request for this object.
ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
assertTrue(objectMetadata.getOngoingRestore());
}
示例8: testValidateGlacierS3FilesRestored
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Happy path: validation passes (no exception) for a Glacier object whose restore
// has completed (ongoingRestore == false).
@Test
public void testValidateGlacierS3FilesRestored()
{
// Put a 1 byte already restored Glacier storage class file in S3.
ObjectMetadata metadata = new ObjectMetadata();
metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
metadata.setOngoingRestore(false);
s3Operations
.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
null);
// Validate the file.
S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
s3Dao.validateGlacierS3FilesRestored(params);
}
示例9: testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Verifies that validation fails with a descriptive IllegalArgumentException when a
// Glacier object has never had a restore initiated (ongoingRestore flag is null).
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated()
{
// Put a 1 byte Glacier storage class file in S3 that has no restore initiated (OngoingRestore flag is null).
ObjectMetadata metadata = new ObjectMetadata();
metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
s3Operations
.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
null);
// Try to validate if the Glacier S3 file is already restored.
try
{
S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
s3Dao.validateGlacierS3FilesRestored(params);
fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
}
catch (IllegalArgumentException e)
{
// The message must identify the key, its storage class/restore state, and the bucket.
assertEquals(String
.format("Archived Glacier S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}",
TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
}
}
示例10: initiateMultipartUpload
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
/**
 * Initiates an S3 multipart upload for the configured bucket/object and returns the
 * upload ID needed for the subsequent part uploads.
 *
 * <p>If initiation fails for any reason, {@link #abortUpload()} is invoked from the
 * {@code finally} block so a partially-initiated upload does not leak on S3.
 *
 * @return the upload ID assigned by S3
 * @throws IOException if the S3 service rejects the request
 */
private String initiateMultipartUpload() throws IOException {
    boolean operationSuccessful = false;
    final InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(this.bucket, this.object);
    // Select Reduced Redundancy Storage when configured, otherwise the standard class.
    if (this.useRRS) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    } else {
        request.setStorageClass(StorageClass.Standard);
    }
    try {
        final InitiateMultipartUploadResult result = this.s3Client.initiateMultipartUpload(request);
        operationSuccessful = true;
        return result.getUploadId();
    } catch (AmazonServiceException e) {
        // Keep the stringified message, but also chain the original exception so the
        // root cause and its stack trace are preserved for callers.
        throw new IOException(StringUtils.stringifyException(e), e);
    } finally {
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}
示例11: S3Resolver
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
public S3Resolver(
String name,
AWSCredentialsProvider credentialsProvider,
boolean overwrite,
Region region,
Optional<CannedAccessControlList> acl,
boolean serverSideEncryption,
StorageClass storageClass
) {
setName(name);
setRepository(new S3Repository(
credentialsProvider,
overwrite,
region,
acl,
serverSideEncryption,
storageClass
));
}
示例12: S3Repository
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
/**
 * Convenience constructor that builds the S3 client from the given credentials and
 * region, then delegates to the main constructor.
 *
 * @param provider             supplies the AWS credentials for the client
 * @param overwrite            whether existing artifacts may be overwritten
 * @param region               the AWS region used to configure the client
 * @param acl                  canned ACL for uploaded objects; may be null (wrapped
 *                             in Optional.ofNullable before delegation)
 * @param serverSideEncryption whether to request server-side encryption
 * @param storageClass         the S3 storage class for uploaded objects
 */
public S3Repository(
AWSCredentialsProvider provider,
boolean overwrite,
Region region,
CannedAccessControlList acl,
boolean serverSideEncryption,
StorageClass storageClass
) {
this(
AmazonS3Client.builder().standard()
.withCredentials(provider)
.withRegion(region.toString())
.build(),
overwrite,
Optional.ofNullable(acl),
serverSideEncryption,
storageClass
);
}
示例13: testInitStorageClass
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Verifies initStorageClass: null/empty input falls back to STANDARD, and the
// supported storage class names parse case-insensitively to their enum values.
public void testInitStorageClass() throws IOException {
// it should default to `standard`
assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.Standard));
assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.Standard));
// it should accept [standard, standard_ia, reduced_redundancy]
assertThat(S3BlobStore.initStorageClass("standard"), equalTo(StorageClass.Standard));
assertThat(S3BlobStore.initStorageClass("standard_ia"), equalTo(StorageClass.StandardInfrequentAccess));
assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.ReducedRedundancy));
}
示例14: storePutsObjectAndUsesReducedRedundancyWhenConfigured
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Verifies that store() applies the ReducedRedundancy storage class to the PUT
// request when the service is configured with reducedRedundancy = true.
@Test
public void storePutsObjectAndUsesReducedRedundancyWhenConfigured() throws IOException {
/** Setup **/
// Spy so getPutObjectRequest can be stubbed to return the mock request.
buildCacheService = spy(new AwsS3BuildCacheService(s3, "bucketName", true));
doReturn(putObjectRequest).when(buildCacheService).getPutObjectRequest(any(BuildCacheKey.class),
any(ObjectMetadata.class), any(InputStream.class));
/** Run **/
buildCacheService.store(key, writer);
/** Check **/
verifyThatStoreStores();
verify(putObjectRequest).withStorageClass(eq(StorageClass.ReducedRedundancy));
}
示例15: storePutsObjectAndDoesNotUseReducedRedundancyWhenConfigured
import com.amazonaws.services.s3.model.StorageClass; //导入依赖的package包/类
// Verifies that store() leaves the storage class untouched when the service is
// configured with reducedRedundancy = false.
@Test
public void storePutsObjectAndDoesNotUseReducedRedundancyWhenConfigured() throws IOException {
/** Setup **/
// Spy so getPutObjectRequest can be stubbed to return the mock request.
buildCacheService = spy(new AwsS3BuildCacheService(s3, "bucketName", false));
doReturn(putObjectRequest).when(buildCacheService).getPutObjectRequest(any(BuildCacheKey.class),
any(ObjectMetadata.class), any(InputStream.class));
/** Run **/
buildCacheService.store(key, writer);
/** Check **/
verifyThatStoreStores();
verify(putObjectRequest, never()).withStorageClass(eq(StorageClass.ReducedRedundancy));
}