本文整理汇总了Java中com.amazonaws.services.lambda.runtime.events.S3Event.getRecords方法的典型用法代码示例。如果您正苦于以下问题:Java S3Event.getRecords方法的具体用法?Java S3Event.getRecords怎么用?Java S3Event.getRecords使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.amazonaws.services.lambda.runtime.events.S3Event
的用法示例。
在下文中一共展示了S3Event.getRecords方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: handleRequest
import com.amazonaws.services.lambda.runtime.events.S3Event; //导入方法依赖的package包/类
@Override
public Void handleRequest(S3Event s3Event, Context context) {
// Partitions that the incoming S3 objects say should exist on the table.
Collection<Partition> requiredPartitions = new HashSet<>();
TableService tableService = new TableService();
for (S3EventNotification.S3EventNotificationRecord notification : s3Event.getRecords()) {
String bucketName = notification.getS3().getBucket().getName();
String objectKey = notification.getS3().getObject().getKey();
System.out.printf("S3 event [Event: %s, Bucket: %s, Key: %s]%n", notification.getEventName(), bucketName, objectKey);
S3Object incomingObject = new S3Object(bucketName, objectKey);
if (!incomingObject.hasDateTimeKey()) {
continue; // Key carries no date/time component, so it maps to no partition.
}
requiredPartitions.add(partitionConfig.createPartitionFor(incomingObject));
}
if (requiredPartitions.isEmpty()) {
return null;
}
// Register only the partitions the table does not already have.
Collection<Partition> missingPartitions = determineMissingPartitions(
partitionConfig.tableName(),
requiredPartitions,
tableService);
tableService.addPartitions(partitionConfig.tableName(), missingPartitions);
return null;
}
示例2: handleRequest
import com.amazonaws.services.lambda.runtime.events.S3Event; //导入方法依赖的package包/类
@Override
public Void handleRequest(S3Event s3Event, Context context) {
// Partitions whose backing objects were just removed from S3.
Collection<Partition> partitionsToRemove = new HashSet<>();
TableService tableService = new TableService();
for (S3EventNotification.S3EventNotificationRecord notification : s3Event.getRecords()) {
String bucketName = notification.getS3().getBucket().getName();
String objectKey = notification.getS3().getObject().getKey();
System.out.printf("S3 event [Event: %s, Bucket: %s, Key: %s]%n", notification.getEventName(), bucketName, objectKey);
S3Object removedObject = new S3Object(bucketName, objectKey);
if (!removedObject.hasDateTimeKey()) {
continue; // No date/time component in the key, so no partition to drop.
}
partitionsToRemove.add(partitionConfig.createPartitionFor(removedObject));
}
if (partitionsToRemove.isEmpty()) {
return null;
}
// The table service drops partitions by spec, so project each Partition first.
List<PartitionSpec> specs = partitionsToRemove.stream()
.map(Partition::spec)
.collect(Collectors.toList());
tableService.removePartitions(partitionConfig.tableName(), specs);
return null;
}
示例3: handleRequest
import com.amazonaws.services.lambda.runtime.events.S3Event; //导入方法依赖的package包/类
@Override
public Void handleRequest(S3Event s3Event, Context context) {
// Partitions that must exist for the objects in this event batch.
Collection<Partition> requiredPartitions = new HashSet<>();
TableService tableService = new TableService();
DynamoDB dynamoDBClient = new DynamoDB(new AmazonDynamoDBClient(new EnvironmentVariableCredentialsProvider()));
for (S3EventNotification.S3EventNotificationRecord record : s3Event.getRecords()) {
String bucket = record.getS3().getBucket().getName();
String key = record.getS3().getObject().getKey();
// Log format normalized to match the sibling handlers' "S3 event [...]" output.
System.out.printf("S3 event [Event: %s, Bucket: %s, Key: %s]%n", record.getEventName(), bucket, key);
S3Object s3Object = new S3Object(bucket, key);
if (s3Object.hasDateTimeKey()) {
Partition partition = partitionConfig.createPartitionFor(s3Object);
// Check if the partition exists in the DynamoDB table; if not, record it
// there and queue it for creation. Otherwise skip it (another invocation
// has already handled it).
if (tryAddMissingPartition(partitionConfig.dynamoDBTableName(), dynamoDBClient, partition)) {
requiredPartitions.add(partition);
}
}
}
if (!requiredPartitions.isEmpty()) {
tableService.addPartitions(partitionConfig.tableName(), requiredPartitions, true);
}
return null;
}
开发者ID:awslabs,项目名称:serverless-cf-analysis,代码行数:33,代码来源:CreateAthenaPartitionsBasedOnS3EventWithDDB.java
示例4: auditValidatedFile
import com.amazonaws.services.lambda.runtime.events.S3Event; //导入方法依赖的package包/类
/**
 * Records an audit row for every S3 object in the event, marking each file as VALIDATED.
 * The SQL statement and connection properties come from the {@code props} field
 * ({@code url}, {@code sql.auditValidatedFile}).
 *
 * @param event the S3 event whose records identify the validated files
 * @param ctx   the Lambda context (unused)
 * @throws Exception if the JDBC connection or batch insert fails
 */
public void auditValidatedFile(S3Event event, Context ctx) throws Exception {
// try-with-resources guarantees the connection and statement are closed even when
// the batch insert throws (the original leaked both on any failure path).
try (Connection conn = new com.mysql.jdbc.Driver().connect(props.getProperty("url"), props);
PreparedStatement ps = conn.prepareStatement(props.getProperty("sql.auditValidatedFile"))) {
List<S3EventNotificationRecord> notificationRecords = event.getRecords();
for (S3EventNotificationRecord record : notificationRecords) {
// Audit key is "bucket/objectKey".
String fileURL = record.getS3().getBucket().getName() + "/" + record.getS3().getObject().getKey();
ps.setString(1, fileURL);
ps.setString(2, "VALIDATED");
ps.setString(3, "VALIDATED");
ps.addBatch();
}
// One round trip for the whole batch.
ps.executeBatch();
}
}
示例5: handleNewS3Event
import com.amazonaws.services.lambda.runtime.events.S3Event; //导入方法依赖的package包/类
/**
 * The handler that will get triggered by the CloudFront adding a new log chunk into the CloudFront Log S3 Bucket.
 * Streams the log from S3 and processes each line, which represents a request to Cerberus.
 * http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html#LogFileFormat
 *
 * @param event, the S3 event describing the newly written log object(s)
 * @param context, the context of the lambda fn
 * @throws IOException if reading or parsing the log stream fails
 */
public void handleNewS3Event(S3Event event, Context context) throws IOException {
CloudFrontLogHandlerConfig config =
getConfiguration(context.getInvokedFunctionArn());
log.info(String.format("Found CloudFormation stack and derived params: %s",
objectMapper.writeValueAsString(config)));
for (S3EventNotification.S3EventNotificationRecord s3EventNotificationRecord : event.getRecords()){
String bucketName = s3EventNotificationRecord.getS3().getBucket().getName();
String key = s3EventNotificationRecord.getS3().getObject().getKey();
// Only process the log files from CF they end in .gz
// BUG FIX: the original used `return` here, which aborted the ENTIRE batch as soon
// as one non-.gz key appeared; `continue` skips only that record.
if (! key.endsWith(".gz")) {
continue;
}
log.info(String.format("Triggered from %s/%s", bucketName, key));
// try-with-resources closes the S3 object (and its underlying HTTP connection),
// which the original leaked.
try (S3Object logObject = amazonS3Client.getObject(new GetObjectRequest(bucketName, key))) {
List<CloudFrontLogEvent> logEvents = ingestLogStream(logObject.getObjectContent());
logEventProcessors.forEach(processor -> {
try {
processor.processLogEvents(logEvents, config, bucketName);
} catch (Throwable t) {
// One failing processor must not stop the others; log and optionally alert.
log.error(String.format("Failed to run log processor %s", processor.getClass()), t);
// Send a message to slack if its configured to do so
if (StringUtils.isNotBlank(config.getSlackWebHookUrl())) {
String text = String.format("Failed to run log processor %s, env: %s reason: %s",
processor.getClass(), config.getEnv(), t.getMessage());
Message.Builder builder = new Message.Builder(text).userName("Cloud-Front-Event-Handler");
if (StringUtils.startsWith(config.getSlackIcon(), "http")) {
builder.iconUrl(config.getSlackIcon());
} else {
builder.iconEmoji(config.getSlackIcon());
}
new SlackClient(config.getSlackWebHookUrl()).sendMessage(builder.build());
}
}
});
}
}
}
示例6: handleRequest
import com.amazonaws.services.lambda.runtime.events.S3Event; //导入方法依赖的package包/类
@Override
public String handleRequest(S3Event s3Event, Context context) {
byte[] buffer = new byte[1024];
// Compile once, not per record (the original recompiled inside the loop).
Pattern extensionPattern = Pattern.compile(".*\\.([^\\.]*)");
try {
for (S3EventNotificationRecord record : s3Event.getRecords()) {
String srcBucket = record.getS3().getBucket().getName();
// Object key may have spaces or unicode non-ASCII characters.
String srcKey = record.getS3().getObject().getKey()
.replace('+', ' ');
srcKey = URLDecoder.decode(srcKey, "UTF-8");
// Detect file type
Matcher matcher = extensionPattern.matcher(srcKey);
if (!matcher.matches()) {
System.out.println("Unable to detect file type for key " + srcKey);
return "";
}
String extension = matcher.group(1).toLowerCase();
if (!"zip".equals(extension)) {
System.out.println("Skipping non-zip file " + srcKey + " with extension " + extension);
return "";
}
System.out.println("Extracting zip file " + srcBucket + "/" + srcKey);
// Download the zip from S3 into a stream
AmazonS3 s3Client = new AmazonS3Client();
S3Object s3Object = s3Client.getObject(new GetObjectRequest(srcBucket, srcKey));
// try-with-resources: the original leaked the zip stream (and the underlying
// HTTP connection) whenever extraction of any entry threw.
try (ZipInputStream zis = new ZipInputStream(s3Object.getObjectContent())) {
ZipEntry entry;
while ((entry = zis.getNextEntry()) != null) {
String fileName = entry.getName();
// Guard against "zip slip": refuse entry names that climb out of the
// archive's directory via ".." segments.
if (fileName.contains("..")) {
System.out.println("Skipping suspicious zip entry " + fileName);
continue;
}
String mimeType = FileMimeType.fromExtension(FilenameUtils.getExtension(fileName)).mimeType();
System.out.println("Extracting " + fileName + ", compressed: " + entry.getCompressedSize() + " bytes, extracted: " + entry.getSize() + " bytes, mimetype: " + mimeType);
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
int len;
while ((len = zis.read(buffer)) > 0) {
outputStream.write(buffer, 0, len);
}
InputStream is = new ByteArrayInputStream(outputStream.toByteArray());
ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(outputStream.size());
meta.setContentType(mimeType);
// Extracted file lands next to the source zip, keyed by its entry name.
s3Client.putObject(srcBucket, FilenameUtils.getFullPath(srcKey) + fileName, is, meta);
is.close();
outputStream.close();
}
}
// delete zip file when done
System.out.println("Deleting zip file " + srcBucket + "/" + srcKey + "...");
s3Client.deleteObject(new DeleteObjectRequest(srcBucket, srcKey));
System.out.println("Done deleting");
}
return "Ok";
} catch (IOException e) {
throw new RuntimeException(e);
}
}