本文整理汇总了Java中com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult.getUnprocessedItems方法的典型用法代码示例。如果您正苦于以下问题:Java BatchWriteItemResult.getUnprocessedItems方法的具体用法?Java BatchWriteItemResult.getUnprocessedItems怎么用?Java BatchWriteItemResult.getUnprocessedItems使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult
的用法示例。
在下文中一共展示了BatchWriteItemResult.getUnprocessedItems方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: batchWrite
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult; //导入方法依赖的package包/类
/**
 * Writes multiple items in batch, recursively resubmitting any items that
 * DynamoDB reports as unprocessed after a short pause.
 * <p>
 * NOTE(review): there is no cap on the recursion depth — a persistently
 * throttled table could keep this retrying for a long time.
 * @param items a map of tables->write requests; {@code null} or empty is a no-op
 */
protected static void batchWrite(Map<String, List<WriteRequest>> items) {
	if (items == null || items.isEmpty()) {
		return;
	}
	try {
		BatchWriteItemResult result = getClient().batchWriteItem(new BatchWriteItemRequest().
				withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL).withRequestItems(items));
		if (result == null) {
			return;
		}
		logger.debug("batchWrite(): total {}, cc {}", items.size(), result.getConsumedCapacity());
		if (result.getUnprocessedItems() != null && !result.getUnprocessedItems().isEmpty()) {
			logger.warn("{} UNPROCESSED write requests!", result.getUnprocessedItems().size());
			// Brief pause before retrying so a throttled table can recover.
			Thread.sleep(1000);
			batchWrite(result.getUnprocessedItems());
		}
	} catch (InterruptedException ie) {
		// Restore the interrupt status instead of letting the broad catch swallow it.
		Thread.currentThread().interrupt();
		logger.error("batchWrite() interrupted while backing off", ie);
	} catch (Exception e) {
		logger.error("batchWrite() failed", e);
	}
}
示例2: runWithBackoff
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult; //导入方法依赖的package包/类
/**
 * Writes to DynamoDBTable using an exponential backoff. If the
 * batchWriteItem returns unprocessed items then it will exponentially
 * backoff and retry the unprocessed items.
 *
 * @param req the batch write request; its request items are replaced with the
 *            unprocessed items between retries
 * @return the consumed capacity reported by every batchWriteItem call made
 */
public List<ConsumedCapacity> runWithBackoff(BatchWriteItemRequest req) {
    BatchWriteItemResult writeItemResult = null;
    List<ConsumedCapacity> consumedCapacities = new LinkedList<ConsumedCapacity>();
    Map<String, List<WriteRequest>> unprocessedItems = null;
    boolean interrupted = false;
    try {
        do {
            writeItemResult = client.batchWriteItem(req);
            unprocessedItems = writeItemResult.getUnprocessedItems();
            consumedCapacities
                    .addAll(writeItemResult.getConsumedCapacity());
            // Only back off when something is actually left to retry; the
            // service can return a non-null but empty map, and the original
            // code slept and doubled the backoff even then.
            if (unprocessedItems != null && !unprocessedItems.isEmpty()) {
                req.setRequestItems(unprocessedItems);
                try {
                    Thread.sleep(exponentialBackoffTime);
                } catch (InterruptedException ie) {
                    // Remember the interrupt; status is restored on exit below.
                    interrupted = true;
                } finally {
                    exponentialBackoffTime *= 2;
                    if (exponentialBackoffTime > BootstrapConstants.MAX_EXPONENTIAL_BACKOFF_TIME) {
                        exponentialBackoffTime = BootstrapConstants.MAX_EXPONENTIAL_BACKOFF_TIME;
                    }
                }
            }
        } while (unprocessedItems != null && unprocessedItems.get(tableName) != null);
        return consumedCapacities;
    } finally {
        // Re-assert the interrupt status swallowed during the backoff sleep.
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}
示例3: writeMultipleItemsBatchWrite
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult; //导入方法依赖的package包/类
/**
 * Puts a new Forum item and a new Thread item, and deletes one stale Thread
 * item, in a single batch — looping until DynamoDB reports no unprocessed
 * requests (which can happen when provisioned throughput is exceeded).
 */
private static void writeMultipleItemsBatchWrite() {
    try {
        // Per-table write requests for the batch.
        Map<String, List<WriteRequest>> batchRequests = new HashMap<String, List<WriteRequest>>();

        // Forum table: put one new forum item.
        Map<String, AttributeValue> newForum = new HashMap<String, AttributeValue>();
        newForum.put("Name", new AttributeValue().withS("Amazon RDS"));
        newForum.put("Threads", new AttributeValue().withN("0"));
        List<WriteRequest> forumWrites = new ArrayList<WriteRequest>();
        forumWrites.add(new WriteRequest().withPutRequest(new PutRequest().withItem(newForum)));
        batchRequests.put(table1Name, forumWrites);

        // Thread table: put one new thread item...
        Map<String, AttributeValue> newThread = new HashMap<String, AttributeValue>();
        newThread.put("ForumName", new AttributeValue().withS("Amazon RDS"));
        newThread.put("Subject", new AttributeValue().withS("Amazon RDS Thread 1"));
        newThread.put("Message", new AttributeValue().withS("ElasticCache Thread 1 message"));
        newThread.put("KeywordTags", new AttributeValue().withSS(Arrays.asList("cache", "in-memory")));
        List<WriteRequest> threadWrites = new ArrayList<WriteRequest>();
        threadWrites.add(new WriteRequest().withPutRequest(new PutRequest().withItem(newThread)));

        // ...and delete an existing thread by its key.
        Map<String, AttributeValue> staleThreadKey = new HashMap<String, AttributeValue>();
        staleThreadKey.put("ForumName", new AttributeValue().withS("Amazon S3"));
        staleThreadKey.put("Subject", new AttributeValue().withS("S3 Thread 100"));
        threadWrites.add(new WriteRequest().withDeleteRequest(new DeleteRequest().withKey(staleThreadKey)));
        batchRequests.put(table2Name, threadWrites);

        BatchWriteItemRequest request = new BatchWriteItemRequest()
                .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);
        BatchWriteItemResult response;
        do {
            System.out.println("Making the request.");
            request.withRequestItems(batchRequests);
            response = client.batchWriteItem(request);

            // Report the capacity each table consumed in this round.
            for (ConsumedCapacity cc : response.getConsumedCapacity()) {
                String tableName = cc.getTableName();
                Double consumedCapacityUnits = cc.getCapacityUnits();
                System.out.println("Consumed capacity units for table " + tableName + ": " + consumedCapacityUnits);
            }

            // Anything the service could not process becomes the next request.
            System.out.println("Unprocessed Put and Delete requests: \n" + response.getUnprocessedItems());
            batchRequests = response.getUnprocessedItems();
        } while (response.getUnprocessedItems().size() > 0);
    } catch (AmazonServiceException ase) {
        System.err.println("Failed to retrieve items: ");
        ase.printStackTrace(System.err);
    }
}