This article collects and organizes typical usage examples of the C# method Microsoft.WindowsAzure.Storage.Table.CloudTable.ExecuteBatch. If you are wondering how CloudTable.ExecuteBatch works, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also read further about the containing class, Microsoft.WindowsAzure.Storage.Table.CloudTable.
Eleven code examples of CloudTable.ExecuteBatch are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C# samples.
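Before the collected examples, here is a minimal sketch of the basic pattern (not taken from any of the projects below; the method name, partition key, and row keys are placeholder assumptions): build a TableBatchOperation, keeping in mind that a single batch may contain at most 100 operations and must target a single partition key, then pass it to CloudTable.ExecuteBatch.
private static void ExecuteBatchSketch(CloudTable table)
{
// A batch may hold at most 100 operations and every entity in it must share one partition key.
TableBatchOperation batch = new TableBatchOperation();
for (int i = 0; i < 100; i++)
{
// DynamicTableEntity(partitionKey, rowKey); no extra properties are required for an insert
batch.Insert(new DynamicTableEntity("samplePartition", i.ToString("0000")));
}
table.ExecuteBatch(batch);
}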
Example 1: PopulateTableData
private static void PopulateTableData(CloudTable cloudTable)
{
// if the table does not exist, create it and populate it with some data
if (!cloudTable.Exists())
{
cloudTable.CreateIfNotExists();
var tableBatchOperation = new TableBatchOperation();
for (int i = 0; i < 100; i++)
{
tableBatchOperation.Add(
TableOperation.Insert(new Person(i.ToString(), string.Format("Person {0}", i))));
}
cloudTable.ExecuteBatch(tableBatchOperation);
}
}
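Example 1 refers to a Person entity type that is not shown on this page. A minimal sketch of what such a TableEntity subclass might look like is given below; the fixed "people" partition key and the Name property are assumptions, chosen so that all 100 inserts can legally share one batch (a batch must target a single partition key).
public class Person : TableEntity
{
public Person() { } // parameterless constructor required by the table service
public Person(string id, string name)
{
PartitionKey = "people"; // assumed: a single partition so the whole batch is valid
RowKey = id;
Name = name;
}
public string Name { get; set; }
}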
Example 2: MyClassInitialize
public static void MyClassInitialize(TestContext testContext)
{
tableClient = GenerateCloudTableClient();
currentTable = tableClient.GetTableReference(GenerateRandomTableName());
currentTable.CreateIfNotExists();
for (int i = 0; i < 15; i++)
{
TableBatchOperation batch = new TableBatchOperation();
for (int j = 0; j < 100; j++)
{
BaseEntity ent = GenerateRandomEntity("tables_batch_" + i.ToString());
ent.RowKey = string.Format("{0:0000}", j);
batch.Insert(ent);
}
currentTable.ExecuteBatch(batch);
}
}
Example 3: MyClassInitialize
public static void MyClassInitialize(TestContext testContext)
{
CloudTableClient tableClient = GenerateCloudTableClient();
currentTable = tableClient.GetTableReference(GenerateRandomTableName());
currentTable.CreateIfNotExists();
// Bulk Query Entities
for (int i = 0; i < 15; i++)
{
TableBatchOperation batch = new TableBatchOperation();
for (int j = 0; j < 100; j++)
{
var ent = GenerateRandomEnitity("tables_batch_" + i.ToString());
ent.RowKey = string.Format("{0:0000}", j);
batch.Insert(ent);
}
currentTable.ExecuteBatch(batch);
}
complexEntityTable = tableClient.GetTableReference(GenerateRandomTableName());
complexEntityTable.Create();
// Setup
TableBatchOperation complexBatch = new TableBatchOperation();
string pk = Guid.NewGuid().ToString();
for (int m = 0; m < 100; m++)
{
ComplexEntity complexEntity = new ComplexEntity(pk, string.Format("{0:0000}", m));
complexEntity.String = string.Format("{0:0000}", m);
complexEntity.Binary = new byte[] { 0x01, 0x02, (byte)m };
complexEntity.BinaryPrimitive = new byte[] { 0x01, 0x02, (byte)m };
complexEntity.Bool = m % 2 == 0;
complexEntity.BoolPrimitive = m % 2 == 0;
complexEntity.Double = m + ((double)m / 100);
complexEntity.DoublePrimitive = m + ((double)m / 100);
complexEntity.Int32 = m;
complexEntity.Int32N = m;
complexEntity.IntegerPrimitive = m;
complexEntity.IntegerPrimitiveN = m;
complexEntity.Int64 = (long)int.MaxValue + m;
complexEntity.LongPrimitive = (long)int.MaxValue + m;
complexEntity.LongPrimitiveN = (long)int.MaxValue + m;
complexEntity.Guid = Guid.NewGuid();
complexBatch.Insert(complexEntity);
if (m == 50)
{
middleRef = complexEntity;
}
// Add delay to make times unique
Thread.Sleep(100);
}
complexEntityTable.ExecuteBatch(complexBatch);
}
Example 4: DeleteStorageTableRows
private static void DeleteStorageTableRows(CloudTable table, Dictionary<string, TableBatchOperation> batches)
{
foreach (var batch in batches.Values)
table.ExecuteBatch(batch);
}
Example 5: Post
// Delete data stored in the Azure Table
// POST api/datadelete
public string Post([FromBody]DataDelete query)
{
// This API is only called when "Delete" is clicked in the management application
table = common.AzureAccess(); // access the Azure Table
// Look up the target module and related records in the RDB
var loginuser = RDB.db.Users.Where(p => p.idName.Equals(User.Identity.Name)).Single();
var module = loginuser.Modules.Where(p => p.Name.Equals(query.modulename)).Single();
int id = module.id;
CloudBlobContainer container = common.BlobAccess(); // access Azure Blob storage
// Retrieve the data within the requested date range (Take part)
TableQuery<DataEntity> query1 = new TableQuery<DataEntity>().Where(
TableQuery.CombineFilters(
TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Take," + id),
TableOperators.And,
TableQuery.CombineFilters(
TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, common.GetTimeIndex(query.datestart)),
TableOperators.And,
TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThanOrEqual, common.GetTimeIndex(query.dateend))
)
));
List<TableBatchOperation> deleteOperationList = new List<TableBatchOperation>();
TableBatchOperation deleteOperation = new TableBatchOperation();
try {
module.Type = "1"; //削除中は"1"
RDB.db.SaveChanges();
} catch {
}
int CountNum = 0;
// Delete in batches of 100 (Take part); blob-backed entries also delete their blob one at a time
foreach (var entity in table.ExecuteQuery(query1)) {
deleteOperation.Delete(entity);
if (deleteOperation.Count == 100) {
deleteOperationList.Add(deleteOperation);
deleteOperation = new TableBatchOperation();
}
if (!(entity.DataVal == null)) {
if (entity.DataVal.Equals("BlobData")) {
CloudBlockBlob blockBlob = container.GetBlockBlobReference(id.ToString() + "," + entity.RowKey);
blockBlob.Delete();
}
}
CountNum++;
}
if (deleteOperation.Count > 0) {
deleteOperationList.Add(deleteOperation);
deleteOperation = new TableBatchOperation();
}
Parallel.ForEach(deleteOperationList, Operation => {
table.ExecuteBatch(Operation);
});
// Get the number of rows remaining after deletion and update NumData in the RDB
TableQuery<DataEntity> Countquery = new TableQuery<DataEntity>().Where(TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Take," + module.id));
try {
module.NumData = table.ExecuteQuery(Countquery).Count();
RDB.db.SaveChanges();
} catch {
}
// Retrieve the data within the requested date range (Value part)
TableQuery<DataEntity> query2 = new TableQuery<DataEntity>().Where(
TableQuery.CombineFilters(
TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, "Value," + id),
TableOperators.And,
TableQuery.CombineFilters(
TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, common.GetTimeIndex(query.datestart)),
TableOperators.And,
TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThanOrEqual, common.GetTimeIndex(query.dateend)+1)
)
));
deleteOperationList.Clear();
// Delete in batches of 100 (Value part)
foreach (var entity in table.ExecuteQuery(query2)) {
deleteOperation.Delete(entity);
if (deleteOperation.Count == 100) {
deleteOperationList.Add(deleteOperation);
deleteOperation = new TableBatchOperation();
}
}
if (deleteOperation.Count > 0) {
deleteOperationList.Add(deleteOperation);
deleteOperation = new TableBatchOperation();
}
Parallel.ForEach(deleteOperationList, Operation => {
table.ExecuteBatch(Operation);
});
//......... the rest of this example is omitted here .........
Example 6: DeleteAllEntitiesInBatches
private static void DeleteAllEntitiesInBatches(CloudTable table, Expression<Func<DynamicTableEntity, bool>> filters)
{
Action<IEnumerable<DynamicTableEntity>> processor = entities =>
{
var batches = new Dictionary<string, TableBatchOperation>();
foreach (var entity in entities)
{
TableBatchOperation batch = null;
if (batches.TryGetValue(entity.PartitionKey, out batch) == false)
{
batches[entity.PartitionKey] = batch = new TableBatchOperation();
}
batch.Add(TableOperation.Delete(entity));
if (batch.Count == 100)
{
table.ExecuteBatch(batch);
batches[entity.PartitionKey] = new TableBatchOperation();
}
}
foreach (var batch in batches.Values)
{
if (batch.Count > 0)
{
table.ExecuteBatch(batch);
}
}
};
ProcessEntities(table, processor, filters);
}
Example 7: Post
// (For testing) POST api/dataadd
public string Post([FromBody]string value) {
JavaScriptSerializer serializer = new JavaScriptSerializer();
List<DataAdd> dataaddList = serializer.Deserialize<List<DataAdd>>(value);
String ConnectionString = "UseDevelopmentStorage=true;DevelopmentStorageProxyUri=http://127.0.0.1:10002";
CloudStorageAccount storageAccount = CloudStorageAccount.Parse(ConnectionString);
CloudTableClient tableClient = storageAccount.CreateCloudTableClient();
String TableName = "Sample16";
table = tableClient.GetTableReference(TableName);
TableBatchOperation batchOperationTake = new TableBatchOperation();
TableBatchOperation batchOperationValue = new TableBatchOperation();
string date = String.Empty;
Marimo marimo = new Marimo();
//string[] codelist = { "i_i,0", "i_j,0", "while,count,6", "while,count,4", "send,i_j", "send,i_i", "i_i,i_i,+,1", "endw,3", "i_i,0", "i_j,i_j,+,1", "endw,2" };
//string[] codelist = { "i_data2,get,nowdata,2","if,i_data2,>,100", "send,Worning!!" ,"endi,1" };
bool Flag_MarimoCode = true;
try {
string codelists = RDB.db.Modules.Where(p => p.Name.Equals("HogerX01")).Single().Code.Replace(Environment.NewLine, "|");
string[] codelist = codelists.Split('|');
marimo.codelist = codelist;
} catch {
Flag_MarimoCode = false;
}
foreach (DataAdd dataadd in dataaddList) {
marimo.dataadd = dataadd;
if (Flag_MarimoCode) {
marimo.RunMarimo();
}
foreach (var data in marimo.dataadd.dat) {
Debug.WriteLine("Data: " + data);
}
date = marimo.dataadd.dt;
string time = common.GetTimeIndex(date);
TestEntity customer2 = new TestEntity("Value", time);
customer2.DataVal = marimo.dataadd.dat;
batchOperationValue.Insert(customer2);
if (batchOperationValue.Count == 100) {
table.ExecuteBatch(batchOperationValue);
batchOperationValue = new TableBatchOperation();
}
}
if (batchOperationTake.Count > 0) {
table.ExecuteBatch(batchOperationTake);
}
if (batchOperationValue.Count > 0) {
table.ExecuteBatch(batchOperationValue);
}
return "Success!";
}
Example 8: InsertBatchOfElevationDataSetTableEntity
/// <summary>
/// Insert or replace a batch of ElevationDataSetTableEntity rows in the storage table
/// </summary>
/// <param name="table">Target storage table</param>
/// <param name="entities">Entities to insert or replace</param>
private static void InsertBatchOfElevationDataSetTableEntity(CloudTable table, List<ElevationDataSetTableEntity> entities)
{
TableBatchOperation batchOperation = new TableBatchOperation();
foreach (ElevationDataSetTableEntity tableEntity in entities)
batchOperation.InsertOrReplace(tableEntity);
try
{
table.ExecuteBatch(batchOperation);
}
catch(Exception ex)
{
Console.WriteLine(ex.Message);
throw;
}
}
Example 9: CalculateMADataToAzure
private void CalculateMADataToAzure(CloudTable table, string azureTableStockCode, int MA)
{
DateTime startingDate = DateTime.FromFileTimeUtc(0);
TableOperation retrieveStockEntityStatus = TableOperation.Retrieve<StockEntityStatus>("status-" + azureTableStockCode, "status");
var stockEntityStatus = (StockEntityStatus)table.Execute(retrieveStockEntityStatus).Result;
if (stockEntityStatus != null)
{
startingDate = stockEntityStatus.GetLatestMAStartDate(MA);
Console.WriteLine("Latest starting date for MA{0} is on {1}", MA, startingDate.ToString("yyyy-MM-dd"));
}
string pkFilter = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, azureTableStockCode);
string rkLowerFilter = TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, startingDate.ToString("yyyy-MM-dd"));
string combinedFilter = TableQuery.CombineFilters(pkFilter, TableOperators.And, rkLowerFilter);
TableQuery<StockEntity> query = new TableQuery<StockEntity>().Where(combinedFilter);
var sortedStockEntities = table.ExecuteQuery<StockEntity>(query).OrderBy(entity => entity.Date).ToList();
if (sortedStockEntities.LongCount() >= MA)
{
long totalCountToUpload = sortedStockEntities.LongCount();
long currentCountUploaded = 0;
Queue<double> maData = new Queue<double>();
TableBatchOperation tableBatchOperation = new TableBatchOperation();
foreach (var stockEntity in sortedStockEntities)
{
maData.Enqueue(stockEntity.Close);
if (maData.Count == MA)
{
double sum = 0;
foreach (var data in maData)
{
sum += data;
}
stockEntity.SetMA(MA, sum / MA);
tableBatchOperation.Add(TableOperation.InsertOrMerge(stockEntity));
maData.Dequeue();
}
if (tableBatchOperation.Count == 100)
{
table.ExecuteBatch(tableBatchOperation);
currentCountUploaded += 100;
Console.WriteLine("{0}/{1} entities uploaded...", currentCountUploaded, totalCountToUpload);
tableBatchOperation.Clear();
}
}
if (tableBatchOperation.Count > 0)
{
table.ExecuteBatch(tableBatchOperation);
currentCountUploaded += tableBatchOperation.Count;
Console.WriteLine("{0}/{1} entities uploaded...", currentCountUploaded, totalCountToUpload);
}
sortedStockEntities.Reverse();
if (stockEntityStatus == null) // create the status row if it did not exist yet
{
stockEntityStatus = new StockEntityStatus(azureTableStockCode);
}
stockEntityStatus.SetLatestMAStartDate(MA, sortedStockEntities[MA - 2].Date);
table.Execute(TableOperation.InsertOrMerge(stockEntityStatus));
}
}
Example 10: __fetchScheduledItems
private ScheduledTask[] __fetchScheduledItems(CloudTable scheduleTable, string channel, int retryCount)
{
/* ------------------------------------
* Range query to fetch tasks whose scheduled time has elapsed.
* We retrieve 50 items because each row produces two table operations (a delete and a re-insert), for a total of 100 operations, the maximum handled by a single TableBatchOperation.
*-------------------------------------
*/
const int count = 50;
TableQuery<DynamicTableEntity> rangeQuery = new TableQuery<DynamicTableEntity>().Where(
TableQuery.CombineFilters(
TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, channel),
TableOperators.And,
TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThan, DateTime.Now.ToUniversalTime().ToString("yyyyMMddHHmmssffff"))))
.Take(count);
var results = scheduleTable.ExecuteQuery(rangeQuery).Take(count).ToArray();
if (results.Length == 0)
return new ScheduledTask[0];
/* ------------------------------------
* Every ScheduleEntry retrieved must be deleted right away for concurrency reasons.
* Each entry is also 'postponed' by the specified time span so that it fires again in case the task never completes.
*-------------------------------------
*/
TableBatchOperation tb = new TableBatchOperation();
List<ScheduledTask> _items = new List<ScheduledTask>();
foreach (var x in results)
{
int delimiter = x.RowKey.IndexOf('-');
string[] rowSpl = new string[] { x.RowKey.Substring(0, delimiter), x.RowKey.Substring(delimiter + 1) };
string tempPostponed = DateTime.Now.ToUniversalTime().AddSeconds(POSTPONE_DELAY_FOR_UNCOMMITED_SECONDS).ToString("yyyyMMddHHmmssffff") + "-" + rowSpl[1];
DynamicTableEntity ghost = new DynamicTableEntity(x.PartitionKey, tempPostponed);
ghost.Properties = x.Properties;
int tryCount = 0;
if (ghost.Properties.ContainsKey("RetryCount"))
{
tryCount = ghost.Properties["RetryCount"].Int32Value.Value;
}
ghost.Properties["RetryCount"] = new EntityProperty(tryCount + 1);
// delete and postpone
tb.Add(TableOperation.Delete(x));
tb.Add(TableOperation.Insert(ghost));
_items.Add(new ScheduledTask
{
ScheduledTime = DateTime.ParseExact(rowSpl[0], "yyyyMMddHHmmssffff", System.Globalization.CultureInfo.InvariantCulture),
Channel = channel,
FailedTimes = tryCount,
Data = x["Data"].StringValue,
Id = rowSpl[1]
});
}
/* ----------------------------------------------------------------------------------------------------------------------------------------
* Now that the batch operation containing the deletes and 'postpones' is built, we execute it.
*
* This batch operation is the 'trick' that handles concurrency: because Azure Table Storage acts as a single central authority,
* if two batches touch the same rows at the same time, one of them will fail, so the same item shouldn't be dequeued twice.
*------------------------------------------------------------------------------------------------------------------------------------------
*/
TableResult[] tableResults = null;
try
{
tableResults = scheduleTable.ExecuteBatch(tb).Where(x => x.HttpStatusCode == 201).ToArray(); // keep only 201 (Created) responses, i.e. the results of the inserted 'ghost' entities
for (int i = 0; i < _items.Count; i++)
{
_items[i].temporaryTask = (DynamicTableEntity)tableResults[i].Result;
}
}
catch (Exception)
{
/* ----------------------------------------------------------------------------------------------------------------------------------------
* If an exception occurs while executing, it is most likely because another fetch operation was made at the same time (and succeeded),
* so we run the FetchScheduledItems operation again to get the next items.
*
* If the exception keeps occurring after several retries, it is most likely a problem with the Azure Storage account.
*------------------------------------------------------------------------------------------------------------------------------------------
*/
if (retryCount >= 5)
throw; // rethrow, preserving the original stack trace
return __fetchScheduledItems(scheduleTable, channel, ++retryCount);
}
return _items.ToArray();
}
Example 11: DeleteEntities
public void DeleteEntities(CloudTable table, string partition = null)
{
if (!table.Exists())
{
return;
}
TableQuery query = new TableQuery();
if (partition != null)
{
query.FilterString = string.Format("PartitionKey eq '{0}'", partition);
}
var entities = table.ExecuteQuery(query);
// A batch must target a single partition key and may contain at most 100 operations,
// so group the entities by PartitionKey and flush every 100 deletes.
foreach (var partitionGroup in entities.GroupBy(e => e.PartitionKey))
{
var batch = new TableBatchOperation();
foreach (var entity in partitionGroup)
{
batch.Delete(entity);
if (batch.Count == 100)
{
table.ExecuteBatch(batch);
batch = new TableBatchOperation();
}
}
if (batch.Count > 0)
{
table.ExecuteBatch(batch);
}
}
}