This article collects typical usage examples of the C# method Microsoft.WindowsAzure.Storage.Blob.CloudBlockBlob.PutBlock. If you are unsure what CloudBlockBlob.PutBlock does or how to call it, the curated code examples below should help. You can also explore the containing class, Microsoft.WindowsAzure.Storage.Blob.CloudBlockBlob, for more context.
Six code examples of CloudBlockBlob.PutBlock are shown below, ordered by popularity.
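Before the examples, here is a minimal end-to-end sketch of the workflow they all build on: stage data with PutBlock, then make it visible by committing the block list with PutBlockList. The connection string, container name, and blob name are placeholders.

using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using Microsoft.WindowsAzure.Storage;
using Microsoft.WindowsAzure.Storage.Blob;

class PutBlockSketch
{
    static void Main()
    {
        // Placeholders: development storage and demo container/blob names.
        CloudStorageAccount account = CloudStorageAccount.Parse("UseDevelopmentStorage=true");
        CloudBlobContainer container = account.CreateCloudBlobClient().GetContainerReference("demo");
        container.CreateIfNotExists();
        CloudBlockBlob blob = container.GetBlockBlobReference("example.txt");

        var blockIds = new List<string>();
        string[] parts = { "hello ", "block ", "blob" };
        for (int i = 0; i < parts.Length; i++)
        {
            // Block IDs must be Base64-encoded and uniform in length within one blob.
            string blockId = Convert.ToBase64String(BitConverter.GetBytes(i));
            using (var ms = new MemoryStream(Encoding.UTF8.GetBytes(parts[i])))
            {
                blob.PutBlock(blockId, ms, null);   // null = no Content-MD5 check
            }
            blockIds.Add(blockId);
        }

        // Staged blocks stay invisible until the list is committed.
        blob.PutBlockList(blockIds);
    }
}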
Example 1: CreateForTest
private static void CreateForTest(CloudBlockBlob blob, int blockCount, int blockSize, bool isAsync, bool commit = true)
{
    byte[] buffer = GetRandomBuffer(blockSize);
    List<string> blocks = GetBlockIdList(blockCount);

    using (AutoResetEvent waitHandle = new AutoResetEvent(false))
    {
        foreach (string block in blocks)
        {
            using (MemoryStream stream = new MemoryStream(buffer))
            {
                if (isAsync)
                {
                    // Classic APM pattern: signal the wait handle from the callback,
                    // block until it fires, then complete the operation.
                    IAsyncResult result = blob.BeginPutBlock(block, stream, null,
                        ar => waitHandle.Set(),
                        null);
                    waitHandle.WaitOne();
                    blob.EndPutBlock(result);
                }
                else
                {
                    blob.PutBlock(block, stream, null);
                }
            }
        }

        if (commit)
        {
            if (isAsync)
            {
                IAsyncResult result = blob.BeginPutBlockList(blocks,
                    ar => waitHandle.Set(),
                    null);
                waitHandle.WaitOne();
                blob.EndPutBlockList(result);
            }
            else
            {
                blob.PutBlockList(blocks);
            }
        }
    }
}
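Calls would look like the following (the blob reference is a placeholder; GetRandomBuffer and GetBlockIdList belong to the same test harness as CreateForTest):

// Synchronous path: upload three 4 KB blocks and commit them.
CreateForTest(blob, blockCount: 3, blockSize: 4 * 1024, isAsync: false);

// APM path: upload the blocks via Begin/EndPutBlock but leave them uncommitted.
CreateForTest(blob, blockCount: 3, blockSize: 4 * 1024, isAsync: true, commit: false);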
Example 2: WriteBytes
/// <summary>
/// Yes, this copies the byte array. But given that we won't have many of these tasks
/// running in parallel, and each byte array is AT MOST 4 MB, I think I can live with
/// the memory overhead.
/// </summary>
/// <param name="offset">Offset of this byte range within the source.</param>
/// <param name="bytesRead">Number of valid bytes in the buffer.</param>
/// <param name="bytesToRead">The buffer holding the block's data.</param>
/// <param name="blob">The target block blob.</param>
/// <param name="uploadedBlockList">Accumulates metadata for each uploaded block.</param>
/// <param name="testMode">If true, record the block but skip the actual upload.</param>
/// <returns>The task performing the upload.</returns>
private Task WriteBytes(long offset, int bytesRead, byte[] bytesToRead, CloudBlockBlob blob, ConcurrentBag<UploadedBlock> uploadedBlockList, bool testMode)
{
    var t = Task.Factory.StartNew(() =>
    {
        var sig = CommonOps.GenerateBlockSig(bytesToRead, offset, bytesRead, 0);
        var blockId = Convert.ToBase64String(sig.MD5Signature);

        bool isDupe = false;
        lock (parallelLock)
        {
            isDupe = uploadedBlockList.Any(ub => ub.BlockId == blockId);

            // store the block id that is associated with this byte range.
            uploadedBlockList.Add(new UploadedBlock()
            {
                BlockId = blockId,
                Offset = offset,
                Sig = sig,
                Size = bytesRead,
                IsNew = true,
                IsDuplicate = isDupe
            });
        }

        if (!testMode && !isDupe)
        {
            // yes, putting into a memory stream is probably a waste here.
            using (var ms = new MemoryStream(bytesToRead))
            {
                var options = new BlobRequestOptions() { ServerTimeout = new TimeSpan(0, 90, 0) };
                blob.PutBlock(blockId, ms, null, null, options);
            }
        }
    });

    return t;
}
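For context, a driver for WriteBytes might look like the sketch below: read the source in 4 MB chunks (matching the comment above), fan out the tasks, then commit the block list ordered by offset. Everything here is illustrative; UploadedBlock comes from the same codebase as WriteBytes, and the blob reference and localPath are placeholders. Note that a block ID may legally appear more than once in a committed list, which is how duplicate ranges are folded together.

// Illustrative driver (hypothetical): chunk a local file and upload via WriteBytes.
const int ChunkSize = 4 * 1024 * 1024;
var uploadedBlocks = new ConcurrentBag<UploadedBlock>();
var tasks = new List<Task>();

using (var fs = File.OpenRead(localPath))
{
    var buffer = new byte[ChunkSize];
    long offset = 0;
    int read;
    while ((read = fs.Read(buffer, 0, buffer.Length)) > 0)
    {
        var copy = new byte[read];
        Array.Copy(buffer, copy, read);   // each task needs its own copy
        tasks.Add(WriteBytes(offset, read, copy, blob, uploadedBlocks, testMode: false));
        offset += read;
    }
}

Task.WaitAll(tasks.ToArray());

// Commit one entry per byte range, in order; duplicate ranges reuse the same block ID.
blob.PutBlockList(uploadedBlocks.OrderBy(b => b.Offset).Select(b => b.BlockId));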
Example 3: BlockCreate
/// <summary>
/// Create a block with a random name.
/// </summary>
/// <param name="testBlob">The block blob.</param>
/// <param name="testAccessCondition">The access condition.</param>
/// <returns>The name of the new block.</returns>
private string BlockCreate(CloudBlockBlob testBlob, AccessCondition testAccessCondition)
{
    byte[] buffer = new byte[4 * 1024];
    Random random = new Random();
    random.NextBytes(buffer);

    string blockId = Guid.NewGuid().ToString("N");
    using (Stream blockData = new MemoryStream(buffer))
    {
        testBlob.PutBlock(blockId, blockData, null /* content MD5 */, testAccessCondition, null /* options */);
    }
    return blockId;
}
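BlockCreate only stages an uncommitted block, so a caller is expected to commit afterwards. A minimal sketch (the blob reference is a placeholder):

// Stage two blocks under the same access condition, then commit them in order.
AccessCondition condition = AccessCondition.GenerateEmptyCondition();
string first = BlockCreate(testBlob, condition);
string second = BlockCreate(testBlob, condition);
testBlob.PutBlockList(new[] { first, second }, condition);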
Example 4: UploadBlobInChunks
static void UploadBlobInChunks(FileInfo fileInfo, CloudBlockBlob packageBlob, CloudBlobClient blobClient)
{
    var operationContext = new OperationContext();
    operationContext.ResponseReceived += delegate(object sender, RequestEventArgs args)
    {
        var statusCode = (int)args.Response.StatusCode;
        var statusDescription = args.Response.StatusDescription;
        Log.Verbose("Uploading, response received: " + statusCode + " " + statusDescription);
        if (statusCode >= 400)
        {
            Log.Error("Error when uploading the package. Azure returned an HTTP status code of: " +
                      statusCode + " " + statusDescription);
            Log.Verbose("The upload will be retried");
        }
    };

    blobClient.SetServiceProperties(blobClient.GetServiceProperties(), operationContext: operationContext);

    Log.VerboseFormat("Uploading the package to blob storage. The package file is {0}.", fileInfo.Length.ToFileSizeString());

    using (var fileReader = fileInfo.OpenRead())
    {
        var blocklist = new List<string>();
        long uploadedSoFar = 0;
        var data = new byte[1024 * 1024];
        var id = 1;

        while (true)
        {
            id++;
            var read = fileReader.Read(data, 0, data.Length);
            if (read == 0)
            {
                // Commit the staged blocks; pass the context so the handler above fires.
                packageBlob.PutBlockList(blocklist, operationContext: operationContext);
                break;
            }

            var blockId = Convert.ToBase64String(Encoding.UTF8.GetBytes(id.ToString(CultureInfo.InvariantCulture).PadLeft(30, '0')));
            using (var ms = new MemoryStream(data, 0, read, true))
            {
                packageBlob.PutBlock(blockId, ms, null, operationContext: operationContext);
            }
            blocklist.Add(blockId);

            uploadedSoFar += read;
            Log.VerboseFormat("Uploading package to blob storage: {0} of {1}", uploadedSoFar.ToFileSizeString(), fileInfo.Length.ToFileSizeString());
        }
    }

    Log.Verbose("Upload complete");
}
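The PadLeft(30, '0') is not cosmetic: the Blob service requires every block ID in a blob to be a valid Base64 string, and all IDs within one blob to be the same length, so the counter is zero-padded before encoding. Also note the service's limit of 50,000 committed blocks per blob, which caps this routine's 1 MB blocks at roughly 48 GB per upload. A small helper capturing the same ID scheme (the name is hypothetical; it assumes the example's usings, including System.Globalization and System.Text):

// Hypothetical helper: produce uniform-length Base64 block IDs from a counter.
static string ToBlockId(int counter)
{
    var padded = counter.ToString(CultureInfo.InvariantCulture).PadLeft(30, '0');
    return Convert.ToBase64String(Encoding.UTF8.GetBytes(padded));
}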
Example 5: AddMessageToBlock
/// <summary>
/// Adds the diagnostic message to the block blob.
/// </summary>
/// <param name="blob">The cloud blob.</param>
/// <param name="message">The message.</param>
protected virtual void AddMessageToBlock(CloudBlockBlob blob, string message)
{
    Sitecore.Diagnostics.Assert.ArgumentNotNull(blob, "blob");
    Sitecore.Diagnostics.Assert.ArgumentNotNull(message, "message");

    var blockIds = new List<string>();
    if (blob.Exists())
    {
        blockIds.AddRange(blob.DownloadBlockList().Select(b => b.Name));
    }

    string blockId = Guid.NewGuid().ToString().Replace("-", string.Empty);
    blockIds.Add(blockId);

    using (var blockData = new MemoryStream(LogStorageManager.DefaultTextEncoding.GetBytes(message), false))
    {
        blob.PutBlock(blockId, blockData, null);
        blob.PutBlockList(blockIds);
    }
}
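DownloadBlockList with no arguments returns only the committed blocks, so each call stages one new block and re-commits the whole list with that block appended. Usage is then one call per log entry; the logBlob reference below is a placeholder:

// Append one log line per call; each line becomes one block of the blob.
AddMessageToBlock(logBlob, DateTime.UtcNow.ToString("O") + " Site started" + Environment.NewLine);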
Example 6: ParallelWriteBlockBlob
/// <summary>
/// Upload in parallel.
/// If the total size of the file is smaller than chunkSize, simply split the length by the parallel factor.
/// FIXME: Need to retest this!!!
/// </summary>
/// <param name="stream">The source stream (must be seekable).</param>
/// <param name="blob">The target block blob.</param>
/// <param name="parallelFactor">Maximum number of concurrent uploads.</param>
/// <param name="chunkSizeInMB">Block size in megabytes.</param>
private void ParallelWriteBlockBlob(Stream stream, CloudBlockBlob blob, int parallelFactor, int chunkSizeInMB)
{
    long chunkSize = chunkSizeInMB * 1024L * 1024;  // multiply as long to avoid int overflow
    var length = stream.Length;
    if (chunkSize > length)
    {
        chunkSize = length / parallelFactor;
    }

    var numberOfBlocks = (length / chunkSize) + 1;
    var blockIdList = new string[numberOfBlocks];
    var chunkSizeList = new int[numberOfBlocks];
    var taskList = new List<Task>();

    var count = numberOfBlocks - 1;

    // read the data... spawn a task to launch... then wait for all.
    while (count >= 0)
    {
        while (count >= 0 && taskList.Count < parallelFactor)
        {
            var index = (numberOfBlocks - count - 1);
            var chunkSizeToUpload = (int)Math.Min(chunkSize, length - (index * chunkSize));

            // only upload if we have data to give.
            // edge case where we already have uploaded all the data.
            if (chunkSizeToUpload > 0)
            {
                chunkSizeList[index] = chunkSizeToUpload;
                var dataBuffer = new byte[chunkSizeToUpload];
                stream.Seek(index * chunkSize, SeekOrigin.Begin);
                stream.Read(dataBuffer, 0, chunkSizeToUpload);

                var t = Task.Factory.StartNew(() =>
                {
                    var tempCount = index;
                    var uploadSize = chunkSizeList[tempCount];
                    var newBuffer = new byte[uploadSize];
                    Array.Copy(dataBuffer, newBuffer, dataBuffer.Length);

                    var blockId = Convert.ToBase64String(Guid.NewGuid().ToByteArray());
                    using (var memStream = new MemoryStream(newBuffer, 0, uploadSize))
                    {
                        blob.PutBlock(blockId, memStream, null);
                    }
                    blockIdList[tempCount] = blockId;
                });
                taskList.Add(t);
            }
            count--;
        }

        // Throttle: wait for one in-flight upload to finish before queuing more.
        var waitedIndex = Task.WaitAny(taskList.ToArray());
        if (waitedIndex >= 0)
        {
            taskList.RemoveAt(waitedIndex);
        }
    }

    Task.WaitAll(taskList.ToArray());

    // Commit in block order; skip the trailing empty slot left over from the +1 sizing.
    blob.PutBlockList(blockIdList.Where(t => t != null));
}
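Usage is then a single call with a seekable stream; the tuning values and localPath below are illustrative:

// Hypothetical invocation: at most 4 concurrent PutBlock calls, 2 MB blocks.
using (var fs = File.OpenRead(localPath))
{
    ParallelWriteBlockBlob(fs, blob, parallelFactor: 4, chunkSizeInMB: 2);
}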