本文整理匯總了Golang中github.com/Azure/azure-sdk-for-go/storage.BlobStorageClient類的典型用法代碼示例。如果您正苦於以下問題:Golang BlobStorageClient類的具體用法?Golang BlobStorageClient怎麽用?Golang BlobStorageClient使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
在下文中一共展示了BlobStorageClient類的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: validate
// validate downloads the blob from the package-level container cnt and
// verifies that bytes in the inclusive range [startByte, endByte] match the
// expected data slice. It returns a non-nil error on download failure, a
// short download, or any byte mismatch.
func validate(cli storage.BlobStorageClient, blob string, startByte, endByte int64, data []byte) error {
	url := cli.GetBlobURL(cnt, blob)
	reader, err := cli.GetBlob(cnt, blob)
	if err != nil {
		return fmt.Errorf("Failed to read from %s: %s\n", url, err.Error())
	}
	defer reader.Close()
	dataRead, err := ioutil.ReadAll(reader)
	if err != nil {
		return fmt.Errorf("Failed to read from %s: %s\n", url, err.Error())
	}
	// Guard against a short download before indexing dataRead; the original
	// code would panic with an index-out-of-range here.
	if endByte >= int64(len(dataRead)) {
		return fmt.Errorf("Failed to read data properly from %s: got %d bytes, need at least %d\n", url, len(dataRead), endByte+1)
	}
	for i := startByte; i <= endByte; i++ {
		if data[i] != dataRead[i] {
			// BUG FIX: the original called err.Error() on this path, but err
			// is always nil here (both error paths above returned), which
			// panicked on any mismatch. Report the mismatch offset instead,
			// and return as soon as one is found.
			return fmt.Errorf("Failed to read data properly from %s: mismatch at byte %d\n", url, i)
		}
	}
	return nil
}
示例2: putBlockBlob
// putBlockBlob uploads the given stream into a block blob by splitting the
// data into chunks of at most chunkSize bytes and uploading each as a block,
// then committing the block list. It is a helper built on PutBlock and
// PutBlockList with sequential block-ID counting. A chunkSize that is
// non-positive or larger than the service limit falls back to
// storage.MaxBlobBlockSize.
func putBlockBlob(b storage.BlobStorageClient, container, name string, blob io.Reader, chunkSize int) error {
	if chunkSize <= 0 || chunkSize > storage.MaxBlobBlockSize {
		chunkSize = storage.MaxBlobBlockSize
	}
	chunk := make([]byte, chunkSize)
	n, err := blob.Read(chunk)
	if err != nil && err != io.EOF {
		return err
	}
	blockList := []storage.Block{}
	// Loop while there is data to upload. Per the io.Reader contract a Read
	// may return n > 0 together with io.EOF; the original loop structure
	// could upload a zero-byte block for an empty stream (which the service
	// rejects), so the put is now guarded by n > 0.
	for blockNum := 0; n > 0; blockNum++ {
		// Zero-padded 11-digit IDs keep all block IDs the same length, as
		// required by the Put Block List API; base64 per the REST contract.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%011d", blockNum)))
		if putErr := b.PutBlock(container, name, id, chunk[:n]); putErr != nil {
			return putErr
		}
		// Keyed fields (go vet flags unkeyed composite literals).
		blockList = append(blockList, storage.Block{ID: id, Status: storage.BlockStatusLatest})
		if err == io.EOF {
			break
		}
		// Read next block.
		n, err = blob.Read(chunk)
		if err != nil && err != io.EOF {
			return err
		}
	}
	return b.PutBlockList(container, name, blockList)
}
示例3: clearPage
// clearPage clears the page range [startByte, endByte] of the page blob
// named name in the package-level container cnt. On failure it logs the
// blob URL and returns the underlying error.
func clearPage(cli storage.BlobStorageClient, name string, startByte, endByte int64) error {
	err := cli.PutPage(cnt, name, startByte, endByte, storage.PageWriteTypeClear, nil)
	if err == nil {
		return nil
	}
	url := cli.GetBlobURL(cnt, name)
	fmt.Printf("Failed to clear pages of %s: %s\n", url, err.Error())
	return err
}
示例4: writePage
// writePage writes chunk into the page range [startByte, endByte] of the
// page blob named name in the package-level container cnt. On failure it
// logs the blob URL and returns the underlying error.
func writePage(cli storage.BlobStorageClient, name string, startByte, endByte int64, chunk []byte) error {
	err := cli.PutPage(cnt, name, startByte, endByte, storage.PageWriteTypeUpdate, chunk)
	if err == nil {
		return nil
	}
	url := cli.GetBlobURL(cnt, name)
	fmt.Printf("Failed to write pages to %s: %s\n", url, err.Error())
	return err
}
示例5: resourceArmStorageBlobPageUploadFromSource
// resourceArmStorageBlobPageUploadFromSource creates a page blob sized to the
// local file at source and uploads its non-empty pages using a pool of
// parallelism * NumCPU worker goroutines. attempts is the per-page retry
// budget passed through to each worker. Returns the first worker error, if any.
func resourceArmStorageBlobPageUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()
	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()
	// Split the file into page-aligned sections; blobSize is the total blob
	// size to allocate up front.
	blobSize, pageList, err := resourceArmStorageBlobPageSplit(file)
	if err != nil {
		return fmt.Errorf("Error splitting source file %q into pages: %s", source, err)
	}
	// Create the (initially empty) page blob before any pages are written.
	if err := client.PutPageBlob(container, name, blobSize, map[string]string{}); err != nil {
		return fmt.Errorf("Error creating storage blob on Azure: %s", err)
	}
	// Buffer both channels to len(pageList) so producers and workers never
	// block: all pages are queued before the workers start, and every worker
	// can report an error without a reader.
	pages := make(chan resourceArmStorageBlobPage, len(pageList))
	errors := make(chan error, len(pageList))
	wg := &sync.WaitGroup{}
	wg.Add(len(pageList))
	total := int64(0)
	for _, page := range pageList {
		// NOTE(review): total is accumulated but never used afterwards —
		// looks like leftover progress-reporting code; confirm before removing.
		total += page.section.Size()
		pages <- page
	}
	// Close the channel so workers terminate once the queue drains.
	close(pages)
	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobPageUploadWorker(resourceArmStorageBlobPageUploadContext{
			container: container,
			name:      name,
			source:    source,
			blobSize:  blobSize,
			client:    client,
			pages:     pages,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}
	// Workers call wg.Done per page (presumably — defined elsewhere), so this
	// returns once every queued page has been processed.
	wg.Wait()
	// Surface only the first error; the rest remain buffered.
	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}
	return nil
}
示例6: resourceArmStorageBlobBlockUploadFromSource
// resourceArmStorageBlobBlockUploadFromSource uploads the local file at
// source as a block blob: the file is split into blocks, the blocks are
// uploaded by a pool of parallelism * NumCPU worker goroutines, and the block
// list is committed at the end. attempts is the per-block retry budget passed
// through to each worker. Returns the first worker or commit error, if any.
func resourceArmStorageBlobBlockUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()
	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()
	// blockList is the ordered list committed at the end; parts are the
	// individual chunks handed to the workers.
	blockList, parts, err := resourceArmStorageBlobBlockSplit(file)
	if err != nil {
		return fmt.Errorf("Error reading and splitting source file for upload %q: %s", source, err)
	}
	wg := &sync.WaitGroup{}
	// Buffer both channels to len(parts) so queuing all blocks up front and
	// error reporting never block.
	blocks := make(chan resourceArmStorageBlobBlock, len(parts))
	errors := make(chan error, len(parts))
	wg.Add(len(parts))
	for _, p := range parts {
		blocks <- p
	}
	// Close the channel so workers terminate once the queue drains.
	close(blocks)
	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobBlockUploadWorker(resourceArmStorageBlobBlockUploadContext{
			client:    client,
			source:    source,
			container: container,
			name:      name,
			blocks:    blocks,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}
	// Workers call wg.Done per block (presumably — defined elsewhere).
	wg.Wait()
	// Surface only the first error; the rest remain buffered.
	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}
	// Commit the block list to assemble the uploaded blocks into the blob.
	err = client.PutBlockList(container, name, blockList)
	if err != nil {
		return fmt.Errorf("Error updating block list for source file %q: %s", source, err)
	}
	return nil
}
示例7: createBlockBlob
// createBlockBlob uploads the payload b to the configured Azure container
// under key k. When gzip is enabled in the config the payload is compressed
// first and the key gains a ".gz" suffix.
func createBlockBlob(cli storage.BlobStorageClient, k string, b []byte) error {
	if c.Conf.GZIP {
		compressed, gzErr := gz(b)
		if gzErr != nil {
			return gzErr
		}
		b = compressed
		k += ".gz"
	}
	uploadErr := cli.CreateBlockBlobFromReader(
		c.Conf.AzureContainer,
		k,
		uint64(len(b)),
		bytes.NewReader(b),
		map[string]string{},
	)
	if uploadErr != nil {
		return fmt.Errorf("Failed to upload data to %s/%s, %s",
			c.Conf.AzureContainer, k, uploadErr)
	}
	return nil
}