This article collects typical usage examples of the Python method azure.storage.BlobService.put_block_list. If you have been wondering what BlobService.put_block_list does, how to call it, or where to find working examples of it, the curated code samples below should help. You can also explore further usage examples of its containing class, azure.storage.BlobService.
Shown below are 2 code examples of BlobService.put_block_list, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
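Before the full examples, here is a minimal sketch of the basic flow, assuming the legacy azure.storage SDK (the pre-2016 package in which BlobService still exists) and Python 2, matching the examples below. The account name, key, container, and blob names are placeholders: each chunk is staged with put_block(), then the blob is committed by sending the ordered ID list to put_block_list().

import base64

from azure.storage import BlobService

conn = BlobService('myaccount', 'mykey')  # placeholder credentials

chunks = ['hello ', 'world']
block_ids = []
for index, chunk in enumerate(chunks):
    # Block IDs must be base64-encoded and, within one blob, all of
    # equal length -- hence the zero-padded counter.
    block_id = base64.b64encode('block-%06d' % index)
    conn.put_block('mycontainer', 'greeting.txt', chunk, block_id)
    block_ids.append(block_id)

# Nothing is readable until the block list is committed in order.
conn.put_block_list('mycontainer', 'greeting.txt', block_ids)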
Example 1: uri_put_file
# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_block_list [as alias]
#
# Python 2 excerpt; standard-library imports it relies on:
import base64
import os
import socket
import traceback

from hashlib import md5
from urlparse import urlparse

import gevent.pool

from azure.storage import BlobService

# retry, retry_with_count, logger, WABS_CHUNK_SIZE and _Key are helpers
# defined elsewhere in the source module.


def uri_put_file(creds, uri, fp, content_encoding=None):
    assert fp.tell() == 0
    assert uri.startswith('wabs://')

    def log_upload_failures_on_error(exc_tup, exc_processor_cxt):

        def standard_detail_message(prefix=''):
            return (prefix + ' There have been {n} attempts to upload '
                    'file {url} so far.'.format(n=exc_processor_cxt, url=uri))

        typ, value, tb = exc_tup
        del exc_tup

        # Screen for certain kinds of known-errors to retry from
        if issubclass(typ, socket.error):
            socketmsg = value[1] if isinstance(value, tuple) else value
            logger.info(
                msg='Retrying upload because of a socket error',
                detail=standard_detail_message(
                    "The socket error's message is '{0}'."
                    .format(socketmsg)))
        else:
            # For all otherwise untreated exceptions, report them as a
            # warning and retry anyway -- all exceptions that can be
            # justified should be treated and have error messages
            # listed.
            logger.warning(
                msg='retrying file upload from unexpected exception',
                detail=standard_detail_message(
                    'The exception type is {etype} and its value is '
                    '{evalue} and its traceback is {etraceback}'
                    .format(etype=typ, evalue=value,
                            etraceback=''.join(traceback.format_tb(tb)))))

        # Help Python GC by resolving possible cycles
        del tb

    # Because we're uploading in chunks, catch rate limiting and
    # connection errors which occur for each individual chunk instead of
    # failing the whole file and restarting.
    @retry(retry_with_count(log_upload_failures_on_error))
    def upload_chunk(chunk, block_id):
        check_sum = base64.encodestring(md5(chunk).digest()).strip('\n')
        conn.put_block(url_tup.netloc, url_tup.path, chunk,
                       block_id, content_md5=check_sum)

    url_tup = urlparse(uri)
    kwargs = dict(x_ms_blob_type='BlockBlob')
    if content_encoding is not None:
        kwargs['x_ms_blob_content_encoding'] = content_encoding

    conn = BlobService(creds.account_name, creds.account_key, protocol='https')
    conn.put_blob(url_tup.netloc, url_tup.path, '', **kwargs)

    # WABS requires large files to be uploaded in 4MB chunks
    block_ids = []
    length, index = 0, 0
    pool_size = int(os.getenv('WABS_UPLOAD_POOL_SIZE', 5))
    p = gevent.pool.Pool(size=pool_size)
    while True:
        data = fp.read(WABS_CHUNK_SIZE)
        if data:
            length += len(data)
            block_id = base64.b64encode(str(index))
            p.wait_available()
            p.spawn(upload_chunk, data, block_id)
            block_ids.append(block_id)
            index += 1
        else:
            p.join()
            break

    conn.put_block_list(url_tup.netloc, url_tup.path, block_ids)

    # To maintain consistency with the S3 version of this function we must
    # return an object with a certain set of attributes. Currently, that set
    # of attributes consists of only 'size' -- the total number of bytes
    # uploaded, which `length` has been accumulating (the loop above always
    # exits with `data` empty).
    return _Key(size=length)
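For orientation, a hypothetical call could look like the following sketch. The Credentials namedtuple is invented here for illustration; the excerpt only requires that creds expose account_name and account_key attributes.

from collections import namedtuple

Credentials = namedtuple('Credentials', 'account_name account_key')
creds = Credentials(account_name='myaccount', account_key='mykey')

with open('backup.tar.lzo', 'rb') as fp:
    key = uri_put_file(creds, 'wabs://mycontainer/backup.tar.lzo', fp)
    print key.size  # total number of bytes uploaded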
Example 2: AzureFS
# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_block_list [as alias]
#......... part of the code omitted here .........
    if fh not in self.fds:
        raise FuseOSError(EIO)

    path = self.fds[fh][0]
    data = self.fds[fh][1]
    dirty = self.fds[fh][2]

    if not dirty:
        return 0  # avoid redundant write

    d, f = self._parse_path(path)
    c_name = self.parse_container(path)

    if data is None:
        data = ''

    try:
        if len(data) < 64 * 1024 * 1024:  # 64 MB
            self.blobs.put_blob(c_name, f, data, 'BlockBlob')
        else:
            # divide file by blocks and upload
            block_size = 8 * 1024 * 1024
            num_blocks = int(math.ceil(len(data) * 1.0 / block_size))
            rd = str(random.randint(1, 1e8))
            block_ids = list()

            for i in range(num_blocks):
                part = data[i * block_size:min((i + 1) * block_size,
                                               len(data))]
                block_id = base64.encodestring(
                    '%s_%s' % (rd, (8 - len(str(i))) * '0' + str(i)))
                self.blobs.put_block(c_name, f, part, block_id)
                block_ids.append(block_id)

            self.blobs.put_block_list(c_name, f, block_ids)
    except WindowsAzureError:
        raise FuseOSError(EAGAIN)

    dir = self._get_dir(d, True)
    if not dir or f not in dir['files']:
        raise FuseOSError(EIO)

    # update local data
    dir['files'][f]['st_size'] = len(data)
    dir['files'][f]['st_mtime'] = time.time()

    self.fds[fh] = (path, data, False)  # mark as not dirty
    return 0

def release(self, path, fh=None):
    if fh is not None and fh in self.fds:
        del self.fds[fh]

def truncate(self, path, length, fh=None):
    return 0  # assume done, no need

def write(self, path, data, offset, fh=None):
    if not fh or fh not in self.fds:
        raise FuseOSError(ENOENT)
    else:
        d = self.fds[fh][1]
        if d is None:
            d = ""
        self.fds[fh] = (self.fds[fh][0], d[:offset] + data, True)
        return len(data)

def unlink(self, path):
    c_name = self.parse_container(path)
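A detail worth noting in the flush logic above: Azure requires block IDs to be base64-encoded and, within a given blob, all of the same length, which is why the counter is zero-padded to a fixed width before encoding. Below is a standalone sketch of that ID scheme (Python 2; the function name is invented here for illustration).

import base64
import random

def make_block_ids(num_blocks):
    # Fixed per-file random prefix plus a zero-padded counter, so every
    # ID has the same length before (and after) base64 encoding.
    rd = str(random.randint(1, 10 ** 8))
    return [base64.encodestring('%s_%s' % (rd, str(i).zfill(8)))
            for i in range(num_blocks)]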