本文整理汇总了Python中azure.storage.blob.BlobService.get_blob方法的典型用法代码示例。如果您正苦于以下问题:Python BlobService.get_blob方法的具体用法?Python BlobService.get_blob怎么用?Python BlobService.get_blob使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类azure.storage.blob.BlobService的用法示例。
在下文中一共展示了BlobService.get_blob方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: uri_get_file
# 需要导入模块: from azure.storage.blob import BlobService [as 别名]
# 或者: from azure.storage.blob.BlobService import get_blob [as 别名]
def uri_get_file(creds, uri, conn=None):
    """Download and return the full contents of a WABS blob.

    Parameters
    ----------
    creds : credentials object exposing ``account_name``, ``account_key``
        and ``access_token`` (used only when *conn* is None).
    uri : str
        Blob URI; must begin with ``wabs://``.
    conn : BlobService, optional
        An existing connection to reuse; a new one is created when None.

    Returns
    -------
    bytes
        The complete blob contents, assembled from 4MB range requests.
    """
    assert uri.startswith('wabs://')
    url_tup = urlparse(uri)

    if conn is None:
        conn = BlobService(creds.account_name, creds.account_key,
                           sas_token=creds.access_token, protocol='https')

    # Determine the size of the target blob
    props = conn.get_blob_properties(url_tup.netloc, url_tup.path.lstrip('/'))
    blob_size = int(props['content-length'])

    ret_size = 0
    data = io.BytesIO()
    # WABS requires large files to be downloaded in 4MB chunks
    while ret_size < blob_size:
        ms_range = 'bytes={0}-{1}'.format(ret_size,
                                          ret_size + WABS_CHUNK_SIZE - 1)
        while True:
            # Because we're downloading in chunks, catch rate limiting and
            # connection errors here instead of letting them bubble up to the
            # @retry decorator so that we don't have to start downloading the
            # whole file over again.
            try:
                part = conn.get_blob(url_tup.netloc,
                                     url_tup.path.lstrip('/'),
                                     x_ms_range=ms_range)
            except EnvironmentError as e:
                if e.errno in (errno.EBUSY, errno.ECONNRESET):
                    # BUGFIX: format_exception() returns a list of strings;
                    # join it so the log carries a readable traceback rather
                    # than the list's repr.
                    logger.warning(
                        msg="retrying after encountering exception",
                        detail=("Exception traceback:\n{0}".format(''.join(
                            traceback.format_exception(*sys.exc_info())))),
                        hint="")
                    gevent.sleep(30)
                else:
                    raise
            else:
                break
        length = len(part)
        ret_size += length
        data.write(part)
        # A short (or empty) chunk signals end-of-blob even if ret_size has
        # not yet reached the size reported by get_blob_properties.
        if length > 0 and length < WABS_CHUNK_SIZE:
            break
        elif length == 0:
            break

    return data.getvalue()
示例2: set_azure_details
# 需要导入模块: from azure.storage.blob import BlobService [as 别名]
# 或者: from azure.storage.blob.BlobService import get_blob [as 别名]
def set_azure_details(blob_name, download_name, container_name=AZURE_CONTAINER):
    """Tag an existing blob as a downloadable MP3 attachment.

    Looks up *blob_name* in *container_name* and, when found, sets its
    content type to audio/mpeg and a content-disposition header naming
    *download_name*. All outcomes (success, missing blob, other errors)
    are reported via print; nothing is raised to the caller.
    """
    try:
        service = BlobService(AZURE_ACCOUNT_NAME, AZURE_ACCOUNT_KEY)
        if service.get_blob(container_name, blob_name):
            service.set_blob_properties(
                container_name,
                blob_name,
                x_ms_blob_content_type='audio/mpeg',
                x_ms_blob_content_disposition='attachment;filename="{0}"'.format(download_name)
            )
            print("Processed: %s" % download_name)
        else:
            print("No blob found for: %s" % download_name)
    except AzureMissingResourceHttpError:
        # get_blob raises rather than returning falsy on some SDK versions.
        print("No blob found for: %s" % download_name)
    except Exception as ex:
        print("Error processing blob %s: %s" % (download_name, ex))
示例3: AzureFS
# 需要导入模块: from azure.storage.blob import BlobService [as 别名]
# 或者: from azure.storage.blob.BlobService import get_blob [as 别名]
#.........这里部分代码省略.........
c_name = path[1:]
resp = self.blobs.delete_container(c_name)
if resp:
if path in self.containers:
del self.containers[path]
else:
raise FuseOSError(EACCES)
else:
raise FuseOSError(ENOSYS) # TODO support 2nd+ level mkdirs
def create(self, path, mode):
    """Create an empty regular-file node at *path* and open it.

    Raises FuseOSError(ENOSYS) for root-level paths and EIO when the
    parent directory cannot be resolved; otherwise returns the file
    descriptor produced by open().
    """
    parent_path, filename = self._parse_path(path)
    if not filename:
        # Files cannot live directly at the filesystem root.
        log.error("Cannot create files on root level: /")
        raise FuseOSError(ENOSYS)

    parent = self._get_dir(parent_path, True)
    if not parent:
        raise FuseOSError(EIO)

    parent['files'][filename] = dict(st_mode=(S_IFREG | mode),
                                     st_size=0,
                                     st_nlink=1,
                                     st_uid=getuid(),
                                     st_mtime=time.time())
    # Reuse open() to allocate a handle; passing data='' skips the download.
    return self.open(path, data='')
def open(self, path, flags=0, data=None):
    """Open *path* and return a new file descriptor.

    When *data* is None the blob contents are downloaded from Azure;
    otherwise *data* is used directly as the initial contents (create()
    relies on this to avoid a round-trip for brand-new files).

    Raises FuseOSError(ENOENT) when the blob does not exist remotely and
    FuseOSError(EAGAIN) on other Azure errors.
    """
    # BUGFIX: compare against None with 'is', not '=='.
    if data is None:  # download contents
        c_name = self.parse_container(path)
        f_name = path[path.find('/', 1) + 1:]
        try:
            data = self.blobs.get_blob(c_name, f_name)
        except AzureMissingResourceHttpError:
            # The blob vanished remotely; evict it from the local cache so
            # later lookups agree with the server.
            dir = self._get_dir('/' + c_name, True)
            if f_name in dir['files']:
                del dir['files'][f_name]
            raise FuseOSError(ENOENT)
        except AzureException as e:
            # NOTE(review): assumes AzureException carries a numeric .code —
            # confirm against the SDK version in use.
            log.error("Read blob failed HTTP %d" % e.code)
            raise FuseOSError(EAGAIN)
    self.fd += 1
    # Handle tuple: (path, contents, dirty-flag); freshly opened => clean.
    self.fds[self.fd] = (path, data, False)
    return self.fd
def flush(self, path, fh=None):
if not fh:
raise FuseOSError(EIO)
else:
if fh not in self.fds:
raise FuseOSError(EIO)
path = self.fds[fh][0]
data = self.fds[fh][1]
dirty = self.fds[fh][2]
if not dirty:
return 0 # avoid redundant write
d, f = self._parse_path(path)
c_name = self.parse_container(path)
if data is None:
data = ''
示例4: AzureFS
# 需要导入模块: from azure.storage.blob import BlobService [as 别名]
# 或者: from azure.storage.blob.BlobService import get_blob [as 别名]
#.........这里部分代码省略.........
args=(self.blobs, cname, files),
name="list-blobs/%s" % cname)
process.daemon = True
process.start()
container['process'] = process
log.info("Started blob list retrieval for '%s': %s",
cname, process)
container['files'] = files
return container
def _get_file(self, path):
    """Return the cached stat node for *path*, querying Azure on a miss.

    Returns None when the file does not exist; negative results are
    cached for 30 seconds (in self._get_file_noent) to avoid hammering
    the service.
    """
    d, f = self._parse_path(path)
    log.debug("get_file: requested path=%s (d=%s, f=%s)", path, d, f)
    directory = self._get_dir(d, True)
    files = directory['files'] if directory is not None else None
    # BUGFIX: guard the membership test — when the directory is unknown,
    # 'f in None' would raise TypeError instead of falling through.
    if files is not None and f in files:
        return files[f]

    if not hasattr(self, "_get_file_noent"):
        self._get_file_noent = {}

    last_check = self._get_file_noent.get(path, 0)
    if time.time() - last_check <= 30:
        # Negative TTL is 30 seconds (hardcoded for now)
        log.info("get_file: cache says to reply negative for %s", path)
        return None

    # Check if file now exists and our caches are just stale.
    try:
        c = self._parse_container(d)
        p = path[path.find('/', 1) + 1:]
        props = self.blobs.get_blob_properties(c, p)
        log.info("get_file: found locally unknown remote file %s: %s",
                 path, repr(props))

        # 0o644 instead of the Python-2-only literal 0644 (same value).
        node = make_stat(stat.S_IFREG | 0o644, props)

        if node['st_size'] > 0:
            log.info("get_file: properties for %s: %s", path, repr(node))
            # Remember this, so we won't have to re-query it.
            # BUGFIX: only cache when the parent directory entry exists.
            if files is not None:
                files[f] = node
            if path in self._get_file_noent:
                del self._get_file_noent[path]
            return node
        else:
            # TODO: FIXME: HACK: We currently ignore empty files.
            # Sometimes the file is not yet here and is still uploading.
            # Such files have "content-length: 0". Ignore those for now.
            log.warning("get_file: the file %s is not yet here (size=%s)",
                        path, node['st_size'])
            self._get_file_noent[path] = time.time()
            return None
    except AzureMissingResourceHttpError:
        log.info("get_file: remote confirms non-existence of %s", path)
        self._get_file_noent[path] = time.time()
        return None
    except AzureException as e:
        log.error("get_file: exception while querying remote for %s: %s",
                  path, repr(e))
        self._get_file_noent[path] = time.time()
        return None
def getattr(self, path, fh=None):