

Python BlobService.put_blob Method Code Examples

This page collects typical usage examples of the azure.storage.BlobService.put_blob method in Python. If you have been wondering how to use BlobService.put_blob, what it looks like in practice, or where to find working examples, the curated snippets below should help. You can also explore further usage examples for the containing class, azure.storage.BlobService.


The sections below present 10 code examples of the BlobService.put_blob method, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code examples.
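Before turning to the examples, here is a minimal sketch of the call shape they all share. It assumes the legacy SDK (the pre-1.0 azure package, where BlobService lives in azure.storage); the account name, key, container, and blob name are placeholders:

import os
from azure.storage import BlobService  # legacy SDK: the pre-1.0 'azure' package

# Placeholder credentials -- substitute your own storage account.
blob_service = BlobService(account_name='myaccount',
                           account_key=os.environ['AZURE_STORAGE_ACCESS_KEY'])

# put_blob uploads the payload in a single request; x_ms_blob_type
# selects the blob kind ('BlockBlob' or 'PageBlob').
blob_service.put_blob('mycontainer', 'hello.txt', 'hello world\n',
                      x_ms_blob_type='BlockBlob')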

Example 1: store

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]
def store(image, entity, entity_id):
    blob_service = BlobService(account_name='shnergledata',
                               account_key=os.environ['BLOB_KEY'])
    myblob = image.read()
    name = '/' + entity + '/' + entity_id
    blob_service.put_blob('images', name, myblob, x_ms_blob_type='BlockBlob')
    return True
Author: ernestoluisrojas, Project: ShnergleServer, Lines: 9, Source: azureutil.py
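For context, a hypothetical invocation of store (the file name and the entity labels below are made up; any object with a read() method works for image):

with open('avatar.png', 'rb') as image:
    store(image, 'users', '42')  # stored as blob '/users/42' in the 'images' container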

Example 2: test_azure_call

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]
def test_azure_call(request):
    import os
    try:
        from azure.storage import BlobService
        bs = BlobService(os.environ["AZURE_STORAGE_ACCOUNT"], os.environ["AZURE_STORAGE_ACCESS_KEY"])
        import random
        container_name = hex(int(random.random() * 1000000000))

        bs.create_container(container_name)
        bs.put_blob(container_name, 'testblob', 'hello world\n', 'BlockBlob')
        blob = bs.get_blob(container_name, 'testblob')
        if blob != 'hello world\n':
            return HttpResponse("Failed!", status = '404')
        
        bs.delete_blob(container_name, 'testblob')
        bs.delete_container(container_name)

        return HttpResponse("Succeeded!")
    except Exception:
        try:
            import traceback

            return HttpResponse(traceback.format_exc() + str(os.environ.keys()))
        except Exception:
            import traceback
            return HttpResponse(traceback.format_exc())
Author: KuduApps, Project: PythonApp, Lines: 28, Source: test_urls.py

Example 3: uri_put_file

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]
def uri_put_file(access_key, secret_key, uri, fp, content_encoding=None):
    assert fp.tell() == 0
    data = fp.read()

    assert uri.startswith('wabs://')
    url_tup = urlparse(uri)
    check_sum = base64.encodestring(md5(data).digest())
    kwargs = dict(x_ms_blob_type='BlockBlob',
                  content_md5=check_sum.strip('\n'))
    if content_encoding is not None:
        kwargs['x_ms_blob_content_encoding'] = content_encoding

    conn = BlobService(access_key, secret_key, protocol='https')
    conn.put_blob(url_tup.netloc, url_tup.path, data, **kwargs)
    # To maintain consistency with the S3 version of this function we must
    # return an object with a certain set of attributes.  Currently, that set
    # of attributes consists of only 'size'
    return _Key(size=len(data))
Author: runway20, Project: wal-e, Lines: 20, Source: wabs_util.py

Example 4: uri_put_file

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]
def uri_put_file(creds, uri, fp, content_encoding=None):
    assert fp.tell() == 0
    assert uri.startswith('wabs://')

    def log_upload_failures_on_error(exc_tup, exc_processor_cxt):
        def standard_detail_message(prefix=''):
            return (prefix + ' There have been {n} attempts to upload '
                    'file {url} so far.'.format(n=exc_processor_cxt, url=uri))
        typ, value, tb = exc_tup
        del exc_tup

        # Screen for certain kinds of known-errors to retry from
        if issubclass(typ, socket.error):
            socketmsg = value[1] if isinstance(value, tuple) else value

            logger.info(
                msg='Retrying upload because of a socket error',
                detail=standard_detail_message(
                    "The socket error's message is '{0}'."
                    .format(socketmsg)))
        else:
            # For all otherwise untreated exceptions, report them as a
            # warning and retry anyway -- all exceptions that can be
            # justified should be treated and have error messages
            # listed.
            logger.warning(
                msg='retrying file upload from unexpected exception',
                detail=standard_detail_message(
                    'The exception type is {etype} and its value is '
                    '{evalue} and its traceback is {etraceback}'
                    .format(etype=typ, evalue=value,
                            etraceback=''.join(traceback.format_tb(tb)))))

        # Help Python GC by resolving possible cycles
        del tb

    # Because we're uploading in chunks, catch rate limiting and
    # connection errors which occur for each individual chunk instead of
    # failing the whole file and restarting.
    @retry(retry_with_count(log_upload_failures_on_error))
    def upload_chunk(chunk, block_id):
        check_sum = base64.encodestring(md5(chunk).digest()).strip('\n')
        conn.put_block(url_tup.netloc, url_tup.path, chunk,
                       block_id, content_md5=check_sum)

    url_tup = urlparse(uri)
    kwargs = dict(x_ms_blob_type='BlockBlob')
    if content_encoding is not None:
        kwargs['x_ms_blob_content_encoding'] = content_encoding

    conn = BlobService(creds.account_name, creds.account_key, protocol='https')
    conn.put_blob(url_tup.netloc, url_tup.path, '', **kwargs)

    # WABS requires large files to be uploaded in 4MB chunks
    block_ids = []
    length, index = 0, 0
    pool_size = int(os.getenv('WABS_UPLOAD_POOL_SIZE', 5))  # env values arrive as strings
    p = gevent.pool.Pool(size=pool_size)
    while True:
        data = fp.read(WABS_CHUNK_SIZE)
        if data:
            length += len(data)
            block_id = base64.b64encode(str(index))
            p.wait_available()
            p.spawn(upload_chunk, data, block_id)
            block_ids.append(block_id)
            index += 1
        else:
            p.join()
            break

    conn.put_block_list(url_tup.netloc, url_tup.path, block_ids)

    # To maintain consistency with the S3 version of this function we must
    # return an object with a certain set of attributes.  Currently, that set
    # of attributes consists of only 'size'
    # 'length' holds the total bytes uploaded; 'data' is empty ('') once the loop exits.
    return _Key(size=length)
Author: boldfield, Project: wal-e, Lines: 79, Source: wabs_util.py
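A note on Example 4: the Put Block API requires every block ID within a blob to have the same length once encoded, so building IDs with base64.b64encode(str(index)) can start failing when the index gains a digit (with 4 MB chunks, around the 4 GB mark). Example 7 below avoids this by zero-padding the counter before encoding; a minimal sketch of that approach (make_block_id is an illustrative helper, not part of wal-e):

import base64

def make_block_id(index, width=8):
    # Zero-pad before encoding so every block ID in the blob has the
    # same encoded length, as the Put Block API requires.
    return base64.b64encode(str(index).zfill(width))

block_ids = [make_block_id(i) for i in range(1001)]
assert len(set(len(b) for b in block_ids)) == 1  # uniform length across IDs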

Example 5: AzureBlobStorage

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]

# ... (part of this class's code is omitted here) ...
        else:
            return result

    def get_available_name(self, name):
        return super(AzureBlobStorage, self).get_available_name(name.replace('\\', '/'))

    def get_valid_name(self, name):
        return name

    def _list(self, path, prefix, maxresults):
        result = []
        blobs = self.blob.list_blobs(path, prefix, maxresults)

        for _blob in blobs:
            result.append(_blob.name)

        return result

    def listdir(self, path=None, prefix=None, maxresults=None):
        """
        Catalog file list.
        """
        if not path:
            path = self.container
        return [], self._list(path, prefix, maxresults)

    def size(self, name):
        """
        File size.
        """

        result, properties = self.exists(name, with_properties=True)

        if result:
            return int(properties['content-length'])
        else:
            return 0

    def url(self, name, chk_exist=False):
        """
        URL for file downloading.
        """

        if chk_exist:
            if self.exists(name):
                return '%s%s/%s' % (self.base_url, self.container, name)
            else:
                return None
        else:
            return '%s%s/%s' % (self.base_url, self.container, name)

    def _open(self, name, mode='rb'):
        """
        Open file.
        """

        in_mem_file = StringIO.StringIO(
            self.blob.get_blob(self.container, name))
        in_mem_file.name = name
        in_mem_file.mode = mode
        return File(in_mem_file)

    def _save(self, name, blob_to_upload, x_ms_blob_type='BlockBlob', content_type=None):
        """
        Save file.
        """

        if hasattr(blob_to_upload, 'content_type'):
            content_type = blob_to_upload.content_type or None

        if content_type is None:
            content_type = mimetypes.guess_type(name)[0] or None

        blob_to_upload.seek(0)

        self.blob.put_blob(self.container, name, blob_to_upload,
                           x_ms_blob_type, x_ms_blob_content_type=content_type)

        return name

    def modified_time(self, name):
        """
        Last modification time.
        """

        result, properties = self.exists(name, with_properties=True)

        if result:
            date_string = properties['last-modified']
            modified_dt = parser.parse(date_string)

            if timezone.is_naive(modified_dt):
                return modified_dt
            else:
                return timezone.make_naive(modified_dt, timezone.get_current_timezone())
        else:
            return None

    created_time = accessed_time = modified_time
Author: jacobis, Project: couple-mission, Lines: 104, Source: storage.py

Example 6: generate_and_upload

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]
def generate_and_upload(gauge_factory, config, logger):
    start = datetime.datetime.now()
    twitter_followers = gauge_factory('twitter.followers')
    twitter_tweets = gauge_factory('twitter.tweets')
    fb_friends = gauge_factory('facebook.friends')
    foursq_checkins = gauge_factory('foursquare.checkins')
    klout_score = gauge_factory('klout.score')
    runkeeper_activities = gauge_factory('runkeeper.activities')
    runkeeper_calories = gauge_factory('runkeeper.calories_burned')
    runkeeper_weight = gauge_factory('runkeeper.weight')
    tmp102_celsius = gauge_factory('tmp102.temperature', gauge_type='hourly')
    lastfm_listened = gauge_factory('lastfm.listened')
    jawbone_sleeps = gauge_factory('jawbone.sleeps')
    jawbone_steps = gauge_factory('jawbone.steps')
    jawbone_caffeine = gauge_factory('jawbone.caffeine')

    data = {}
    data_sources = [
        # (output key, gauge, days back, aggregator, postprocessors)
        ('twitter.followers', twitter_followers, 30, None,
            [zero_fill_daily, interpolators.linear]),
        ('twitter.tweets', twitter_tweets, 20, None, [zero_fill_daily]),
        ('facebook.friends', fb_friends, 180, monthly_max, None),
        ('foursquare.checkins', foursq_checkins, 14, None, [zero_fill_daily]),
        ('lastfm.listened', lastfm_listened, 14, None, [zero_fill_daily]),
        ('klout.score', klout_score, 30, weekly_max, [zero_fill_weekly,
                                                      interpolators.linear]),
        ('runkeeper.calories', runkeeper_calories, 60, weekly_sum,
            [zero_fill_weekly]),
        ('runkeeper.activities', runkeeper_activities, 60, weekly_sum,
            [zero_fill_weekly]),
        ('runkeeper.weight', runkeeper_weight, 180, weekly_min,
            [zero_fill_weekly, interpolators.linear]),
        ('sleeps', jawbone_sleeps, 14, None, [zero_fill_daily,
            interpolators.linear]),
        ('steps', jawbone_steps, 14, None, [zero_fill_daily,
            interpolators.linear]),
        ('caffeine', jawbone_caffeine, 30, None, [zero_fill_daily]),
        ('tmp102.temperature', tmp102_celsius, 2.5, None, None)
    ]

    for ds in data_sources:
        data[ds[0]] = ds[1].aggregate(today_utc() - timedelta(days=ds[2]),
                                      aggregator=ds[3],
                                      post_processors=ds[4])

    report = {
        'generated': str(now_utc()),
        'data': data,
        'took': (datetime.datetime.now() - start).seconds
    }
    report_json = json.dumps(report, indent=4, default=json_date_serializer)
    report_content = '{0}({1})'.format(JSONP_CALLBACK_NAME, report_json)
    
    blob_service = BlobService(config['azure.account'], config['azure.key'])
    blob_service.create_container(config['azure.blob.container'])
    blob_service.set_container_acl(config['azure.blob.container'],
                                   x_ms_blob_public_access='container')
    blob_service.put_blob(config['azure.blob.container'],
                          config['azure.blob.name'], report_content, 'BlockBlob')

    took = (datetime.datetime.now() - start).seconds
    logger.info('Report generated and uploaded. Took {0} s.'.format(took))
Author: JonathanGitHub, Project: personal-dashboard, Lines: 65, Source: reporting.py

Example 7: AzureFS

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]

# ... (part of this class's code is omitted here) ...
                if f_name in dir['files']:
                    del dir['files'][f_name]
                raise FuseOSError(ENOENT)
            except WindowsAzureError as e:
                log.error("Read blob failed HTTP %d" % e.code)
                raise FuseOSError(EAGAIN)

        self.fd += 1
        self.fds[self.fd] = (path, data, False)

        return self.fd

    def flush(self, path, fh=None):
        if not fh:
            raise FuseOSError(EIO)
        else:
            if fh not in self.fds:
                raise FuseOSError(EIO)
            path = self.fds[fh][0]
            data = self.fds[fh][1]
            dirty = self.fds[fh][2]

            if not dirty:
                return 0     # avoid redundant write

            d, f = self._parse_path(path)
            c_name = self.parse_container(path)

            if data is None:
                data = ''

            try:
                if len(data) < 64 * 1024 * 1024:   # 64 mb
                    self.blobs.put_blob(c_name, f, data, 'BlockBlob')
                else:
                    # divide file by blocks and upload
                    block_size = 8 * 1024 * 1024
                    num_blocks = int(math.ceil(len(data) * 1.0 / block_size))
                    rd = str(random.randint(1, 1e8))
                    block_ids = list()

                    for i in range(num_blocks):
                        part = data[i * block_size:min((i + 1) * block_size,
                            len(data))]
                        block_id = base64.encodestring('%s_%s' % (rd,
                            (8 - len(str(i))) * '0' + str(i)))
                        self.blobs.put_block(c_name, f, part, block_id)
                        block_ids.append(block_id)

                    self.blobs.put_block_list(c_name, f, block_ids)
            except WindowsAzureError:
                raise FuseOSError(EAGAIN)

            dir = self._get_dir(d, True)
            if not dir or f not in dir['files']:
                raise FuseOSError(EIO)

            # update local data
            dir['files'][f]['st_size'] = len(data)
            dir['files'][f]['st_mtime'] = time.time()
            self.fds[fh] = (path, data, False)  # mark as not dirty
            return 0

    def release(self, path, fh=None):
        if fh is not None and fh in self.fds:
            del self.fds[fh]
Author: QuasarSE, Project: azurefs, Lines: 70, Source: azurefs.py

Example 8: AzureStorageBlockDeviceAPI

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]

# ... (part of this class's code is omitted here) ...
        common_params = {
            'service_name': self._service_name,
            'deployment_name': self._service_name,
            'role_name': attach_to,
            'lun': lun
        }
        disk_size = None

        if target_disk.__class__.__name__ == 'Blob':
            # exclude 512 byte footer
            disk_size = target_disk.properties.content_length

            common_params['source_media_link'] = \
                'https://' + self._storage_account_name \
                + '.blob.core.windows.net/' + self._disk_container_name \
                + '/' + blockdevice_id

            common_params['disk_label'] = blockdevice_id

        else:

            disk_size = self._gibytes_to_bytes(
                target_disk.logical_disk_size_in_gb)

            common_params['disk_name'] = target_disk.name

        request = self._azure_service_client.add_data_disk(**common_params)
        self._wait_for_async(request.request_id, 5000)

        return disk_size

    def _create_volume_blob(self, size, dataset_id):
        # Create a new page blob as a blank disk
        self._azure_storage_client.put_blob(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            blob=None,
            x_ms_blob_type='PageBlob',
            x_ms_blob_content_type='application/octet-stream',
            x_ms_blob_content_length=size)

        # for disk to be a valid vhd it requires a vhd footer
        # on the last 512 bytes
        vhd_footer = Vhd.generate_vhd_footer(size)

        self._azure_storage_client.put_page(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            page=vhd_footer,
            x_ms_page_write='update',
            x_ms_range='bytes=' + str((size - 512)) + '-' + str(size - 1))

    def _disk_label_for_dataset_id(self, dataset_id):
        """
        Returns a disk label for a given Dataset ID
        :param unicode dataset_id: The identifier of the dataset
        :returns string: A string representing the disk label
        """

        label = 'flocker-' + str(dataset_id)
        return label

    def _dataset_id_for_disk_label(self, disk_label):
        """
        Returns a UUID representing the Dataset ID for the given disk
        label
Author: FlorianLudwig, Project: azure-flocker-driver, Lines: 70, Source: azure_storage_driver.py
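A note on Example 8: page blob lengths and put_page ranges must be multiples of 512 bytes, and a fixed VHD keeps its 512-byte footer at the very end of the blob, which is why _create_volume_blob writes the footer into the last page. A minimal sketch of the arithmetic (round_up_to_page is an illustrative helper, not part of the driver):

def round_up_to_page(n, page=512):
    # Page blob sizes and put_page ranges must be 512-byte aligned.
    return ((n + page - 1) // page) * page

size = round_up_to_page(1000000)                     # rounds up to 1000448
x_ms_range = 'bytes=%d-%d' % (size - 512, size - 1)  # last page holds the VHD footer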

Example 9: Command

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]

# ... (part of this class's code is omitted here) ...
        self.wipe = options.get('wipe')
        self.test_run = options.get('test_run')
        self.verbosity = int(options.get('verbosity'))
        if hasattr(options, 'container'):
            self.STATIC_CONTAINER = options.get('container')
        self.sync_files()

    def sync_files(self):
        self.service = BlobService(account_name=self.ACCOUNT_NAME,
            account_key=self.ACCOUNT_KEY)

        try:
            self.service.get_container_properties(self.STATIC_CONTAINER)
        except WindowsAzureMissingResourceError:
            self.service.create_container(self.STATIC_CONTAINER,
                x_ms_blob_public_access='blob')

        self.service.set_container_acl(self.STATIC_CONTAINER, x_ms_blob_public_access='blob')

        # if -w option is provided, wipe out the contents of the container
        if self.wipe:
            blob_count = len(self.service.list_blobs(self.STATIC_CONTAINER))

            if self.test_run:
                print "Wipe would delete %d objects." % blob_count
            else:
                print "Deleting %d objects..." % blob_count
                for blob in self.service.list_blobs(self.STATIC_CONTAINER):
                    self.service.delete_blob(self.STATIC_CONTAINER, blob.name)

        # walk through the directory, creating or updating files on the cloud
        os.path.walk(self.DIRECTORY, self.upload_files, "foo")

        # remove any files on remote that don't exist locally
        self.delete_files()

        # print out the final tally to the cmd line
        self.update_count = self.upload_count - self.create_count
        print
        if self.test_run:
            print "Test run complete with the following results:"
        print "Skipped %d. Created %d. Updated %d. Deleted %d." % (
            self.skip_count, self.create_count, self.update_count, self.delete_count)

    def upload_files(self, arg, dirname, names):
        # upload or skip items
        for item in names:
            file_path = os.path.join(dirname, item)
            if os.path.isdir(file_path):
                continue # Don't try to upload directories

            object_name = self.STATIC_URL + file_path.split(self.DIRECTORY)[1]
            self.local_object_names.append(object_name)

            try:
                properties = self.service.get_blob_properties(self.STATIC_CONTAINER,
                    object_name)
            except WindowsAzureMissingResourceError:
                properties = {}
                self.create_count += 1

            cloud_datetime = None
            if 'last-modified' in properties:
                cloud_datetime = (properties['last-modified'] and
                                  datetime.datetime.strptime(
                                    properties['last-modified'],
                                    "%a, %d %b %Y %H:%M:%S %Z"
                                  ) or None)

            local_datetime = datetime.datetime.utcfromtimestamp(
                                               os.stat(file_path).st_mtime)

            if cloud_datetime and local_datetime < cloud_datetime:
                self.skip_count += 1
                if self.verbosity > 1:
                    print "Skipped %s: not modified." % object_name
                continue

            if not self.test_run:
                file_contents = open(file_path, 'rb').read()
                content_type, encoding = mimetypes.guess_type(file_path)
                print "content-type", content_type
                print "encoding", encoding
                self.service.put_blob(self.STATIC_CONTAINER, object_name, file_contents,
                    x_ms_blob_type='BlockBlob', x_ms_blob_content_type=content_type,
                    content_encoding=encoding)
                # sync_headers(cloud_obj)
            self.upload_count += 1
            if self.verbosity > 1:
                print "Uploaded", object_name

    def delete_files(self):
        # remove any objects in the container that don't exist locally
        for blob in self.service.list_blobs(self.STATIC_CONTAINER):
            if blob.name not in self.local_object_names:
                self.delete_count += 1
                if self.verbosity > 1:
                    print "Deleted %s" % blob.name
                if not self.test_run:
                    self.service.delete_blob(self.STATIC_CONTAINER, blob.name)
Author: ricardomomm, Project: django-azurite, Lines: 104, Source: syncstatic.py

Example 10: Storage

# Required import: from azure.storage import BlobService [as alias]
# Or: from azure.storage.BlobService import put_blob [as alias]
class Storage(driver.Base):

    supports_bytes_range = True

    def __init__(self, path=None, config=None):
        self._config = config
        self._container = self._config.azure_storage_container

        protocol = 'https' if self._config.azure_use_https else 'http'
        acct_name = self._config.azure_storage_account_name
        acct_key = self._config.azure_storage_account_key
        self._blob = BlobService(
            account_name=acct_name, account_key=acct_key, protocol=protocol)

        self._init_container()
        logger.debug("Initialized azureblob storage driver")

    def _init_container(self):
        '''Initializes image container on Azure blob storage if the container
        does not exist.
        '''
        created = self._blob.create_container(
            self._container, x_ms_blob_public_access='blob',
            fail_on_exist=False)
        if created:
            logger.info('Created blob container for image registry.')
        else:
            logger.debug('Registry container already exists.')
        return created

    @lru.get
    def get_content(self, path):
        try:
            return self._blob.get_blob(self._container, path)
        except azure.WindowsAzureMissingResourceError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    @lru.set
    def put_content(self, path, content):
        self._blob.put_blob(self._container, path, content, 'BlockBlob')
        return path

    def stream_read(self, path, bytes_range=None):
        try:
            f = io.BytesIO()
            self._blob.get_blob_to_file(self._container, path, f)

            if bytes_range:
                f.seek(bytes_range[0])
                total_size = bytes_range[1] - bytes_range[0] + 1
            else:
                f.seek(0)

            nb_bytes = 0  # bytes yielded so far; needed for ranged reads
            while True:
                buf = None
                if bytes_range:
                    # Bytes Range is enabled
                    buf_size = self.buffer_size
                    if nb_bytes + buf_size > total_size:
                        # We make sure we don't read out of the range
                        buf_size = total_size - nb_bytes
                    if buf_size > 0:
                        buf = f.read(buf_size)
                        nb_bytes += len(buf)
                    else:
                        # We're at the end of the range
                        buf = ''
                else:
                    buf = f.read(self.buffer_size)

                if not buf:
                    break

                yield buf
        except IOError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    def stream_write(self, path, fp):
        self._blob.put_block_blob_from_file(self._container, path, fp)

    def list_directory(self, path=None):
        if not path.endswith('/'):
            path += '/'  # path=a would list a/b.txt as well as 'abc.txt'

        blobs = list(self._blob.list_blobs(self._container, path))
        if not blobs:
            raise exceptions.FileNotFoundError('%s is not there' % path)

        return [b.name for b in blobs]

    def exists(self, path):
        try:
            self._blob.get_blob_properties(self._container, path)
            return True
        except azure.WindowsAzureMissingResourceError:
            return False

    @lru.remove
    def remove(self, path):
        is_blob = self.exists(path)
# ... (part of this class's code is omitted here) ...
Author: ahmetalpbalkan, Project: docker-registry-driver-azure, Lines: 103, Source: azureblob.py


Note: The azure.storage.BlobService.put_blob examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other open source code and documentation platforms. The snippets were selected from open source projects contributed by many developers, and the copyright of each belongs to its original author; when sharing or using them, please follow the license of the corresponding project. Do not reproduce without permission.