

Python Bucket.list Method Code Examples

This article collects typical usage examples of the Python method boto.s3.bucket.Bucket.list. If you are wondering exactly how to use Bucket.list in Python, how it is typically called, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples of the containing class, boto.s3.bucket.Bucket.


The following presents 13 code examples of the Bucket.list method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
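Before turning to the examples, here is a minimal usage sketch of Bucket.list. The bucket name, the prefix, and the credential setup are placeholders for illustration only and are not taken from any example below.

import boto
from boto.s3.bucket import Bucket

# Minimal sketch: 'example-bucket' and the 'logs/' prefix are placeholders,
# and AWS credentials are assumed to be available via environment variables
# or a boto config file.
conn = boto.connect_s3()
bucket = Bucket(connection=conn, name='example-bucket')

# Bucket.list returns a lazy iterator over the keys matching the prefix.
for key in bucket.list(prefix='logs/'):
    print('%s\t%d\t%s' % (key.name, key.size, key.last_modified))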

Example 1: test_basic_anon

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
    def test_basic_anon(self):
        auth_con = S3Connection()
        # create a new, empty bucket
        bucket_name = 'test-%d' % int(time.time())
        auth_bucket = auth_con.create_bucket(bucket_name)

        # try read the bucket anonymously
        anon_con = S3Connection(anon=True)
        anon_bucket = Bucket(anon_con, bucket_name)
        try:
            iter(anon_bucket.list()).next()
            self.fail("anon bucket list should fail")
        except S3ResponseError:
            pass

        # give bucket anon user access and anon read again
        auth_bucket.set_acl('public-read')
        try:
            iter(anon_bucket.list()).next()
            self.fail("not expecting contents")
        except S3ResponseError:
            self.fail("we should have public-read access.")
        except StopIteration:
            pass

        # cleanup
        auth_con.delete_bucket(auth_bucket)
Developer ID: 2uinc, Project: boto, Lines of code: 29, Source file: test_connection.py

Example 2: test_basic_anon

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
    def test_basic_anon(self):
        auth_con = S3Connection()
        # create a new, empty bucket
        bucket_name = 'test-%d' % int(time.time())
        auth_bucket = auth_con.create_bucket(bucket_name)

        # try read the bucket anonymously
        anon_con = S3Connection(anon=True)
        anon_bucket = Bucket(anon_con, bucket_name)
        try:
            next(iter(anon_bucket.list()))
            self.fail("anon bucket list should fail")
        except S3ResponseError:
            pass

        # give bucket anon user access and anon read again
        auth_bucket.set_acl('public-read')
        time.sleep(10)  # Was 5 seconds; turned out not to be enough
        try:
            next(iter(anon_bucket.list()))
            self.fail("not expecting contents")
        except S3ResponseError as e:
            self.fail("We should have public-read access, but received "
                      "an error: %s" % e)
        except StopIteration:
            pass

        # cleanup
        auth_con.delete_bucket(auth_bucket)
Developer ID: 10sr, Project: hue, Lines of code: 31, Source file: test_connection.py

Example 3: ls

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
def ls(bucket_name, prefix='', pattern=None):
    connection = boto.connect_s3()
    bucket = Bucket(connection, bucket_name)
    keys = [k.key for k in bucket.list(prefix=prefix)]
    if pattern:
        regex = re.compile(pattern)
        keys = [k for k in keys if regex.search(k)]
    return keys
Developer ID: andreasjansson, Project: old-crap-look-away, Lines of code: 10, Source file: s3.py
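A hypothetical invocation of the ls helper above could look like the following; the bucket name, prefix, and pattern are placeholders, not values from the original project.

# Hypothetical usage of the ls helper: list gzipped objects under one prefix.
keys = ls('example-bucket', prefix='logs/2015-01-01/', pattern=r'\.gz$')
for key_name in keys:
    print(key_name)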

Example 4: S3Mr2LogMonitor

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
class S3Mr2LogMonitor(ElasticSearchMonitor):

    def __init__(self, jobflow, cluster_id, cluster_name, bucket, prefix, **kwargs):
        super(S3Mr2LogMonitor, self).__init__(**kwargs)

        self.jobflow = jobflow
        self.cluster_id = cluster_id
        self.cluster_name = cluster_name
        self.bucket = bucket
        self.prefix = prefix

        self.emr_logs = Bucket(boto.connect_s3(), bucket)

    def run(self):
        listing = self.emr_logs.list(prefix=self.prefix, delimiter="/")
        events = []
        for f in listing:
            path = f.name

            if not path.endswith('.jhist'):
                continue

            ts = arrow.get(f.last_modified)

            if(ts <= self.checktime):
                log.debug('Skipping old file: ' + f.name)
                continue

            job_id = job_pattern.match(path.split('/')[-1]).group(0)

            if job_id in self.jobs and self.jobs[job_id] >= ts.timestamp*1000:
                log.debug('Skipping processed file: ' + f.name)
                continue

            config_path = path[:path.rfind('/')]+'/'+job_id+'_conf.xml'

            event = {
                'inviso.type': 'mr2',
                'job.id': job_id,
                'application.id': job_id.replace('job_', 'application_'),
                'job.type': 'mr2',
                'file.type': ['history', 'config'],
                'jobflow' : self.jobflow,
                'cluster.id': self.cluster_id,
                'cluster': self.cluster_name,
                'history.uri': 's3://%s/%s' % (self.bucket,path),
                'config.uri':'s3://%s/%s' % (self.bucket,config_path),
                'bucket': self.bucket,
                'timestamp': str(ts),
                'epoch': ts.timestamp * 1000,
                'mapreduce.version': 'mr2'
            }

            log.info('Publishing event: (%s) %s ' % (event['cluster'], event['job.id']))
            events.append(event)
        for chunk in [events[i:i + self.chunk_size] for i in xrange(0, len(events), self.chunk_size)]:
            self.publisher.publish(chunk)
Developer ID: Netflix, Project: inviso, Lines of code: 59, Source file: monitor.py

Example 5: S3Mr1LogMonitor

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
class S3Mr1LogMonitor(TimestampMonitor):

    def __init__(self, jobflow, cluster_id, cluster_name, bucket, prefix, **kwargs):
        super(S3Mr1LogMonitor, self).__init__(**kwargs)

        self.jobflow = jobflow
        self.cluster_id = cluster_id
        self.cluster_name = cluster_name
        self.bucket = bucket
        self.prefix = prefix

        self.emr_logs = Bucket(boto.connect_s3(), bucket)

    def run(self):
        listing = self.emr_logs.list(prefix=self.prefix, delimiter="/")
        events = []
        for f in listing:
            path = f.name

            if path.endswith('_conf.xml') or not path.split('/')[-1].startswith('job_'):
                continue

            ts = parse(f.last_modified)

            if(ts <= self.last_run):
                continue

            job_id = job_pattern.match(path.split('/')[-1]).group(0)

            config_path = path[:path.rfind('/')]+'/'+job_id+'_conf.xml'

            event = {
                'inviso.type': 'mr1',
                'job.id': job_id,
                'job.type': 'mr1',
                'file.type': ['history', 'config'],
                'jobflow' : self.jobflow,
                'cluster.id': self.cluster_id,
                'cluster': self.cluster_name,
                'history.uri': 's3://%s/%s' % (self.bucket,path),
                'config.uri':'s3://%s/%s' % (self.bucket,config_path),
                'bucket': self.bucket,
                'timestamp': str(ts),
                'epoch': int((ts - EPOCH).total_seconds()) * 1000,
                'mapreduce.version': 'mr1'
            }
            events.append(event)
        for chunk in [events[i:i + self.chunk_size] for i in xrange(0, len(events), self.chunk_size)]:
            self.publisher.publish(chunk)
Developer ID: Netflix, Project: inviso, Lines of code: 51, Source file: monitor.py

Example 6: delete_from_s3

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
def delete_from_s3(site, bucket, prefix=None):
    """ Remove all files with the prefix specified from the bucket. """
    if bucket is None:
        print red('Error: Bucket must be specified.')
        return
    # Setup boto
    import boto
    from boto.s3.bucket import Bucket
    from boto.s3.key import Key

    setup_aws_access_key(site)

    # Fix the prefix
    if prefix:
        prefix = prefix.lstrip('/')

    # Connect to S3, list the contents, and remove all of the keys
    c = boto.connect_s3()
    b = Bucket(c, bucket)
    result_set = b.list(prefix=prefix)
    result = b.delete_keys([key.name for key in result_set])
Developer ID: mvx24, Project: fabric-shuttle, Lines of code: 23, Source file: s3.py

Example 7: delete_file

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
logging.basicConfig(filename="s3_download.log", level=logging.INFO)

from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
AWS_S3_SECURE_URLS = False
AWS_QUERYSTRING_AUTH = False

AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
AWS_STORAGE_BUCKET_NAME = ''


def delete_file(key):
    key.delete()
    logging.info("del:%s" % key.name)

if __name__ == "__main__":
    conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
    bucket = Bucket(connection=conn, name=AWS_STORAGE_BUCKET_NAME)

    logging.info("Fetching bucket list")
    bucket_list = bucket.list()

    logging.info("Creating a pool")
    pool = GreenPool(size=20)

    logging.info("Deleting files from bucket...")
    for x in pool.imap(delete_file, bucket_list):
        pass
Developer ID: jwineinger, Project: S3-Helpers, Lines of code: 31, Source file: s3_bucket_empty.py

Example 8: EmrMr1LogMonitor

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
class EmrMr1LogMonitor(TimestampMonitor):

    def __init__(self, bucket=None, log_path=None, **kwargs):
        super(EmrMr1LogMonitor, self).__init__(**kwargs)

        s3 = boto.connect_s3()
        self.emr_logs = Bucket(s3, bucket)

    def run(self):
        clusters = self.emr_logs.list(delimiter="/")

        threads = self.process_clusters(clusters)

        start = time.time()

        for thread in threads:
            thread.join()

        log.info('Completed in: %f seconds' % (time.time() - start))

    def process_clusters(self, clusters):
        threads = []

        for cluster in clusters:
            cluster_id = cluster.name
            cluster_name = re.sub('_([0-9]+)/?', '', cluster.name)

            jobflows = self.emr_logs.list(prefix=cluster.name, delimiter="/")

            def process_jobflow(cname, cid, jobflow):
                log.info('Processing flow: %s' % jobflow.name)
                config_paths = self.emr_logs.list(prefix=jobflow.name+"jobs/", delimiter="/")

                for config_path in config_paths:
                    ts = parse(config_path.last_modified)

                    if(ts <= self.last_run):
                        continue

                    job_id = job_pattern.match(config_path.name.split('/')[-1]).group(0)

                    log.debug("Publishing job event for: %s" % job_id)

                    self.publisher.publish([{
                                                'job.id': job_id,
                                                'job.type': 'mr1',
                                                'file.type': 'config' if config_path.name.endswith('_conf.xml') else 'history',
                                                'cluster.id': cid,
                                                'cluster.name': cname,
                                                'history.uri': 's3://' +config_path.bucket.name+'/' + config_path.key,
                                                'bucket': config_path.bucket.name,
                                                'key': config_path.key,
                                                'timestamp': str(ts)
                                            }])

                log.info('Flow complete: ' + jobflow.name)

            for jobflow in jobflows:
                t = Thread(target=process_jobflow, name=jobflow.name, args=[cluster_name, cluster_id, jobflow])
                t.start()
                threads.append(t)

        return threads
Developer ID: Pivotal-Field-Engineering, Project: inviso, Lines of code: 65, Source file: monitor.py

Example 9: rm

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
def rm(args):
    parser = option_parser("rm URL...")
    parser.add_option("-f", "--force", dest="force", action="store_true",
        default=False, help="Ignore nonexistent keys")
    parser.add_option("-F", "--file", dest="file", action="store",
        default=None, help="File containing a list of URLs to delete")
    options, args = parser.parse_args(args)

    if len(args) == 0 and not options.file:
        parser.error("Specify URL")

    if options.file:
        for rec in read_command_file(options.file):
            if len(rec) != 1:
                raise Exception("Invalid record: %s" % rec)
            args.append(rec[0])

    buckets = {}
    for arg in args:
        uri = parse_uri(arg)
        if uri.bucket is None:
            raise Exception("URL for rm must contain a bucket: %s" % arg)
        if uri.key is None:
            raise Exception("URL for rm must contain a key: %s" % arg)

        bid = "%s/%s" % (uri.ident, uri.bucket)
        buri = S3URI(uri.user, uri.site, uri.bucket, uri.secure)

        if bid not in buckets:
            buckets[bid] = (buri, [])
        buckets[bid][1].append(uri)

    config = get_config(options)

    for bucket in buckets:

        # Connect to the bucket
        debug("Deleting keys from bucket %s" % bucket)
        uri, keys = buckets[bucket]
        conn = get_connection(config, uri)
        b = Bucket(connection=conn, name=uri.bucket)

        # Get a final list of all the keys, resolving wildcards as necessary
        bucket_contents = None
        keys_to_delete = set()
        for key in keys:
            key_name = key.key

            if has_wildcards(key_name):

                # If we haven't yet queried the bucket, then do so now
                # so that we can match the wildcards
                if bucket_contents is None:
                    bucket_contents = b.list()

                # Collect all the keys that match
                for k in bucket_contents:
                    if fnmatch.fnmatch(k.name, key_name):
                        keys_to_delete.add(k.name)

            else:
                keys_to_delete.add(key_name)

        info("Deleting %d keys" % len(keys_to_delete))

        batch_delete = config.getboolean(uri.site, "batch_delete")

        if batch_delete:
            debug("Using batch deletes")

            # Delete the keys in batches
            batch_delete_size = config.getint(uri.site, "batch_delete_size")
            debug("batch_delete_size: %d" % batch_delete_size)
            batch = []
            for k in keys_to_delete:
                batch.append(k)
                if len(batch) == batch_delete_size:
                    info("Deleting batch of %d keys" % len(batch))
                    b.delete_keys(batch, quiet=True)
                    batch = []

            # Delete the final batch
            if len(batch) > 0:
                info("Deleting batch of %d keys" % len(batch))
                b.delete_keys(batch, quiet=True)

        else:
            for key_name in keys_to_delete:
                debug("Deleting %s" % key_name)
                b.delete_key(key_name)
Developer ID: pegasus-isi, Project: pegasus, Lines of code: 92, Source file: s3.py

Example 10: get

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
def get(args):
    parser = option_parser("get URL [FILE]")
    parser.add_option("-c", "--chunksize", dest="chunksize", action="store", type="int",
        metavar="X", default=10, help="Set the chunk size for parallel downloads to X "
        "megabytes. A value of 0 will avoid chunked reads. This option only applies for "
        "sites that support ranged downloads (see ranged_downloads configuration "
        "parameter). The default chunk size is 10MB, the min is 1MB and the max is "
        "1024MB. Choose smaller values to reduce the impact of transient failures.")
    parser.add_option("-p", "--parallel", dest="parallel", action="store", type="int",
        metavar="N", default=4, help="Use N threads to upload FILE in parallel. The "
            "default value is 4, which enables parallel downloads with 4 threads. "
            "This parameter is only valid if the site supports ranged downloads "
            "and the --chunksize parameter is not 0. Otherwise parallel downloads are "
            "disabled.")
    parser.add_option("-r", "--recursive", dest="recursive", action="store_true",
        help="Get all keys that start with URL")
    options, args = parser.parse_args(args)

    if options.chunksize < 0 or options.chunksize > 1024:
        parser.error("Invalid chunksize")

    if options.parallel <= 0:
        parser.error("Invalid value for --parallel")

    if len(args) == 0:
        parser.error("Specify URL")

    uri = parse_uri(args[0])

    if uri.bucket is None:
        raise Exception("URL must contain a bucket: %s" % args[0])
    if uri.key is None and not options.recursive:
        raise Exception("URL must contain a key or use --recursive")

    if len(args) > 1:
        output = fix_file(args[1])
    elif uri.key is None:
        output = "./"
    else:
        output = os.path.basename(uri.key.rstrip("/"))

    info("Downloading %s" % uri)

    # Does the site support ranged downloads properly?
    config = get_config(options)
    ranged_downloads = config.getboolean(uri.site, "ranged_downloads")

    # Warn the user
    if options.parallel > 1:
        if not ranged_downloads:
            warn("ranged downloads not supported, ignoring --parallel")
        elif options.chunksize == 0:
            warn("--chunksize set to 0, ignoring --parallel")

    conn = get_connection(config, uri)
    b = Bucket(connection=conn, name=uri.bucket)

    if options.recursive:
        # Get all the keys we need to download

        def keyfilter(k):
            if uri.key is None:
                # We want all the keys in the bucket
                return True

            if uri.key.endswith("/"):
                # The user specified a "folder", so we should only match keys
                # in that "folder"
                return k.name.startswith(uri.key)

            if k.name == uri.key:
                # Match bare keys in case they specify recursive, but there
                # is a key that matches the specified path. Note that this
                # could cause a problem in the case where they have a key
                # called 'foo' and a "folder" called 'foo' in the same
                # bucket. In a file system that can't happen, but it can
                # happen in S3.
                return True

            if k.name.startswith(uri.key+"/"):
                # All other keys in the "folder"
                return True

            return False

        keys = [x for x in b.list(uri.key) if keyfilter(x)]
    else:
        # Just get the one key we need to download
        key = b.get_key(uri.key)
        if key is None:
            raise Exception("No such key. If %s is a folder, try --recursive." % uri.key)
        keys = [key]

    info("Downloading %d keys" % len(keys))

    start = time.time()
    totalsize = 0
    for key in keys:
        outfile = get_path_for_key(b.name, uri.key, key.name, output)

#......... part of the code omitted here .........
Developer ID: pegasus-isi, Project: pegasus, Lines of code: 103, Source file: s3.py

Example 11: S3Bucket

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
class S3Bucket(object):
    """A class for..."""

#===================================================================================================
#                                                                                       C L A S S

    PUBLIC_READ = 'public-read'
    PRIVATE     = 'private'

#___________________________________________________________________________________________________ __init__
    def __init__(self, bucketName, awsId, awsSecret):
        """Creates a new instance of S3Bucket."""
        self._bucketName = bucketName
        self._awsId      = awsId
        self._awsSecret  = awsSecret

        self._conn   = S3Connection(self._awsId, self._awsSecret)
        self._bucket = Bucket(self._conn, bucketName)

#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: bucketName
    @property
    def bucketName(self):
        return self._bucketName

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ listKeys
    def listKeys(self, path, pathFilter =None, includeDirs =True, includeFiles =True):
        if len(path) > 0 and path[0] == '/':
            path = path[1:]

        objs = self._bucket.list(path)
        out  = []
        for obj in objs:
            isDir = obj.name[-1] == '/' and obj.size == 0
            if isDir and not includeDirs:
                continue
            if not isDir and not includeFiles:
                continue

            if pathFilter is None or obj.name.find(pathFilter) != -1:
                out.append(obj)

        return out

#___________________________________________________________________________________________________ printBucketContents
    def printBucketContents(self, path, fileFilter, logger =None):
        out = self.listKeys(path, fileFilter)
        s = u'Displaying %s results for %s/%s.' % (
            unicode(len(out)),
            self._bucketName,
            unicode(path))
        if logger:
            logger.write(s)
        else:
            print s

        index = 0
        for obj in out:
            s = u'  ' + unicode(index) + u' - ' + obj.name
            if logger:
                logger.write(s)
            else:
                print s
            index += 1

#___________________________________________________________________________________________________ getKey
    def getKey(self, key, createIfMissing =True):
        if isinstance(key, basestring):
            out = self._bucket.get_key(key_name=key)
            if createIfMissing and not out:
                out = Key(self._bucket, key)
            return out

        return key

#___________________________________________________________________________________________________ put
    def put(
            self, key, contents, zipContents =False, maxAge =-1, eTag =None, expires =None,
            newerThanDate =None, policy =None
    ):
        """Doc..."""
        k = self.getKey(key)

        if not self._localIsNewer(k, newerThanDate):
            return False

        headers = self._generateHeaders(k.name, expires=expires, eTag=eTag, maxAge=maxAge)

        if not isinstance(contents, unicode):
            contents = contents.decode('utf-8', 'ignore')

        if zipContents:
            fd, tempPath = tempfile.mkstemp()
            f = gzip.open(tempPath, 'w+b')
            f.write(contents.encode('utf-8', 'ignore'))
#......... part of the code omitted here .........
Developer ID: hannahp, Project: PyAid, Lines of code: 103, Source file: S3Bucket.py

Example 12: S3StaticFileStorage

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
class S3StaticFileStorage(Storage):
    BUCKET_NAME = settings.S3_STATICFILES_BUCKET
    KEY_POLICY = 'public-read'
    CHUNK_SIZE = 100 << 20

    def __init__(self):
        super(S3StaticFileStorage, self).__init__()
        self._bucket = Bucket(connection=s3_conn, name=self.BUCKET_NAME)
        self._bucket_public = Bucket(connection=s3_public_conn,
                name=self.BUCKET_NAME)
        if s3_conn.lookup(self.BUCKET_NAME) is None:
            s3_conn.create_bucket(self.BUCKET_NAME, policy='public-read')

        # Allow CORS access (for web fonts)
        self._bucket.set_cors(self._get_cors_config())

    def _get_cors_config(self):
        cors = CORSConfiguration()
        cors.add_rule(['GET'], ['*'])
        return cors

    def _get_key(self, name):
        key = self._bucket.get_key(name)
        if key is None:
            raise IOError('No such key')
        return key

    def _open(self, name, mode='rb'):
        if mode not in ('r', 'rb'):
            raise IOError('_open() only supports reading')
        key = self._get_key(name)
        key.open_read()
        return File(key)

    def _save(self, name, content):
        if name.endswith('.css'):
            content_type = 'text/css'
        elif name.endswith('.js'):
            content_type = 'application/javascript'
        elif name.endswith('.json'):
            content_type = 'application/json'
        elif hasattr(content.file, 'getvalue'):
            content_type = magic.from_buffer(content.file.getvalue(),
                    mime=True)
        else:
            content_type = magic.from_file(content.file.name, mime=True)
        hdrs = {
            'Content-Type': content_type,
        }
        if content.size > self.CHUNK_SIZE:
            # Upload in chunks
            upload = self._bucket.initiate_multipart_upload(name,
                    policy=self.KEY_POLICY, headers=hdrs)
            for i, buf in enumerate(content.chunks(self.CHUNK_SIZE), 1):
                upload.upload_part_from_file(StringIO(buf), i)
            upload.complete_upload()
        else:
            # Upload all at once
            key = self._bucket.new_key(name)
            key.set_contents_from_string(content.read(),
                    policy=self.KEY_POLICY, headers=hdrs)
        return name

    def get_available_name(self, name):
        return name

    def get_valid_name(self, name):
        return name

    def delete(self, name):
        self._bucket.delete_key(name)

    def exists(self, name):
        key = self._bucket.get_key(name)
        return key is not None

    def listdir(self, path):
        path = path.lstrip('/')
        return ([], [key.name for key in self._bucket.list(prefix=path)])

    def modified_time(self, name):
        key = self._get_key(name)
        stamp = dateutil.parser.parse(key.last_modified)
        # Convert to naive datetime in local time, as FileSystemStorage does
        return stamp.astimezone(tzlocal()).replace(tzinfo=None)

    def size(self, name):
        key = self._get_key(name)
        return key.size

    def url(self, name):
        key = self._bucket_public.new_key(name)
        return key.generate_url(0, query_auth=False)
Developer ID: cmusatyalab, Project: django-s3, Lines of code: 95, Source file: storage.py

Example 13: S3Bucket

# Required module import: from boto.s3.bucket import Bucket [as alias]
# Or alternatively: from boto.s3.bucket.Bucket import list [as alias]
class S3Bucket(object):
    """A class for..."""

#===================================================================================================
#                                                                                       C L A S S

    LOCATIONS   = Location
    PUBLIC_READ = 'public-read'
    PRIVATE     = 'private'

    _UPLOAD_CONDITIONS = [
        '{"bucket":"%(bucket)s"}',
        '{"acl":"private"}',
        '{"key":"%(key)s"}',
        '{"success_action_status":"200"}',
        '["content-length-range", 0, %(maxSize)s]',
        '{"x-amz-meta-uuid": "14365123651274"}',
        '["starts-with", "$x-amz-meta-tag", ""]',
        '{"x-amz-algorithm": "AWS4-HMAC-SHA256"}',
        '{"x-amz-credential": "%(awsid)/%{isoDate}/%{region}/s3/aws4_request"}'
        '{"x-amz-date": "%{isoDate}T000000Z" }']

    _UPLOAD_POLICY = '{"expiration":"%s", "conditions":[%s]}'

#___________________________________________________________________________________________________ __init__
    def __init__(self, bucketName, awsId, awsSecret, location =None):
        """Creates a new instance of S3Bucket."""
        self._bucketName = bucketName
        self._awsId      = awsId
        self._awsSecret  = awsSecret

        if location:
            self._conn = s3.connect_to_region(
                region_name=location,
                aws_access_key_id=self._awsId,
                aws_secret_access_key=self._awsSecret,
                calling_format=boto.s3.connection.OrdinaryCallingFormat())
        else:
            self._conn = S3Connection(
                aws_access_key_id=self._awsId,
                aws_secret_access_key=self._awsSecret)

        self._bucket = Bucket(self._conn, bucketName)


#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: bucketName
    @property
    def bucketName(self):
        return self._bucketName

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ generateUrl
    def generateUrl(self, keyName, secure =True, expires =0, expiresInHours =0, expiresInDays =0):
        """ Creates a url for the specified key name that expires in the specified number of
            seconds. Alternatively you can specify the expiresInHours or expiresInDays for easy
            conversion to alternate time periods. """
        if not expires:
            if expiresInHours:
                expires = int(3600.0*float(expiresInHours))
            elif expiresInDays:
                expires = int(24.0*3600.0*float(expiresInDays))

        if expires == 0:
            proto = 'http'
            if secure:
                proto += 's'
            return proto + '://' + self._bucket.get_website_endpoint() + '/' + keyName.lstrip('/')

        key = self.getKey(keyName, createIfMissing=True)
        return key.generate_url(
            expires_in=expires,
            query_auth=bool(expires > 0),
            force_http=not secure)

#___________________________________________________________________________________________________ listKeys
    def listKeys(self, path, pathFilter =None, includeDirs =True, includeFiles =True):
        if len(path) > 0 and path[0] == '/':
            path = path[1:]

        objs = self._bucket.list(path)
        out  = []
        for obj in objs:
            isDir = obj.name[-1] == '/' and obj.size == 0
            if isDir and not includeDirs:
                continue
            if not isDir and not includeFiles:
                continue

            if pathFilter is None or obj.name.find(pathFilter) != -1:
                out.append(obj)

        return out

#___________________________________________________________________________________________________ printBucketContents
    def printBucketContents(self, path, fileFilter, logger =None):
#......... part of the code omitted here .........
Developer ID: sernst, Project: PyAid, Lines of code: 103, Source file: S3Bucket.py


Note: The boto.s3.bucket.Bucket.list method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.