This article collects typical usage examples of the Python method boto.s3.bucket.Bucket.initiate_multipart_upload. If you are wondering what Bucket.initiate_multipart_upload does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also read further about its containing class, boto.s3.bucket.Bucket.
Two code examples of Bucket.initiate_multipart_upload are shown below, ordered by popularity.
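Before the full examples, the call pattern itself is small: initiate the upload on the bucket, send numbered parts with upload_part_from_file, then either complete_upload() or cancel_upload(). The following is only a minimal sketch of that pattern, assuming boto 2.x, credentials available from the environment, and placeholder names (my-bucket, large-file.bin, the 50 MB part size) that are not taken from the examples below:

import math
import os

from boto.s3.bucket import Bucket
from boto.s3.connection import S3Connection

# Placeholders: replace the bucket name and file path with your own.
conn = S3Connection()  # credentials are read from the environment / boto config
bucket = Bucket(connection=conn, name='my-bucket')

path = 'large-file.bin'
part_size = 50 * 1024 * 1024  # every part except the last must be at least 5 MB
size = os.stat(path).st_size
num_parts = int(math.ceil(size / float(part_size)))

upload = bucket.initiate_multipart_upload('large-file.bin')
try:
    with open(path, 'rb') as fp:
        for part_num in range(1, num_parts + 1):
            # With size= given, boto reads that many bytes from the current file position.
            length = min(part_size, size - (part_num - 1) * part_size)
            upload.upload_part_from_file(fp, part_num, size=length)
    upload.complete_upload()
except Exception:
    # Abort so the incomplete upload does not linger on the server.
    upload.cancel_upload()
    raise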
Example 1: put
# Required import: from boto.s3.bucket import Bucket [as alias]
# Or: from boto.s3.bucket.Bucket import initiate_multipart_upload [as alias]
#......... part of the code is omitted here .........
info("Uploading %s to %s/%s" % (infile, uri.bucket, keyname))
# Make sure file is not too large for the service
size = os.stat(infile).st_size
if size > (max_object_size*GB):
raise Exception("File %s exceeds object size limit"
" (%sGB) of service" % (infile, max_object_size))
totalsize += size
k = Key(bucket=b, name=keyname)
# Make sure the key does not exist
if not options.force and k.exists():
raise Exception("Key exists: '%s'. Try --force." % k.name)
if (not multipart_uploads) or (options.chunksize==0):
# no multipart, or chunks disabled, just do it the simple way
k.set_contents_from_filename(infile)
else:
# Multipart supported, chunking requested
# The target chunk size is user-defined, but we may need
# to go larger if the file is big because the maximum number
# of chunks is 10,000. So the actual size of a chunk
# will range from 5MB to ~525MB if the maximum object size
# is 5 TB.
part_size = max(options.chunksize*MB, size/9999)
num_parts = int(math.ceil(size / float(part_size)))
if num_parts <= 1:
# Serial
k.set_contents_from_filename(infile)
else:
# Parallel
# Request upload
info("Creating multipart upload")
upload = b.initiate_multipart_upload(k.name)
try:
# Create all uploads
uploads = []
for i in range(0, num_parts):
length = min(size-(i*part_size), part_size)
up = PartialUpload(upload, i+1, num_parts, infile, i*part_size, length)
uploads.append(up)
if options.parallel <= 1:
# Serial
for up in uploads:
up()
else:
# Parallel
# Queue up requests
queue = Queue.Queue()
for up in uploads:
queue.put(up)
# No sense forking more threads than there are chunks
nthreads = min(options.parallel, num_parts)
# Fork threads
threads = []
for i in range(0, nthreads):
t = WorkThread(queue)
threads.append(t)
t.start()
# Wait for the threads
for t in threads:
t.join()
# If any of the threads encountered
# an error, then we fail here
if t.exception is not None:
raise t.exception
info("Completing upload")
upload.complete_upload()
except Exception as e:
# If there is an error, then we need to try and abort
# the multipart upload so that it doesn't hang around
# forever on the server.
try:
info("Aborting multipart upload")
upload.cancel_upload()
except Exception as f:
sys.stderr.write("ERROR: Unable to abort multipart"
" upload (use lsup/rmup): %s\n" % f)
raise e
end = time.time()
totalsize = totalsize / 1024.0
elapsed = end - start
if elapsed > 0:
rate = totalsize / elapsed
else:
rate = 0.0
info("Uploaded %d files of %0.1f KB in %0.6f seconds: %0.2f KB/s" %
(len(infiles), totalsize, elapsed, rate))
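Example 1 depends on two helpers that the excerpt does not show: PartialUpload, a callable that pushes one byte range of the local file as one part, and WorkThread, a worker that drains a queue of those callables. Their real implementations are omitted above; the sketch below is only a guess at what they might look like, assuming each part is sent with boto's MultiPartUpload.upload_part_from_file(fp, part_num, size=...):

import Queue
import threading


class PartialUpload(object):
    """Callable that uploads one part (offset/length) of a local file."""

    def __init__(self, upload, part_num, num_parts, infile, offset, length):
        self.upload = upload          # boto MultiPartUpload handle
        self.part_num = part_num
        self.num_parts = num_parts
        self.infile = infile
        self.offset = offset
        self.length = length

    def __call__(self):
        with open(self.infile, 'rb') as fp:
            fp.seek(self.offset)
            # boto reads `size` bytes starting at the current file position
            self.upload.upload_part_from_file(fp, self.part_num, size=self.length)


class WorkThread(threading.Thread):
    """Worker that runs queued callables until the queue is drained."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.exception = None
        self.daemon = True

    def run(self):
        try:
            while True:
                try:
                    task = self.queue.get(False)  # non-blocking; stop when empty
                except Queue.Empty:
                    return
                task()
        except Exception as e:
            self.exception = e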
Example 2: S3StaticFileStorage
# Required import: from boto.s3.bucket import Bucket [as alias]
# Or: from boto.s3.bucket.Bucket import initiate_multipart_upload [as alias]
# Note: this snippet assumes module-level objects that are not shown here:
# s3_conn and s3_public_conn (boto S3 connections), Django's settings and File,
# plus python-magic, StringIO, dateutil, and tzlocal.
class S3StaticFileStorage(Storage):
    BUCKET_NAME = settings.S3_STATICFILES_BUCKET
    KEY_POLICY = 'public-read'
    CHUNK_SIZE = 100 << 20  # 100 MiB

    def __init__(self):
        super(S3StaticFileStorage, self).__init__()
        self._bucket = Bucket(connection=s3_conn, name=self.BUCKET_NAME)
        self._bucket_public = Bucket(connection=s3_public_conn,
                                     name=self.BUCKET_NAME)
        if s3_conn.lookup(self.BUCKET_NAME) is None:
            s3_conn.create_bucket(self.BUCKET_NAME, policy='public-read')
            # Allow CORS access (for web fonts)
            self._bucket.set_cors(self._get_cors_config())

    def _get_cors_config(self):
        cors = CORSConfiguration()
        cors.add_rule(['GET'], ['*'])
        return cors

    def _get_key(self, name):
        key = self._bucket.get_key(name)
        if key is None:
            raise IOError('No such key')
        return key

    def _open(self, name, mode='rb'):
        if mode not in ('r', 'rb'):
            raise IOError('_open() only supports reading')
        key = self._get_key(name)
        key.open_read()
        return File(key)

    def _save(self, name, content):
        if name.endswith('.css'):
            content_type = 'text/css'
        elif name.endswith('.js'):
            content_type = 'application/javascript'
        elif name.endswith('.json'):
            content_type = 'application/json'
        elif hasattr(content.file, 'getvalue'):
            content_type = magic.from_buffer(content.file.getvalue(),
                                             mime=True)
        else:
            content_type = magic.from_file(content.file.name, mime=True)
        hdrs = {
            'Content-Type': content_type,
        }
        if content.size > self.CHUNK_SIZE:
            # Upload in chunks
            upload = self._bucket.initiate_multipart_upload(
                name, policy=self.KEY_POLICY, headers=hdrs)
            for i, buf in enumerate(content.chunks(self.CHUNK_SIZE), 1):
                upload.upload_part_from_file(StringIO(buf), i)
            upload.complete_upload()
        else:
            # Upload all at once
            key = self._bucket.new_key(name)
            key.set_contents_from_string(content.read(),
                                         policy=self.KEY_POLICY, headers=hdrs)
        return name

    def get_available_name(self, name):
        return name

    def get_valid_name(self, name):
        return name

    def delete(self, name):
        self._bucket.delete_key(name)

    def exists(self, name):
        key = self._bucket.get_key(name)
        return key is not None

    def listdir(self, path):
        path = path.lstrip('/')
        return ([], [key.name for key in self._bucket.list(prefix=path)])

    def modified_time(self, name):
        key = self._get_key(name)
        stamp = dateutil.parser.parse(key.last_modified)
        # Convert to naive datetime in local time, as FileSystemStorage does
        return stamp.astimezone(tzlocal()).replace(tzinfo=None)

    def size(self, name):
        key = self._get_key(name)
        return key.size

    def url(self, name):
        key = self._bucket_public.new_key(name)
        return key.generate_url(0, query_auth=False)
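Both examples are careful to call complete_upload() or cancel_upload(): a multipart upload that is neither completed nor aborted keeps its already-uploaded parts on the server (and keeps consuming storage) until someone cancels it, which is why Example 1 points at lsup/rmup-style cleanup tools. As a small hedged sketch (placeholder bucket name, boto 2.x assumed), stale uploads can be found and aborted with Bucket.get_all_multipart_uploads():

from boto.s3.connection import S3Connection

# Placeholder bucket name; credentials are assumed to come from the environment.
conn = S3Connection()
bucket = conn.get_bucket('my-bucket')

# List multipart uploads that were started but never completed or aborted.
for upload in bucket.get_all_multipart_uploads():
    print('pending: %s (id=%s)' % (upload.key_name, upload.id))
    upload.cancel_upload()  # abort it so its parts are discarded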