

Python transfer.TransferConfig Method Code Examples

This article collects typical usage examples of the Python method boto3.s3.transfer.TransferConfig. If you are wondering what transfer.TransferConfig does, how to use it, or what real code that calls it looks like, the curated examples below may help. You can also explore further usage examples from the containing module, boto3.s3.transfer.


The 15 code examples of transfer.TransferConfig below are sorted by popularity by default.
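Before diving into the examples, here is a minimal sketch of the typical TransferConfig pattern. The bucket name, key, and tuning values are illustrative placeholders, not taken from any project below:

import boto3
from boto3.s3.transfer import TransferConfig

# Files above 8 MB are uploaded as multipart, in 8 MB chunks,
# using up to 10 worker threads.
config = TransferConfig(
    multipart_threshold=8 * 1024 * 1024,
    multipart_chunksize=8 * 1024 * 1024,
    max_concurrency=10,
    use_threads=True,
)

s3 = boto3.client('s3')
s3.upload_file('local-file.bin', 'my-bucket', 'remote-key', Config=config)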

Example 1: put_multipart

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def put_multipart(self, local_path, destination_s3_path, part_size=DEFAULT_PART_SIZE, **kwargs):
        """
        Put an object stored locally to an S3 path
        using S3 multi-part upload (for files > 8Mb).
        :param local_path: Path to source local file
        :param destination_s3_path: URL for target S3 location
        :param part_size: Part size in bytes. Default: 8388608 (8MB)
        :param kwargs: Keyword arguments are passed to the boto function `upload_fileobj` as ExtraArgs
        """
        self._check_deprecated_argument(**kwargs)

        from boto3.s3.transfer import TransferConfig
        # boto3's default part size is 8 MB; change it to the part_size
        # provided as a parameter
        transfer_config = TransferConfig(multipart_chunksize=part_size)

        (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)

        # Use a context manager so the file handle is closed after the upload
        with open(local_path, 'rb') as fileobj:
            self.s3.meta.client.upload_fileobj(
                Fileobj=fileobj, Bucket=bucket, Key=key, Config=transfer_config, ExtraArgs=kwargs) 
Author: d6t, Project: d6tpipe, Lines: 22, Source: s3.py
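A practical note on part_size: S3 multipart uploads are capped at 10,000 parts, so the 8 MB default limits a single upload to roughly 80 GB. A hedged sketch of sizing parts for larger files (the helper name and defaults are ours, not from d6tpipe):

import math
import os

S3_MAX_PARTS = 10000
S3_MIN_PART_SIZE = 5 * 1024 * 1024  # S3's minimum part size (the last part is exempt)

def choose_part_size(local_path, default=8 * 1024 * 1024):
    # Hypothetical helper: grow the part size just enough to keep
    # very large files under the 10,000-part limit.
    needed = math.ceil(os.path.getsize(local_path) / S3_MAX_PARTS)
    return max(default, needed, S3_MIN_PART_SIZE)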

Example 2: close

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def close(self):
        self.writer.close()

        if self.s3client is not None:
            from boto3.s3.transfer import TransferConfig
            config = TransferConfig(multipart_threshold=1024 * 25, max_concurrency=10,
                                    multipart_chunksize=1024 * 25, use_threads=True)
            self.s3client.upload_file(
                os.path.join(self.storage_dir.name, 'temp.tfrecord'),
                self.bucket_name,
                self.file_name,
                ExtraArgs={'ACL': 'public-read'}, Config=config,
            )
            self.storage_dir.cleanup()
        if self.gclient is not None:
            bucket = self.gclient.get_bucket(self.bucket_name)
            blob = bucket.blob(self.file_name)
            blob.upload_from_filename(os.path.join(self.storage_dir.name, 'temp.tfrecord'))
            self.storage_dir.cleanup() 
Author: rowanz, Project: grover, Lines: 21, Source: prepare_lm_data.py

Example 3: main

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
# Imports needed by this excerpt; get_config comes from the z3 project's
# own config module (elided here), and MB is assumed to be the usual
# byte constant the module defines.
import argparse
import re
import sys

import boto3
import botocore.exceptions
from boto3.s3.transfer import TransferConfig

MB = 1024 * 1024

def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Read a key from s3 and write the content to stdout',
    )
    parser.add_argument('name', help='name of S3 key')
    args = parser.parse_args()
    extra_config = {}
    if 'HOST' in cfg:
        extra_config['endpoint_url'] = cfg['HOST']
    config = TransferConfig(
        max_concurrency=int(cfg['CONCURRENCY']),
        multipart_chunksize=int(re.sub('M', '', cfg['CHUNK_SIZE'])) * MB)
    if 'S3_KEY_ID' in cfg:
        s3 = boto3.client('s3', aws_access_key_id=cfg['S3_KEY_ID'], aws_secret_access_key=cfg['S3_SECRET'], **extra_config)
    else:
        s3 = boto3.client('s3', **extra_config)
    try:
        # download_fileobj needs a binary stream; on Python 3 that is
        # sys.stdout.buffer (the original wrote to sys.stdout under Python 2)
        s3.download_fileobj(cfg['BUCKET'], args.name, sys.stdout.buffer, Config=config)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise 
Author: presslabs, Project: z3, Lines: 24, Source: get.py

Example 4: _copy_file

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def _copy_file(self, source_path, destination_path, threads=DEFAULT_THREADS, part_size=DEFAULT_PART_SIZE, **kwargs):
        src_bucket, src_key = self._path_to_bucket_and_key(source_path)
        dst_bucket, dst_key = self._path_to_bucket_and_key(destination_path)
        transfer_config = TransferConfig(max_concurrency=threads, multipart_chunksize=part_size)
        item = self.get_key(source_path)
        copy_source = {
            'Bucket': src_bucket,
            'Key': src_key
        }

        self.s3.meta.client.copy(copy_source, dst_bucket, dst_key, Config=transfer_config, ExtraArgs=kwargs)

        return 1, item.size 
Author: d6t, Project: d6tpipe, Lines: 15, Source: s3.py

Example 5: _copy_dir

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def _copy_dir(self, source_path, destination_path, threads=DEFAULT_THREADS,
                  start_time=None, end_time=None, part_size=DEFAULT_PART_SIZE, **kwargs):
        start = datetime.datetime.now()
        copy_jobs = []
        management_pool = ThreadPool(processes=threads)
        transfer_config = TransferConfig(max_concurrency=threads, multipart_chunksize=part_size)
        src_bucket, src_key = self._path_to_bucket_and_key(source_path)
        dst_bucket, dst_key = self._path_to_bucket_and_key(destination_path)
        src_prefix = self._add_path_delimiter(src_key)
        dst_prefix = self._add_path_delimiter(dst_key)
        key_path_len = len(src_prefix)
        total_size_bytes = 0
        total_keys = 0
        for item in self.list(source_path, start_time=start_time, end_time=end_time, return_key=True):
            path = item.key[key_path_len:]
            # prevents copy attempt of empty key in folder
            if path != '' and path != '/':
                total_keys += 1
                total_size_bytes += item.size
                copy_source = {
                    'Bucket': src_bucket,
                    'Key': src_prefix + path
                }
                the_kwargs = {'Config': transfer_config, 'ExtraArgs': kwargs}
                job = management_pool.apply_async(self.s3.meta.client.copy,
                                                  args=(copy_source, dst_bucket, dst_prefix + path),
                                                  kwds=the_kwargs)
                copy_jobs.append(job)
        # Wait for the pools to finish scheduling all the copies
        management_pool.close()
        management_pool.join()
        # Raise any errors encountered in any of the copy processes
        for result in copy_jobs:
            result.get()
        end = datetime.datetime.now()
        duration = end - start
        logger.info('%s : Complete : %s total keys copied in %s' %
                    (datetime.datetime.now(), total_keys, duration))
        return total_keys, total_size_bytes 
Author: d6t, Project: d6tpipe, Lines: 41, Source: s3.py

Example 6: _native_download_file

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def _native_download_file(meta, full_dst_file_name, max_concurrency):
        logger = getLogger(__name__)
        try:
            akey = SnowflakeS3Util._get_s3_object(meta, meta['src_file_name'])
            akey.download_file(
                full_dst_file_name,
                Callback=meta['get_callback'](
                    meta['src_file_name'],
                    meta['src_file_size'],
                    output_stream=meta['get_callback_output_stream'],
                    show_progress_bar=meta['show_progress_bar']) if
                meta['get_callback'] else None,
                Config=TransferConfig(
                    multipart_threshold=SnowflakeS3Util.DATA_SIZE_THRESHOLD,
                    max_concurrency=max_concurrency,
                    num_download_attempts=10,
                )
            )
            meta['result_status'] = ResultStatus.DOWNLOADED
        except botocore.exceptions.ClientError as err:
            if err.response['Error']['Code'] == EXPIRED_TOKEN:
                meta['result_status'] = ResultStatus.RENEW_TOKEN
            else:
                logger.debug(
                    "Failed to download a file: %s, err: %s",
                    full_dst_file_name, err, exc_info=True)
                raise err
        except RetriesExceededError as err:
            meta['result_status'] = ResultStatus.NEED_RETRY
            meta['last_error'] = err
        except OpenSSL.SSL.SysCallError as err:
            meta['last_error'] = err
            if err.args[0] == ERRORNO_WSAECONNABORTED:
                # The connection was dropped by S3 because there were too
                # many simultaneous connections; retry with lower
                # concurrency to mitigate it
                meta['result_status'] = ResultStatus.NEED_RETRY_WITH_LOWER_CONCURRENCY
            else:
                meta['result_status'] = ResultStatus.NEED_RETRY 
Author: snowflakedb, Project: snowflake-connector-python, Lines: 43, Source: s3_util.py
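Example 6 only records NEED_RETRY_WITH_LOWER_CONCURRENCY; the actual retry happens elsewhere in the connector. A hedged sketch of what a caller-side loop might look like (everything except TransferConfig is our naming, and we catch the built-in ConnectionError where the real connector inspects OpenSSL.SSL.SysCallError):

from boto3.s3.transfer import TransferConfig

def download_with_backoff(client, bucket, key, dest, max_concurrency=10):
    # Hypothetical retry loop: halve concurrency whenever S3 drops
    # the connection for having too many simultaneous streams.
    concurrency = max_concurrency
    while True:
        try:
            client.download_file(
                bucket, key, dest,
                Config=TransferConfig(max_concurrency=concurrency))
            return
        except ConnectionError:
            if concurrency == 1:
                raise
            concurrency = max(1, concurrency // 2)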

Example 7: upload_to_s3

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def upload_to_s3(in_fn, out_fn):
    config = TransferConfig(multipart_threshold=1024 * 25, max_concurrency=10,
                            multipart_chunksize=1024 * 25, use_threads=True)
    s3client.upload_file(in_fn, 'periodista', out_fn,
                         ExtraArgs={'ACL': 'public-read'},
                         Config=config,
                         ) 
Author: rowanz, Project: grover, Lines: 9, Source: dedupe_crawl.py

Example 8: __init__

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def __init__(self, fn):
        self.fn = fn
        if fn.startswith('s3://'):
            from boto3.s3.transfer import TransferConfig
            import boto3
            self.gclient = None
            self.s3client = boto3.client('s3')
            self.storage_dir = TemporaryDirectory()
            self.writer = tf.python_io.TFRecordWriter(os.path.join(self.storage_dir.name, 'temp.tfrecord'))
            self.bucket_name, self.file_name = self.fn.split('s3://', 1)[1].split('/', 1)
        elif fn.startswith('gs://'):
            from google.cloud import storage
            self.s3client = None
            self.gclient = storage.Client()
            self.storage_dir = TemporaryDirectory()
            self.writer = tf.python_io.TFRecordWriter(os.path.join(self.storage_dir.name, 'temp.tfrecord'))
            self.bucket_name, self.file_name = self.fn.split('gs://', 1)[1].split('/', 1)

        else:
            self.s3client = None
            self.gclient = None
            self.bucket_name = None
            self.file_name = None
            self.storage_dir = None
            self.writer = tf.python_io.TFRecordWriter(fn) 
Author: rowanz, Project: grover, Lines: 28, Source: prepare_lm_data.py

Example 9: __init__

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def __init__(self, *args, **kwargs):
        MarketRecorder.__init__(self, *args, **kwargs)
        self._bucket = self.context["bucket"]
        self._data_type = self.context.get("data_type", "marketdata")
        self.s3 = boto3.client("s3")
        transfer_config = TransferConfig(use_threads=False)
        self.transfer = S3Transfer(self.s3, config=transfer_config) 
Author: liampauling, Project: flumine, Lines: 9, Source: marketrecorder.py
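Here use_threads=False makes every transfer run serially on the calling thread, which avoids spawning background threads inside the recorder process. With the S3Transfer wrapper built above, an upload is then a plain blocking call; a brief illustrative usage (the file and key names are placeholders):

# recorder is an instance of the class above; upload_file blocks
# until the transfer completes because use_threads=False.
recorder.transfer.upload_file('market.json.gz', recorder._bucket, 'marketdata/market.json.gz')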

Example 10: bucket_upload_fileobj

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def bucket_upload_fileobj(self, Fileobj, Key, ExtraArgs=None,
                          Callback=None, Config=None):
    """Upload a file-like object to this bucket.

    The file-like object must be in binary mode.

    This is a managed transfer which will perform a multipart upload in
    multiple threads if necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')

        with open('filename', 'rb') as data:
            bucket.upload_fileobj(data, 'mykey')

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to upload. At a minimum, it must
        implement the `read` method, and must return bytes.

    :type Key: str
    :param Key: The name of the key to upload to.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the
        client operation.

    :type Callback: method
    :param Callback: A method which takes a number of bytes transferred to
        be periodically called during the upload.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing the
        upload.
    """
    return self.meta.client.upload_fileobj(
        Fileobj=Fileobj, Bucket=self.name, Key=Key, ExtraArgs=ExtraArgs,
        Callback=Callback, Config=Config) 
Author: skarlekar, Project: faces, Lines: 42, Source: inject.py

Example 11: object_upload_fileobj

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def object_upload_fileobj(self, Fileobj, ExtraArgs=None, Callback=None,
                          Config=None):
    """Upload a file-like object to this object.

    The file-like object must be in binary mode.

    This is a managed transfer which will perform a multipart upload in
    multiple threads if necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')
        obj = bucket.Object('mykey')

        with open('filename', 'rb') as data:
            obj.upload_fileobj(data)

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to upload. At a minimum, it must
        implement the `read` method, and must return bytes.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the
        client operation.

    :type Callback: method
    :param Callback: A method which takes a number of bytes transferred to
        be periodically called during the upload.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing the
        upload.
    """
    return self.meta.client.upload_fileobj(
        Fileobj=Fileobj, Bucket=self.bucket_name, Key=self.key,
        ExtraArgs=ExtraArgs, Callback=Callback, Config=Config) 
Author: skarlekar, Project: faces, Lines: 40, Source: inject.py

Example 12: bucket_download_fileobj

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def bucket_download_fileobj(self, Key, Fileobj, ExtraArgs=None,
                            Callback=None, Config=None):
    """Download an object from this bucket to a file-like-object.

    The file-like object must be in binary mode.

    This is a managed transfer which will perform a multipart download in
    multiple threads if necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')

        with open('filename', 'wb') as data:
            bucket.download_fileobj('mykey', data)

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to download into. At a minimum, it must
        implement the `write` method and must accept bytes.

    :type Key: str
    :param Key: The name of the key to download from.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the
        client operation.

    :type Callback: method
    :param Callback: A method which takes a number of bytes transferred to
        be periodically called during the download.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing the
        download.
    """
    return self.meta.client.download_fileobj(
        Bucket=self.name, Key=Key, Fileobj=Fileobj, ExtraArgs=ExtraArgs,
        Callback=Callback, Config=Config) 
Author: skarlekar, Project: faces, Lines: 42, Source: inject.py

Example 13: object_download_fileobj

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def object_download_fileobj(self, Fileobj, ExtraArgs=None, Callback=None,
                            Config=None):
    """Download this object from S3 to a file-like object.

    The file-like object must be in binary mode.

    This is a managed transfer which will perform a multipart download in
    multiple threads if necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')
        obj = bucket.Object('mykey')

        with open('filename', 'wb') as data:
            obj.download_fileobj(data)

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to download into. At a minimum, it must
        implement the `write` method and must accept bytes.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the
        client operation.

    :type Callback: method
    :param Callback: A method which takes a number of bytes transferred to
        be periodically called during the download.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing the
        download.
    """
    return self.meta.client.download_fileobj(
        Bucket=self.bucket_name, Key=self.key, Fileobj=Fileobj,
        ExtraArgs=ExtraArgs, Callback=Callback, Config=Config) 
Author: skarlekar, Project: faces, Lines: 40, Source: inject.py

Example 14: transfer_config

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def transfer_config(self):
        return TransferConfig(
            multipart_threshold=1024 * 25,
            max_concurrency=4,
            multipart_chunksize=1024 * 25,
            use_threads=True
        ) 
Author: augerai, Project: a2ml, Lines: 9, Source: file_uploader.py

Example 15: _upload_func

# Required import: from boto3.s3 import transfer [as alias]
# Or: from boto3.s3.transfer import TransferConfig [as alias]
def _upload_func(self, s3_uri, func, archive):
        from boto3.s3.transfer import S3Transfer, TransferConfig
        _, bucket, key_prefix = parse_s3(s3_uri)
        key = "%s/%s" % (key_prefix, func.name)
        transfer = S3Transfer(
            self.session_factory().client('s3'),
            config=TransferConfig(
                multipart_threshold=1024 * 1024 * 4))
        transfer.upload_file(
            archive.path,
            bucket=bucket,
            key=key,
            extra_args={
                'ServerSideEncryption': 'AES256'})
        return bucket, key 
Author: cloud-custodian, Project: cloud-custodian, Lines: 17, Source: mu.py
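Example 15 uses the older S3Transfer wrapper; the same upload can also go through the plain client API, which accepts the config and extra args directly. A hedged equivalent sketch (bucket and key are placeholders):

import boto3
from boto3.s3.transfer import TransferConfig

s3 = boto3.client('s3')
s3.upload_file(
    'archive.zip', 'my-bucket', 'prefix/func-name',
    Config=TransferConfig(multipart_threshold=4 * 1024 * 1024),
    ExtraArgs={'ServerSideEncryption': 'AES256'})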


Note: The boto3.s3.transfer.TransferConfig examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please consult each project's License before distributing or using the code; do not reproduce without permission.