本文整理汇总了Python中botocore.config.Config方法的典型用法代码示例。如果您正苦于以下问题:Python config.Config方法的具体用法?Python config.Config怎么用?Python config.Config使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类botocore.config
的用法示例。
在下文中一共展示了config.Config方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_connection
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def get_connection(conf):
    """Build an S3 client from the ``s3_*`` settings on *conf*.

    Returns a ``(client, region_name, bucket_prefix)`` tuple.
    Raises ``RuntimeError`` when the optional boto3 dependency is missing.
    """
    if boto3 is None:
        raise RuntimeError("boto3 unavailable")
    # Bound the connection pool size via botocore's client config.
    client_config = boto_config.Config(
        max_pool_connections=conf.s3_max_pool_connections)
    client = boto3.client(
        's3',
        endpoint_url=conf.s3_endpoint_url,
        region_name=conf.s3_region_name,
        aws_access_key_id=conf.s3_access_key_id,
        aws_secret_access_key=conf.s3_secret_access_key,
        config=client_config)
    return client, conf.s3_region_name, conf.s3_bucket_prefix
# NOTE(jd) OperationAborted might be raised if we try to create the bucket
# for the first time at the same time
示例2: sagemaker_session_with_custom_bucket
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def sagemaker_session_with_custom_bucket(
    boto_session, sagemaker_client_config, sagemaker_runtime_config, custom_bucket_name
):
    """Create a SageMaker Session whose default bucket is *custom_bucket_name*.

    A retry config (max_attempts=10) is injected into the client config
    unless the caller already supplied one.
    """
    sagemaker_client_config.setdefault("config", Config(retries=dict(max_attempts=10)))
    sagemaker_client = None
    if sagemaker_client_config:
        sagemaker_client = boto_session.client("sagemaker", **sagemaker_client_config)
    runtime_client = None
    if sagemaker_runtime_config:
        runtime_client = boto_session.client(
            "sagemaker-runtime", **sagemaker_runtime_config)
    return Session(
        boto_session=boto_session,
        sagemaker_client=sagemaker_client,
        sagemaker_runtime_client=runtime_client,
        default_bucket=custom_bucket_name,
    )
示例3: test_sagemaker_session_does_not_create_bucket_on_init
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def test_sagemaker_session_does_not_create_bucket_on_init(
    sagemaker_client_config, sagemaker_runtime_config, boto_session
):
    """Constructing a Session must not create its default bucket eagerly."""
    sagemaker_client_config.setdefault("config", Config(retries=dict(max_attempts=10)))
    sagemaker_client = None
    if sagemaker_client_config:
        sagemaker_client = boto_session.client("sagemaker", **sagemaker_client_config)
    runtime_client = None
    if sagemaker_runtime_config:
        runtime_client = boto_session.client(
            "sagemaker-runtime", **sagemaker_runtime_config)
    Session(
        boto_session=boto_session,
        sagemaker_client=sagemaker_client,
        sagemaker_runtime_client=runtime_client,
        default_bucket=CUSTOM_BUCKET_NAME,
    )
    # A bucket that was never created has no creation_date.
    s3 = boto3.resource("s3", region_name=boto_session.region_name)
    assert s3.Bucket(CUSTOM_BUCKET_NAME).creation_date is None
示例4: sagemaker_session
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def sagemaker_session(sagemaker_client_config, sagemaker_runtime_config, boto_session):
    """Create a SageMaker Session with retrying clients for tests.

    Injects a retry config (max_attempts=10) unless the caller already
    supplied one; either client is omitted when its config dict is falsy.
    """
    sagemaker_client_config.setdefault("config", Config(retries=dict(max_attempts=10)))
    sagemaker_client = None
    if sagemaker_client_config:
        sagemaker_client = boto_session.client("sagemaker", **sagemaker_client_config)
    runtime_client = None
    if sagemaker_runtime_config:
        runtime_client = boto_session.client(
            "sagemaker-runtime", **sagemaker_runtime_config)
    return Session(
        boto_session=boto_session,
        sagemaker_client=sagemaker_client,
        sagemaker_runtime_client=runtime_client,
    )
示例5: __init__
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def __init__(
    self,
    aws_conn_id: Optional[str] = "aws_default",
    verify: Union[bool, str, None] = None,
    region_name: Optional[str] = None,
    client_type: Optional[str] = None,
    resource_type: Optional[str] = None,
    config: Optional[Config] = None
) -> None:
    """Store AWS connection settings on the hook.

    :raises AirflowException: when neither ``client_type`` nor
        ``resource_type`` is given (at least one is required).
    """
    super().__init__()
    self.aws_conn_id = aws_conn_id
    self.verify = verify
    self.client_type = client_type
    self.resource_type = resource_type
    self.region_name = region_name
    self.config = config
    # Truthiness check kept deliberately: an empty string counts as missing.
    if not (self.client_type or self.resource_type):
        raise AirflowException(
            'Either client_type or resource_type'
            ' must be provided.')
# pylint: disable=too-many-statements
示例6: delete_empty_log_streams
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def delete_empty_log_streams(
    log_group_name_prefix: str = None,
    purge_non_empty: bool = False,
    dry_run: bool = False,
    region: str = None,
    profile: str = None,
):
    """Walk CloudWatch log groups and delete their empty streams.

    Rebinds the module-global ``cw_logs`` client using the given
    region/profile, then delegates each group to
    ``_delete_empty_log_streams``.
    """
    global cw_logs
    session = boto3.Session(region_name=region, profile_name=profile)
    cw_logs = session.client("logs", config=Config(retries=dict(max_attempts=10)))
    paginate_kwargs = {"PaginationConfig": {"PageSize": 50}}
    if log_group_name_prefix:
        paginate_kwargs["logGroupNamePrefix"] = log_group_name_prefix
    log.info("finding log groups with prefix %r", log_group_name_prefix)
    paginator = cw_logs.get_paginator("describe_log_groups")
    for page in paginator.paginate(**paginate_kwargs):
        for group in page["logGroups"]:
            _delete_empty_log_streams(group, purge_non_empty, dry_run)
示例7: handle
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def handle(request, context):
    """Lambda entry point: delete empty log streams per *request*.

    Validates the optional boolean flags ``dry_run`` and
    ``purge_non_empty``, then either processes a single log group prefix
    or fans out over all log groups.

    :raises ValueError: when a supplied flag is not a boolean.
    """
    global cw_logs
    cw_logs = boto3.client("logs", config=Config(retries=dict(max_attempts=10)))
    dry_run = request.get("dry_run", False)
    if "dry_run" in request and not isinstance(dry_run, bool):
        raise ValueError(f"'dry_run' is not a boolean value, {request}")
    purge_non_empty = request.get("purge_non_empty", False)
    # BUG FIX: this previously re-checked isinstance(dry_run, bool), so a
    # non-boolean 'purge_non_empty' value was never rejected.
    if "purge_non_empty" in request and not isinstance(purge_non_empty, bool):
        raise ValueError(f"'purge_non_empty' is not a boolean value, {request}")
    log_group_name_prefix = request.get("log_group_name_prefix")
    if log_group_name_prefix:
        delete_empty_log_streams(log_group_name_prefix, purge_non_empty, dry_run)
    else:
        fan_out(
            context.invoked_function_arn,
            get_all_log_group_names(),
            purge_non_empty,
            dry_run,
        )
示例8: __init__
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def __init__(self, resource, config, credentials=None, region_name=None):
    """Constructor.

    :param resource: AWS specific token for resource type. e.g., 's3', 'sqs', etc.
    :type resource: string
    :param config: Resource specific configuration
    :type config: :class:`botocore.client.Config`
    :param credentials: Authentication values needed to access AWS. If no
        credentials are passed, then IAM role-based access is assumed.
    :type credentials: :class:`util.aws.AWSCredentials`
    :param region_name: The AWS region the resource resides in.
    :type region_name: string
    """
    self.credentials = credentials
    self.region_name = region_name
    # The boto client is created lazily elsewhere; start unset.
    self._client = None
    self._resource_name = resource
    self._config = config
示例9: _max_retries_config
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def _max_retries_config(self):
    """
    If a ``BOTO_MAX_RETRIES_<self.api_name>`` environment variable is set,
    return a new ``botocore.config.Config`` instance using that number
    as the retries max_attempts value.
    :rtype: ``botocore.config.Config`` or None
    """
    env_name = 'BOTO_MAX_RETRIES_%s' % self.api_name
    if env_name not in os.environ:
        return None
    raw_value = os.environ[env_name]
    try:
        max_attempts = int(raw_value)
    except Exception:
        # Unparseable value: log and fall back to botocore defaults.
        logger.error(
            'ERROR: Found "%s" environment variable, but unable to '
            'parse value "%s" to an integer.', env_name, raw_value
        )
        return None
    logger.debug(
        'Setting explicit botocore retry config with max_attempts=%d '
        'for "%s" API based on %s environment variable.',
        max_attempts, self.api_name, env_name
    )
    return Config(retries={'max_attempts': max_attempts})
示例10: __init__
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def __init__(self, client_kwargs=None):
    """Create S3 clients for processes.

    Botocore sessions and clients are not pickleable so they cannot be
    inherited across Process boundaries. Instead, they must be
    instantiated once a process is running.
    """
    self._client_kwargs = {} if client_kwargs is None else client_kwargs
    # Copy the config so the caller's object is never mutated, then tag
    # the user agent with the per-process marker.
    config_copy = deepcopy(self._client_kwargs.get('config', Config()))
    if config_copy.user_agent_extra:
        config_copy.user_agent_extra += " " + PROCESS_USER_AGENT
    else:
        config_copy.user_agent_extra = PROCESS_USER_AGENT
    self._client_kwargs['config'] = config_copy
示例11: __init__
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def __init__(self, data):
    """Parse the storage backend configuration in *data*."""
    data = self._parse_data(data)
    self.parsed_data = data
    # Default to HTTPS when the host has no explicit scheme.
    endpoint = data.get('host')
    if endpoint and '://' not in endpoint:
        endpoint = 'https://' + endpoint
    self.endpoint_url = endpoint
    self.session_kwargs = {}
    self.client_kwargs = {}
    # Map optional config keys onto boto3 session keyword arguments.
    for src_key, session_key in (
        ('profile', 'profile_name'),
        ('access_key', 'aws_access_key_id'),
        ('secret_key', 'aws_secret_access_key'),
    ):
        if src_key in data:
            self.session_kwargs[session_key] = data[src_key]
    if 'addressing_style' in data:
        self.client_kwargs['config'] = Config(
            s3={'addressing_style': data['addressing_style']})
    self.bucket_policy_file = data.get('bucket_policy_file')
    self.bucket_versioning = data.get('bucket_versioning') in ('1', 'true', 'yes', 'on')
    proxy_mode = data.get('proxy')
    if proxy_mode in ('1', 'true', 'yes', 'on'):
        self.proxy_downloads = ProxyDownloadsMode.local
    elif proxy_mode in ('xaccelredirect', 'nginx'):
        self.proxy_downloads = ProxyDownloadsMode.nginx
    else:
        self.proxy_downloads = ProxyDownloadsMode.disabled
    self.meta = data.get('meta')
示例12: iterate_datasets
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def iterate_datasets(bucket_name, config, prefix, suffix, start_date, end_date, func, unsafe, sources_policy):
    """Fan object keys from an S3 bucket out to a pool of worker processes.

    Lists objects under *prefix* that end with *suffix*, pushes each key
    onto a shared queue, then sends one GUARDIAN sentinel per worker and
    waits for all workers to finish.
    """
    manager = Manager()
    queue = manager.Queue()
    # Anonymous (unsigned) access is enough for public buckets.
    s3 = boto3.resource('s3', config=Config(signature_version=UNSIGNED))
    bucket = s3.Bucket(bucket_name)
    logging.info("Bucket : %s prefix: %s ", bucket_name, str(prefix))
    # safety = 'safe' if not unsafe else 'unsafe'
    worker_count = cpu_count() * 2
    workers = []
    for _ in range(worker_count):
        proc = Process(
            target=worker,
            args=(config, bucket_name, prefix, suffix, start_date, end_date,
                  func, unsafe, sources_policy, queue,))
        workers.append(proc)
        proc.start()
    for obj in bucket.objects.filter(Prefix=str(prefix)):
        if obj.key.endswith(suffix):
            queue.put(obj.key)
    # One sentinel per worker so every process sees a stop signal.
    for _ in range(worker_count):
        queue.put(GUARDIAN)
    for proc in workers:
        proc.join()
示例13: worker
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def worker(parse_only, queue):
    """Consume dataset URLs from *queue* and index them until STOP_SIGN.

    Exits on the STOP_SIGN sentinel, on a 60-second queue timeout, or
    when the queue's pipe is closed (EOFError).
    """
    s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
    dc = datacube.Datacube()
    idx = dc.index
    while True:
        try:
            url = queue.get(timeout=60)
            if url == STOP_SIGN:
                break
            logging.info("Processing {} {}".format(url, current_process()))
            index_dataset(idx, s3, url, parse_only)
            queue.task_done()
        except (Empty, EOFError):
            break
示例14: fetch_autoclaved_bucket
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def fetch_autoclaved_bucket(dst_dir, bucket_date):
    """Mirror the autoclaved jsonl.tar.lz4 files for *bucket_date* locally.

    Files already present with a matching size are skipped.
    """
    print("Fetch bucket")
    target_dir = os.path.join(dst_dir, bucket_date)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # The ooni-data bucket is public, so unsigned requests suffice.
    client = boto3.client("s3", config=Config(signature_version=UNSIGNED))
    resource = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
    prefix = "autoclaved/jsonl.tar.lz4/{}/".format(bucket_date)
    pages = client.get_paginator("list_objects").paginate(
        Bucket="ooni-data", Delimiter="/", Prefix=prefix)
    for page in pages:
        for entry in page.get("Contents", []):
            fkey = entry.get("Key")
            dst_pathname = os.path.join(target_dir, os.path.basename(fkey))
            try:
                # Skip files already downloaded with the expected size.
                if os.stat(dst_pathname).st_size == entry.get("Size"):
                    continue
            except Exception:  # XXX maybe make this more strict. It's FileNotFoundError on py3 and OSError on py2
                pass
            print("[+] Downloading {}".format(dst_pathname))
            resource.meta.client.download_file("ooni-data", fkey, dst_pathname)
示例15: test_connect_service
# 需要导入模块: from botocore import config [as 别名]
# 或者: from botocore.config import Config [as 别名]
def test_connect_service(self):
    """Exercise connect_service with valid and invalid arguments."""
    client = connect_service('iam', self.creds)
    client = connect_service('iam', self.creds, config={})
    client = connect_service('iam', self.creds, silent=True)
    client = connect_service('ec2', self.creds, region_name='us-east-1')
    # An unknown service name must fail.
    # BUG FIX: this previously referenced undefined `creds` (NameError) and
    # used a bare `except:` that also swallowed the `assert False`, so the
    # check could never actually fail the test.
    raised = False
    try:
        connect_service('opinelunittest', self.creds)
    except Exception:
        raised = True
    assert raised
    config = Config(region_name='us-east-1')
    client = connect_service('ec2', self.creds, config=config)
    # Passing a Config object as region_name must fail.
    raised = False
    try:
        connect_service('ec2', self.creds, region_name=config)
    except Exception:
        raised = True
    assert raised