This article collects typical usage examples of google.cloud.exceptions.GoogleCloudError in Python. If you are wondering what exceptions.GoogleCloudError does or how to use it, the curated code examples below may help. You can also explore further usage of the module it belongs to, google.cloud.exceptions.
The following shows 15 code examples of exceptions.GoogleCloudError, sorted by popularity by default.
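Before the examples, here is a minimal sketch of the pattern they all share: GoogleCloudError is the common base class for API errors raised by the Google Cloud client libraries, so it can be caught around any client call. The bucket name below is a placeholder.
from google.cloud import storage
from google.cloud.exceptions import GoogleCloudError

client = storage.Client()
try:
    # any google-cloud client call may surface a GoogleCloudError subclass
    bucket = client.get_bucket('my-example-bucket')  # placeholder bucket name
except GoogleCloudError as exc:
    print('GCS call failed: {0!s}'.format(exc))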
Example 1: cost_usage_source_is_reachable
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def cost_usage_source_is_reachable(self, credential_name, data_source):
    """
    Verify that the GCP bucket exists and is reachable.

    Args:
        credential_name (object): not used; only present for interface compatibility
        data_source (dict): dict containing name of GCP storage bucket

    """
    storage_client = storage.Client()
    bucket = data_source["bucket"]
    try:
        bucket_info = storage_client.lookup_bucket(bucket)
        if not bucket_info:
            # if the lookup does not return anything, then this is a nonexistent bucket
            key = "billing_source.bucket"
            message = f"The provided GCP bucket {bucket} does not exist"
            raise serializers.ValidationError(error_obj(key, message))
    except GoogleCloudError as e:
        key = "billing_source.bucket"
        raise serializers.ValidationError(error_obj(key, e.message))
    return True
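As a companion to Example 1, here is a self-contained sketch (not from the original project) of the same reachability check: lookup_bucket() returns None for a missing bucket rather than raising NotFound, while permission and transport problems still surface as GoogleCloudError.
from google.cloud import storage
from google.cloud.exceptions import GoogleCloudError

def bucket_is_reachable(bucket_name):
    """Return True if the bucket exists and is visible to the caller."""
    storage_client = storage.Client()
    try:
        # lookup_bucket() returns None for a nonexistent bucket instead of raising
        return storage_client.lookup_bucket(bucket_name) is not None
    except GoogleCloudError:
        # permission or transport errors are reported as GoogleCloudError
        return False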
Example 2: wait_for_operation
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def wait_for_operation(self, operation: Operation, project_id: Optional[str] = None) -> Operation:
    """
    Given an operation, continuously fetch its status from Google Cloud until it
    either completes or fails.

    :param operation: The Operation to wait for
    :type operation: google.cloud.container_v1.gapic.enums.Operation
    :param project_id: Google Cloud Platform project ID
    :type project_id: str
    :return: A new, updated operation fetched from Google Cloud
    """
    self.log.info("Waiting for OPERATION_NAME %s", operation.name)
    time.sleep(OPERATIONAL_POLL_INTERVAL)
    while operation.status != Operation.Status.DONE:
        if operation.status == Operation.Status.RUNNING or operation.status == \
                Operation.Status.PENDING:
            time.sleep(OPERATIONAL_POLL_INTERVAL)
        else:
            raise exceptions.GoogleCloudError(
                "Operation has failed with status: %s" % operation.status)
        # refresh the operation to get its latest status
        operation = self.get_operation(operation.name, project_id=project_id or self.project_id)
    return operation
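Note that the hook raises GoogleCloudError directly with a plain message string; a minimal, standalone check of that behavior (no GKE cluster required):
from google.cloud import exceptions

try:
    raise exceptions.GoogleCloudError('Operation has failed with status: ABORTING')
except exceptions.GoogleCloudError as exc:
    # the message passed to the constructor is available on the exception
    print(exc.message)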
Example 3: copy_to
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def copy_to(self, source_path):
    """Upload a local file to this output's GCS location and return the gs:// path."""
    if os.path.getsize(source_path) == 0:
        message = (
            'Local source file {0:s} is empty. Not uploading to GCS'.format(
                source_path))
        log.error(message)
        raise TurbiniaException(message)

    bucket = self.client.get_bucket(self.bucket)
    destination_path = os.path.join(
        self.base_output_dir, self.unique_dir, os.path.basename(source_path))
    log.info(
        'Writing {0:s} to GCS path {1:s}'.format(source_path, destination_path))
    try:
        blob = storage.Blob(destination_path, bucket, chunk_size=self.CHUNK_SIZE)
        blob.upload_from_filename(source_path, client=self.client)
    except exceptions.GoogleCloudError as exception:
        message = 'File upload to GCS failed: {0!s}'.format(exception)
        log.error(message)
        raise TurbiniaException(message)

    return os.path.join('gs://', self.bucket, destination_path)
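A hedged download counterpart to Example 3, using the public Blob API (the function and its arguments are placeholders, not part of the original class):
from google.cloud import storage
from google.cloud.exceptions import GoogleCloudError

def copy_from_gcs(bucket_name, object_name, local_path):
    """Download gs://<bucket_name>/<object_name> to local_path."""
    client = storage.Client()
    try:
        blob = client.bucket(bucket_name).blob(object_name)
        blob.download_to_filename(local_path)
    except GoogleCloudError as exc:
        raise RuntimeError('File download from GCS failed: {0!s}'.format(exc))
    return local_path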
Example 4: setup_stackdriver_handler
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def setup_stackdriver_handler(project_id):
    """Set up Google Cloud Stackdriver Logging.

    The Google Cloud Logging library will attach itself as a
    handler to the default Python logging module.

    Args:
        project_id: The name of the Google Cloud project.

    Raises:
        TurbiniaException: When an error occurs enabling GCP Stackdriver Logging.
    """
    try:
        client = cloud_logging.Client(project=project_id)
        cloud_handler = cloud_logging.handlers.CloudLoggingHandler(client)
        logger = logging.getLogger('turbinia')
        logger.addHandler(cloud_handler)
    except exceptions.GoogleCloudError as exception:
        msg = 'Error enabling Stackdriver Logging: {0:s}'.format(str(exception))
        raise TurbiniaException(msg)
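Once the handler is attached, records logged through the 'turbinia' logger are forwarded to Cloud Logging; a short usage sketch (the project ID is a placeholder):
import logging

setup_stackdriver_handler('my-gcp-project')  # placeholder project ID
logging.getLogger('turbinia').info('This record also goes to Stackdriver/Cloud Logging')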
Example 5: setup_stackdriver_traceback
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def setup_stackdriver_traceback(project_id):
    """Set up Google Cloud Error Reporting.

    This method will enable Google Cloud Error Reporting.
    All exceptions that occur within a Turbinia Task will be logged.

    Args:
        project_id: The name of the Google Cloud project.

    Raises:
        TurbiniaException: When an error occurs enabling GCP Error Reporting.
    """
    try:
        client = error_reporting.Client(project=project_id)
    except exceptions.GoogleCloudError as exception:
        msg = 'Error enabling GCP Error Reporting: {0:s}'.format(str(exception))
        raise TurbiniaException(msg)
    return client
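The returned client can then report exceptions caught inside a task; a hedged usage sketch (the project ID and the failing code are placeholders):
client = setup_stackdriver_traceback('my-gcp-project')  # placeholder project ID
try:
    raise ValueError('something broke inside a task')
except ValueError:
    # report_exception() sends the active traceback to GCP Error Reporting
    client.report_exception()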
Example 6: _error_result_to_exception
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def _error_result_to_exception(error_result):
    """Maps BigQuery error reasons to an exception.

    The reasons and their matching HTTP status codes are documented on
    the `troubleshooting errors`_ page.

    .. _troubleshooting errors: https://cloud.google.com/bigquery\
        /troubleshooting-errors

    Args:
        error_result (Mapping[str, str]): The error result from BigQuery.

    Returns:
        google.cloud.exceptions.GoogleCloudError: The mapped exception.
    """
    reason = error_result.get("reason")
    status_code = _ERROR_REASON_TO_EXCEPTION.get(
        reason, http_client.INTERNAL_SERVER_ERROR
    )
    return exceptions.from_http_status(
        status_code, error_result.get("message", ""), errors=[error_result]
    )
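The mapping above ends in from_http_status(), which comes from google.api_core.exceptions (the layer google.cloud.exceptions builds on). A quick, self-contained illustration with a made-up error payload:
from google.api_core import exceptions as core_exceptions

error_result = {'reason': 'notFound', 'message': 'Not found: Table my_table'}  # made-up payload
exc = core_exceptions.from_http_status(
    404, error_result['message'], errors=[error_result])
print(isinstance(exc, core_exceptions.NotFound))  # True: HTTP 404 maps to NotFound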
Example 7: result
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def result(self, retry=DEFAULT_RETRY, timeout=None):
    """Start the job and wait for it to complete and get the result.

    Args:
        retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
        timeout (Optional[float]):
            The number of seconds to wait for the underlying HTTP transport
            before using ``retry``.
            If multiple requests are made under the hood, ``timeout``
            applies to each individual request.

    Returns:
        _AsyncJob: This instance.

    Raises:
        google.cloud.exceptions.GoogleCloudError:
            if the job failed.
        concurrent.futures.TimeoutError:
            if the job did not complete in the given timeout.
    """
    if self.state is None:
        self._begin(retry=retry, timeout=timeout)
    # TODO: modify PollingFuture so it can pass a retry argument to done().
    return super(_AsyncJob, self).result(timeout=timeout)
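A usage sketch of result() through a BigQuery query job; result() re-raises the job's error as the mapped GoogleCloudError subclass (the SQL is a trivial placeholder):
from google.cloud import bigquery
from google.cloud.exceptions import GoogleCloudError

client = bigquery.Client()
job = client.query('SELECT 1')  # starts the job asynchronously
try:
    rows = list(job.result(timeout=60))  # blocks until the job finishes
except GoogleCloudError as exc:
    print('Query failed: {0!s}'.format(exc))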
Example 8: _create_temp_table
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def _create_temp_table(self, timestamp, total_lines):
    """Save a shuffled copy of the source table's data into a temporary table."""
    _, temp_table_uri = self._build_destination_table(timestamp, TEMP_TABLE_SUFFIX)
    try:
        logging.info('Creating temporary table %s', temp_table_uri)
        query = queries.QUERY_TEMP_DATA_TEMPLATE.format(
            temp_table=temp_table_uri,
            feature_columns=self._columns,
            target_columns_shuffle=self._target_columns_shuffle,
            source_table=self._source_table_uri,
            total_lines=total_lines)
        self._bq_client.run_query(query)
        return temp_table_uri
    except GoogleCloudError as gcp_exception:
        raise exceptions.MLDataPrepException(
            'Could not create table {}'.format(temp_table_uri),
            ERR_CALCULATE_DATASET_SIZE,
            gcp_exception)
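Example 8 goes through a project-specific BigQuery wrapper (self._bq_client.run_query); a comparable sketch with the standard client, where the destination table ID and query are placeholders:
from google.cloud import bigquery
from google.cloud.exceptions import GoogleCloudError

def create_temp_table(destination_table):
    """Materialize a query result into destination_table ('project.dataset.table')."""
    client = bigquery.Client()
    job_config = bigquery.QueryJobConfig(destination=destination_table)
    try:
        client.query(
            'SELECT * FROM `project.dataset.source`',  # placeholder query
            job_config=job_config).result()
    except GoogleCloudError as exc:
        raise RuntimeError('Could not create table {}: {}'.format(destination_table, exc))
    return destination_table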
Example 9: test_no_bucket_exists_exception
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def test_no_bucket_exists_exception(self, mock_storage):
    """Test that ValidationError is raised when the GCP bucket does not exist."""
    gcp_client = mock_storage.return_value
    gcp_client.lookup_bucket.return_value = None
    credentials = {"project_id": FAKE.word()}
    storage_resource_name = {"bucket": FAKE.word()}
    with self.assertRaises(ValidationError):
        GCPProvider().cost_usage_source_is_reachable(credentials, storage_resource_name)
Example 10: test_bucket_access_exception
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def test_bucket_access_exception(self, mock_storage):
    """Test that ValidationError is raised when GoogleCloudError is raised."""
    gcp_client = mock_storage.return_value
    gcp_client.lookup_bucket.side_effect = GoogleCloudError("GCP Error")
    credentials = {"project_id": FAKE.word()}
    storage_resource_name = {"bucket": FAKE.word()}
    with self.assertRaises(ValidationError):
        GCPProvider().cost_usage_source_is_reachable(credentials, storage_resource_name)
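The two tests above drive the error branch by mocking the storage client; a minimal, generic sketch of that technique using mock.Mock() directly (the real tests patch google.cloud.storage in the module under test):
from unittest import mock
from google.cloud.exceptions import GoogleCloudError

mock_storage = mock.Mock()  # stands in for the patched google.cloud.storage module
mock_storage.Client.return_value.lookup_bucket.side_effect = GoogleCloudError('GCP Error')

# any lookup through the mocked client now raises GoogleCloudError:
try:
    mock_storage.Client().lookup_bucket('any-bucket')
except GoogleCloudError as exc:
    print('mocked failure: {0!s}'.format(exc))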
Example 11: test_wait_for_response_exception
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def test_wait_for_response_exception(self, time_mock):
    """Test that waiting on an aborted operation raises GoogleCloudError."""
    from google.cloud.container_v1.gapic.enums import Operation
    from google.cloud.exceptions import GoogleCloudError

    mock_op = mock.Mock()
    mock_op.status = Operation.Status.ABORTING

    with self.assertRaises(GoogleCloudError):
        self.gke_hook.wait_for_operation(mock_op)
    self.assertEqual(time_mock.call_count, 1)
Example 12: mark
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def mark(*args, **kwargs):
    """Marks an entire test as eventually consistent and retries.

    Args:
        tries: The number of retries.
        exceptions: The exceptions on which it will retry. It can be
            a single value or a tuple.
        wait_exponential_multiplier: The exponential multiplier in
            milliseconds.
        wait_exponential_max: The maximum wait before the next try in
            milliseconds.
    """
    __tracebackhide__ = True
    tries = kwargs.get('tries', STOP_MAX_ATTEMPT_NUMBER_DEFAULT)
    retry_exceptions = kwargs.get(
        'exceptions', (AssertionError, exceptions.GoogleCloudError))
    wait_exponential_multiplier = kwargs.get(
        'wait_exponential_multiplier', WAIT_EXPONENTIAL_MULTIPLIER_DEFAULT)
    wait_exponential_max = kwargs.get(
        'wait_exponential_max', WAIT_EXPONENTIAL_MAX_DEFAULT)

    # support both `@mark` and `@mark()` syntax
    if len(args) == 1 and callable(args[0]):
        return retry(
            wait_exponential_multiplier=wait_exponential_multiplier,
            wait_exponential_max=wait_exponential_max,
            stop_max_attempt_number=tries,
            retry_on_exception=_retry_on_exception(retry_exceptions))(args[0])

    # `mark()` syntax
    def inner(f):
        __tracebackhide__ = True
        return retry(
            wait_exponential_multiplier=wait_exponential_multiplier,
            wait_exponential_max=wait_exponential_max,
            stop_max_attempt_number=tries,
            retry_on_exception=_retry_on_exception(retry_exceptions))(f)
    return inner
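A sketch of applying the decorator from Example 12 in a test; the test body and fetch_latest_value() are hypothetical:
@mark(tries=5)
def test_eventually_consistent_read():
    # retried up to 5 times on AssertionError or GoogleCloudError
    assert fetch_latest_value() == 'expected'  # fetch_latest_value() is hypothetical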
Example 13: client_library_errors
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def client_library_errors(e, *args):
    """Used by mark_flaky to retry on remote service errors."""
    exception_class, exception_instance, traceback = e
    return isinstance(
        exception_instance,
        (GoogleCloudError,))
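This predicate matches the rerun_filter callback shape used by the pytest 'flaky' plugin (exc_info, name, test, plugin); a hedged sketch of wiring it up, assuming that plugin is installed:
from flaky import flaky

@flaky(max_runs=3, rerun_filter=client_library_errors)
def test_remote_service_call():
    ...  # body elided; the test is rerun only when a GoogleCloudError was raised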
Example 14: update_task
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def update_task(self, task):
    """Update an existing TurbiniaTask entity in Datastore."""
    task.touch()
    try:
        with self.client.transaction():
            entity = self.client.get(task.state_key)
            if not entity:
                self.write_new_task(task)
                return
            entity.update(self.get_task_dict(task))
            log.debug('Updating Task {0:s} in Datastore'.format(task.name))
            self.client.put(entity)
    except exceptions.GoogleCloudError as e:
        log.error(
            'Failed to update task {0:s} in datastore: {1!s}'.format(
                task.name, e))
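A read-side companion sketch: fetching a task entity back out of Datastore with the same error handling (the kind name 'TurbiniaTask' comes from Example 15; the task id is a placeholder):
from google.cloud import datastore
from google.cloud import exceptions

client = datastore.Client()
try:
    entity = client.get(client.key('TurbiniaTask', 'task-id-placeholder'))
except exceptions.GoogleCloudError as exc:
    entity = None
    print('Datastore read failed: {0!s}'.format(exc))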
Example 15: write_new_task
# Required import: from google.cloud import exceptions [as alias]
# Or: from google.cloud.exceptions import GoogleCloudError [as alias]
def write_new_task(self, task):
    """Write a new TurbiniaTask entity into Datastore and return its key."""
    key = self.client.key('TurbiniaTask', task.id)
    try:
        entity = datastore.Entity(key)
        entity.update(self.get_task_dict(task))
        log.info('Writing new task {0:s} into Datastore'.format(task.name))
        self.client.put(entity)
        task.state_key = key
    except exceptions.GoogleCloudError as e:
        log.error(
            'Failed to write new task {0:s} into datastore: {1!s}'.format(
                task.name, e))
    return key