本文整理汇总了Python中sahara.openstack.common.excutils.save_and_reraise_exception函数的典型用法代码示例。如果您正苦于以下问题:Python save_and_reraise_exception函数的具体用法?Python save_and_reraise_exception怎么用?Python save_and_reraise_exception使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了save_and_reraise_exception函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: check_swift_availability
def check_swift_availability(self, cluster_info):
    """Check that Swift is usable from the cluster's namenode.

    Uploads a helper script to the namenode over SSH, creates a
    uniquely named test container, runs the script against it, and
    removes the container afterwards. Any failure is printed and then
    re-raised via save_and_reraise_exception.
    """
    cfg = cluster_info['plugin_config']
    # Unique container name so concurrent test runs cannot collide.
    container = 'Swift-test-' + str(uuid.uuid4())[:8]
    script_params = {
        'OS_TENANT_NAME': self.common_config.OS_TENANT_NAME,
        'OS_USERNAME': self.common_config.OS_USERNAME,
        'OS_PASSWORD': self.common_config.OS_PASSWORD,
        'HADOOP_USER': cfg.HADOOP_USER,
        'SWIFT_CONTAINER_NAME': container,
    }
    self.open_ssh_connection(
        cluster_info['node_info']['namenode_ip'], cfg.SSH_USERNAME)
    try:
        self.transfer_helper_script_to_node(
            'swift_test_script.sh', parameter_list=script_params)
    except Exception as e:
        # Log the failure, then let the original exception propagate.
        with excutils.save_and_reraise_exception():
            print(str(e))
    swift = self.connect_to_swift()
    swift.put_container(container)
    try:
        self.execute_command('./script.sh')
    except Exception as e:
        with excutils.save_and_reraise_exception():
            print(str(e))
    finally:
        # Always remove the test container, success or failure.
        self.delete_swift_container(swift, container)
    self.close_ssh_connection()
示例2: create_cluster
def create_cluster(values):
    """Persist a new cluster, validate it, and kick off provisioning.

    :param values: dict of cluster attributes for conductor.cluster_create
    :returns: the freshly re-read cluster object
    :raises: re-raises any validation error after marking the cluster
             as "Error"
    """
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    # Record the image username on every node group up front.
    for ng in cluster.node_groups:
        username = INFRA.get_node_group_image_username(ng)
        conductor.node_group_update(ctx, ng, {"image_username": username})
    cluster = conductor.cluster_get(ctx, cluster)
    try:
        cluster = conductor.cluster_update(
            ctx, cluster, {"status": "Validating"})
        LOG.info(g.format_cluster_status(cluster))
        plugin.validate(cluster)
    except Exception as e:
        # Flag the cluster as failed, then let the error propagate.
        with excutils.save_and_reraise_exception():
            cluster = conductor.cluster_update(
                ctx, cluster,
                {"status": "Error", "status_description": str(e)})
            LOG.info(g.format_cluster_status(cluster))
    # Provisioning runs asynchronously in its own greenthread.
    context.spawn("cluster-creating-%s" % cluster.id,
                  _provision_cluster, cluster.id)
    if CONF.use_identity_api_v3 and cluster.is_transient:
        trusts.create_trust(cluster)
    return conductor.cluster_get(ctx, cluster.id)
示例3: try_get_image_id_and_ssh_username
def try_get_image_id_and_ssh_username(parameter, value):
    """Return (image id, ssh username) for the configured image.

    NOTE(review): ``image`` and ``imgs`` are resolved from the enclosing
    scope, not from the parameters — presumably set up by the caller;
    confirm against the original module.
    """
    try:
        return image.id, image.metadata[imgs.PROP_USERNAME]
    except KeyError:
        # Username metadata missing: report which config parameter/value
        # was being resolved, then re-raise the KeyError.
        with excutils.save_and_reraise_exception():
            print_error_log(parameter, value)
示例4: get_cluster_info
def get_cluster_info(self, plugin_config):
    """Gather id, node IP/process map, node info and plugin config.

    Returns a dict shaped like::

        {
            'cluster_id': 'bee5c6a1-411a-4e88-95fc-d1fbdff2bb9d',
            'node_ip_list': {'172.18.168.153': ['tasktracker',
                                                'datanode'], ...},
            'node_info': {'tasktracker_count': 3, 'node_count': 6,
                          'namenode_ip': '172.18.168.242',
                          'datanode_count': 3},
            'plugin_config': <oslo.config.cfg.GroupAttr object>
        }
    """
    ip_map = self.get_cluster_node_ip_list_with_node_processes(
        self.cluster_id)
    try:
        node_info = self.get_node_info(ip_map, plugin_config)
    except Exception as e:
        # Print the failure, then re-raise the original exception.
        with excutils.save_and_reraise_exception():
            print('\nFailure during check of node process deployment '
                  'on cluster node: ' + str(e))
    return {
        'cluster_id': self.cluster_id,
        'node_ip_list': ip_map,
        'node_info': node_info,
        'plugin_config': plugin_config,
    }
示例5: create_cluster
def create_cluster(self, cluster):
    """Launch all instances for a freshly defined cluster.

    On failure the cluster is marked "Error" and instance creation is
    rolled back — unless the cluster was deleted concurrently, in which
    case there is nothing left to clean up.
    """
    ctx = context.ctx()
    create_launcher = _CreateLauncher()
    try:
        ng_counts = self._get_ng_counts(cluster)
        self._nullify_ng_counts(cluster)
        cluster = conductor.cluster_get(ctx, cluster)
        create_launcher.launch_instances(ctx, cluster, ng_counts)
    except Exception as err:
        with excutils.save_and_reraise_exception():
            if not g.check_cluster_exists(cluster):
                # Deleted meanwhile; skip rollback but still re-raise.
                LOG.info(g.format_cluster_deleted_message(cluster))
                return
            self._log_operation_exception(
                "Can't start cluster '%s' (reason: %s)", cluster, err)
            cluster = conductor.cluster_update(
                ctx, cluster,
                {"status": "Error", "status_description": str(err)})
            LOG.info(g.format_cluster_status(cluster))
            self._rollback_cluster_creation(cluster)
示例6: _add_params_to_script_and_transfer_to_node
def _add_params_to_script_and_transfer_to_node(self, cluster_info,
                                               node_group,
                                               node_with_volumes=False):
    """Fill in the map-reduce helper script parameters and copy the
    script to every instance of the given node group.

    :param node_with_volumes: when True, point the Hadoop log directory
        at the volume-backed location instead of the default one.
    """
    cfg = cluster_info['plugin_config']
    # Logs live on the attached volume when the node has one.
    log_dir = (cfg.HADOOP_LOG_DIRECTORY_ON_VOLUME if node_with_volumes
               else cfg.HADOOP_LOG_DIRECTORY)
    params = {
        'HADOOP_VERSION': cfg.HADOOP_VERSION,
        'HADOOP_DIRECTORY': cfg.HADOOP_DIRECTORY,
        'HADOOP_EXAMPLES_JAR_PATH': cfg.HADOOP_EXAMPLES_JAR_PATH,
        'HADOOP_LOG_DIRECTORY': log_dir,
        'HADOOP_USER': cfg.HADOOP_USER,
        'NODE_COUNT': cluster_info['node_info']['node_count'],
        'PLUGIN_NAME': cfg.PLUGIN_NAME,
    }
    for instance in node_group['instances']:
        try:
            self.open_ssh_connection(
                instance['management_ip'], cfg.SSH_USERNAME)
            self.transfer_helper_script_to_node(
                'map_reduce_test_script.sh', params)
            self.close_ssh_connection()
        except Exception as e:
            # Print and re-raise; remaining instances are not visited.
            with excutils.save_and_reraise_exception():
                print(str(e))
示例7: cluster_config_testing
def cluster_config_testing(self, cluster_info):
    """Verify cluster- and node-group-level configs were applied.

    Compares the expected general/HDFS/MapReduce configs against what
    the Sahara API reports, then runs a check script on every node.
    """
    cluster_id = cluster_info['cluster_id']
    data = self.sahara.clusters.get(cluster_id)
    # Cluster-level configuration sections, expected vs. actual.
    for expected, actual in (
            ({'Enable Swift': True}, data.cluster_configs['general']),
            (CLUSTER_HDFS_CONFIG, data.cluster_configs['HDFS']),
            (CLUSTER_MR_CONFIG, data.cluster_configs['MapReduce'])):
        self._compare_configs(expected, actual)
    self._check_configs_for_node_groups(data.node_groups)
    node_ips = (
        self.get_cluster_node_ip_list_with_node_processes(cluster_id))
    try:
        self.transfer_helper_script_to_nodes(
            node_ips, self.vanilla_config.SSH_USERNAME,
            'cluster_config_test_script.sh')
    except Exception as e:
        # Print and re-raise the transfer failure.
        with excutils.save_and_reraise_exception():
            print(str(e))
    self._check_config_application_on_cluster_nodes(node_ips)
示例8: create_cluster
def create_cluster(self, cluster):
    """Launch all instances for a new cluster and attach its volumes.

    On failure the cluster status is switched to "Error" and creation
    is rolled back, unless the cluster was deleted in the meantime.
    """
    ctx = context.ctx()
    create_launcher = _CreateLauncher()
    try:
        ng_counts = self._get_ng_counts(cluster)
        self._nullify_ng_counts(cluster)
        cluster = conductor.cluster_get(ctx, cluster)
        create_launcher.launch_instances(ctx, cluster, ng_counts)
        cluster = conductor.cluster_get(ctx, cluster)
        self._add_volumes(ctx, cluster)
    except Exception as err:
        with excutils.save_and_reraise_exception():
            if not g.check_cluster_exists(cluster):
                # Deleted concurrently; nothing left to roll back.
                LOG.info(g.format_cluster_deleted_message(cluster))
                return
            self._log_operation_exception(
                _LW("Can't start cluster '%(cluster)s' "
                    "(reason: %(reason)s)"), cluster, err)
            cluster = g.change_cluster_status(
                cluster, "Error", status_description=six.text_type(err))
            self._rollback_cluster_creation(cluster)
示例9: scale_cluster
def scale_cluster(self, cluster, node_group_id_map):
    """Scale the cluster to the node counts in ``node_group_id_map``.

    Creates the extra instances, waits for them to become active and
    networked, and attaches volumes. On any failure the newly created
    instances are rolled back and the cluster is returned to "Active".

    :returns: list of newly created instance ids ([] on rollback or if
        the cluster was deleted concurrently)
    :raises: re-raises the original scaling error after rollback
    """
    ctx = context.ctx()
    instance_ids = []
    try:
        instance_ids = self._scale_cluster_instances(cluster,
                                                     node_group_id_map)
        # Re-read the cluster after each mutating step: other
        # greenthreads (or a concurrent delete) may change it.
        cluster = conductor.cluster_get(ctx, cluster)
        g.clean_cluster_from_empty_ng(cluster)
        cluster = conductor.cluster_get(ctx, cluster)
        instances = g.get_instances(cluster, instance_ids)
        self._await_active(cluster, instances)
        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return []
        self._assign_floating_ips(instances)
        self._await_networks(cluster, instances)
        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return []
        cluster = conductor.cluster_get(ctx, cluster)
        volumes.attach_to_instances(
            g.get_instances(cluster, instance_ids))
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            if not g.check_cluster_exists(cluster):
                # Deleted meanwhile; skip rollback, still re-raise.
                LOG.info(g.format_cluster_deleted_message(cluster))
                return []
            self._log_operation_exception(
                "Can't scale cluster '%s' (reason: %s)", cluster, ex)
            cluster = conductor.cluster_get(ctx, cluster)
            self._rollback_cluster_scaling(
                cluster, g.get_instances(cluster, instance_ids), ex)
            instance_ids = []
            cluster = conductor.cluster_get(ctx, cluster)
            g.clean_cluster_from_empty_ng(cluster)
            # Rollback succeeded: the cluster is usable again.
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Active"})
            LOG.info(g.format_cluster_status(cluster))
    # we should be here with valid cluster: if instances creation
    # was not successful all extra-instances will be removed above
    if instance_ids:
        self._configure_instances(cluster)
    return instance_ids
示例10: __enter__
def __enter__(self):
    """Acquire a remote-operation slot and hand out a bulk helper.

    Releases the semaphore before re-raising if helper construction
    fails, so slots are never leaked.
    """
    _acquire_remote_semaphore()
    try:
        helper = BulkInstanceInteropHelper(self.instance)
    except Exception:
        # Give the slot back, then propagate the original error.
        with excutils.save_and_reraise_exception():
            _release_remote_semaphore()
    self.bulk = helper
    return self.bulk
示例11: __init__
def __init__(self, instance):
    """Start a dedicated subprocess and connect it to ``instance``.

    If the initial connect fails, the subprocess is shut down before
    the exception is re-raised, so no orphan process is left behind.
    """
    super(BulkInstanceInteropHelper, self).__init__(instance)
    self.proc = procutils.start_subprocess()
    try:
        conn_params = self._get_conn_params()
        procutils.run_in_subprocess(self.proc, _connect, conn_params)
    except Exception:
        with excutils.save_and_reraise_exception():
            procutils.shutdown_subprocess(self.proc, _cleanup)
示例12: _run_wordcount_job
def _run_wordcount_job(self):
    """Run the 'Wordcount' example job via the uploaded helper script.

    On failure, prints the error and captures the job log from the
    cluster node before re-raising.
    """
    try:
        self.execute_command('./script.sh run_wordcount_job')
    except Exception as e:
        with excutils.save_and_reraise_exception():
            print('\nFailure while \'Wordcount\' job launch: ' + str(e))
            # Pull the job log off the node to aid debugging.
            self.capture_error_log_from_cluster_node(
                '/tmp/MapReduceTestOutput/log.txt')
示例13: try_telnet
def try_telnet(self, host, port):
    """Probe ``host``:``port`` once over telnet.

    Prints a diagnostic (including the configured timeout) and
    re-raises on any connection failure.
    """
    try:
        telnetlib.Telnet(host, port)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            detail = (' NODE IP: %s, PORT: %s. Passed %s minute(s).'
                      % (host, port, self.common_config.TELNET_TIMEOUT))
            print('\nTelnet has failed: ' + str(e) + detail)
示例14: create_cluster
def create_cluster(self, cluster):
    """Drive a cluster through Spawning -> Waiting -> Preparing.

    Creates all instances, waits for them to become active and get
    networking, attaches volumes, then configures the instances. On
    failure the cluster is marked "Error" and creation is rolled back,
    unless the cluster was deleted concurrently.
    """
    ctx = context.ctx()
    try:
        # create all instances
        conductor.cluster_update(ctx, cluster, {"status": "Spawning"})
        LOG.info(g.format_cluster_status(cluster))
        self._create_instances(cluster)
        # wait for all instances are up and networks ready
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Waiting"})
        LOG.info(g.format_cluster_status(cluster))
        instances = g.get_instances(cluster)
        self._await_active(cluster, instances)
        # Bail out quietly if the cluster was deleted while waiting.
        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return
        self._assign_floating_ips(instances)
        self._await_networks(cluster, instances)
        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return
        # Re-read: status/instances may have changed during the waits.
        cluster = conductor.cluster_get(ctx, cluster)
        # attach volumes
        volumes.attach(cluster)
        # prepare all instances
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Preparing"})
        LOG.info(g.format_cluster_status(cluster))
        self._configure_instances(cluster)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            if not g.check_cluster_exists(cluster):
                # Deleted meanwhile; skip rollback, still re-raise.
                LOG.info(g.format_cluster_deleted_message(cluster))
                return
            self._log_operation_exception(
                "Can't start cluster '%s' (reason: %s)", cluster, ex)
            cluster = conductor.cluster_update(
                ctx, cluster, {"status": "Error",
                               "status_description": str(ex)})
            LOG.info(g.format_cluster_status(cluster))
            self._rollback_cluster_creation(cluster, ex)
示例15: _run
def _run(self, func, *args, **kwargs):
    """Execute ``func(*args, **kwargs)`` in a one-shot subprocess.

    A fresh subprocess is started, connected with this helper's
    connection parameters, used for the single call, and always shut
    down afterwards.
    """
    proc = procutils.start_subprocess()
    try:
        procutils.run_in_subprocess(proc, _connect,
                                    self._get_conn_params())
        return procutils.run_in_subprocess(proc, func, args, kwargs)
    except Exception:
        with excutils.save_and_reraise_exception():
            procutils.shutdown_subprocess(proc, _cleanup)
    finally:
        # NOTE(review): on the exception path shutdown_subprocess runs
        # twice (in the except above and again here) — presumably it is
        # idempotent; confirm against procutils.
        procutils.shutdown_subprocess(proc, _cleanup)