This article collects typical usage examples of the get_instances function from Python's sahara.utils.general module. If you are wondering what get_instances does, how to call it, or what real code that uses it looks like, the curated examples here should help.
Fifteen code examples of get_instances are shown below, ordered by popularity.
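Before diving into the examples, here is a minimal orientation sketch. Judging by the call sites below, get_instances(cluster, instances_ids=None) returns the Instance objects of a cluster, optionally restricted to a collection of ids. The helper function and the conductor import wiring in this sketch are illustrative assumptions, not part of Sahara itself:

from sahara import conductor as c
from sahara import context
from sahara.utils import general as g

conductor = c.API

def list_instance_names(cluster_id):
    # Hypothetical helper: fetch the cluster through the conductor,
    # then enumerate every instance across all of its node groups.
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster_id)
    return [inst.instance_name for inst in g.get_instances(cluster)]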
Example 1: create_cluster
def create_cluster(self, cluster):
    ctx = context.ctx()

    self._update_rollback_strategy(cluster, shutdown=True)

    # create all instances
    cluster = g.change_cluster_status(cluster, "Spawning")
    self._create_instances(cluster)

    # wait until all instances are up and networks are ready
    cluster = g.change_cluster_status(cluster, "Waiting")
    instances = g.get_instances(cluster)
    self._await_active(cluster, instances)
    self._assign_floating_ips(instances)
    self._await_networks(cluster, instances)
    cluster = conductor.cluster_get(ctx, cluster)

    # attach volumes
    volumes.attach_to_instances(g.get_instances(cluster))

    # prepare all instances
    cluster = g.change_cluster_status(cluster, "Preparing")
    self._configure_instances(cluster)

    self._update_rollback_strategy(cluster)
Example 2: scale_cluster
def scale_cluster(self, cluster, node_group_id_map):
    ctx = context.ctx()

    cluster = g.change_cluster_status(cluster, "Scaling")
    instance_ids = self._scale_cluster_instances(cluster,
                                                 node_group_id_map)

    self._update_rollback_strategy(cluster, instance_ids=instance_ids)

    cluster = conductor.cluster_get(ctx, cluster)
    g.clean_cluster_from_empty_ng(cluster)

    cluster = conductor.cluster_get(ctx, cluster)
    instances = g.get_instances(cluster, instance_ids)

    self._await_active(cluster, instances)
    self._assign_floating_ips(instances)
    self._await_networks(cluster, instances)

    cluster = conductor.cluster_get(ctx, cluster)

    volumes.attach_to_instances(
        g.get_instances(cluster, instance_ids))

    # we should be here with a valid cluster: if instance creation
    # failed, all extra instances have already been removed above
    if instance_ids:
        self._configure_instances(cluster)

    self._update_rollback_strategy(cluster)

    return instance_ids
Example 3: launch_instances
def launch_instances(self, cluster, target_count):
    # create all instances
    cluster = g.change_cluster_status(cluster, self.STAGES[0])

    tmpl = heat.ClusterTemplate(cluster)

    self._configure_template(tmpl, cluster, target_count)
    stack = tmpl.instantiate(update_existing=self.UPDATE_STACK,
                             disable_rollback=self.DISABLE_ROLLBACK)
    heat.wait_stack_completion(stack.heat_stack)

    self.inst_ids = self._populate_cluster(cluster, stack)

    # wait until all instances are up and networks are ready
    cluster = g.change_cluster_status(cluster, self.STAGES[1])
    instances = g.get_instances(cluster, self.inst_ids)
    self._await_networks(cluster, instances)

    # prepare all instances
    cluster = g.change_cluster_status(cluster, self.STAGES[2])
    instances = g.get_instances(cluster, self.inst_ids)
    volumes.mount_to_instances(instances)

    self._configure_instances(cluster)
Example 4: launch_instances
def launch_instances(self, ctx, cluster, target_count):
    # create all instances
    cluster = conductor.cluster_update(ctx, cluster,
                                       {"status": self.STAGES[0]})
    LOG.info(g.format_cluster_status(cluster))

    tmpl = heat.ClusterTemplate(cluster)

    self._configure_template(ctx, tmpl, cluster, target_count)
    stack = tmpl.instantiate(update_existing=self.UPDATE_STACK)
    stack.wait_till_active()

    self.inst_ids = self._populate_cluster(ctx, cluster, stack)

    # wait until all instances are up and networks are ready
    cluster = conductor.cluster_update(ctx, cluster,
                                       {"status": self.STAGES[1]})
    LOG.info(g.format_cluster_status(cluster))

    instances = g.get_instances(cluster, self.inst_ids)
    self._await_networks(cluster, instances)

    if not g.check_cluster_exists(cluster):
        LOG.info(g.format_cluster_deleted_message(cluster))
        return

    # prepare all instances
    cluster = conductor.cluster_update(ctx, cluster,
                                       {"status": self.STAGES[2]})
    LOG.info(g.format_cluster_status(cluster))

    instances = g.get_instances(cluster, self.inst_ids)
    volumes.mount_to_instances(instances)

    self._configure_instances(cluster)
Example 5: test_get_instances
def test_get_instances(self):
    cluster = self._make_sample()
    ctx = context.ctx()
    idx = 0
    ids = []
    for ng in cluster.node_groups:
        for i in range(ng.count):
            idx += 1
            ids.append(self.api.instance_add(context.ctx(), ng, {
                'instance_id': str(idx),
                'instance_name': str(idx),
            }))
    cluster = self.api.cluster_get(ctx, cluster)

    instances = general.get_instances(cluster, ids)
    ids = set()
    for inst in instances:
        ids.add(inst.instance_id)
    self.assertEqual(idx, len(ids))
    # range ends at idx + 1 so that the last generated id is checked too
    for i in range(1, idx + 1):
        self.assertIn(str(i), ids)

    instances = general.get_instances(cluster)
    ids = set()
    for inst in instances:
        ids.add(inst.instance_id)
    self.assertEqual(idx, len(ids))
    for i in range(1, idx + 1):
        self.assertIn(str(i), ids)
Example 6: scale_cluster
def scale_cluster(self, cluster, node_group_id_map):
    ctx = context.ctx()

    instance_ids = []
    try:
        instance_ids = self._scale_cluster_instances(cluster,
                                                     node_group_id_map)

        cluster = conductor.cluster_get(ctx, cluster)
        g.clean_cluster_from_empty_ng(cluster)

        cluster = conductor.cluster_get(ctx, cluster)
        instances = g.get_instances(cluster, instance_ids)

        self._await_active(cluster, instances)

        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return []

        self._assign_floating_ips(instances)

        self._await_networks(cluster, instances)

        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return []

        cluster = conductor.cluster_get(ctx, cluster)

        volumes.attach_to_instances(
            g.get_instances(cluster, instance_ids))

    except Exception as ex:
        with excutils.save_and_reraise_exception():
            if not g.check_cluster_exists(cluster):
                LOG.info(g.format_cluster_deleted_message(cluster))
                return []

            self._log_operation_exception(
                "Can't scale cluster '%s' (reason: %s)", cluster, ex)

            cluster = conductor.cluster_get(ctx, cluster)
            self._rollback_cluster_scaling(
                cluster, g.get_instances(cluster, instance_ids), ex)
            instance_ids = []

            cluster = conductor.cluster_get(ctx, cluster)
            g.clean_cluster_from_empty_ng(cluster)
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Active"})

            LOG.info(g.format_cluster_status(cluster))

    # we should be here with a valid cluster: if instance creation
    # failed, all extra instances have already been removed above
    if instance_ids:
        self._configure_instances(cluster)
    return instance_ids
Example 7: create_cluster
def create_cluster(self, cluster):
    ctx = context.ctx()
    try:
        # create all instances
        conductor.cluster_update(ctx, cluster, {"status": "Spawning"})
        LOG.info(g.format_cluster_status(cluster))
        self._create_instances(cluster)

        # wait until all instances are up and networks are ready
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Waiting"})
        LOG.info(g.format_cluster_status(cluster))

        instances = g.get_instances(cluster)

        self._await_active(cluster, instances)

        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return

        self._assign_floating_ips(instances)

        self._await_networks(cluster, instances)

        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return

        cluster = conductor.cluster_get(ctx, cluster)

        # attach volumes
        volumes.attach_to_instances(g.get_instances(cluster))

        # prepare all instances
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Preparing"})
        LOG.info(g.format_cluster_status(cluster))

        self._configure_instances(cluster)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            if not g.check_cluster_exists(cluster):
                LOG.info(g.format_cluster_deleted_message(cluster))
                return
            self._log_operation_exception(
                "Can't start cluster '%s' (reason: %s)", cluster, ex)

            cluster = conductor.cluster_update(
                ctx, cluster, {"status": "Error",
                               "status_description": str(ex)})
            LOG.info(g.format_cluster_status(cluster))
            self._rollback_cluster_creation(cluster, ex)
Example 8: _await_networks
def _await_networks(self, cluster, instances):
    if not instances:
        return

    ips_assigned = set()
    while len(ips_assigned) != len(instances):
        if not g.check_cluster_exists(cluster):
            return
        for instance in instances:
            if instance.id not in ips_assigned:
                if networks.init_instances_ips(instance):
                    ips_assigned.add(instance.id)

        context.sleep(1)

    LOG.info(
        _LI("Cluster '%s': all instances have IPs assigned"), cluster.id)

    cluster = conductor.cluster_get(context.ctx(), cluster)
    instances = g.get_instances(cluster, ips_assigned)

    with context.ThreadGroup() as tg:
        for instance in instances:
            tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                     self._wait_until_accessible, instance)

    LOG.info(_LI("Cluster '%s': all instances are accessible"), cluster.id)
Example 9: _provision_scaled_cluster
def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = g.change_cluster_status(cluster, "Decommissioning")

    instances_to_delete = []
    for node_group in cluster.node_groups:
        new_count = node_group_id_map[node_group.id]
        if new_count < node_group.count:
            instances_to_delete += node_group.instances[new_count:
                                                        node_group.count]

    if instances_to_delete:
        plugin.decommission_nodes(cluster, instances_to_delete)

    # Scaling infrastructure
    cluster = g.change_cluster_status(cluster, "Scaling")
    instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)

    # Setting up new nodes with the plugin
    if instance_ids:
        cluster = g.change_cluster_status(cluster, "Configuring")
        instances = g.get_instances(cluster, instance_ids)
        plugin.scale_cluster(cluster, instances)

    g.change_cluster_status(cluster, "Active")
Example 10: test_attach
def test_attach(self, add_step, add_event,
                p_create_attach_vol, p_await, p_mount):
    p_create_attach_vol.side_effect = ['/dev/vdb', '/dev/vdc'] * 2
    p_await.return_value = None
    p_mount.return_value = None
    add_event.return_value = None
    add_step.return_value = None

    instance1 = {'id': '1',
                 'instance_id': '123',
                 'instance_name': 'inst_1'}
    instance2 = {'id': '2',
                 'instance_id': '456',
                 'instance_name': 'inst_2'}

    ng = {'volumes_per_node': 2,
          'volumes_size': 2,
          'volumes_availability_zone': None,
          'volume_mount_prefix': '/mnt/vols',
          'volume_type': None,
          'name': 'master',
          'cluster_id': '11',
          'instances': [instance1, instance2],
          'volume_local_to_instance': False}

    cluster = r.ClusterResource({'node_groups': [ng]})

    volumes.attach_to_instances(g.get_instances(cluster))

    self.assertEqual(4, p_create_attach_vol.call_count)
    self.assertEqual(2, p_await.call_count)
    self.assertEqual(4, p_mount.call_count)
Example 11: test_attach
def test_attach(self, p_create_attach_vol,
                p_await, p_mount):
    p_create_attach_vol.side_effect = ['/dev/vdb', '/dev/vdc'] * 2
    p_await.return_value = None
    p_mount.return_value = None

    instance1 = {'id': '1',
                 'instance_id': '123',
                 'instance_name': 'inst_1'}
    instance2 = {'id': '2',
                 'instance_id': '456',
                 'instance_name': 'inst_2'}

    ng = {'volumes_per_node': 2,
          'volumes_size': 2,
          'volume_mount_prefix': '/mnt/vols',
          'name': 'master',
          'instances': [instance1, instance2]}

    cluster = r.ClusterResource({'node_groups': [ng]})

    volumes.attach_to_instances(g.get_instances(cluster))

    self.assertEqual(p_create_attach_vol.call_count, 4)
    self.assertEqual(p_await.call_count, 2)
    self.assertEqual(p_mount.call_count, 4)
Example 12: _await_networks
def _await_networks(self, cluster, instances):
    if not instances:
        return

    cpo.add_provisioning_step(cluster.id, _("Assign IPs"), len(instances))

    ips_assigned = set()
    self._ips_assign(ips_assigned, cluster, instances)

    LOG.info(
        _LI("Cluster {cluster_id}: all instances have IPs assigned")
        .format(cluster_id=cluster.id))

    cluster = conductor.cluster_get(context.ctx(), cluster)
    instances = g.get_instances(cluster, ips_assigned)

    cpo.add_provisioning_step(
        cluster.id, _("Wait for instance accessibility"), len(instances))

    with context.ThreadGroup() as tg:
        for instance in instances:
            tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                     self._wait_until_accessible, instance)

    LOG.info(_LI("Cluster {cluster_id}: all instances are accessible")
             .format(cluster_id=cluster.id))
Example 13: _add_volumes
def _add_volumes(self, ctx, cluster):
    for instance in g.get_instances(cluster):
        res_names = heat.client().resources.get(
            cluster.name, instance.instance_name).required_by
        for res_name in res_names:
            vol_res = heat.client().resources.get(cluster.name, res_name)
            if vol_res.resource_type == 'OS::Cinder::VolumeAttachment':
                volume_id = vol_res.physical_resource_id
                conductor.append_volume(ctx, instance, volume_id)
Example 14: _provision_scaled_cluster
def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = conductor.cluster_update(ctx, cluster,
                                       {"status": "Decommissioning"})
    LOG.info(g.format_cluster_status(cluster))

    instances_to_delete = []
    for node_group in cluster.node_groups:
        new_count = node_group_id_map[node_group.id]
        if new_count < node_group.count:
            instances_to_delete += node_group.instances[new_count:
                                                        node_group.count]

    if instances_to_delete:
        plugin.decommission_nodes(cluster, instances_to_delete)

    # Scaling infrastructure
    cluster = conductor.cluster_update(ctx, cluster, {"status": "Scaling"})
    LOG.info(g.format_cluster_status(cluster))

    instances = INFRA.scale_cluster(cluster, node_group_id_map)

    # Setting up new nodes with the plugin
    if instances:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Configuring"})
        LOG.info(g.format_cluster_status(cluster))
        try:
            instances = g.get_instances(cluster, instances)
            plugin.scale_cluster(cluster, instances)
        except Exception as ex:
            if not g.check_cluster_exists(cluster):
                LOG.info(g.format_cluster_deleted_message(cluster))
                return
            LOG.exception("Can't scale cluster '%s' (reason: %s)",
                          cluster.name, ex)
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Error"})
            LOG.info(g.format_cluster_status(cluster))
            return

    if not g.check_cluster_exists(cluster):
        LOG.info(g.format_cluster_deleted_message(cluster))
        return

    cluster = conductor.cluster_update(ctx, cluster, {"status": "Active"})
    LOG.info(g.format_cluster_status(cluster))
Example 15: shutdown_cluster
def shutdown_cluster(self, cluster):
    """Shutdown the specified cluster and all related resources."""
    try:
        heat.client().stacks.delete(cluster.name)
    except heat_exc.HTTPNotFound:
        LOG.warn("Did not find stack for cluster %s" % cluster.name)

    self._clean_job_executions(cluster)

    ctx = context.ctx()
    instances = g.get_instances(cluster)
    for inst in instances:
        conductor.instance_remove(ctx, inst)
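Finally, to tie the examples together, here is a plausible reconstruction of what sahara.utils.general.get_instances itself might look like, inferred purely from the call sites above: a cluster exposes node_groups, each node group holds instances, and an optional id collection (a list or a set in the examples) filters the result. This is a sketch under those assumptions, not the verbatim Sahara source:

def get_instances(cluster, instances_ids=None):
    # Reconstruction based on the call sites above: index every
    # instance in the cluster by its id.
    inst_map = {}
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            inst_map[instance.id] = instance

    if instances_ids is not None:
        # When an explicit id collection is given, return only the
        # matching instances.
        return [inst_map[inst_id] for inst_id in instances_ids]
    return list(inst_map.values())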