This article collects typical usage examples of the Python method nailgun.task.helpers.TaskHelper.nodes_to_provision. If you are unsure what TaskHelper.nodes_to_provision does, how exactly to call it, or what real-world uses look like, the curated examples below should help. You can also read further about the class that defines this method, nailgun.task.helpers.TaskHelper.
Ten code examples of TaskHelper.nodes_to_provision are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
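Before diving into the examples, here is a minimal sketch of the call pattern they all share, assuming a Nailgun cluster object like the ones built by the test environments below; the wrapper function count_pending_work is hypothetical and exists only to show how nodes_to_provision relates to nodes_to_deploy.

# Hypothetical helper, not part of Nailgun: it only illustrates the call pattern.
from nailgun.task.helpers import TaskHelper

def count_pending_work(cluster):
    # Nodes that still need an operating system installed
    # (e.g. newly added nodes with pending_addition=True).
    to_provision = TaskHelper.nodes_to_provision(cluster)
    # Nodes that need services (re)deployed; in the tests below this
    # also includes controllers that are already provisioned.
    to_deploy = TaskHelper.nodes_to_deploy(cluster)
    return len(to_provision), len(to_deploy)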
Example 1: test_deploy_grow_controllers
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def test_deploy_grow_controllers(self):
    cluster = self.create_env(
        nodes_kwargs=[
            {"roles": ["controller"]},
            {"roles": ["controller"], "pending_addition": True},
            {"roles": ["controller"], "pending_addition": True},
        ]
    )
    # We have to build 2 new controllers
    n_nodes = TaskHelper.nodes_to_provision(cluster)
    self.assertEqual(len(n_nodes), 2)
    # All controllers must re-deploy (run puppet)
    r_nodes = TaskHelper.nodes_to_deploy(cluster)
    self.assertEqual(len(r_nodes), 3)
    supertask = self.env.launch_deployment()
    self.assertEqual(supertask.name, "deploy")
    self.env.wait_ready(supertask)
    self.assertEqual(supertask.status, "ready")
    controllers = self.filter_by_role(cluster.nodes, "controller")
    self.assertEqual(len(controllers), 3)
Example 2: message
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def message(cls, task):
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    nodes = TaskHelper.nodes_to_provision(task.cluster)
    USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
    # We need to assign admin ips
    # and only after that prepare syslog
    # directories
    task.cluster.prepare_for_provisioning()
    for node in nodes:
        if USE_FAKE:
            continue
        if node.offline:
            raise errors.NodeOffline(
                u'Node "%s" is offline.'
                " Remove it from environment and try again." % node.full_name
            )
        TaskHelper.prepare_syslog_dir(node)
    serialized_cluster = (
        task.cluster.replaced_provisioning_info
        or provisioning_serializers.serialize(task.cluster)
    )
    message = {
        "method": "provision",
        "respond_to": "provision_resp",
        "args": {"task_uuid": task.uuid, "provisioning_info": serialized_cluster},
    }
    return message
Example 3: prepare_for_provisioning
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def prepare_for_provisioning(self):
    from nailgun.network.manager import NetworkManager
    from nailgun.task.helpers import TaskHelper
    netmanager = NetworkManager()
    for node in TaskHelper.nodes_to_provision(self):
        netmanager.assign_admin_ips(
            node.id, len(node.meta.get('interfaces', [])))
Example 4: prepare_for_provisioning
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def prepare_for_provisioning(self):
    from nailgun.network.manager import NetworkManager
    from nailgun.task.helpers import TaskHelper
    netmanager = NetworkManager()
    nodes = TaskHelper.nodes_to_provision(self)
    TaskHelper.update_slave_nodes_fqdn(nodes)
    for node in nodes:
        netmanager.assign_admin_ips(node.id, len(node.meta.get("interfaces", [])))
Example 5: serialize_nodes
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def serialize_nodes(cls, cluster):
    """Serialize nodes."""
    nodes_to_provision = TaskHelper.nodes_to_provision(cluster)
    cluster_attrs = cluster.attributes.merged_attrs_values()
    serialized_nodes = []
    for node in nodes_to_provision:
        serialized_node = cls.serialize_node(cluster_attrs, node)
        serialized_nodes.append(serialized_node)
    return serialized_nodes
Example 6: test_deploy_grow_controllers
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def test_deploy_grow_controllers(self):
    cluster = self.create_env(
        nodes_kwargs=[
            {'roles': ['controller']},
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True}])
    # We have to build 2 new controllers
    n_nodes = TaskHelper.nodes_to_provision(cluster)
    self.assertEqual(len(n_nodes), 2)
    # All controllers must re-deploy
    r_nodes = TaskHelper.nodes_to_deploy(cluster)
    self.assertEqual(len(r_nodes), 3)
    supertask = self.env.launch_deployment()
    self.assertEqual(supertask.name, consts.TASK_NAMES.deploy)
    self.assertNotEqual(supertask.status, consts.TASK_STATUSES.error)
    controllers = self.filter_by_role(cluster.nodes, 'controller')
    self.assertEqual(len(controllers), 3)
Example 7: test_deploy_grow_controllers
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def test_deploy_grow_controllers(self):
    cluster = self.create_env(
        nodes_kwargs=[
            {'roles': ['controller']},
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True}])
    # We have to build 2 new controllers
    n_nodes = TaskHelper.nodes_to_provision(cluster)
    self.assertEquals(len(n_nodes), 2)
    # All controllers must re-deploy (run puppet)
    r_nodes = TaskHelper.nodes_to_deploy(cluster)
    self.assertEquals(len(r_nodes), 3)
    supertask = self.env.launch_deployment()
    self.assertEquals(supertask.name, 'deploy')
    self.env.wait_ready(supertask)
    self.assertEquals(supertask.status, 'ready')
    controllers = self.filter_by_role(cluster.nodes, 'controller')
    self.assertEquals(len(controllers), 3)
Example 8: message
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def message(cls, task):
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    nodes = TaskHelper.nodes_to_provision(task.cluster)
    USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
    # We need to assign admin ips
    # and only after that prepare syslog
    # directories
    task.cluster.prepare_for_provisioning()
    for node in nodes:
        if USE_FAKE:
            continue
        if node.offline:
            raise errors.NodeOffline(
                u'Node "%s" is offline.'
                ' Remove it from environment and try again.' %
                node.full_name)
        TaskHelper.prepare_syslog_dir(node)
        node.status = 'provisioning'
        db().commit()
    serialized_cluster = task.cluster.replaced_provisioning_info or \
        provisioning_serializers.serialize(task.cluster)
    message = {
        'method': 'provision',
        'respond_to': 'provision_resp',
        'args': {
            'task_uuid': task.uuid,
            'provisioning_info': serialized_cluster}}
    return message
Example 9: get_default_nodes
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def get_default_nodes(self, cluster):
    TaskHelper.nodes_to_provision(cluster)
Example 10: message
# Required import: from nailgun.task.helpers import TaskHelper [as alias]
# Or: from nailgun.task.helpers.TaskHelper import nodes_to_provision [as alias]
def message(cls, task):
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    # this variable is used to set 'auth_key' in cobbler ks_meta
    cluster_attrs = task.cluster.attributes.merged_attrs_values()
    nodes = TaskHelper.nodes_to_provision(task.cluster)
    netmanager = NetworkManager()
    USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
    # TODO: For now we send nodes data to orchestrator
    # which is cobbler oriented. But for future we
    # need to use more abstract data structure.
    nodes_data = []
    for node in nodes:
        if not node.online:
            if not USE_FAKE:
                raise Exception(
                    u"Node '%s' (id=%s) is offline."
                    " Remove it from environment and try again." %
                    (node.name, node.id)
                )
            else:
                logger.warning(
                    u"Node '%s' (id=%s) is offline."
                    " Remove it from environment and try again." %
                    (node.name, node.id)
                )
        node_data = {
            'profile': settings.COBBLER_PROFILE,
            'power_type': 'ssh',
            'power_user': 'root',
            'power_address': node.ip,
            'name': TaskHelper.make_slave_name(node.id, node.role),
            'hostname': node.fqdn,
            'name_servers': '\"%s\"' % settings.DNS_SERVERS,
            'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
            'netboot_enabled': '1',
            'ks_meta': {
                'puppet_auto_setup': 1,
                'puppet_master': settings.PUPPET_MASTER_HOST,
                'puppet_version': settings.PUPPET_VERSION,
                'puppet_enable': 0,
                'mco_auto_setup': 1,
                'install_log_2_syslog': 1,
                'mco_pskey': settings.MCO_PSKEY,
                'mco_vhost': settings.MCO_VHOST,
                'mco_host': settings.MCO_HOST,
                'mco_user': settings.MCO_USER,
                'mco_password': settings.MCO_PASSWORD,
                'mco_connector': settings.MCO_CONNECTOR,
                'mco_enable': 1,
                'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                'ks_spaces': "\"%s\"" % json.dumps(
                    node.attributes.volumes).replace("\"", "\\\"")
            }
        }

        if node.status == "discover":
            logger.info(
                "Node %s seems booted with bootstrap image",
                node.id
            )
            node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
        else:
            # If it's not in discover, we expect it to be booted
            # in target system.
            # TODO: Get rid of expectations!
            logger.info(
                "Node %s seems booted with real system",
                node.id
            )
            node_data['power_pass'] = settings.PATH_TO_SSH_KEY

        # FIXME: move this code (updating) into receiver.provision_resp
        if not USE_FAKE:
            node.status = "provisioning"
            orm().add(node)
            orm().commit()

        # here we assign admin network IPs for node
        # one IP for every node interface
        netmanager.assign_admin_ips(
            node.id,
            len(node.meta.get('interfaces', []))
        )
        admin_net_id = netmanager.get_admin_network_id()
        admin_ips = set([i.ip_addr for i in orm().query(IPAddr).
                         filter_by(node=node.id).
                         filter_by(network=admin_net_id)])
        for i in node.meta.get('interfaces', []):
            if 'interfaces' not in node_data:
                node_data['interfaces'] = {}
            node_data['interfaces'][i['name']] = {
                'mac_address': i['mac'],
                'static': '0',
                'netmask': settings.ADMIN_NETWORK['netmask'],
                'ip_address': admin_ips.pop(),
            }
        # interfaces_extra field in cobbler ks_meta
        # means some extra data for network interfaces
        # ... (the rest of this example is omitted) ...