This article collects typical usage examples of the Python method nailgun.db.sqlalchemy.models.Task.create_subtask. If you are wondering what Task.create_subtask does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also look further into usage examples of its containing class, nailgun.db.sqlalchemy.models.Task.
The following shows 11 code examples of the Task.create_subtask method, sorted by popularity.
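For orientation, here is a minimal sketch of the pattern the examples below exercise: persist a parent Task, then call create_subtask on it, optionally passing a weight that biases how much each child contributes to the parent's progress. The session import and the parent_id check are assumptions inferred from the snippets in this article, not a verified minimal program.

import uuid

from nailgun.db import db                      # assumed session import
from nailgun.db.sqlalchemy.models import Task

# Persist the parent first so the subtasks can reference it.
supertask = Task(uuid=str(uuid.uuid4()), name="super", status="running")
db().add(supertask)
db().commit()

# Subtask names are plain strings (or consts.TASK_NAMES members); the
# optional ``weight`` keyword tunes progress aggregation on the parent.
task_deletion = supertask.create_subtask("node_deletion", weight=0.4)
task_provision = supertask.create_subtask("provision", weight=0.4)
db().commit()

# Example 4 below shows the REST handler exposing this back-reference.
assert task_deletion.parent_id == supertask.id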
Example 1: test_proper_progress_calculation
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def test_proper_progress_calculation(self):
    supertask = Task(uuid=str(uuid.uuid4()), name="super", status="running")
    self.db.add(supertask)
    self.db.commit()
    subtask_weight = 0.4
    task_deletion = supertask.create_subtask("node_deletion", weight=subtask_weight)
    task_provision = supertask.create_subtask("provision", weight=subtask_weight)
    subtask_progress = random.randint(1, 20)
    deletion_kwargs = {"task_uuid": task_deletion.uuid, "progress": subtask_progress, "status": "running"}
    provision_kwargs = {"task_uuid": task_provision.uuid, "progress": subtask_progress, "status": "running"}
    self.receiver.provision_resp(**provision_kwargs)
    self.db.commit()
    self.receiver.remove_nodes_resp(**deletion_kwargs)
    self.db.commit()
    self.db.refresh(task_deletion)
    self.db.refresh(task_provision)
    self.db.refresh(supertask)
    calculated_progress = helpers.TaskHelper.calculate_parent_task_progress([task_deletion, task_provision])
    self.assertEqual(supertask.progress, calculated_progress)
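The equality check above only makes sense if the parent's stored progress matches a weight-aware aggregation over its subtasks. The snippet below is a rough sketch of such an aggregation, written purely to make the expected behaviour concrete; the formula is an assumption for illustration, not the actual helpers.TaskHelper.calculate_parent_task_progress implementation.

def weighted_parent_progress(subtasks):
    # Assumed model: each subtask contributes its progress scaled by its
    # weight, normalised by the total weight of the listed subtasks.
    total_weight = sum((t.weight or 1.0) for t in subtasks)
    if not total_weight:
        return 0
    weighted_sum = sum(t.progress * (t.weight or 1.0) for t in subtasks)
    return int(round(weighted_sum / total_weight))

# With both subtasks at weight 0.4 and the same random progress value p,
# this model yields exactly p, which is what the test effectively asserts.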
Example 2: test_node_deletion_subtask_progress
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def test_node_deletion_subtask_progress(self):
    supertask = Task(
        uuid=str(uuid.uuid4()),
        name="super",
        status="running"
    )
    self.db.add(supertask)
    self.db.commit()
    task_deletion = supertask.create_subtask("node_deletion")
    task_provision = supertask.create_subtask("provision", weight=0.4)
    subtask_progress = random.randint(1, 20)
    deletion_kwargs = {'task_uuid': task_deletion.uuid,
                       'progress': subtask_progress}
    provision_kwargs = {'task_uuid': task_provision.uuid,
                        'progress': subtask_progress}

    def progress_difference():
        self.receiver.provision_resp(**provision_kwargs)
        self.db.refresh(task_provision)
        self.assertEqual(task_provision.progress, subtask_progress)
        self.db.refresh(supertask)
        progress_before_delete_subtask = supertask.progress
        self.receiver.remove_nodes_resp(**deletion_kwargs)
        self.db.refresh(task_deletion)
        self.assertEqual(task_deletion.progress, subtask_progress)
        self.db.refresh(supertask)
        progress_after_delete_subtask = supertask.progress
        return abs(progress_after_delete_subtask -
                   progress_before_delete_subtask)

    without_coeff = progress_difference()
    task_deletion.progress = 0
    task_deletion.weight = 0.5
    self.db.merge(task_deletion)
    task_provision.progress = 0
    self.db.merge(task_provision)
    supertask.progress = 0
    self.db.merge(supertask)
    self.db.commit()
    with_coeff = progress_difference()
    # some freaking magic is here but haven't found
    # better way to test what is already working
    self.assertTrue((without_coeff / with_coeff) < 2)
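The final assertion reads more naturally with a small arithmetic illustration. Under the same assumed weighted-sum model as above, lowering the deletion subtask's weight from an assumed default of 1.0 to 0.5 shrinks the progress jump its RPC response causes, but by less than a factor of two once the normalising total weight shrinks as well. The numbers below are hypothetical and only mirror the shape of the test.

def contribution(progress, weight, total_weight):
    # Share of the parent's progress attributed to one subtask
    # under the assumed weighted-sum model.
    return progress * weight / total_weight

p = 10  # an arbitrary subtask progress value

# Run 1: deletion subtask at an assumed default weight of 1.0, provision at 0.4.
without_coeff = contribution(p, 1.0, 1.0 + 0.4)   # ~7.14

# Run 2: deletion subtask reweighted to 0.5, provision still at 0.4.
with_coeff = contribution(p, 0.5, 0.5 + 0.4)      # ~5.56

assert without_coeff / with_coeff < 2             # ~1.29, within the test's bound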
Example 3: execute
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def execute(self, force=False, **kwargs):
    try:
        self.clear_tasks_history(force=force)
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            "Can't reset environment '{0}' when "
            "running deployment task exists.".format(
                self.cluster.id
            )
        )

    # FIXME(aroma): remove updating of 'deployed_before'
    # when stop action is reworked. 'deployed_before'
    # flag identifies whether stop action is allowed for the
    # cluster. Please, refer to [1] for more details.
    # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
    objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt
    )
    for node in nodes:
        objects.Node.reset_vms_created_state(node)

    objects.ClusterPluginLinkCollection.delete_by_cluster_id(
        self.cluster.id)
    db().commit()

    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)

    reset_nodes = supertask.create_subtask(
        consts.TASK_NAMES.reset_nodes
    )
    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_keys
    )
    remove_ironic_bootstrap_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_ironic_bootstrap
    )
    db.commit()

    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(reset_nodes),
        tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])

    TaskHelper.update_action_log(supertask, al)

    return supertask
Example 4: test_task_contains_field_parent
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def test_task_contains_field_parent(self):
    parent_task = Task(
        name=consts.TASK_NAMES.deployment,
        cluster=self.cluster_db,
        status=consts.TASK_STATUSES.running,
        progress=10
    )
    child_task = parent_task.create_subtask(
        name=consts.TASK_NAMES.deployment,
        status=consts.TASK_STATUSES.running,
        progress=10
    )

    cluster_tasks = self.app.get(
        reverse(
            'TaskCollectionHandler',
            kwargs={'cluster_id': self.cluster_db.id}
        ),
        headers=self.default_headers
    ).json_body

    child_task_data = next(
        t for t in cluster_tasks if t['id'] == child_task.id
    )
    self.assertEqual(parent_task.id, child_task_data['parent_id'])

    parent_task_data = next(
        t for t in cluster_tasks if t['id'] == parent_task.id
    )
    self.assertIsNone(parent_task_data['parent_id'])
Example 5: execute
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def execute(self):
    deploy_running = db().query(Task).filter_by(
        cluster=self.cluster,
        name=consts.TASK_NAMES.deploy,
        status='running'
    ).first()
    if deploy_running:
        raise errors.DeploymentAlreadyStarted(
            u"Can't reset environment '{0}' when "
            u"deployment is running".format(
                self.cluster.id
            )
        )

    obsolete_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
    ).filter(
        Task.name.in_([
            consts.TASK_NAMES.deploy,
            consts.TASK_NAMES.deployment,
            consts.TASK_NAMES.stop_deployment
        ])
    )
    for task in obsolete_tasks:
        db().delete(task)

    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
    for node in nodes:
        objects.Node.reset_vms_created_state(node)

    db().commit()

    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)

    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.reset_environment
    )
    db.commit()

    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(supertask),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])

    TaskHelper.update_action_log(supertask, al)

    return supertask
Example 6: execute
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def execute(self):
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )
    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(
        u"Network info:\n{0}".format(
            json.dumps(network_info, indent=4)
        )
    )

    current_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
        name='deploy')
    for task in current_tasks:
        if task.status == "running":
            raise errors.DeploymentAlreadyStarted()
        elif task.status in ("ready", "error"):
            db().delete(task)
            db().commit()

    obsolete_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
    ).filter(
        Task.name.in_([
            'stop_deployment',
            'reset_environment'
        ])
    )
    for task in obsolete_tasks:
        db().delete(task)
    db().commit()

    task_messages = []
    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

    if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
        raise errors.WrongNodeStatus("No changes to deploy")

    supertask = Task(name='deploy', cluster=self.cluster)
    db().add(supertask)
    db().commit()

    # Run validation if user didn't redefine
    # provisioning and deployment information
    if not self.cluster.replaced_provisioning_info \
            and not self.cluster.replaced_deployment_info:
        try:
            self.check_before_deployment(supertask)
        except errors.CheckBeforeDeploymentError:
            return supertask

    # in case of Red Hat
    if self.cluster.release.operating_system == "RHEL":
        try:
            redhat_messages = self._redhat_messages(
                supertask,
                # provision only?
                [
                    {"uid": n.id, "platform_name": n.platform_name}
                    for n in nodes_to_provision
                ]
            )
        except Exception as exc:
            TaskHelper.update_task_status(
                supertask.uuid,
                status='error',
                progress=100,
                msg=str(exc)
            )
            return supertask
        task_messages.extend(redhat_messages)
    # /in case of Red Hat

    task_deletion, task_provision, task_deployment = None, None, None

    if nodes_to_delete:
        task_deletion = supertask.create_subtask("node_deletion")
        logger.debug("Launching deletion task: %s", task_deletion.uuid)
        self._call_silently(task_deletion, tasks.DeletionTask)

    if nodes_to_provision:
        TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug("There are nodes to provision: %s",
                     " ".join([n.fqdn for n in nodes_to_provision]))
        task_provision = supertask.create_subtask("provision")
        # For more accurate progress calculation
        task_provision.weight = 0.4
        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
            #......... (the rest of the code is omitted here) .........
Example 7: execute
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def execute(self, **kwargs):
    # FIXME(aroma): remove updating of 'deployed_before'
    # when stop action is reworked. 'deployed_before'
    # flag identifies whether stop action is allowed for the
    # cluster. Please, refer to [1] for more details.
    # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
    objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

    deploy_running = db().query(Task).filter_by(
        cluster=self.cluster,
        name=consts.TASK_NAMES.deploy,
        status='running'
    ).first()
    if deploy_running:
        raise errors.DeploymentAlreadyStarted(
            u"Can't reset environment '{0}' when "
            u"deployment is running".format(
                self.cluster.id
            )
        )

    obsolete_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
    ).filter(
        Task.name.in_([
            consts.TASK_NAMES.deploy,
            consts.TASK_NAMES.deployment,
            consts.TASK_NAMES.stop_deployment
        ])
    )
    for task in obsolete_tasks:
        db().delete(task)

    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
    for node in nodes:
        objects.Node.reset_vms_created_state(node)

    db().commit()

    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)

    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.reset_environment
    )
    remove_ironic_bootstrap_task = supertask.create_subtask(
        consts.TASK_NAMES.reset_environment
    )
    db.commit()

    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(supertask),
        tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])

    TaskHelper.update_action_log(supertask, al)

    return supertask
Example 8: execute
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def execute(self, nodes_to_delete, mclient_remove=True):
    cluster_id = None
    if hasattr(self, 'cluster'):
        cluster_id = self.cluster.id
        objects.TaskCollection.lock_cluster_tasks(cluster_id)

    logger.info("Trying to execute node deletion task with nodes %s",
                ', '.join(str(node.id) for node in nodes_to_delete))

    self.verify_nodes_with_cluster(nodes_to_delete)
    objects.NodeCollection.lock_nodes(nodes_to_delete)

    if cluster_id is None:
        # DeletionTask operates on cluster's nodes.
        # Nodes that are not in cluster are simply deleted.
        objects.NodeCollection.delete_by_ids([
            n.id for n in nodes_to_delete])
        db().flush()

        task = Task(name=consts.TASK_NAMES.node_deletion,
                    progress=100,
                    status=consts.TASK_STATUSES.ready)
        db().add(task)
        db().flush()

        return task

    task = Task(name=consts.TASK_NAMES.node_deletion,
                cluster=self.cluster)
    db().add(task)
    for node in nodes_to_delete:
        objects.Node.update(node,
                            {'status': consts.NODE_STATUSES.removing})
    db().flush()

    nodes_to_deploy = []
    objects.Cluster.adjust_nodes_lists_on_controller_removing(
        self.cluster, nodes_to_delete, nodes_to_deploy)

    if nodes_to_deploy:
        objects.NodeCollection.lock_nodes(nodes_to_deploy)
        # updating nodes
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
        logger.debug("There are nodes to deploy: %s",
                     " ".join([n.fqdn for n in nodes_to_deploy]))
        task_deployment = task.create_subtask(
            consts.TASK_NAMES.deployment)
        deployment_message = self._call_silently(
            task_deployment,
            tasks.DeploymentTask,
            nodes_to_deploy,
            method_name='message'
        )

        db().flush()
        # if failed to generate task message for orchestrator
        # then task is already set to error
        if task_deployment.status == consts.TASK_STATUSES.error:
            return task_deployment

        rpc.cast('naily', [deployment_message])
        db().commit()

    self._call_silently(
        task,
        tasks.DeletionTask,
        nodes=tasks.DeletionTask.prepare_nodes_for_task(
            nodes_to_delete, mclient_remove=mclient_remove))

    return task
Example 9: execute
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def execute(self):
    # Start applying the deployment changes
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )
    # Log the network info
    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(
        u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)
        )
    )

    self._remove_obsolete_tasks()  # remove obsolete (outdated) tasks

    supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
    task_messages = []

    # For an OpenStack environment, keep the original check for whether
    # any nodes in the cluster changed
    if self.cluster.cluster_type == 1:
        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

    # we should have task committed for processing in other threads
    db().commit()
    TaskHelper.create_action_log(supertask)

    # Run validation if user didn't redefine
    # provisioning and deployment information
    if (not objects.Cluster.get_provisioning_info(self.cluster) and
            not objects.Cluster.get_deployment_info(self.cluster)):
        try:
            if self.cluster.cluster_type == 1:
                self.check_before_deployment(supertask)
        except errors.CheckBeforeDeploymentError:
            db().commit()
            return supertask

    task_deletion, task_provision, task_deployment = None, None, None

    if nodes_to_delete:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # For more accurate progress calculation
        task_weight = 0.4
        task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                 weight=task_weight)
        logger.debug("Launching deletion task: %s", task_deletion.uuid)
        self._call_silently(task_deletion, tasks.DeletionTask)
        # we should have task committed for processing in other threads
        db().commit()

    if nodes_to_provision:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # updating nodes
        nodes_to_provision = objects.NodeCollection.lock_nodes(
            nodes_to_provision
        )
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug("There are nodes to provision: %s",
                     " ".join([n.fqdn for n in nodes_to_provision]))
        # For more accurate progress calculation
        task_weight = 0.4
        task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                  weight=task_weight)
        # we should have task committed for processing in other threads
        db().commit()
        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
        )
        task_provision = objects.Task.get_by_uid(
            task_provision.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        # if failed to generate task message for orchestrator
        # then task is already set to error
        if task_provision.status == TASK_STATUSES.error:
            return supertask
        task_provision.cache = provision_message
        db().commit()
        task_messages.append(provision_message)
    else:
        pass
    #......... (the rest of the code is omitted here) .........
Example 10: execute
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def execute(self):
    # Start applying the deployment changes
    logger.info(u"Trying to start deployment at cluster '{0}'".format(self.cluster.name or self.cluster.id))
    # Log the network info (network verification runs before an OpenStack deployment)
    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(u"Network info:\n{0}".format(jsonutils.dumps(network_info, indent=4)))

    self._remove_obsolete_tasks()  # remove obsolete (outdated) tasks

    supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
    task_messages = []

    # For an OpenStack environment, follow the original flow and check
    # whether any nodes in the cluster changed
    if self.cluster.cluster_type == 1:
        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

    # we should have task committed for processing in other threads
    db().commit()
    TaskHelper.create_action_log(supertask)

    # Run validation if user didn't redefine
    # provisioning and deployment information
    if not objects.Cluster.get_provisioning_info(self.cluster) and not objects.Cluster.get_deployment_info(
        self.cluster
    ):
        try:
            if self.cluster.cluster_type == 1:
                self.check_before_deployment(supertask)
        except errors.CheckBeforeDeploymentError:
            db().commit()
            return supertask

    task_deletion, task_provision, task_deployment = None, None, None

    if nodes_to_delete:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # For more accurate progress calculation
        task_weight = 0.4
        task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion, weight=task_weight)
        logger.debug("Launching deletion task: %s", task_deletion.uuid)
        self._call_silently(task_deletion, tasks.DeletionTask)
        # we should have task committed for processing in other threads
        db().commit()

    if nodes_to_provision:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # updating nodes
        nodes_to_provision = objects.NodeCollection.lock_nodes(nodes_to_provision)
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug("There are nodes to provision: %s", " ".join([n.fqdn for n in nodes_to_provision]))
        # For more accurate progress calculation
        task_weight = 0.4
        task_provision = supertask.create_subtask(TASK_NAMES.provision, weight=task_weight)
        # we should have task committed for processing in other threads
        db().commit()
        provision_message = self._call_silently(
            task_provision, tasks.ProvisionTask, nodes_to_provision, method_name="message"
        )
        task_provision = objects.Task.get_by_uid(task_provision.id, fail_if_not_found=True, lock_for_update=True)
        # if failed to generate task message for orchestrator
        # then task is already set to error
        if task_provision.status == TASK_STATUSES.error:
            return supertask
        task_provision.cache = provision_message
        db().commit()
        task_messages.append(provision_message)
    else:
        pass

    # nodes_to_deploy=self.cluster.nodes
    if nodes_to_deploy:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # locking nodes before updating
        objects.NodeCollection.lock_nodes(nodes_to_deploy)
        # updating nodes
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
        logger.debug("There are nodes to deploy: %s", " ".join([n.fqdn for n in nodes_to_deploy]))
        task_deployment = supertask.create_subtask(TASK_NAMES.deployment)
        # we should have task committed for processing in other threads
        db().commit()
        deployment_message = self._call_silently(
            task_deployment, tasks.DeploymentTask, nodes_to_deploy, method_name="message"
        )
        # clusterdeploymsg = ClusterdeployMsg(cluster_id=self.cluster.id,cluster_deploymsg='deployment_message')
        # db().add(clusterdeploymsg)
        #......... (the rest of the code is omitted here) .........
Example 11: execute
# Required import: from nailgun.db.sqlalchemy.models import Task [as alias]
# Or: from nailgun.db.sqlalchemy.models.Task import create_subtask [as alias]
def execute(self):
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )
    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(
        u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)
        )
    )

    self._remove_obsolete_tasks()

    supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
    db().add(supertask)
    # we should have task committed for processing in other threads
    db().commit()

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
    task_messages = []

    if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
        db().rollback()
        raise errors.WrongNodeStatus("No changes to deploy")

    # Run validation if user didn't redefine
    # provisioning and deployment information
    if not self.cluster.replaced_provisioning_info and \
            not self.cluster.replaced_deployment_info:
        try:
            self.check_before_deployment(supertask)
        except errors.CheckBeforeDeploymentError:
            db().commit()
            return supertask

    task_deletion, task_provision, task_deployment = None, None, None

    if nodes_to_delete:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # For more accurate progress calculation
        task_weight = 0.4
        task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                 weight=task_weight)
        logger.debug("Launching deletion task: %s", task_deletion.uuid)
        # we should have task committed for processing in other threads
        db().commit()
        self._call_silently(task_deletion, tasks.DeletionTask)

    if nodes_to_provision:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # updating nodes
        nodes_to_provision = objects.NodeCollection.lock_nodes(
            nodes_to_provision
        )
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug("There are nodes to provision: %s",
                     " ".join([n.fqdn for n in nodes_to_provision]))
        # For more accurate progress calculation
        task_weight = 0.4
        task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                  weight=task_weight)
        # we should have task committed for processing in other threads
        db().commit()
        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
        )
        task_provision = objects.Task.get_by_uid(
            task_provision.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        # if failed to generate task message for orchestrator
        # then task is already set to error
        if task_provision.status == TASK_STATUSES.error:
            return supertask
        task_provision.cache = provision_message
        db().commit()
        task_messages.append(provision_message)

    if nodes_to_deploy:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # locking nodes before updating
        objects.NodeCollection.lock_nodes(nodes_to_deploy)
        # updating nodes
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
        logger.debug("There are nodes to deploy: %s",
                     " ".join([n.fqdn for n in nodes_to_deploy]))
        task_deployment = supertask.create_subtask(TASK_NAMES.deployment)
        # we should have task committed for processing in other threads
        #......... (the rest of the code is omitted here) .........