

Python TaskHelper.nodes_to_delete Method Code Examples

This article collects typical usage examples of the Python method nailgun.task.task.TaskHelper.nodes_to_delete. If you are wondering what this method does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, nailgun.task.task.TaskHelper.


The sections below present 14 code examples of the TaskHelper.nodes_to_delete method, sorted by popularity by default.
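All of the examples call TaskHelper.nodes_to_delete(cluster) to collect the nodes scheduled for removal before a deployment starts. For orientation, here is a minimal sketch of what such a helper could look like; it is an assumption inferred from the pending_addition and needs_reprovision flags used in the examples below (the pending_deletion flag name is hypothetical), not the actual nailgun implementation.

    # Hypothetical sketch, not the real nailgun.task.task.TaskHelper code.
    class TaskHelperSketch(object):

        @classmethod
        def nodes_to_delete(cls, cluster):
            # Assumption: nodes marked for removal carry a boolean
            # `pending_deletion` flag, by analogy with `pending_addition`.
            return [node for node in cluster.nodes if node.pending_deletion]

        @classmethod
        def nodes_to_deploy(cls, cluster):
            # Assumption: nodes queued for addition or reprovisioning
            # still need to be deployed.
            return [node for node in cluster.nodes
                    if node.pending_addition or node.needs_reprovision]

In the real code, as Example 1 onwards shows, the method is called on a cluster object and its result is combined with nodes_to_deploy and nodes_to_provision to decide whether there is anything to do.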

Example 1: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
                force=False, graph_type=None, **kwargs):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )
        try:
            self.check_running_task()
        except errors.TaskAlreadyRunning:
            raise errors.DeploymentAlreadyStarted(
                'Cannot perform the actions because '
                'there are another running tasks.'
            )

        supertask = Task(name=self.deployment_type, cluster=self.cluster,
                         dry_run=is_dry_run(kwargs),
                         status=consts.TASK_STATUSES.pending)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = nodes_to_provision_deploy or \
            TaskHelper.nodes_to_deploy(self.cluster, force)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        self.ensure_nodes_changed(
            nodes_to_provision, nodes_to_deploy, nodes_to_delete
        )

        db().flush()
        TaskHelper.create_action_log(supertask)

        current_cluster_status = self.cluster.status
        # update cluster status
        if not is_dry_run(kwargs):
            self.cluster.status = consts.CLUSTER_STATUSES.deployment

        # we should have task committed for processing in other threads
        db().commit()
        nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                               if nodes_to_provision_deploy else None)
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
            nodes_to_provision_deploy=nodes_ids_to_deploy,
            deployment_tasks=deployment_tasks,
            force=force,
            graph_type=graph_type,
            current_cluster_status=current_cluster_status,
            **kwargs
        )

        return supertask
Developer: sebrandon1, Project: fuel-web, Lines of code: 57, Source file: manager.py
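Example 1 delegates the "is there anything to do?" check to self.ensure_nodes_changed, while Examples 2, 4 and 5 perform the same check inline with if not any([...]). As a rough sketch only, modeled directly on that inline check (the actual fuel-web helper lives elsewhere in manager.py and may differ):

    # Hypothetical sketch modeled on the inline check in Examples 2 and 4;
    # not necessarily the actual fuel-web implementation.
    def ensure_nodes_changed(self, nodes_to_provision, nodes_to_deploy,
                             nodes_to_delete):
        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")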

Example 2: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        self.check_no_running_deployment(self.cluster)
        self._remove_obsolete_tasks()

        supertask = Task(name=self.deployment_type, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = nodes_to_provision_deploy or \
            TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        db().flush()
        TaskHelper.create_action_log(supertask)

        # we should have task committed for processing in other threads
        db().commit()
        nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                               if nodes_to_provision_deploy else None)
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
            nodes_to_provision_deploy=nodes_ids_to_deploy,
            deployment_tasks=deployment_tasks
        )

        return supertask
Developer: SmartInfrastructures, Project: fuel-web-dev, Lines of code: 48, Source file: manager.py

Example 3: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
                force=False, graph_type=None, **kwargs):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        self.check_no_running_deployment(self.cluster)
        self._remove_obsolete_tasks()

        supertask = Task(name=self.deployment_type, cluster=self.cluster,
                         status=consts.TASK_STATUSES.pending)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = nodes_to_provision_deploy or \
            TaskHelper.nodes_to_deploy(self.cluster, force)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        self.ensure_nodes_changed(
            nodes_to_provision, nodes_to_deploy, nodes_to_delete
        )

        db().flush()
        TaskHelper.create_action_log(supertask)

        # we should have task committed for processing in other threads
        db().commit()
        nodes_ids_to_deploy = ([node.id for node in nodes_to_provision_deploy]
                               if nodes_to_provision_deploy else None)
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
            nodes_to_provision_deploy=nodes_ids_to_deploy,
            deployment_tasks=deployment_tasks,
            force=force,
            graph_type=graph_type
        )

        return supertask
Developer: ekorekin, Project: fuel-web, Lines of code: 45, Source file: manager.py

Example 4: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        self._remove_obsolete_tasks()

        supertask = Task(name=consts.TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            supertask.id,
        )

        return supertask
Developer: vefimova, Project: fuel-web, Lines of code: 41, Source file: manager.py

Example 5: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id,
            )
        )

        current_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
            name="deploy"
        )
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    db().delete(subtask)
                db().delete(task)
                db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = 'deployment'
        db().add(self.cluster)
        db().commit()

        supertask = Task(
            name="deploy",
            cluster=self.cluster
        )
        db().add(supertask)
        db().commit()
        if not self.cluster.replaced_provisioning_info \
           and not self.cluster.replaced_deployment_info:
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                return supertask
        # in case of Red Hat
        if self.cluster.release.operating_system == "RHEL":
            try:
                redhat_messages = self._redhat_messages(
                    supertask,
                    # provision only?
                    [
                        {"uid": n.id, "platform_name": n.platform_name}
                        for n in nodes_to_provision
                    ]
                )
            except Exception as exc:
                TaskHelper.update_task_status(
                    supertask.uuid,
                    status='error',
                    progress=100,
                    msg=str(exc)
                )
                return supertask
            task_messages.extend(redhat_messages)
        # /in case of Red Hat

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(
                task_deletion,
                tasks.DeletionTask
            )

        if nodes_to_provision:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))
            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds system to
            # cobbler and reboots it, so it has extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                method_name='message'
            )
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == 'error':
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
#......... the rest of this code is omitted .........
Developer: glacialheart2013, Project: fuel-web, Lines of code: 103, Source file: manager.py
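The subtask weights seen here and in later examples (0.05 for provisioning in Example 5, 0.4 in Examples 9 and 12 to 14) exist, according to the in-code comments, to make progress calculation more accurate. As an illustration only (this is not nailgun's actual progress code), a weighted roll-up of subtask progress into the parent task could look like this:

    # Illustration of the weighted progress roll-up implied by the subtask
    # weight values in the examples; an assumption, not taken from nailgun.
    def weighted_progress(subtasks):
        total_weight = sum(t.weight for t in subtasks) or 1.0
        return sum(t.progress * t.weight for t in subtasks) / total_weight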

Example 6: _execute_async_content

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def _execute_async_content(self, supertask, deployment_tasks=None,
                               nodes_to_provision_deploy=None, force=False,
                               graph_type=None):
        """Processes supertask async in mule

        :param supertask: SqlAlchemy task object
        """

        nodes_to_delete = []
        affected_nodes = []

        if nodes_to_provision_deploy:
            nodes_to_deploy = objects.NodeCollection.get_by_ids(
                nodes_to_provision_deploy)
            nodes_to_provision = filter(lambda n: any([
                n.pending_addition,
                n.needs_reprovision]),
                nodes_to_deploy)
        else:
            nodes_to_deploy = self.get_nodes_to_deploy(force=force)
            nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
            nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)

        objects.Cluster.adjust_nodes_lists_on_controller_removing(
            self.cluster, nodes_to_delete, nodes_to_deploy)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not (nodes_to_provision_deploy or
                objects.Cluster.get_provisioning_info(self.cluster) or
                objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return

        if self.cluster.status == consts.CLUSTER_STATUSES.operational:
            # rerun particular tasks on all deployed nodes
            modified_node_ids = {n.id for n in nodes_to_deploy}
            modified_node_ids.update(n.id for n in nodes_to_provision)
            modified_node_ids.update(n.id for n in nodes_to_delete)
            affected_nodes = objects.Cluster.get_nodes_by_status(
                self.cluster,
                status=consts.NODE_STATUSES.ready,
                exclude=modified_node_ids
            ).all()

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = self.delete_nodes(supertask, nodes_to_delete)

        if nodes_to_provision:
            logger.debug("There are nodes to provision: %s",
                         " ".join([objects.Node.get_node_fqdn(n)
                                   for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision,
                status=consts.TASK_STATUSES.pending,
                weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )
            db().commit()

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        deployment_message = None
        if (nodes_to_deploy or affected_nodes or
                objects.Release.is_lcm_supported(self.cluster.release)):
            if nodes_to_deploy:
                logger.debug("There are nodes to deploy: %s",
                             " ".join((objects.Node.get_node_fqdn(n)
                                       for n in nodes_to_deploy)))
            if affected_nodes:
                logger.debug("There are nodes affected by deployment: %s",
#......... the rest of this code is omitted .........
Developer: ekorekin, Project: fuel-web, Lines of code: 103, Source file: manager.py
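Examples 6 and 7 derive nodes_to_provision from nodes_to_deploy with filter(lambda n: ...). This code targets Python 2, where filter() returns a list; under Python 3 it returns a one-shot iterator, so iterating over nodes_to_provision more than once would silently yield nothing the second time. A minimal equivalent that behaves the same on both versions, assuming only the pending_addition and needs_reprovision attributes shown in the examples:

    # Same selection logic as the filter(...) call in Examples 6 and 7,
    # materialized into a list so it can be iterated repeatedly.
    nodes_to_provision = [
        n for n in nodes_to_deploy
        if n.pending_addition or n.needs_reprovision
    ]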

Example 7: _execute_async_content

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def _execute_async_content(self, supertask, deployment_tasks=None,
                               nodes_to_provision_deploy=None):
        """Processes supertask async in mule
        :param supertask: SqlAlchemy task object
        """

        nodes_to_delete = []

        if nodes_to_provision_deploy:
            nodes_to_deploy = objects.NodeCollection.get_by_ids(
                nodes_to_provision_deploy)
            nodes_to_provision = filter(lambda n: any([
                n.pending_addition,
                n.needs_reprovision]),
                nodes_to_deploy)
        else:
            nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
            nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
            nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)

        objects.Cluster.adjust_nodes_lists_on_controller_removing(
            self.cluster, nodes_to_delete, nodes_to_deploy)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not(nodes_to_provision_deploy) and \
            (not objects.Cluster.get_provisioning_info(self.cluster) and
                not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = self.delete_nodes(supertask, nodes_to_delete)

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision,
                weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )
            db().commit()

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(
                name=consts.TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                deployment_tasks=deployment_tasks,
                method_name='message'
#......... the rest of this code is omitted .........
Developer: thefuyang, Project: fuel-web, Lines of code: 103, Source file: manager.py

Example 8: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id,
            )
        )
        current_tasks = orm().query(Task).filter_by(
            cluster_id=self.cluster.id,
            name="deploy"
        )
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    orm().delete(subtask)
                orm().delete(task)
                orm().commit()

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)

        if not any([nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = 'deployment'
        orm().add(self.cluster)
        orm().commit()

        supertask = Task(
            name="deploy",
            cluster=self.cluster
        )
        orm().add(supertask)
        orm().commit()
        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            self._call_silently(
                task_deletion,
                tasks.DeletionTask
            )

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)

            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds system to
            # cobbler and reboots systems, so it has extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                method_name='message'
            )
            task_provision.cache = provision_message
            orm().add(task_provision)
            orm().commit()

            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                method_name='message'
            )
            task_deployment.cache = deployment_message
            orm().add(task_deployment)
            orm().commit()

            rpc.cast('naily', [provision_message, deployment_message])

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
        return supertask
Developer: akolinko, Project: product, Lines of code: 81, Source file: manager.py

Example 9: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        current_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
            name='deploy')

        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                db().delete(task)
                db().commit()

        obsolete_tasks = db().query(Task).filter_by(
            cluster_id=self.cluster.id,
        ).filter(
            Task.name.in_([
                'stop_deployment',
                'reset_environment'
            ])
        )
        for task in obsolete_tasks:
            db().delete(task)
        db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        supertask = Task(name='deploy', cluster=self.cluster)
        db().add(supertask)
        db().commit()

        # Run validation if user didn't redefine
        # provisioning and deployment information
        if not self.cluster.replaced_provisioning_info \
           and not self.cluster.replaced_deployment_info:
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask("node_deletion",
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask("provision",
                                                      weight=task_weight)
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == 'error':
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
#......... the rest of this code is omitted .........
Developer: iberezovskiy, Project: fuel-web, Lines of code: 103, Source file: manager.py

Example 10: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(self.cluster.name or self.cluster.id))

        current_tasks = db().query(Task).filter_by(cluster_id=self.cluster.id, name="deploy")
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    db().delete(subtask)
                db().delete(task)
                db().commit()

        task_messages = []

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = "deployment"
        db().add(self.cluster)
        db().commit()

        supertask = Task(name="deploy", cluster=self.cluster)
        db().add(supertask)
        db().commit()

        # checking admin intersection with untagged
        network_info = NetworkConfigurationSerializer.serialize_for_cluster(self.cluster)
        check_networks = supertask.create_subtask("check_networks")
        self._call_silently(check_networks, tasks.CheckNetworksTask, data=network_info, check_admin_untagged=True)
        db().refresh(check_networks)
        if check_networks.status == "error":
            return supertask
        db().delete(check_networks)
        db().commit()

        # checking prerequisites
        check_before = supertask.create_subtask("check_before_deployment")
        logger.debug("Checking prerequisites task: %s", check_before.uuid)
        self._call_silently(check_before, tasks.CheckBeforeDeploymentTask)
        db().refresh(check_before)
        # if failed to check prerequisites
        # then task is already set to error
        if check_before.status == "error":
            logger.debug("Checking prerequisites failed: %s", check_before.message)
            return supertask
        logger.debug("Checking prerequisites is successful, starting deployment...")
        db().delete(check_before)
        db().commit()

        # in case of Red Hat
        if self.cluster.release.operating_system == "RHEL":
            try:
                redhat_messages = self._redhat_messages(
                    supertask,
                    # provision only?
                    [{"uid": n.id, "platform_name": n.platform_name} for n in nodes_to_provision],
                )
            except Exception as exc:
                TaskHelper.update_task_status(supertask.uuid, status="error", progress=100, msg=str(exc))
                return supertask
            task_messages.extend(redhat_messages)
        # /in case of Red Hat

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s", " ".join([n.fqdn for n in nodes_to_provision]))
            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds system to
            # cobbler and reboots it, so it has extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(task_provision, tasks.ProvisionTask, method_name="message")
            db().refresh(task_provision)

            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == "error":
                return supertask

            task_provision.cache = provision_message
            db().add(task_provision)
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s", " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(task_deployment, tasks.DeploymentTask, method_name="message")
#......... the rest of this code is omitted .........
Developer: nfschina, Project: fuelweb, Lines of code: 103, Source file: manager.py

Example 11: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self):
        # Start applying the deployment changes
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )
        # Log the network configuration
        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        self._remove_obsolete_tasks()  # remove obsolete (stale) tasks

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # For an OpenStack environment (cluster_type == 1), keep the original
        # check for whether any nodes in the cluster have changed
        if self.cluster.cluster_type == 1:
            if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
                db().rollback()
                raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        # Run validation if user didn't redefine
        # provisioning and deployment information

        if (not objects.Cluster.get_provisioning_info(self.cluster) and
                not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                if self.cluster.cluster_type == 1:
                    self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(task_deletion, tasks.DeletionTask)
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                      weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)
        else:
            pass
#......... the rest of this code is omitted .........
Developer: yxh1990, Project: fuel-cloudmaster, Lines of code: 103, Source file: manager - 副本.py

Example 12: _execute_async_content

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def _execute_async_content(self, supertask):

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # Run validation if user didn't redefine
        # provisioning and deployment information

        if (not objects.Cluster.get_provisioning_info(self.cluster) and
                not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(
                consts.TASK_NAMES.node_deletion,
                weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(
                task_deletion,
                tasks.DeletionTask,
                tasks.DeletionTask.get_task_nodes_for_cluster(self.cluster))
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(
                consts.TASK_NAMES.provision,
                weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == consts.TASK_STATUSES.error:
                return

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(
                consts.TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                method_name='message'
            )

            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
#......... the rest of this code is omitted .........
Developer: vefimova, Project: fuel-web, Lines of code: 103, Source file: manager.py

Example 13: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self):
        # Start applying the deployment changes
        logger.info(u"Trying to start deployment at cluster '{0}'".format(self.cluster.name or self.cluster.id))
        # Log the network configuration (network validation runs before an OpenStack deployment)
        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(u"Network info:\n{0}".format(jsonutils.dumps(network_info, indent=4)))

        self._remove_obsolete_tasks()  # remove obsolete (stale) tasks

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        # For an OpenStack environment (cluster_type == 1), keep the original check for node changes in the cluster
        if self.cluster.cluster_type == 1:
            if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
                db().rollback()
                raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        # Run validation if user didn't redefine
        # provisioning and deployment information

        if not objects.Cluster.get_provisioning_info(self.cluster) and not objects.Cluster.get_deployment_info(
            self.cluster
        ):
            try:
                if self.cluster.cluster_type == 1:
                    self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion, weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)

            self._call_silently(task_deletion, tasks.DeletionTask)
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(nodes_to_provision)
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s", " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision, weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision, tasks.ProvisionTask, nodes_to_provision, method_name="message"
            )

            task_provision = objects.Task.get_by_uid(task_provision.id, fail_if_not_found=True, lock_for_update=True)
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)
        else:
            pass

        # nodes_to_deploy=self.cluster.nodes
        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s", " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)

            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment, tasks.DeploymentTask, nodes_to_deploy, method_name="message"
            )

            # clusterdeploymsg = ClusterdeployMsg(cluster_id=self.cluster.id,cluster_deploymsg='deployment_message')
            # db().add(clusterdeploymsg)
#......... the rest of this code is omitted .........
Developer: yxh1990, Project: fuel-cloudmaster, Lines of code: 103, Source file: manager.py

Example 14: execute

# Required import: from nailgun.task.task import TaskHelper [as alias]
# Or: from nailgun.task.task.TaskHelper import nodes_to_delete [as alias]
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )

        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        self._remove_obsolete_tasks()

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)
        # we should have task committed for processing in other threads
        db().commit()

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []
        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        # Run validation if user didn't redefine
        # provisioning and deployment information
        if not self.cluster.replaced_provisioning_info and \
                not self.cluster.replaced_deployment_info:
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            # we should have task committed for processing in other threads
            db().commit()
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calculation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                      weight=task_weight)
            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)
            # we should have task committed for processing in other threads
#......... the rest of this code is omitted .........
Developer: igajsin, Project: fuel-web, Lines of code: 103, Source file: manager.py


Note: The nailgun.task.task.TaskHelper.nodes_to_delete examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.