本文整理汇总了Python中nailgun.task.task.TaskHelper类的典型用法代码示例。如果您正苦于以下问题:Python TaskHelper类的具体用法?Python TaskHelper怎么用?Python TaskHelper使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了TaskHelper类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: execute
def execute(self, data, check_admin_untagged=False):
    """Run a ``check_networks`` task for this cluster, replacing any
    previously stored one.

    :param data: network configuration payload handed to the check task
    :param check_admin_untagged: forwarded to CheckNetworksTask
    :returns: the (possibly completed) check_networks Task instance
    """
    # Only one check_networks task may exist per cluster: drop a stale one.
    stale = db().query(Task).filter_by(
        cluster=self.cluster,
        name="check_networks"
    ).first()
    if stale is not None:
        db().delete(stale)
        db().commit()

    task = Task(name="check_networks", cluster=self.cluster)
    db().add(task)
    db().commit()

    self._call_silently(
        task,
        tasks.CheckNetworksTask,
        data,
        check_admin_untagged
    )

    # The task may have been updated elsewhere; re-read its state.
    db().refresh(task)
    if task.status == 'running':
        # Check completed synchronously without being marked errored:
        # promote it to the terminal 'ready' state.
        TaskHelper.update_task_status(
            task.uuid,
            status="ready",
            progress=100
        )
    return task
示例2: _call_silently
def _call_silently(self, task, instance, *args, **kwargs):
    """Invoke a task handler, recording the outcome in the action log
    instead of letting exceptions propagate.

    :param task: task entity the handler operates on
    :param instance: object providing the handler method
    :param kwargs: may contain ``method_name`` (default ``'execute'``)
    :returns: whatever the handler returns, or None on early exit/error
    """
    # Every invocation gets an action_log entry, even if we bail out early.
    action_log = TaskHelper.create_action_log(task)
    handler = getattr(instance, kwargs.pop('method_name', 'execute'))

    if task.status == consts.TASK_STATUSES.error:
        TaskHelper.update_action_log(task, action_log)
        return

    try:
        result = handler(task, *args, **kwargs)
    except errors.NoChanges as e:
        # "Nothing to do" is a successful outcome, not a failure.
        self._finish_task(task, action_log, consts.TASK_STATUSES.ready, str(e))
    except Exception as exc:
        # Log a traceback unless the exception explicitly opts out via a
        # falsy ``log_traceback`` attribute.
        if getattr(exc, "log_traceback", True):
            logger.error(traceback.format_exc())
        self._finish_task(task, action_log, consts.TASK_STATUSES.error,
                          str(exc))
    else:
        # For asynchronous tasks this is not the final update — the rpc
        # receiver updates the action log again later.
        TaskHelper.update_action_log(task, action_log)
        return result
示例3: check_before_deployment
def check_before_deployment(self, supertask):
    """Run the pre-deployment check subtasks, raising on any failure.

    :param supertask: parent task the check subtasks are attached to
    :raises errors.CheckBeforeDeploymentError: if either check errored
    """
    # First check: the admin network must not intersect untagged networks.
    network_info = self.serialize_network_cfg(self.cluster)
    network_info["networks"] = [
        n for n in network_info["networks"]
        if n["name"] != "fuelweb_admin"
    ]

    check_networks = supertask.create_subtask(TASK_NAMES.check_networks)
    self._call_silently(
        check_networks,
        tasks.CheckNetworksTask,
        data=network_info,
        check_admin_untagged=True
    )
    if check_networks.status == TASK_STATUSES.error:
        logger.warning(
            "Checking networks failed: %s", check_networks.message)
        raise errors.CheckBeforeDeploymentError(check_networks.message)
    TaskHelper.set_ready_if_not_finished(check_networks)
    # The subtask has served its purpose; remove it and re-read the parent.
    db().delete(check_networks)
    db().refresh(supertask)
    db().flush()

    # Second check: remaining deployment prerequisites.
    check_before = supertask.create_subtask(
        TASK_NAMES.check_before_deployment)
    logger.debug("Checking prerequisites task: %s", check_before.uuid)
    self._call_silently(check_before, tasks.CheckBeforeDeploymentTask)
    # If the check failed, _call_silently already set the error status.
    if check_before.status == TASK_STATUSES.error:
        logger.warning(
            "Checking prerequisites failed: %s", check_before.message)
        raise errors.CheckBeforeDeploymentError(check_before.message)
    logger.debug(
        "Checking prerequisites is successful, starting deployment...")
    TaskHelper.set_ready_if_not_finished(check_before)
    db().delete(check_before)
    db().refresh(supertask)
    db().flush()
示例4: execute
def execute(self, data, check_admin_untagged=False):
    """Create and run a ``check_networks`` task for this cluster.

    :param data: network configuration payload for the check
    :param check_admin_untagged: forwarded to CheckNetworksTask
    :returns: the check_networks Task instance
    """
    task = Task(name="check_networks", cluster=self.cluster)
    db().add(task)
    db().commit()

    self._call_silently(
        task, tasks.CheckNetworksTask, data, check_admin_untagged)

    db().refresh(task)
    # Still 'running' after a synchronous check means it passed.
    if task.status == "running":
        TaskHelper.update_task_status(
            task.uuid, status="ready", progress=100)
    return task
示例5: execute
def execute(self, force=False, **kwargs):
    """Reset the environment: clear task history, reset node/VM state and
    dispatch the reset/cleanup tasks to the orchestrator.

    :param force: forwarded to ``clear_tasks_history`` to override
        already-running tasks
    :raises errors.DeploymentAlreadyStarted: if a deployment task is still
        running and ``force`` is not set
    :returns: the ``reset_environment`` supertask
    """
    try:
        self.clear_tasks_history(force=force)
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            "Can't reset environment '{0}' when "
            "running deployment task exists.".format(
                self.cluster.id
            )
        )

    # FIXME(aroma): remove updating of 'deployed_before'
    # when stop action is reworked. 'deployed_before'
    # flag identifies whether stop action is allowed for the
    # cluster. Please, refer to [1] for more details.
    # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
    objects.Cluster.set_deployed_before_flag(self.cluster, value=False)

    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt
    )
    for node in nodes:
        objects.Node.reset_vms_created_state(node)

    objects.ClusterPluginLinkCollection.delete_by_cluster_id(
        self.cluster.id)

    db().commit()

    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)

    reset_nodes = supertask.create_subtask(
        consts.TASK_NAMES.reset_nodes
    )
    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_keys
    )
    remove_ironic_bootstrap_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_ironic_bootstrap
    )

    # Consistency fix: was `db.commit()`; every other call site in this
    # module goes through the session factory as `db().commit()`.
    db().commit()

    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(reset_nodes),
        tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])
    TaskHelper.update_action_log(supertask, al)
    return supertask
示例6: execute
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None,
            force=False, graph_type=None, **kwargs):
    """Start a cluster deployment and hand the heavy lifting off to an
    asynchronous worker via mule.

    :param nodes_to_provision_deploy: explicit node list, or None to let
        TaskHelper compute the nodes to deploy
    :param deployment_tasks: forwarded to the async executor
    :param force: forwarded to TaskHelper.nodes_to_deploy
    :param graph_type: forwarded to the async executor
    :raises errors.DeploymentAlreadyStarted: if another task is running
    :returns: the pending deployment supertask
    """
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )

    try:
        self.check_running_task()
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            'Cannot perform the actions because '
            'there are another running tasks.'
        )

    supertask = Task(name=self.deployment_type, cluster=self.cluster,
                     dry_run=is_dry_run(kwargs),
                     status=consts.TASK_STATUSES.pending)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = (nodes_to_provision_deploy or
                       TaskHelper.nodes_to_deploy(self.cluster, force))
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
    self.ensure_nodes_changed(
        nodes_to_provision, nodes_to_deploy, nodes_to_delete
    )

    db().flush()
    TaskHelper.create_action_log(supertask)

    current_cluster_status = self.cluster.status
    # update cluster status
    if not is_dry_run(kwargs):
        self.cluster.status = consts.CLUSTER_STATUSES.deployment

    # we should have task committed for processing in other threads
    db().commit()

    nodes_ids_to_deploy = (
        [node.id for node in nodes_to_provision_deploy]
        if nodes_to_provision_deploy else None
    )
    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        supertask.id,
        nodes_to_provision_deploy=nodes_ids_to_deploy,
        deployment_tasks=deployment_tasks,
        force=force,
        graph_type=graph_type,
        current_cluster_status=current_cluster_status,
        **kwargs
    )
    return supertask
示例7: _call_silently
def _call_silently(self, task, instance, *args, **kwargs):
    """Run a task handler, converting any exception into an 'error'
    task status instead of propagating it.

    :param kwargs: may contain ``method_name`` (default ``"execute"``)
    :returns: the handler's return value, or None on early exit/error
    """
    handler = getattr(instance, kwargs.pop("method_name", "execute"))
    if task.status == "error":
        return
    try:
        return handler(task, *args, **kwargs)
    except Exception as exc:
        message = str(exc)
        # Log a traceback unless the exception explicitly opts out via
        # a falsy ``log_traceback`` attribute.
        if getattr(exc, "log_traceback", True):
            logger.error(traceback.format_exc())
        TaskHelper.update_task_status(
            task.uuid, status="error", progress=100, msg=message)
示例8: execute
def execute(self):
    """Reset the environment: purge obsolete deployment tasks, reset VM
    creation state and dispatch the reset/cleanup tasks to the orchestrator.

    :raises errors.DeploymentAlreadyStarted: if a deploy task is running
    :returns: the ``reset_environment`` supertask
    """
    deploy_running = db().query(Task).filter_by(
        cluster=self.cluster,
        name=consts.TASK_NAMES.deploy,
        status='running'
    ).first()
    if deploy_running:
        raise errors.DeploymentAlreadyStarted(
            u"Can't reset environment '{0}' when "
            u"deployment is running".format(
                self.cluster.id
            )
        )

    # Deployment-related tasks from previous runs are no longer relevant.
    obsolete_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
    ).filter(
        Task.name.in_([
            consts.TASK_NAMES.deploy,
            consts.TASK_NAMES.deployment,
            consts.TASK_NAMES.stop_deployment
        ])
    )
    for task in obsolete_tasks:
        db().delete(task)

    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
    for node in nodes:
        objects.Node.reset_vms_created_state(node)

    db().commit()

    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)

    # NOTE(review): this subtask feeds RemoveClusterKeys below yet is named
    # reset_environment; consts.TASK_NAMES.remove_keys looks intended.
    # Left unchanged to preserve behavior — confirm with message consumers.
    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.reset_environment
    )

    # Consistency fix: was `db.commit()`; every other call site in this
    # module goes through the session factory as `db().commit()`.
    db().commit()

    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(supertask),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])
    TaskHelper.update_action_log(supertask, al)
    return supertask
示例9: _finish_task
def _finish_task(self, task, log_item, status, message):
    """Finalize a task: record its terminal status and close out the
    associated action-log entry.

    :param task: task entity to finalize
    :param log_item: action-log record created for this task
    :param status: terminal status value to set
    :param message: human-readable outcome message
    """
    objects.Task.update(
        task,
        {'status': status, 'progress': 100, 'message': message}
    )
    # NOTE(romcheg): Flushing the data is required to unlock
    # tasks in order to temporary fix issues with
    # the deadlock detection query in tests and let the tests pass.
    # TODO(akislitsky): Get rid of this flush as soon as
    # task locking issues are resolved.
    db().flush()
    TaskHelper.update_action_log(task, log_item)
    db().commit()
示例10: execute
def execute(self):
    """Create and run a ``check_before_deployment`` task for this cluster.

    :returns: the check_before_deployment Task instance
    """
    task = Task(name='check_before_deployment', cluster=self.cluster)
    db().add(task)
    db().commit()

    self._call_silently(task, tasks.CheckBeforeDeploymentTask)

    db().refresh(task)
    # Still 'running' after the synchronous check means it passed.
    if task.status == 'running':
        TaskHelper.update_task_status(
            task.uuid, status="ready", progress=100)
    return task
示例11: create_action_log
def create_action_log(self, task_instance, operation_nodes):
    """Persist an ActionLog record describing ``task_instance``.

    :param task_instance: task the log entry is created for
    :param operation_nodes: nodes involved in the operation, forwarded
        to TaskHelper.prepare_action_log_kwargs
    """
    kwargs = TaskHelper.prepare_action_log_kwargs(
        task_instance, operation_nodes)
    objects.ActionLog.create(kwargs)
示例12: execute
def execute(self, nodes_to_provision_deploy=None, deployment_tasks=None):
    """Start a deployment for this cluster and hand it off to an
    asynchronous worker via mule.

    :param nodes_to_provision_deploy: explicit node list, or None to let
        TaskHelper compute the nodes to deploy
    :param deployment_tasks: forwarded to the async executor
    :raises errors.WrongNodeStatus: if there is nothing to deploy
    :returns: the deployment supertask
    """
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id
        )
    )
    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(
        u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)
        )
    )

    self.check_no_running_deployment(self.cluster)
    self._remove_obsolete_tasks()

    supertask = Task(name=self.deployment_type, cluster=self.cluster)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = (nodes_to_provision_deploy or
                       TaskHelper.nodes_to_deploy(self.cluster))
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

    # No provisioning, deployment or deletion scheduled — nothing to do.
    if not (nodes_to_provision or nodes_to_deploy or nodes_to_delete):
        db().rollback()
        raise errors.WrongNodeStatus("No changes to deploy")

    db().flush()
    TaskHelper.create_action_log(supertask)
    # we should have task committed for processing in other threads
    db().commit()

    nodes_ids_to_deploy = (
        [node.id for node in nodes_to_provision_deploy]
        if nodes_to_provision_deploy else None
    )
    mule.call_task_manager_async(
        self.__class__,
        '_execute_async',
        self.cluster.id,
        supertask.id,
        nodes_to_provision_deploy=nodes_ids_to_deploy,
        deployment_tasks=deployment_tasks
    )
    return supertask
示例13: make_deploy_msgs
def make_deploy_msgs(self,cluster,supertask,deploymsg,status):
    """Split a deployment message into per-role start/stop messages.

    For every role (from a fixed, ordered role list) present on the
    cluster's eligible nodes, a deployment subtask is created and a copy
    of ``deploymsg`` restricted to that role is produced, with the
    per-role ``action`` set to "start" (status == 2) or "stop" (otherwise).

    :param cluster: cluster whose nodes are inspected
    :param supertask: parent task that deployment subtasks hang off
    :param deploymsg: template orchestrator message to be specialized
    :param status: 1 reverses the role ordering; 2 means "start",
        any other value means "stop"
    :returns: list of per-role messages ordered by the role list, or
        ``supertask`` if a generated subtask is already in error state
    """
    #ebs_rolelist=["gangliasrv","nagiossrv","gangliacli","nagioscli"]
    task_messages=[]
    # Fixed role ordering; reversed when status == 1 (presumably the
    # shutdown order is the inverse of the startup order — TODO confirm).
    ebs_rolelist=["keepalived","onecmdb","activemq","redis","nagios","ganglia","mysql","cloudmaster","clmrs","octopus","cmagent","x86master","rabbitmq","cmgather","monitor","AlarmApp","autoAlarm","ceilometer","PerformancePM","notify"]
    if status== 1:
        ebs_rolelist=["notify","PerformancePM","ceilometer","autoAlarm","AlarmApp","monitor","cmgather","rabbitmq","x86master","cmagent","octopus","clmrs","cloudmaster","mysql","ganglia","nagios","redis","activemq","onecmdb","keepalived"]
    # Fetch the eligible nodes of the current cluster (per
    # TaskHelper.nodes_to_startorstop; exact readiness criteria live there).
    nodes_to_startorstop=TaskHelper.nodes_to_startorstop(cluster)
    # Collect the union of all roles across those nodes.
    nodes_roles=[]
    for node in nodes_to_startorstop:
        nodes_roles.extend(node.roles)
    nodes_roles=list(set(nodes_roles))
    logger.info(deploymsg)
    for role in nodes_roles:
        if role in ebs_rolelist:
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)
            db().commit()
            # Deep-copy the template so each role gets an independent message.
            newdeploymsg=copy.deepcopy(deploymsg)
            newdeploymsg['respond_to']="start_stop_resp"
            newdeploymsg['args']['task_uuid']=task_deployment.uuid
            deployment_infos=[]
            for deployment_info in deploymsg['args']['deployment_info']:
                if deployment_info["role"] != role:
                    # NOTE(review): removal from the copy is redundant — the
                    # whole list is overwritten with deployment_infos below.
                    newdeploymsg['args']['deployment_info'].remove(deployment_info)
                else:
                    # NOTE(review): this mutates the entry of the ORIGINAL
                    # deploymsg, and that original dict is what gets placed
                    # into the new message (not the deep copy).
                    if status == 2:
                        deployment_info[role]['action']="start"
                        logger.info(u"匹配到角色{0},节点id {1},开始启动...".format(role,deployment_info["ip"]))
                    else:
                        deployment_info[role]['action']="stop"
                        logger.info(u"匹配到角色{0},节点id {1},开始停止...".format(role,deployment_info["ip"]))
                    deployment_infos.append(deployment_info)
            # Keep only this role's entries in the outgoing message.
            newdeploymsg['args']['deployment_info']=deployment_infos
            task_messages.append(newdeploymsg)
            # Re-read the subtask with a row lock before caching the message.
            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask
            task_deployment.cache = newdeploymsg
            db().commit()
            self.update_cluster_role_status(cluster,role,status)
    # Re-order the collected messages to follow the role list ordering.
    new_task_messages=[]
    logger.info(len(task_messages))
    for ebsrole in ebs_rolelist:
        for task in task_messages:
            if task['args']['deployment_info'][0]['role'] == ebsrole:
                new_task_messages.append(task)
    return new_task_messages
示例14: _call_silently
def _call_silently(self, task, instance, *args, **kwargs):
    """Invoke a task handler; on failure mark the task as errored and
    record the outcome in the action log instead of propagating.

    :param kwargs: may contain ``method_name`` (default ``'execute'``)
    :returns: the handler's return value, or None on early exit/error
    """
    # create action_log for task
    action_log = TaskHelper.create_action_log(task)
    handler = getattr(instance, kwargs.pop('method_name', 'execute'))

    if task.status == TASK_STATUSES.error:
        TaskHelper.update_action_log(task, action_log)
        return

    try:
        result = handler(task, *args, **kwargs)
    except Exception as exc:
        message = str(exc)
        # Log a traceback unless the exception explicitly opts out via a
        # falsy ``log_traceback`` attribute.
        if getattr(exc, "log_traceback", True):
            logger.error(traceback.format_exc())
        # update task entity with given data
        objects.Task.update(task, {
            'status': 'error',
            'progress': 100,
            'message': message,
        })
        TaskHelper.update_action_log(task, action_log)
    else:
        # update action_log instance for task
        # for asynchronous task it will be not final update
        # as they also are updated in rpc receiver
        TaskHelper.update_action_log(task, action_log)
        return result
def _redhat_messages(self, supertask, nodes_info):
    """Build the orchestrator messages for the Red Hat credential and
    license check subtasks.

    :param supertask: parent task the check subtasks are attached to
    :param nodes_info: node data forwarded to the license check
    :raises errors.RedHatSetupError: if either check subtask errored
    :returns: ``supertask`` when no RHEL account is configured, otherwise
        the list of generated subtask messages
    """
    account = db().query(RedHatAccount).first()
    if not account:
        TaskHelper.update_task_status(
            supertask.uuid, status="error", progress=100,
            msg="RHEL account is not found")
        return supertask

    rhel_data = {
        "release_id": supertask.cluster.release.id,
        "release_name": supertask.cluster.release.name,
        "redhat": {
            "license_type": account.license_type,
            "username": account.username,
            "password": account.password,
            "satellite": account.satellite,
            "activation_key": account.activation_key,
        },
    }

    subtasks = [
        supertask.create_subtask("redhat_check_credentials"),
        supertask.create_subtask("redhat_check_licenses"),
    ]
    # BUG FIX: `map(lambda t: setattr(t, "weight", 0.01), subtasks)` is lazy
    # on Python 3, so the side effects never ran; an explicit loop behaves
    # identically on Python 2 and correctly on Python 3.
    for subtask in subtasks:
        subtask.weight = 0.01
    db().commit()

    subtask_messages = [
        self._call_silently(
            subtasks[0], tasks.RedHatCheckCredentialsTask,
            rhel_data, method_name="message"),
        self._call_silently(
            subtasks[1], tasks.RedHatCheckLicensesTask,
            rhel_data, nodes_info, method_name="message"),
    ]

    for task, message in zip(subtasks, subtask_messages):
        task.cache = message
    db().commit()

    # Same laziness fix as above: `map(db().refresh, subtasks)` never
    # executed on Python 3.
    for task in subtasks:
        db().refresh(task)

    for task in subtasks:
        if task.status == "error":
            raise errors.RedHatSetupError(task.message)
    return subtask_messages