This article collects typical usage examples of the cast function from the Python module nailgun.rpc: what cast does, how it is called, and what real call sites look like. The 15 examples below are taken from code that uses nailgun.rpc.cast and are ordered by popularity.
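Most of the examples follow the same pattern: build the message for the orchestrator, store it on the task's cache attribute, commit the database session so the RPC receiver can later find the task by its uuid, and only then publish the message to the 'naily' queue with rpc.cast. The helper below is a minimal sketch of that pattern rather than code from the examples; cast_task_message is a hypothetical name, and it assumes Nailgun's db() session accessor and a Task-like object with a cache attribute.

from nailgun import rpc
from nailgun.db import db  # Nailgun's SQLAlchemy scoped-session accessor


def cast_task_message(task, message, queue='naily'):
    # Keep the serialized message on the task so it can be inspected or
    # re-sent later (the examples below store it in task.cache).
    task.cache = message
    db().add(task)
    # Commit before casting: the RPC receiver looks the task up by uuid,
    # so the task row must already be visible in the database.
    db().commit()
    # Publish the message to the queue consumed by the orchestrator.
    rpc.cast(queue, message)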
Example 1: execute
def execute(cls, task):
    logger.debug("DeploymentTask.execute(task=%s)" % task.uuid)
    message = cls.message(task)
    task.cache = message
    orm().add(task)
    orm().commit()
    rpc.cast('naily', message)
Example 2: execute
def execute(cls, task):
    logger.debug("ProvisionTask.execute(task=%s)" % task.uuid)
    message = cls.message(task)
    task.cache = message
    db().add(task)
    db().commit()
    rpc.cast('naily', message)
Example 3: execute
def execute(self, nodes_to_provision):
    """Run provisioning task on specified nodes."""
    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
    logger.debug('Nodes to provision: {0}'.format(
        ' '.join([n.fqdn for n in nodes_to_provision])))
    task_provision = Task(name='provision', cluster=self.cluster)
    db().add(task_provision)
    db().commit()
    provision_message = self._call_silently(
        task_provision,
        tasks.ProvisionTask,
        nodes_to_provision,
        method_name='message'
    )
    db().refresh(task_provision)
    task_provision.cache = provision_message
    for node in nodes_to_provision:
        node.pending_addition = False
        node.status = 'provisioning'
        node.progress = 0
    db().commit()
    rpc.cast('naily', provision_message)
    return task_provision
Example 4: execute
def execute(self, nodes_to_deployment):
    # locking nodes for update
    objects.NodeCollection.lock_nodes(nodes_to_deployment)
    objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment)
    logger.debug("Nodes to deploy: {0}".format(
        " ".join([n.fqdn for n in nodes_to_deployment])))
    task_deployment = Task(name="deployment", cluster=self.cluster)
    db().add(task_deployment)
    deployment_message = self._call_silently(
        task_deployment, tasks.DeploymentTask, nodes_to_deployment,
        method_name="message"
    )
    db().refresh(task_deployment)
    # locking task
    task_deployment = objects.Task.get_by_uid(
        task_deployment.id, fail_if_not_found=True, lock_for_update=True)
    # locking nodes
    objects.NodeCollection.lock_nodes(nodes_to_deployment)
    task_deployment.cache = deployment_message
    for node in nodes_to_deployment:
        node.status = "deploying"
        node.progress = 0
    db().commit()
    rpc.cast("naily", deployment_message)
    return task_deployment
Example 5: _execute_sync
def _execute_sync(self, sub_transaction):
    cluster = sub_transaction.cluster
    graph = objects.Cluster.get_deployment_graph(
        cluster, sub_transaction.graph_type)
    nodes = _get_nodes_to_run(
        cluster, graph.get("node_filter"), sub_transaction.cache.get("nodes"))
    for node in nodes:
        # set progress to show that node is in progress state
        node.progress = 1
        if not sub_transaction.dry_run:
            node.error_type = None
            node.error_msg = None
    resolver = role_resolver.RoleResolver(nodes)
    _adjust_graph_tasks(
        graph, cluster, resolver, sub_transaction.cache.get("tasks"))
    context = lcm.TransactionContext(
        _get_expected_state(cluster, nodes),
        _get_current_state(
            cluster, nodes, graph["tasks"],
            sub_transaction.cache.get("force")),
    )
    # Attach desired state to the sub transaction, so when we continue
    # our top-level transaction, the new state will be calculated on
    # top of this.
    _dump_expected_state(sub_transaction, context.new, graph["tasks"])
    message = make_astute_message(sub_transaction, context, graph, resolver)
    # Once rpc.cast() is called, the message is sent to Astute. By that
    # moment all transaction instances must exist in the database;
    # otherwise the RPC receiver may not find the entry to update and the
    # result will be wrong.
    db().commit()
    rpc.cast("naily", [message])
Example 6: execute
def execute(self, nodes_to_deployment):
    TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment)
    logger.debug('Nodes to deploy: {0}'.format(
        ' '.join([n.fqdn for n in nodes_to_deployment])))
    task_deployment = Task(name='deployment', cluster=self.cluster)
    db().add(task_deployment)
    db().commit()
    deployment_message = self._call_silently(
        task_deployment,
        tasks.DeploymentTask,
        nodes_to_deployment,
        method_name='message')
    db().refresh(task_deployment)
    task_deployment.cache = deployment_message
    for node in nodes_to_deployment:
        node.status = 'deploying'
        node.progress = 0
    db().commit()
    rpc.cast('naily', deployment_message)
    return task_deployment
Example 7: execute
def execute(cls, task, data):
    logger.debug("%s(uuid=%s) is running" % (cls.__name__, task.uuid))
    message = cls.message(task, data)
    task.cache = message
    task.result = {"release_info": data}
    db().add(task)
    db().commit()
    rpc.cast("naily", message)
Example 8: execute
def execute(self, force=False, **kwargs):
    try:
        self.clear_tasks_history(force=force)
    except errors.TaskAlreadyRunning:
        raise errors.DeploymentAlreadyStarted(
            "Can't reset environment '{0}' when "
            "running deployment task exists.".format(
                self.cluster.id
            )
        )
    # FIXME(aroma): remove updating of 'deployed_before'
    # when stop action is reworked. 'deployed_before'
    # flag identifies whether stop action is allowed for the
    # cluster. Please, refer to [1] for more details.
    # [1]: https://bugs.launchpad.net/fuel/+bug/1529691
    objects.Cluster.set_deployed_before_flag(self.cluster, value=False)
    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt
    )
    for node in nodes:
        objects.Node.reset_vms_created_state(node)
    objects.ClusterPluginLinkCollection.delete_by_cluster_id(
        self.cluster.id)
    db().commit()
    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)
    reset_nodes = supertask.create_subtask(
        consts.TASK_NAMES.reset_nodes
    )
    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_keys
    )
    remove_ironic_bootstrap_task = supertask.create_subtask(
        consts.TASK_NAMES.remove_ironic_bootstrap
    )
    db().commit()
    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(reset_nodes),
        tasks.RemoveIronicBootstrap.message(remove_ironic_bootstrap_task),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])
    TaskHelper.update_action_log(supertask, al)
    return supertask
Example 9: execute
def execute(self, task=None):
    # the task argument is kept for backward compatibility
    message = self.get_message()
    logger.debug("%s method is called with: %s",
                 self.task.name, message)
    db().commit()
    rpc.cast('naily', message)
Example 10: _execute_sync
def _execute_sync(self, sub_transaction):
    cluster = sub_transaction.cluster
    graph = objects.Cluster.get_deployment_graph(
        cluster, sub_transaction.graph_type
    )
    nodes = _get_nodes_to_run(
        cluster,
        graph.get('node_filter'),
        sub_transaction.cache.get('nodes')
    )
    logger.debug(
        "execute graph %s on nodes %s",
        sub_transaction.graph_type, [n.id for n in nodes]
    )
    for node in nodes:
        # set progress to show that node is in progress state
        node.progress = 1
        if not sub_transaction.dry_run:
            node.error_type = None
            node.error_msg = None
    # primary roles must be initialized for the cluster before
    # the role resolver is created
    objects.Cluster.set_primary_tags(cluster, nodes)
    resolver = resolvers.TagResolver(nodes)
    _adjust_graph_tasks(
        graph,
        cluster,
        resolver,
        sub_transaction.cache.get('tasks'))
    context = lcm.TransactionContext(
        _get_expected_state(cluster, nodes),
        _get_current_state(
            cluster, nodes, graph['tasks'],
            sub_transaction.cache.get('force')
        ))
    # Attach desired state to the sub transaction, so when we continue
    # our top-level transaction, the new state will be calculated on
    # top of this.
    _dump_expected_state(sub_transaction, context.new, graph['tasks'])
    message = make_astute_message(
        sub_transaction, context, graph, resolver
    )
    objects.Transaction.on_start(sub_transaction)
    helpers.TaskHelper.create_action_log(sub_transaction)
    # Once rpc.cast() is called, the message is sent to Astute. By that
    # moment all transaction instances must exist in the database;
    # otherwise the RPC receiver may not find the entry to update and the
    # result will be wrong.
    db().commit()
    rpc.cast('naily', [message])
Example 11: execute
def execute(self):
    deploy_running = db().query(Task).filter_by(
        cluster=self.cluster,
        name=consts.TASK_NAMES.deploy,
        status='running'
    ).first()
    if deploy_running:
        raise errors.DeploymentAlreadyStarted(
            u"Can't reset environment '{0}' when "
            u"deployment is running".format(
                self.cluster.id
            )
        )
    obsolete_tasks = db().query(Task).filter_by(
        cluster_id=self.cluster.id,
    ).filter(
        Task.name.in_([
            consts.TASK_NAMES.deploy,
            consts.TASK_NAMES.deployment,
            consts.TASK_NAMES.stop_deployment
        ])
    )
    for task in obsolete_tasks:
        db().delete(task)
    nodes = objects.Cluster.get_nodes_by_role(
        self.cluster, consts.VIRTUAL_NODE_TYPES.virt)
    for node in nodes:
        objects.Node.reset_vms_created_state(node)
    db().commit()
    supertask = Task(
        name=consts.TASK_NAMES.reset_environment,
        cluster=self.cluster
    )
    db().add(supertask)
    al = TaskHelper.create_action_log(supertask)
    remove_keys_task = supertask.create_subtask(
        consts.TASK_NAMES.reset_environment
    )
    db().commit()
    rpc.cast('naily', [
        tasks.ResetEnvironmentTask.message(supertask),
        tasks.RemoveClusterKeys.message(remove_keys_task)
    ])
    TaskHelper.update_action_log(supertask, al)
    return supertask
Example 12: _execute_async
def _execute_async(self, task_deployment_id, nodes_ids_to_deployment,
                   deployment_tasks=None, graph_type=None, force=False,
                   dry_run=False, noop_run=False):
    """Supposed to be executed inside separate process.

    :param task_deployment_id: id of task
    :param nodes_ids_to_deployment: node ids
    :param graph_type: graph type
    :param force: force
    :param dry_run: the dry run flag
    :param noop_run: the noop run flag
    """
    task_deployment = objects.Task.get_by_uid(
        task_deployment_id,
        fail_if_not_found=True,
        lock_for_update=False
    )
    nodes_to_deployment = objects.NodeCollection.filter_by_list(
        None,
        'id',
        nodes_ids_to_deployment,
        order_by='id'
    )
    self.reset_error_message(nodes_to_deployment, dry_run)
    deployment_message = self._call_silently(
        task_deployment,
        self.get_deployment_task(),
        nodes_to_deployment,
        deployment_tasks=deployment_tasks,
        method_name='message',
        graph_type=graph_type,
        force=force,
        dry_run=dry_run,
        noop_run=noop_run
    )
    db().refresh(task_deployment)
    # locking task
    task_deployment = objects.Task.get_by_uid(
        task_deployment_id,
        fail_if_not_found=True,
        lock_for_update=True
    )
    task_deployment.cache = deployment_message
    db().commit()
    rpc.cast('naily', deployment_message)
    return task_deployment
Example 13: execute
def execute(self, nodes_to_provision, **kwargs):
    """Run provisioning task on specified nodes."""
    # locking nodes
    nodes_ids = [node.id for node in nodes_to_provision]
    nodes = objects.NodeCollection.filter_by_list(
        None,
        'id',
        nodes_ids,
        order_by='id'
    )
    logger.debug('Nodes to provision: {0}'.format(
        ' '.join([objects.Node.get_node_fqdn(n)
                  for n in nodes_to_provision])))
    task_provision = Task(name=consts.TASK_NAMES.provision,
                          status=consts.TASK_STATUSES.pending,
                          cluster=self.cluster)
    db().add(task_provision)
    for node in nodes:
        objects.Node.reset_vms_created_state(node)
    db().commit()
    provision_message = self._call_silently(
        task_provision,
        tasks.ProvisionTask,
        nodes_to_provision,
        method_name='message'
    )
    task_provision = objects.Task.get_by_uid(
        task_provision.id,
        fail_if_not_found=True,
        lock_for_update=True
    )
    task_provision.cache = provision_message
    objects.NodeCollection.lock_for_update(nodes).all()
    for node in nodes_to_provision:
        node.pending_addition = False
        node.status = consts.NODE_STATUSES.provisioning
        node.progress = 0
    db().commit()
    rpc.cast('naily', provision_message)
    return task_provision
Example 14: execute
def execute(cls, task):
    logger.debug("DumpTask: task=%s" % task.uuid)
    message = {
        'method': 'dump_environment',
        'respond_to': 'dump_environment_resp',
        'args': {
            'task_uuid': task.uuid,
            'lastdump': settings.DUMP["lastdump"]
        }
    }
    task.cache = message
    db().add(task)
    db().commit()
    rpc.cast('naily', message)
Example 15: execute
def execute(cls, task, data):
    logger.debug("Download release task(uuid=%s) is running" % task.uuid)
    message = {
        'method': 'download_release',
        'respond_to': 'download_release_resp',
        'args': {
            'task_uuid': task.uuid,
            'release_info': data
        }
    }
    task.cache = message
    task.result = {'release_info': data}
    db().add(task)
    db().commit()
    rpc.cast('naily', message)
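The last two examples show the raw shape of a message handed to rpc.cast: a 'method' to run on the orchestrator side, a 'respond_to' callback name for the reply, and an 'args' dict that always carries the task_uuid. Because cast only publishes to a queue, the message can be captured in tests instead of being sent to a broker. The sketch below is a hypothetical test helper, not part of the examples above; capture_cast is an invented name and the snippet assumes the standard unittest.mock module.

from unittest import mock


def capture_cast(func, *args, **kwargs):
    """Run `func` with nailgun.rpc.cast replaced by a mock and return the
    (queue_name, message) pair that the code under test tried to cast."""
    with mock.patch('nailgun.rpc.cast') as cast_mock:
        func(*args, **kwargs)
    queue_name, message = cast_mock.call_args[0]
    return queue_name, message


# Example use (assuming `task` was prepared by a test fixture):
# queue_name, message = capture_cast(DumpTask.execute, task)
# assert queue_name == 'naily'
# assert message['method'] == 'dump_environment'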