This page collects typical usage examples of the serialize function from the Python module nailgun.orchestrator.deployment_serializers. If you are wondering what the Python serialize function does, how to call it, or what real calls to it look like, the hand-picked code examples here may help.
The following shows 15 code examples of the serialize function, ordered by popularity by default.
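Before diving into the examples, the sketch below summarizes the two call signatures that recur on this page: a newer form that takes an AstuteGraph plus the cluster and its nodes (optionally with an ignore_customized flag), and an older form that takes only the cluster and, in some versions, the nodes. The deployment_serializers module path is confirmed by the mock.patch targets in the tests below; the AstuteGraph import path and the wrapper name serialize_deployment_info are assumptions made purely for illustration.

# Minimal sketch of the newer call pattern seen in several examples below.
# Older code on this page calls deployment_serializers.serialize(cluster)
# or serialize(cluster, nodes) without the graph argument.
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator.orchestrator_graph import AstuteGraph  # import path assumed

def serialize_deployment_info(cluster, nodes, ignore_customized=False):
    # Build the Astute deployment graph for the cluster, then serialize the
    # given nodes; ignore_customized=True serializes everything from scratch,
    # ignoring any user-supplied (replaced) deployment data.
    graph = AstuteGraph(cluster)
    return deployment_serializers.serialize(
        graph, cluster, nodes, ignore_customized=ignore_customized)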
Example 1: test_deployment_serialization_ignore_customized
def test_deployment_serialization_ignore_customized(self, _):
    cluster = self._create_cluster_with_extensions()

    data = [{"uid": n.uid} for n in cluster.nodes]

    mserializer = mock.MagicMock()
    mserializer.return_value = mock.MagicMock()
    mserializer.return_value.serialize.return_value = data

    with mock.patch(
            'nailgun.orchestrator.deployment_serializers.'
            'get_serializer_for_cluster',
            return_value=mserializer):
        with mock.patch('nailgun.orchestrator.deployment_serializers.'
                        'fire_callback_on_deployment_data_serialization'
                        ) as mfire_callback:
            replaced_data = ["it's", "something"]
            with mock.patch.object(
                    cluster.nodes[0], 'replaced_deployment_info',
                    new_callable=mock.Mock(return_value=replaced_data)):
                graph = orchestrator_graph.AstuteGraph(cluster)
                deployment_serializers.serialize(
                    graph, cluster, cluster.nodes, ignore_customized=True)

    mfire_callback.assert_called_once_with(data, cluster, cluster.nodes)
Example 2: test_block_device_disks
def test_block_device_disks(self):
    self.env.create(
        release_kwargs={'version': self.env_version},
        cluster_kwargs={
            'mode': consts.CLUSTER_MODES.ha_compact,
            'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
            'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
    self.cluster_db = self.env.clusters[0]
    self.env.create_node(
        cluster_id=self.cluster_db.id,
        roles=['cinder-block-device']
    )
    self.env.create_node(
        cluster_id=self.cluster_db.id,
        roles=['controller']
    )

    serialized_for_astute = deployment_serializers.serialize(
        AstuteGraph(self.cluster_db),
        self.cluster_db,
        self.cluster_db.nodes)

    for node in serialized_for_astute['nodes']:
        self.assertIn("node_volumes", node)
        for node_volume in node["node_volumes"]:
            if node_volume["id"] == "cinder-block-device":
                self.assertEqual(node_volume["volumes"], [])
            else:
                self.assertNotEqual(node_volume["volumes"], [])
Example 3: message
def message(cls, task, nodes):
    logger.debug("%s.message(task=%s)", cls.__name__, task.uuid)

    for n in nodes:
        if n.pending_roles:
            n.roles += n.pending_roles
            n.pending_roles = []
        n.status = 'provisioned'
        n.progress = 0

    orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
    serialized_cluster = deployment_serializers.serialize(
        orchestrator_graph, task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False

    rpc_message = make_astute_message(
        task,
        'deploy',
        'deploy_resp',
        {
            'deployment_info': serialized_cluster
        }
    )
    db().flush()
    return rpc_message
Example 4: message
def message(cls, task):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
    task.cluster.prepare_for_deployment()
    nodes = TaskHelper.nodes_to_deploy(task.cluster)
    nodes_ids = [n.id for n in nodes]

    for n in db().query(Node).filter_by(
            cluster=task.cluster).order_by(Node.id):
        # However, we must not pass nodes which are set to be deleted.
        if n.pending_deletion:
            continue
        if n.id in nodes_ids:  # It's a node which we need to redeploy
            n.pending_addition = False
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            if n.status in ('deploying',):
                n.status = 'provisioned'
                n.progress = 0
            db().add(n)
    db().commit()

    # here we replace provisioning data if user redefined them
    serialized_cluster = task.cluster.replaced_deployment_info or \
        deployment_serializers.serialize(task.cluster)

    return {
        'method': 'deploy',
        'respond_to': 'deploy_resp',
        'args': {
            'task_uuid': task.uuid,
            'deployment_info': serialized_cluster}}
Example 5: message
def message(cls, task, nodes):
    logger.debug("%s.message(task=%s)", cls.__name__, task.uuid)

    for n in nodes:
        if n.pending_roles:
            n.roles += n.pending_roles
            n.pending_roles = []
        n.status = 'provisioned'
        n.progress = 0

    # here we replace deployment data if user redefined them
    serialized_cluster = deployment_serializers.serialize(
        task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False

    rpc_message = make_astute_message(
        task,
        'deploy',
        'deploy_resp',
        {
            'deployment_info': serialized_cluster
        }
    )
    db().commit()
    return rpc_message
Example 6: _serialize
def _serialize(self, cluster, nodes):
    if objects.Release.is_lcm_supported(cluster.release):
        return deployment_serializers.serialize_for_lcm(
            cluster, nodes, ignore_customized=True
        )
    graph = orchestrator_graph.AstuteGraph(cluster)
    return deployment_serializers.serialize(
        graph, cluster, nodes, ignore_customized=True)
Example 7: _serialize
def _serialize(self, cluster, nodes):
    if objects.Release.is_lcm_supported(cluster.release):
        serialized = deployment_serializers.serialize_for_lcm(
            cluster, nodes, ignore_customized=True
        )
    else:
        graph = orchestrator_graph.AstuteGraph(cluster)
        serialized = deployment_serializers.serialize(
            graph, cluster, nodes, ignore_customized=True)
    return _deployment_info_in_compatible_format(
        serialized, utils.parse_bool(web.input(split='0').split)
    )
Example 8: message
def message(cls, task, nodes):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

    nodes_ids = [n.id for n in nodes]
    for n in db().query(Node).filter_by(
            cluster=task.cluster).order_by(Node.id):
        if n.id in nodes_ids:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            # If receiver for some reason didn't update
            # node's status to provisioned when deployment
            # started, we should do it in nailgun
            if n.status in (NODE_STATUSES.deploying,):
                n.status = NODE_STATUSES.provisioned
                n.progress = 0
            db().add(n)
    db().flush()

    deployment_tasks = []
    orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
    orchestrator_graph.only_tasks(deployment_tasks)

    # serialized_cluster = deployment_serializers.serialize(
    #     orchestrator_graph, task.cluster, nodes)
    serialized_cluster = deployment_serializers.serialize(
        task.cluster, nodes)
    pre_deployment = plugins_serializers.pre_deployment_serialize(
        task.cluster, nodes)
    post_deployment = plugins_serializers.post_deployment_serialize(
        task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False

    rpc_message = make_astute_message(
        task,
        'deploy',
        'deploy_resp',
        {
            'deployment_info': serialized_cluster,
            'pre_deployment': pre_deployment,
            'post_deployment': post_deployment
        }
    )
    db().commit()
    return rpc_message
Example 9: test_deployment_serialization_ignore_customized_false
def test_deployment_serialization_ignore_customized_false(self, _):
    cluster = self._create_cluster_with_extensions(
        nodes_kwargs=[
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True},
        ]
    )

    data = [{"uid": n.uid} for n in cluster.nodes]
    expected_data = copy.deepcopy(data[1:])

    mserializer = mock.MagicMock()
    mserializer.return_value = mock.MagicMock()
    mserializer.return_value.serialize.return_value = data

    with mock.patch(
            'nailgun.orchestrator.deployment_serializers.'
            'get_serializer_for_cluster',
            return_value=mserializer):
        with mock.patch('nailgun.orchestrator.deployment_serializers.'
                        'fire_callback_on_deployment_data_serialization',
                        ) as mfire_callback:
            replaced_data = ["it's", "something"]
            with mock.patch.object(
                    cluster.nodes[0], 'replaced_deployment_info',
                    new_callable=mock.Mock(return_value=replaced_data)):
                graph = orchestrator_graph.AstuteGraph(cluster)
                deployment_serializers.serialize(
                    graph, cluster, cluster.nodes, ignore_customized=False)

    self.assertEqual(mfire_callback.call_args[0][0], expected_data)
    self.assertIs(mfire_callback.call_args[0][1], cluster)
    self.assertItemsEqual(
        mfire_callback.call_args[0][2], cluster.nodes[1:])
Example 10: message
def message(cls, task):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
    task.cluster.prepare_for_deployment()
    nodes = TaskHelper.nodes_to_deploy(task.cluster)
    nodes_ids = [n.id for n in nodes]

    for n in db().query(Node).filter_by(cluster=task.cluster).order_by(Node.id):
        # However, we must not pass nodes which are set to be deleted.
        if n.pending_deletion:
            continue
        if n.id in nodes_ids:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            # If receiver for some reason didn't update
            # node's status to provisioned when deployment
            # started, we should do it in nailgun
            if n.status in ("deploying",):
                n.status = "provisioned"
                n.progress = 0
            db().add(n)
    db().commit()

    # here we replace provisioning data if user redefined them
    serialized_cluster = task.cluster.replaced_deployment_info or deployment_serializers.serialize(task.cluster)

    # After serialization set pending_addition to False
    for node in db().query(Node).filter(Node.id.in_(nodes_ids)):
        node.pending_addition = False
    db().commit()

    return {
        "method": "deploy",
        "respond_to": "deploy_resp",
        "args": {"task_uuid": task.uuid, "deployment_info": serialized_cluster},
    }
Example 11: message
def message(cls, task, nodes):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

    nodes_ids = [n.id for n in nodes]
    for n in db().query(Node).filter_by(
            cluster=task.cluster).order_by(Node.id):
        if n.id in nodes_ids:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            # If receiver for some reason didn't update
            # node's status to provisioned when deployment
            # started, we should do it in nailgun
            if n.status in (NODE_STATUSES.deploying,):
                n.status = NODE_STATUSES.provisioned
                n.progress = 0
            db().add(n)
    db().flush()

    # here we replace deployment data if user redefined them
    serialized_cluster = deployment_serializers.serialize(
        task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False
    db().commit()

    return make_astute_message(
        'deploy',
        'deploy_resp',
        {
            'task_uuid': task.uuid,
            'deployment_info': serialized_cluster
        }
    )
Example 12: message
def message(cls, task, nodes):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
    TaskHelper.raise_if_node_offline(nodes)

    nodes_ids = [n.id for n in nodes]
    for n in db().query(Node).filter_by(
            cluster=task.cluster).order_by(Node.id):
        if n.id in nodes_ids:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            # If receiver for some reason didn't update
            # node's status to provisioned when deployment
            # started, we should do it in nailgun
            if n.status in ('deploying',):
                n.status = 'provisioned'
                n.progress = 0
            db().add(n)
    db().commit()

    # here we replace provisioning data if user redefined them
    serialized_cluster = task.cluster.replaced_deployment_info or \
        deployment_serializers.serialize(task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False
    db().commit()

    return {
        'method': 'deploy',
        'respond_to': 'deploy_resp',
        'args': {
            'task_uuid': task.uuid,
            'deployment_info': serialized_cluster}}
Example 13: test_disks_attrs
#......... part of the code omitted here .........
        {
            "model": "TOSHIBA MK1002TS",
            "name": "sda",
            "disk": "sda",
            "size": 1004886016
        },
    ]
    expected_node_volumes_hash = [
        {
            u'name': u'sda',
            u'extra': [],
            u'free_space': 330,
            u'volumes': [
                {
                    u'type': u'boot',
                    u'size': 300
                },
                {
                    u'mount': u'/boot',
                    u'type': u'partition',
                    u'file_system': u'ext2',
                    u'name': u'Boot',
                    u'size': 200
                },
                {
                    u'type': u'lvm_meta_pool',
                    u'size': 64
                },
                {
                    u'vg': u'os',
                    u'type': u'pv',
                    u'lvm_meta_size': 64,
                    u'size': 394
                },
                {
                    u'vg': u'vm',
                    u'type': u'pv',
                    u'lvm_meta_size': 0,
                    u'size': 0
                }
            ],
            u'type': u'disk',
            u'id': u'sda',
            u'bootable': True,
            u'size': 958
        },
        {
            u'_allocate_size': u'min',
            u'label': u'Base System',
            u'min_size': 19456,
            u'volumes': [
                {
                    u'mount': u'/',
                    u'size': -3766,
                    u'type': u'lv',
                    u'name': u'root',
                    u'file_system': u'ext4'
                },
                {
                    u'mount': u'swap',
                    u'size': 4096,
                    u'type': u'lv',
                    u'name': u'swap',
                    u'file_system': u'swap'
                }
            ],
            u'type': u'vg',
            u'id': u'os'
        },
        {
            u'_allocate_size': u'all',
            u'label': u'Virtual Storage',
            u'min_size': 5120,
            u'volumes': [
                {
                    u'mount': u'/var/lib/nova',
                    u'size': 0,
                    u'type': u'lv',
                    u'name': u'nova',
                    u'file_system': u'xfs'
                }
            ],
            u'type': u'vg',
            u'id': u'vm'
        }
    ]

    self.env.create_node(
        cluster_id=self.cluster_db.id,
        roles=['compute'],
        meta={"disks": disks},
    )

    serialized_for_astute = deployment_serializers.serialize(
        AstuteGraph(self.cluster_db),
        self.cluster_db,
        self.cluster_db.nodes)

    for node in serialized_for_astute['nodes']:
        self.assertIn("node_volumes", node)
        self.assertItemsEqual(
            expected_node_volumes_hash, node["node_volumes"])
Example 14: get_deployment_info
def get_deployment_info(cluster, nodes):
    return deployment_serializers.serialize(
        AstuteGraph(cluster), cluster, nodes)
Example 15: _serialize
def _serialize(self, cluster, nodes):
    return deployment_serializers.serialize(
        cluster, nodes, ignore_customized=True)
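To round off, here is a hypothetical helper that ties together the two recurring patterns seen above: when customization should be honored, the user-supplied replaced_deployment_info is preferred over a fresh serialization, much as Examples 4, 10 and 12 do by hand; otherwise the cluster is serialized from scratch through an AstuteGraph. This helper is not part of nailgun; its name, the import paths, and the control flow are assumptions made for illustration only.

from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator import orchestrator_graph  # import path assumed

def build_deployment_info(cluster, nodes, ignore_customized=False):
    # Hypothetical glue code, not taken from nailgun: prefer the deployment
    # data the user redefined on the cluster unless the caller asks to
    # ignore it, mirroring the replaced_deployment_info checks above.
    if not ignore_customized and getattr(cluster, 'replaced_deployment_info', None):
        return cluster.replaced_deployment_info
    graph = orchestrator_graph.AstuteGraph(cluster)
    return deployment_serializers.serialize(
        graph, cluster, nodes, ignore_customized=ignore_customized)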