This article collects typical usage examples of the Python method nailgun.network.manager.NetworkManager.allow_network_assignment_to_all_interfaces. If you have been wondering how to use NetworkManager.allow_network_assignment_to_all_interfaces in practice, the curated code examples below may help. You can also look further into the containing class, nailgun.network.manager.NetworkManager,
for more usage examples.
The following presents 9 code examples of NetworkManager.allow_network_assignment_to_all_interfaces, sorted by popularity by default.
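Before the examples, here is a minimal sketch of the pattern that recurs in all of them: when a node joins a cluster, allow_network_assignment_to_all_interfaces is called to permit network assignment on every NIC, followed by assign_networks_to_main_interface; when a node leaves a cluster, the assignments and permissions are cleared. The helper below is hypothetical and only illustrates this call order under those assumptions; the `from nailgun.db import db` import path is an assumption, and depending on the nailgun version these methods accept either a Node object or a node id (both variants appear in the examples below).

# Hypothetical helper mirroring the call order used by the handlers below.
# Assumes a configured nailgun environment (database session, Node/Cluster models).
from nailgun.db import db  # assumed import path; adjust for your nailgun version
from nailgun.network.manager import NetworkManager

def move_node_to_cluster(node, cluster):
    """Attach `node` to `cluster`, resetting its network configuration."""
    network_manager = NetworkManager()
    if node.cluster_id:
        # Leaving the previous cluster: drop current assignments and permissions.
        network_manager.clear_assigned_networks(node.id)
        network_manager.clear_all_allowed_networks(node.id)
    cluster.nodes.append(node)
    db().commit()
    # Joining the new cluster: allow every interface to carry networks,
    # then put the cluster's networks on the node's main interface.
    network_manager.allow_network_assignment_to_all_interfaces(node.id)
    network_manager.assign_networks_to_main_interface(node.id)
    db().commit()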
Example 1: PUT
# Required import: from nailgun.network.manager import NetworkManager [as alias]
# Or: from nailgun.network.manager.NetworkManager import allow_network_assignment_to_all_interfaces [as alias]
def PUT(self, cluster_id):
cluster = self.get_object_or_404(Cluster, cluster_id)
data = self.checked_data()
network_manager = NetworkManager()
for key, value in data.iteritems():
if key == "nodes":
# TODO: separate nodes for deletion and addition by set().
new_nodes = db().query(Node).filter(
Node.id.in_(value)
)
nodes_to_remove = [n for n in cluster.nodes
if n not in new_nodes]
nodes_to_add = [n for n in new_nodes
if n not in cluster.nodes]
for node in nodes_to_add:
if not node.online:
raise web.badrequest(
"Can not add offline node to cluster")
map(cluster.nodes.remove, nodes_to_remove)
map(cluster.nodes.append, nodes_to_add)
for node in nodes_to_remove:
network_manager.clear_assigned_networks(node.id)
network_manager.clear_all_allowed_networks(node.id)
for node in nodes_to_add:
network_manager.allow_network_assignment_to_all_interfaces(
node.id
)
network_manager.assign_networks_to_main_interface(node.id)
else:
setattr(cluster, key, value)
db().commit()
return self.render(cluster)
Example 2: POST
# Required import: from nailgun.network.manager import NetworkManager [as alias]
# Or: from nailgun.network.manager.NetworkManager import allow_network_assignment_to_all_interfaces [as alias]
def POST(self):
"""
:returns: JSONized Cluster object.
:http: * 201 (cluster successfully created)
* 400 (invalid cluster data specified)
* 409 (cluster with such parameters already exists)
"""
# It's used for cluster creation only.
data = self.checked_data()
cluster = Cluster()
cluster.release = db().query(Release).get(data["release"])
# TODO: use fields
for field in ('name', 'mode', 'net_manager'):
if data.get(field):
setattr(cluster, field, data.get(field))
db().add(cluster)
db().commit()
attributes = Attributes(
editable=cluster.release.attributes_metadata.get("editable"),
generated=cluster.release.attributes_metadata.get("generated"),
cluster=cluster
)
attributes.generate_fields()
netmanager = NetworkManager()
try:
netmanager.create_network_groups(cluster.id)
cluster.add_pending_changes("attributes")
cluster.add_pending_changes("networks")
if 'nodes' in data and data['nodes']:
nodes = db().query(Node).filter(
Node.id.in_(data['nodes'])
).all()
map(cluster.nodes.append, nodes)
db().commit()
for node in nodes:
netmanager.allow_network_assignment_to_all_interfaces(
node.id
)
netmanager.assign_networks_to_main_interface(node.id)
raise web.webapi.created(json.dumps(
ClusterHandler.render(cluster),
indent=4
))
except (
errors.OutOfVLANs,
errors.OutOfIPs,
errors.NoSuitableCIDR
) as e:
# The cluster was created in this request,
# so there is no need to use ClusterDeletionManager.
# All relations will be cascade-deleted automatically.
# TODO: investigate transactions
db().delete(cluster)
raise web.badrequest(e.message)
Example 3: POST
# Required import: from nailgun.network.manager import NetworkManager [as alias]
# Or: from nailgun.network.manager.NetworkManager import allow_network_assignment_to_all_interfaces [as alias]
def POST(self):
data = self.checked_data()
node = Node()
for key, value in data.iteritems():
if key == "meta":
node.create_meta(value)
else:
setattr(node, key, value)
node.name = "Untitled (%s)" % data['mac'][-5:]
node.timestamp = datetime.now()
self.db.add(node)
self.db.commit()
node.attributes = NodeAttributes()
try:
node.attributes.volumes = node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
"disks",
node_id=node.id
)
except Exception as exc:
msg = (
u"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
self.db.add(node)
self.db.commit()
network_manager = NetworkManager()
# Add interfaces for node from 'meta'.
if node.meta and node.meta.get('interfaces'):
network_manager.update_interfaces_info(node.id)
if node.cluster_id:
network_manager.allow_network_assignment_to_all_interfaces(node.id)
network_manager.assign_networks_to_main_interface(node.id)
try:
ram = str(round(float(
node.meta['memory']['total']) / 1073741824, 1))
except (KeyError, TypeError, ValueError):
ram = "unknown"
cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
notifier.notify("discover",
"New node with %s CPU core(s) "
"and %s GB memory is discovered" %
(cores, ram), node_id=node.id)
raise web.webapi.created(json.dumps(
NodeHandler.render(node),
indent=4
))
Example 4: PUT
# Required import: from nailgun.network.manager import NetworkManager [as alias]
# Or: from nailgun.network.manager.NetworkManager import allow_network_assignment_to_all_interfaces [as alias]
def PUT(self, node_id):
""":returns: JSONized Node object.
:http: * 200 (OK)
* 400 (invalid node data specified)
* 404 (node not found in db)
"""
node = self.get_object_or_404(Node, node_id)
if not node.attributes:
node.attributes = NodeAttributes(node_id=node.id)
data = self.checked_data(self.validator.validate_update)
network_manager = NetworkManager()
old_cluster_id = node.cluster_id
if data.get("pending_roles") == [] and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
if "cluster_id" in data:
if data["cluster_id"] is None and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
node.roles = node.pending_roles = []
node.cluster_id = data["cluster_id"]
if node.cluster_id != old_cluster_id:
if old_cluster_id:
network_manager.clear_assigned_networks(node)
network_manager.clear_all_allowed_networks(node.id)
if node.cluster_id:
network_manager.assign_networks_by_default(node)
network_manager.allow_network_assignment_to_all_interfaces(node)
regenerate_volumes = any(
(
"roles" in data and set(data["roles"]) != set(node.roles),
"pending_roles" in data and set(data["pending_roles"]) != set(node.pending_roles),
node.cluster_id != old_cluster_id,
)
)
for key, value in data.iteritems():
# we don't allow updating id explicitly,
# and cluster_id has already been updated before the other fields
if key in ("id", "cluster_id"):
continue
setattr(node, key, value)
if not node.status in ("provisioning", "deploying") and regenerate_volumes:
try:
node.attributes.volumes = node.volume_manager.gen_volumes_info()
except Exception as exc:
msg = (u"Failed to generate volumes " "info for node '{0}': '{1}'").format(
node.name or data.get("mac") or data.get("id"), str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().commit()
return self.render(node)
Example 5: PUT
# Required import: from nailgun.network.manager import NetworkManager [as alias]
# Or: from nailgun.network.manager.NetworkManager import allow_network_assignment_to_all_interfaces [as alias]
def PUT(self, node_id):
node = self.get_object_or_404(Node, node_id)
if not node.attributes:
node.attributes = NodeAttributes(node_id=node.id)
data = self.checked_data(self.validator.validate_update)
network_manager = NetworkManager()
for key, value in data.iteritems():
# we don't allow updating id explicitly
if key == "id":
continue
setattr(node, key, value)
if key == 'cluster_id':
if value:
network_manager.allow_network_assignment_to_all_interfaces(
node.id
)
network_manager.assign_networks_to_main_interface(node.id)
else:
network_manager.clear_assigned_networks(node.id)
network_manager.clear_all_allowed_networks(node.id)
if not node.status in ('provisioning', 'deploying') \
and "role" in data or "cluster_id" in data:
try:
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
except Exception as exc:
msg = (
u"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().commit()
return self.render(node)
Example 6: PUT
# Required import: from nailgun.network.manager import NetworkManager [as alias]
# Or: from nailgun.network.manager.NetworkManager import allow_network_assignment_to_all_interfaces [as alias]
def PUT(self, cluster_id):
""":returns: JSONized Cluster object.
:http: * 200 (OK)
* 400 (invalid cluster data specified)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
data = self.checked_data(cluster_id=cluster_id)
network_manager = NetworkManager()
for key, value in data.iteritems():
if key == "nodes":
# TODO(NAME): separate nodes
# for deletion and addition by set().
new_nodes = db().query(Node).filter(
Node.id.in_(value)
)
nodes_to_remove = [n for n in cluster.nodes
if n not in new_nodes]
nodes_to_add = [n for n in new_nodes
if n not in cluster.nodes]
for node in nodes_to_add:
if not node.online:
raise web.badrequest(
"Can not add offline node to cluster")
map(cluster.nodes.remove, nodes_to_remove)
map(cluster.nodes.append, nodes_to_add)
for node in nodes_to_remove:
network_manager.clear_assigned_networks(node)
network_manager.clear_all_allowed_networks(node.id)
for node in nodes_to_add:
network_manager.allow_network_assignment_to_all_interfaces(
node)
network_manager.assign_networks_by_default(node)
else:
setattr(cluster, key, value)
db().commit()
return self.render(cluster)
Example 7: PUT
# Required import: from nailgun.network.manager import NetworkManager [as alias]
# Or: from nailgun.network.manager.NetworkManager import allow_network_assignment_to_all_interfaces [as alias]
#......... part of the code is omitted here .........
or self.validator.validate_existent_node_mac_update(nd)
else:
node = q.get(nd["id"])
if is_agent:
node.timestamp = datetime.now()
if not node.online:
node.online = True
msg = u"Node '{0}' is back online".format(
node.human_readable_name)
logger.info(msg)
notifier.notify("discover", msg, node_id=node.id)
db().commit()
old_cluster_id = node.cluster_id
if nd.get("pending_roles") == [] and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
if "cluster_id" in nd:
if nd["cluster_id"] is None and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
node.roles = node.pending_roles = []
node.cluster_id = nd["cluster_id"]
for key, value in nd.iteritems():
if is_agent and (key, value) == ("status", "discover") \
and node.status == "provisioning":
# We don't update provisioning back to discover
logger.debug(
"Node is already provisioning - "
"status not updated by agent"
)
continue
if key == "meta":
node.update_meta(value)
else:
setattr(node, key, value)
db().commit()
if not node.attributes:
node.attributes = NodeAttributes()
db().commit()
if not node.attributes.volumes:
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
db().commit()
if not node.status in ('provisioning', 'deploying'):
variants = (
"disks" in node.meta and
len(node.meta["disks"]) != len(
filter(
lambda d: d["type"] == "disk",
node.attributes.volumes
)
),
"roles" in nd,
"cluster_id" in nd
)
if any(variants):
try:
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
"disks",
node_id=node.id
)
except Exception as exc:
msg = (
"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().commit()
if is_agent:
# Update node's NICs.
if node.meta and 'interfaces' in node.meta:
# we won't update interfaces if data is invalid
network_manager.update_interfaces_info(node.id)
nodes_updated.append(node)
db().commit()
if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
if old_cluster_id:
network_manager.clear_assigned_networks(node)
network_manager.clear_all_allowed_networks(node.id)
if nd['cluster_id']:
network_manager.allow_network_assignment_to_all_interfaces(
node)
network_manager.assign_networks_to_main_interface(node)
# we need to eager-load everything that is used in render
nodes = db().query(Node).options(
joinedload('cluster'),
joinedload('interfaces'),
joinedload('interfaces.assigned_networks')).\
filter(Node.id.in_([n.id for n in nodes_updated])).all()
return self.render(nodes)
Example 8: POST
# Required import: from nailgun.network.manager import NetworkManager [as alias]
# Or: from nailgun.network.manager.NetworkManager import allow_network_assignment_to_all_interfaces [as alias]
def POST(self):
""":returns: JSONized Node object.
:http: * 201 (node successfully created)
* 400 (invalid node data specified)
* 403 (node has incorrect status)
* 409 (node with such parameters already exists)
"""
data = self.checked_data()
if data.get("status", "") != "discover":
error = web.forbidden()
error.data = "Only bootstrap nodes are allowed to be registered."
msg = u"Node with mac '{0}' was not created, " \
u"because request status is '{1}'."\
.format(data[u'mac'], data[u'status'])
logger.warning(msg)
raise error
node = Node()
if "cluster_id" in data:
# FIXME(vk): this part is needed only for tests. Normally,
# nodes are created only by the agent and POST requests don't contain
# cluster_id, but our integration and unit tests use it widely.
# We need to assign the cluster first
cluster_id = data.pop("cluster_id")
if cluster_id:
node.cluster = db.query(Cluster).get(cluster_id)
for key, value in data.iteritems():
if key == "id":
continue
elif key == "meta":
node.create_meta(value)
else:
setattr(node, key, value)
node.name = "Untitled (%s)" % data['mac'][-5:]
node.timestamp = datetime.now()
db().add(node)
db().commit()
node.attributes = NodeAttributes()
try:
node.attributes.volumes = node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
"disks",
node_id=node.id
)
except Exception as exc:
msg = (
u"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().add(node)
db().commit()
network_manager = NetworkManager()
# Add interfaces for node from 'meta'.
if node.meta and node.meta.get('interfaces'):
network_manager.update_interfaces_info(node.id)
if node.cluster_id:
network_manager.allow_network_assignment_to_all_interfaces(node)
network_manager.assign_networks_to_main_interface(node)
try:
# we use a multiplier of 1024 because there are no problems here
# with unfair size calculation
ram = str(round(float(
node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
except Exception as exc:
logger.warning(traceback.format_exc())
ram = "unknown RAM"
try:
# we use a multiplier of 1000 because disk vendors specify HDD size
# in terms of decimal capacity. Sources:
# http://knowledge.seagate.com/articles/en_US/FAQ/172191en
# http://physics.nist.gov/cuu/Units/binary.html
hd_size = round(float(
sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
# if the HDD is > 100 GB we show its size in TB
if hd_size > 100:
hd_size = str(hd_size / 1000) + " TB HDD"
else:
hd_size = str(hd_size) + " GB HDD"
except Exception as exc:
logger.warning(traceback.format_exc())
hd_size = "unknown HDD"
cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
notifier.notify(
"discover",
"New node is discovered: %s CPUs / %s / %s " %
(cores, ram, hd_size),
#......... part of the code is omitted here .........
Example 9: POST
# Required import: from nailgun.network.manager import NetworkManager [as alias]
# Or: from nailgun.network.manager.NetworkManager import allow_network_assignment_to_all_interfaces [as alias]
def POST(self):
data = self.checked_data()
node = Node()
for key, value in data.iteritems():
if key == "id":
continue
elif key == "meta":
node.create_meta(value)
else:
setattr(node, key, value)
node.name = "Untitled (%s)" % data['mac'][-5:]
node.timestamp = datetime.now()
db().add(node)
db().commit()
node.attributes = NodeAttributes()
try:
node.attributes.volumes = node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
"disks",
node_id=node.id
)
except Exception as exc:
msg = (
u"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().add(node)
db().commit()
network_manager = NetworkManager()
# Add interfaces for node from 'meta'.
if node.meta and node.meta.get('interfaces'):
network_manager.update_interfaces_info(node.id)
if node.cluster_id:
network_manager.allow_network_assignment_to_all_interfaces(node.id)
network_manager.assign_networks_to_main_interface(node.id)
try:
# we use a multiplier of 1024 because there are no problems here
# with unfair size calculation
ram = str(round(float(
node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
except Exception as exc:
logger.warning(traceback.format_exc())
ram = "unknown RAM"
try:
# we use a multiplier of 1000 because disk vendors specify HDD size
# in terms of decimal capacity. Sources:
# http://knowledge.seagate.com/articles/en_US/FAQ/172191en
# http://physics.nist.gov/cuu/Units/binary.html
hd_size = round(float(
sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
# if the HDD is > 100 GB we show its size in TB
if hd_size > 100:
hd_size = str(hd_size / 1000) + " TB HDD"
else:
hd_size = str(hd_size) + " GB HDD"
except Exception as exc:
logger.warning(traceback.format_exc())
hd_size = "unknown HDD"
cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
notifier.notify(
"discover",
"New node is discovered: %s CPUs / %s / %s " %
(cores, ram, hd_size),
node_id=node.id
)
raise web.webapi.created(json.dumps(
NodeHandler.render(node),
indent=4
))