本文整理汇总了Python中nailgun.network.manager.NetworkManager.update_interfaces_info方法的典型用法代码示例。如果您正苦于以下问题:Python NetworkManager.update_interfaces_info方法的具体用法?Python NetworkManager.update_interfaces_info怎么用?Python NetworkManager.update_interfaces_info使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 nailgun.network.manager.NetworkManager 的用法示例。
在下文中一共展示了NetworkManager.update_interfaces_info方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
# 需要导入模块: from nailgun.network.manager import NetworkManager [as 别名]
# 或者: from nailgun.network.manager.NetworkManager import update_interfaces_info [as 别名]
def run(self):
    """Finish the fake deletion task, then restore online nodes.

    After the parent thread logic runs, this reports task completion to
    the receiver and re-creates every restorable node in 'discover'
    state, regenerating its volumes and interface info and raising a
    discovery notification.
    """
    super(FakeDeletionThread, self).run()
    # Report completion back through the dynamically-selected
    # receiver callback.
    responder = getattr(NailgunReceiver, self.respond_to)
    responder(
        task_uuid=self.task_uuid,
        nodes=self.data['args']['nodes'],
        status='ready'
    )
    for restore_data in self.data['args'].get('nodes_to_restore', []):
        restored = Node(**restore_data)
        # An offline node was just deleted from the db and cannot be
        # re-created with status 'discover' -- skip it.
        if not restored.online:
            continue
        restored.status = 'discover'
        db().add(restored)
        db().commit()
        restored.attributes = NodeAttributes(node_id=restored.id)
        restored.attributes.volumes = \
            restored.volume_manager.gen_volumes_info()
        NetworkManager.update_interfaces_info(restored)
        db().commit()
        ram = round(restored.meta.get('ram') or 0, 1)
        cores = restored.meta.get('cores') or 'unknown'
        notifier.notify("discover",
                        "New node with %s CPU core(s) "
                        "and %s GB memory is discovered" %
                        (cores, ram), node_id=restored.id)
示例2: POST
# 需要导入模块: from nailgun.network.manager import NetworkManager [as 别名]
# 或者: from nailgun.network.manager.NetworkManager import update_interfaces_info [as 别名]
def POST(self):
    """Create a Node from validated request data.

    Persists the node, generates its volume layout (best-effort),
    wires network interfaces from the reported 'meta', emits a
    discovery notification and responds with HTTP 201 plus the
    rendered node.
    """
    data = self.checked_data()
    node = Node()
    # Copy validated fields onto the model; 'meta' gets special handling.
    for field, field_value in data.iteritems():
        if field == "meta":
            node.create_meta(field_value)
        else:
            setattr(node, field, field_value)
    node.name = "Untitled (%s)" % data['mac'][-5:]
    node.timestamp = datetime.now()
    self.db.add(node)
    self.db.commit()
    node.attributes = NodeAttributes()
    try:
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        if node.cluster:
            node.cluster.add_pending_changes(
                "disks",
                node_id=node.id
            )
    except Exception as exc:
        # Best-effort: a failed volume calculation is logged and
        # surfaced as a notification, but registration continues.
        msg = (
            u"Failed to generate volumes "
            "info for node '{0}': '{1}'"
        ).format(
            node.name or data.get("mac") or data.get("id"),
            str(exc) or "see logs for details"
        )
        logger.warning(traceback.format_exc())
        notifier.notify("error", msg, node_id=node.id)
    self.db.add(node)
    self.db.commit()
    net_mgr = NetworkManager()
    # Add interfaces for node from 'meta'.
    if node.meta and node.meta.get('interfaces'):
        net_mgr.update_interfaces_info(node.id)
    if node.cluster_id:
        net_mgr.allow_network_assignment_to_all_interfaces(node.id)
        net_mgr.assign_networks_to_main_interface(node.id)
    # RAM is reported in bytes; convert to GB (1024-based) for display.
    try:
        total_bytes = float(node.meta['memory']['total'])
        ram = str(round(total_bytes / 1073741824, 1))
    except (KeyError, TypeError, ValueError):
        ram = "unknown"
    cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
    notifier.notify("discover",
                    "New node with %s CPU core(s) "
                    "and %s GB memory is discovered" %
                    (cores, ram), node_id=node.id)
    body = json.dumps(
        NodeHandler.render(node),
        indent=4
    )
    raise web.webapi.created(body)
示例3: upload_fixture
# 需要导入模块: from nailgun.network.manager import NetworkManager [as 别名]
# 或者: from nailgun.network.manager.NetworkManager import update_interfaces_info [as 别名]
#......... part of the code is omitted here .........
# NOTE(review): the beginning of upload_fixture() is not shown in this
# excerpt. The visible part resolves each fixture object's model class,
# defers objects whose foreign keys cannot be resolved yet, then
# instantiates and persists them one by one.
"".join(map(lambda n: n.capitalize(), model_name.split("_")))]
).next()
except StopIteration:
raise Exception("Couldn't find model {0}".format(model_name))
obj['model'] = getattr(models, capitalize_model_name(model_name))
keys[obj['model'].__tablename__] = {}
# Check if it's already uploaded
obj_from_db = db.query(obj['model']).get(pk)
if obj_from_db:
logger.info("Fixture model '%s' with pk='%s' already"
" uploaded. Skipping", model_name, pk)
continue
queue.put(obj)
pending_objects = []
while True:
# Drain the queue; get_nowait() raising (queue empty) ends the loop.
# NOTE(review): bare 'except:' also swallows unrelated errors here.
try:
obj = queue.get_nowait()
except:
break
new_obj = obj['model']()
fk_fields = {}
for field, value in obj["fields"].iteritems():
f = getattr(obj['model'], field)
impl = f.impl
fk_model = None
# For relationship attributes, resolve the related model class:
# 'argument' may be a callable factory or the mapped class itself.
if hasattr(f.comparator.prop, "argument"):
if hasattr(f.comparator.prop.argument, "__call__"):
fk_model = f.comparator.prop.argument()
else:
fk_model = f.comparator.prop.argument.class_
if fk_model:
if value not in keys[fk_model.__tablename__]:
# Referenced object not uploaded yet: requeue this object
# once; if it is already pending, the FK is unresolvable.
if obj not in pending_objects:
queue.put(obj)
pending_objects.append(obj)
continue
else:
logger.error(
u"Can't resolve foreign key "
"'{0}' for object '{1}'".format(
field,
obj["model"]
)
)
break
else:
value = keys[fk_model.__tablename__][value].id
# Defer relationship assignment to the second pass below; set
# plain columns immediately (DateTime strings are parsed, a
# missing/empty value defaults to "now").
if isinstance(impl, orm.attributes.ScalarObjectAttributeImpl):
if value:
fk_fields[field] = (value, fk_model)
elif isinstance(impl, orm.attributes.CollectionAttributeImpl):
if value:
fk_fields[field] = (value, fk_model)
elif isinstance(
f.property.columns[0].type, sqlalchemy.types.DateTime
):
if value:
setattr(
new_obj,
field,
datetime.strptime(value, "%d-%m-%Y %H:%M:%S")
)
else:
setattr(
new_obj,
field,
datetime.now()
)
else:
setattr(new_obj, field, value)
# Second pass: attach resolved relations -- scalar refs are fetched
# by primary key, collections are appended element by element.
for field, data in fk_fields.iteritems():
if isinstance(data[0], int):
setattr(new_obj, field, db.query(data[1]).get(data[0]))
elif isinstance(data[0], list):
for v in data[0]:
getattr(new_obj, field).append(
db.query(data[1]).get(v)
)
db.add(new_obj)
db.commit()
keys[obj['model'].__tablename__][obj["pk"]] = new_obj
# UGLY HACK for testing
if new_obj.__class__.__name__ == 'Node':
new_obj.attributes = models.NodeAttributes()
db.commit()
new_obj.attributes.volumes = \
new_obj.volume_manager.gen_volumes_info()
network_manager = NetworkManager()
network_manager.update_interfaces_info(new_obj.id)
db.commit()
示例4: PUT
# 需要导入模块: from nailgun.network.manager import NetworkManager [as 别名]
# 或者: from nailgun.network.manager.NetworkManager import update_interfaces_info [as 别名]
def PUT(self):
""":returns: Collection of JSONized Node objects.
:http: * 200 (nodes are successfully updated)
* 400 (invalid nodes data specified)
"""
data = self.checked_data(
self.validator.validate_collection_update
)
network_manager = NetworkManager()
q = db().query(Node)
nodes_updated = []
for nd in data:
# 'is_agent' is a transient request flag and must not be written
# onto the model, so it is popped before the attribute loop below.
is_agent = nd.pop("is_agent") if "is_agent" in nd else False
node = None
# Look nodes up by MAC when present, otherwise by id.
if "mac" in nd:
node = q.filter_by(mac=nd["mac"]).first() \
or self.validator.validate_existent_node_mac_update(nd)
else:
node = q.get(nd["id"])
if is_agent:
# Any agent contact refreshes the timestamp and proves the
# node is reachable again.
node.timestamp = datetime.now()
if not node.online:
node.online = True
msg = u"Node '{0}' is back online".format(
node.human_readable_name)
logger.info(msg)
notifier.notify("discover", msg, node_id=node.id)
db().commit()
old_cluster_id = node.cluster_id
if nd.get("pending_roles") == [] and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
if "cluster_id" in nd:
# Detaching from a cluster also drops pending changes and roles.
if nd["cluster_id"] is None and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
node.roles = node.pending_roles = []
node.cluster_id = nd["cluster_id"]
for key, value in nd.iteritems():
if is_agent and (key, value) == ("status", "discover") \
and node.status == "provisioning":
# We don't update provisioning back to discover
logger.debug(
"Node is already provisioning - "
"status not updated by agent"
)
continue
if key == "meta":
node.update_meta(value)
else:
setattr(node, key, value)
db().commit()
if not node.attributes:
node.attributes = NodeAttributes()
db().commit()
if not node.attributes.volumes:
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
db().commit()
if not node.status in ('provisioning', 'deploying'):
# Regenerate volumes when the reported disk count diverges from
# the stored layout, or roles/cluster membership were touched.
variants = (
"disks" in node.meta and
len(node.meta["disks"]) != len(
filter(
lambda d: d["type"] == "disk",
node.attributes.volumes
)
),
"roles" in nd,
"cluster_id" in nd
)
if any(variants):
try:
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
"disks",
node_id=node.id
)
except Exception as exc:
# Best-effort: a volume-generation failure is reported but
# does not abort the rest of the collection update.
msg = (
"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().commit()
if is_agent:
# Update node's NICs.
if node.meta and 'interfaces' in node.meta:
# we won't update interfaces if data is invalid
network_manager.update_interfaces_info(node.id)
nodes_updated.append(node)
#......... part of the code is omitted here .........
# NOTE(review): the remainder of PUT (including whatever consumes
# old_cluster_id and the final response rendering) is not shown in
# this excerpt.
示例5: POST
# 需要导入模块: from nailgun.network.manager import NetworkManager [as 别名]
# 或者: from nailgun.network.manager.NetworkManager import update_interfaces_info [as 别名]
def POST(self):
""":returns: JSONized Node object.
:http: * 201 (cluster successfully created)
* 400 (invalid node data specified)
* 403 (node has incorrect status)
* 409 (node with such parameters already exists)
"""
data = self.checked_data()
# Only nodes reporting the bootstrap 'discover' status may register.
if data.get("status", "") != "discover":
error = web.forbidden()
error.data = "Only bootstrap nodes are allowed to be registered."
msg = u"Node with mac '{0}' was not created, " \
u"because request status is '{1}'."\
.format(data[u'mac'], data[u'status'])
logger.warning(msg)
raise error
node = Node()
if "cluster_id" in data:
# FIXME(vk): this part is needed only for tests. Normally,
# nodes are created only by agent and POST requests don't contain
# cluster_id, but our integration and unit tests widely use it.
# We need to assign cluster first
cluster_id = data.pop("cluster_id")
if cluster_id:
# NOTE(review): 'db.query' here vs. 'db().add' below -- confirm
# which calling convention the module's 'db' actually supports.
node.cluster = db.query(Cluster).get(cluster_id)
for key, value in data.iteritems():
if key == "id":
# The primary key is assigned by the database, never the client.
continue
elif key == "meta":
node.create_meta(value)
else:
setattr(node, key, value)
node.name = "Untitled (%s)" % data['mac'][-5:]
node.timestamp = datetime.now()
db().add(node)
db().commit()
node.attributes = NodeAttributes()
try:
node.attributes.volumes = node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
"disks",
node_id=node.id
)
except Exception as exc:
# Best-effort: volume-generation failure is reported via
# notification but does not abort the registration.
msg = (
u"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().add(node)
db().commit()
network_manager = NetworkManager()
# Add interfaces for node from 'meta'.
if node.meta and node.meta.get('interfaces'):
network_manager.update_interfaces_info(node.id)
if node.cluster_id:
network_manager.allow_network_assignment_to_all_interfaces(node)
network_manager.assign_networks_to_main_interface(node)
try:
# we use multiplier of 1024 because there are no problems here
# with unfair size calculation
ram = str(round(float(
node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
except Exception as exc:
logger.warning(traceback.format_exc())
ram = "unknown RAM"
try:
# we use multiplier of 1000 because disk vendors specify HDD size
# in terms of decimal capacity. Sources:
# http://knowledge.seagate.com/articles/en_US/FAQ/172191en
# http://physics.nist.gov/cuu/Units/binary.html
# NOTE(review): sum(...) / 1000000000 divides integers *before*
# float() runs, truncating fractional GB on Python 2 -- the
# round(..., 1) below then has nothing to round.
hd_size = round(float(
sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
# if HDD > 100 GB we show it's size in TB
if hd_size > 100:
hd_size = str(hd_size / 1000) + " TB HDD"
else:
hd_size = str(hd_size) + " GB HDD"
except Exception as exc:
logger.warning(traceback.format_exc())
hd_size = "unknown HDD"
cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
notifier.notify(
"discover",
"New node is discovered: %s CPUs / %s / %s " %
(cores, ram, hd_size),
#......... part of the code is omitted here .........
# NOTE(review): the tail of this POST (the notify call's remaining
# arguments and the 201 response) is not shown in this excerpt.
示例6: POST
# 需要导入模块: from nailgun.network.manager import NetworkManager [as 别名]
# 或者: from nailgun.network.manager.NetworkManager import update_interfaces_info [as 别名]
def POST(self):
    """Register a new node from the validated request payload.

    Creates and persists the Node, generates its volume layout
    (best-effort), wires network interfaces from the reported 'meta',
    emits a "discover" notification summarising CPU / RAM / disk
    capacity, and responds with HTTP 201 plus the rendered node.
    """
    data = self.checked_data()
    node = Node()
    for key, value in data.iteritems():
        if key == "id":
            # The primary key is assigned by the database, never the client.
            continue
        elif key == "meta":
            node.create_meta(value)
        else:
            setattr(node, key, value)
    node.name = "Untitled (%s)" % data['mac'][-5:]
    node.timestamp = datetime.now()
    db().add(node)
    db().commit()
    node.attributes = NodeAttributes()
    try:
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        if node.cluster:
            node.cluster.add_pending_changes(
                "disks",
                node_id=node.id
            )
    except Exception as exc:
        # Best-effort: a failed volume calculation must not abort the
        # registration -- log it and raise a UI notification instead.
        msg = (
            u"Failed to generate volumes "
            "info for node '{0}': '{1}'"
        ).format(
            node.name or data.get("mac") or data.get("id"),
            str(exc) or "see logs for details"
        )
        logger.warning(traceback.format_exc())
        notifier.notify("error", msg, node_id=node.id)
    db().add(node)
    db().commit()
    network_manager = NetworkManager()
    # Add interfaces for node from 'meta'.
    if node.meta and node.meta.get('interfaces'):
        network_manager.update_interfaces_info(node.id)
    if node.cluster_id:
        network_manager.allow_network_assignment_to_all_interfaces(node.id)
        network_manager.assign_networks_to_main_interface(node.id)
    try:
        # we use multiplier of 1024 because there are no problems here
        # with unfair size calculation
        ram = str(round(float(
            node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
    except Exception:
        logger.warning(traceback.format_exc())
        ram = "unknown RAM"
    try:
        # we use multiplier of 1000 because disk vendors specify HDD size
        # in terms of decimal capacity. Sources:
        # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
        # http://physics.nist.gov/cuu/Units/binary.html
        # BUGFIX: convert to float *before* dividing -- the original
        # float(sum(...) / 1000000000) performed Python 2 integer
        # division first, truncating fractional GB so round(..., 1)
        # never had anything to round.
        hd_size = round(float(
            sum([d["size"] for d in node.meta["disks"]])) / 1000000000, 1)
        # if HDD > 100 GB we show it's size in TB
        if hd_size > 100:
            hd_size = str(hd_size / 1000) + " TB HDD"
        else:
            hd_size = str(hd_size) + " GB HDD"
    except Exception:
        logger.warning(traceback.format_exc())
        hd_size = "unknown HDD"
    cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
    notifier.notify(
        "discover",
        "New node is discovered: %s CPUs / %s / %s " %
        (cores, ram, hd_size),
        node_id=node.id
    )
    raise web.webapi.created(json.dumps(
        NodeHandler.render(node),
        indent=4
    ))