本文整理汇总了Python中healthnmon.resourcemodel.healthnmonResourceModel.VmHost.get_id方法的典型用法代码示例。如果您正苦于以下问题:Python VmHost.get_id方法的具体用法?Python VmHost.get_id怎么用?Python VmHost.get_id使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类healthnmon.resourcemodel.healthnmonResourceModel.VmHost的用法示例。
在下文中一共展示了VmHost.get_id方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_host_removed_event_none_host
# 需要导入模块: from healthnmon.resourcemodel.healthnmonResourceModel import VmHost [as 别名]
# 或者: from healthnmon.resourcemodel.healthnmonResourceModel.VmHost import get_id [as 别名]
def test_host_removed_event_none_host(self):
    """Verify a cached host that vanished from the DB is purged on refresh.

    Every DB accessor the refresh path touches is stubbed with mox so
    that only the host query returns data; the inventory cache is then
    seeded with a single 'compute1' entry.  After
    ``InventoryManager._refresh_from_db(None)`` runs, the host must be
    removed from the compute inventory and exactly one (deletion)
    notification must have been emitted.
    """
    deleted_host = VmHost()
    deleted_host.set_id('compute1')
    deleted_host.set_name('compute1')
    # Record expectations: the host table still lists the host, but all
    # dependent resources (VMs, volumes, subnets, nova compute nodes)
    # come back empty, so the refresh treats the host as removed.
    self.mox.StubOutWithMock(api, 'vm_host_get_all')
    api.vm_host_get_all(mox.IgnoreArg()).AndReturn([deleted_host])
    self.mox.StubOutWithMock(api, 'vm_get_all')
    api.vm_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'storage_volume_get_all')
    api.storage_volume_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'subnet_get_all')
    api.subnet_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(nova_db, 'compute_node_get_all')
    nova_db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
    self.mox.StubOutWithMock(api, 'vm_host_delete_by_ids')
    api.vm_host_delete_by_ids(
        mox.IgnoreArg(),
        mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    self.mox.StubOutWithMock(
        InventoryCacheManager, 'get_compute_conn_driver')
    InventoryCacheManager.get_compute_conn_driver(
        'compute1',
        Constants.VmHost).AndReturn(fake.get_connection())
    self.mox.ReplayAll()
    # Seed the inventory cache with a single compute entry for the host.
    compute_service = dict(host='host1')
    compute = dict(id='compute1', hypervisor_type='fake',
                   service=compute_service)
    rm_context = \
        rmcontext.ComputeRMContext(rmType=compute['hypervisor_type'],
                                   rmIpAddress=compute_service['host'],
                                   rmUserName='ubuntu164',
                                   rmPassword='password')
    InventoryCacheManager.get_all_compute_inventory().clear()
    InventoryCacheManager.get_all_compute_inventory()['compute1'] = \
        ComputeInventory(rm_context)
    InventoryCacheManager.get_compute_inventory(
        'compute1').update_compute_info(rm_context, deleted_host)
    # assertEqual replaces the deprecated unittest alias assertEquals.
    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 1)
    InventoryCacheManager.get_inventory_cache(
    )[Constants.VmHost][deleted_host.get_id()] = None
    inv_manager = InventoryManager()
    inv_manager._refresh_from_db(None)
    self.assertEqual(
        len(InventoryCacheManager.get_all_compute_inventory()), 0)
    self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
示例2: test_timestamp_columns
# 需要导入模块: from healthnmon.resourcemodel.healthnmonResourceModel import VmHost [as 别名]
# 或者: from healthnmon.resourcemodel.healthnmonResourceModel.VmHost import get_id [as 别名]
def test_timestamp_columns(self):
    """Exercise the timestamp columns of VmHost and its children.

    Saves a VmHost with one virtual switch and one port group, then
    checks in four phases that the DB layer maintains:
    * createEpoch on first save,
    * lastModifiedEpoch (with createEpoch unchanged) after an update,
    * createEpoch on newly added child objects vs. lastModifiedEpoch
      on pre-existing ones,
    * deletedEpoch and the deleted flag after delete.

    NOTE(review): ``self.assert_`` and ``assertEquals`` are deprecated
    unittest aliases (``assertTrue`` / ``assertEqual``); left unchanged
    here because the method continues beyond this excerpt.
    """
    vmhost = VmHost()
    vmhost.set_id('VH1')
    virSw1 = VirtualSwitch()
    virSw1.set_id('VS1_VH1')
    portGrp1 = PortGroup()
    portGrp1.set_id('PG1_VH1')
    virSw1.add_portGroups(portGrp1)
    vmhost.add_virtualSwitches(virSw1)
    vmhost.add_portGroups(portGrp1)
    # Check for createEpoch: the stored epoch must fall inside the
    # [before, after] window bracketing the save call.
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost)
    epoch_after = utils.get_current_epoch_ms()
    vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
        self.admin_context, [vmhost.get_id()])[0]
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, vmhost_queried.get_createEpoch()))
    # createEpoch must also be set on every nested switch and port group.
    for virSw in vmhost_queried.get_virtualSwitches():
        self.assert_(test_utils.is_timestamp_between(
            epoch_before, epoch_after, virSw.get_createEpoch()))
        for pg in virSw.get_portGroups():
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, pg.get_createEpoch()))
    # Check for lastModifiedEpoch after modifying host.
    # Timestamps are cleared first so the save path has to regenerate
    # them — presumably vm_host_save stamps unset fields; confirm in
    # the API implementation.
    vmhost_modified = vmhost_queried
    test_utils.unset_timestamp_fields(vmhost_modified)
    vmhost_modified.set_name('changed_name')
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
    epoch_after = utils.get_current_epoch_ms()
    vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
        self.admin_context, [vmhost.get_id()])[0]
    # createEpoch must survive the update unchanged.
    self.assert_(vmhost_modified.get_createEpoch(
    ) == vmhost_queried.get_createEpoch())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, vmhost_queried.get_lastModifiedEpoch()))
    for virSw in vmhost_queried.get_virtualSwitches():
        self.assert_(test_utils.is_timestamp_between(
            epoch_before, epoch_after, virSw.get_lastModifiedEpoch()))
        for pg in virSw.get_portGroups():
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, pg.get_lastModifiedEpoch()))
    # Check for createdEpoch after adding switch and portgroup to host:
    # the new children get createEpoch, the pre-existing ones get
    # lastModifiedEpoch.
    vmhost_modified = vmhost_queried
    test_utils.unset_timestamp_fields(vmhost_modified)
    virSw2 = VirtualSwitch()
    virSw2.set_id('VS2_VH1')
    portGrp2 = PortGroup()
    portGrp2.set_id('PG2_VH1')
    virSw2.add_portGroups(portGrp2)
    vmhost_modified.add_virtualSwitches(virSw2)
    vmhost_modified.add_portGroups(portGrp2)
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_save(self.admin_context, vmhost_modified)
    epoch_after = utils.get_current_epoch_ms()
    vmhost_queried = healthnmon_db_api.vm_host_get_by_ids(
        self.admin_context, [vmhost.get_id()])[0]
    self.assert_(vmhost_modified.get_createEpoch(
    ) == vmhost_queried.get_createEpoch())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, vmhost_queried.get_lastModifiedEpoch()))
    for virSw in vmhost_queried.get_virtualSwitches():
        if virSw.get_id() == virSw2.get_id():
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, virSw.get_createEpoch()))
        else:
            self.assert_(test_utils.is_timestamp_between(
                epoch_before, epoch_after, virSw.get_lastModifiedEpoch()))
        for pg in virSw.get_portGroups():
            if pg.get_id() == portGrp2.get_id():
                self.assert_(test_utils.is_timestamp_between(
                    epoch_before, epoch_after, pg.get_createEpoch()))
            else:
                self.assert_(test_utils.is_timestamp_between(
                    epoch_before, epoch_after, pg.get_lastModifiedEpoch()))
    # Check for deletedEpoch: soft delete must set both the deleted flag
    # and a deletedEpoch inside the bracketing window, on the host and
    # on its virtual switches.
    epoch_before = utils.get_current_epoch_ms()
    healthnmon_db_api.vm_host_delete_by_ids(
        self.admin_context, [vmhost_queried.get_id()])
    epoch_after = utils.get_current_epoch_ms()
    deleted_host = healthnmon_db_api.vm_host_get_all_by_filters(
        self.admin_context,
        {"id": vmhost_queried.get_id()}, None, None)[0]
    self.assertTrue(deleted_host.get_deleted())
    self.assert_(test_utils.is_timestamp_between(
        epoch_before, epoch_after, deleted_host.get_deletedEpoch()))
    deleted_switches = healthnmon_db_api.\
        virtual_switch_get_all_by_filters(self.admin_context,
                                          {"id": (virSw1.get_id(),
                                                  virSw2.get_id())},
                                          None, None)
    for deleted_switch in deleted_switches:
        self.assertTrue(deleted_switch.get_deleted())
        self.assert_(test_utils.is_timestamp_between(
            epoch_before, epoch_after, deleted_switch.get_deletedEpoch()))
#.........这里部分代码省略.........