This article collects typical usage examples of the check_ping_from_vm function from the Python module mos_tests.functions.network_checks. If you are wondering what check_ping_from_vm does, how to use it, or what calling it looks like in practice, the curated examples below should help.
The following 15 code examples of check_ping_from_vm are shown, sorted by popularity by default.
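Before the examples, here is a minimal sketch of how the function is typically called. This is not the function's definition; the signature is inferred from the call sites collected below, and env, os_conn, vm and keypair stand for objects that the surrounding test fixtures would provide.

from mos_tests.functions import network_checks

# Simplest form seen in the examples below: ping the default target
# (apparently 8.8.8.8 in these tests, per example 3) from `vm`,
# logging in with `keypair`.
network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)

# Fuller form with an explicit target, timeout and guest credentials,
# as in examples 9 and 15. All keyword arguments here appear optional.
network_checks.check_ping_from_vm(env=env,
                                  os_conn=os_conn,
                                  vm=vm,
                                  vm_keypair=keypair,
                                  ip_to_ping='192.168.1.4',  # hypothetical address
                                  timeout=3 * 60,
                                  vm_login='cirros',
                                  vm_password='cubswin:)')

The check raises AssertionError when the ping fails, which example 15 exploits via pytest.raises to assert the absence of connectivity.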
Example 1: test_vms_page_size_less_hp_count
def test_vms_page_size_less_hp_count(self, env, os_conn, networks,
computes_with_mixed_hp, flavors,
security_group, keypair, scarce_page,
expected_size, cleanup):
"""This test checks vms with hw:mem_page_size=large when count of
2Mb huge pages is not enough to boot vm while count of free 1Gb huge
page allows it (and vice versa)
Steps:
1. Create net1 with subnet and router1 with interface to net1
2. Check that hp count of the 1st type is not enough for vm
3. Boot vm and check that it use hp of the 2nd type
"""
host = computes_with_mixed_hp[0]
flavors[0].set_keys({'hw:mem_page_size': 'large'})
self.boot_vms_to_allocate_hp(os_conn, env, host, scarce_page,
networks[0],
ram_left_free=flavors[0].ram - 1024)
vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
nics=[{'net-id': networks[0]}],
key_name=keypair.name,
security_groups=[security_group.id],
availability_zone='nova:{}'.format(host))
assert self.get_instance_page_size(os_conn, vm) == expected_size
network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
Example 2: test_reset_primary_controller
def test_reset_primary_controller(self, router,
prepare_openstack, devops_env):
"""Reset primary controller (l3 agent on it should be
with ACTIVE ha_state)
Scenario:
1. Create network1, network2
2. Create router1 and connect it with network1, network2 and
external net
3. Boot vm1 in network1
4. Boot vm2 in network2 and associate floating ip
5. Add rules for ping
6. Find node with active ha_state for router
        7. If the node from step 6 isn't the primary controller,
           reschedule router1 to it by banning the l3 agents on all
           other controllers and then clearing them
        8. Reset the primary controller
        9. Start pinging vm2 from vm1 by floating ip
        10. Check that the ping loses no more than 10 packets
        11. Check that one agent has ACTIVE ha_state and the other two
            are in STANDBY ha_state
"""
router_id = router['router']['id']
agents = self.get_active_l3_agents_for_router(router_id)
l3_agent_controller = self.env.find_node_by_fqdn(agents[0]['host'])
primary_controller = self.env.primary_controller
for node in self.env.get_nodes_by_role('controller'):
if node != primary_controller:
proxy_node = node.data['fqdn']
break
else:
raise Exception("Can't find non primary controller")
server1 = self.os_conn.nova.servers.find(name="server01")
server2 = self.os_conn.nova.servers.find(name="server02")
server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']
# Reschedule active l3 agent to primary if needed
self.reschedule_active_l3_agt(router_id, primary_controller,
l3_agent_controller)
from_node = l3_agent_controller.data['fqdn']
self.wait_router_rescheduled(router_id=router_id,
from_node=from_node,
timeout_seconds=5 * 60)
logger.info("Reset primary controller {}".format(
primary_controller.data['fqdn']))
devops_node = devops_env.get_node_by_fuel_node(primary_controller)
devops_node.reset()
# To ensure that the l3 agt is moved from the affected controller
self.wait_router_rescheduled(router_id=router_id,
from_node=primary_controller.data['fqdn'],
timeout_seconds=5 * 60)
network_checks.check_ping_from_vm(
self.env, self.os_conn, vm=server1,
vm_keypair=self.instance_keypair, ip_to_ping=server2_ip)
self.check_l3_ha_agent_states(router_id)
Example 3: test_vms_connectivity_sriov_numa_after_resize
def test_vms_connectivity_sriov_numa_after_resize(self, env, os_conn,
sriov_hosts, aggregate,
ubuntu_image_id, keypair,
vf_ports, flavors):
"""This test checks vms between VMs launched on vf port after resizing
Steps:
1. Create net1 with subnet, router1 with interface to net1
2. Create vm1 on vf port with m1.small.performance on 1 NUMA-node
3. Resize vm1 to m1.medium flavor
4. Wait and ping 8.8.8.8 from vm1
5. Resize vm1 to m1.small.performance flavor
6. Wait and ping 8.8.8.8 from vm1
7. Resize vm1 to m1.small
8. Wait and ping 8.8.8.8 from vm1
"""
hosts = list(set(sriov_hosts) & set(aggregate.hosts))
if len(hosts) < 1:
            pytest.skip(
                "At least one host with SR-IOV and 2 NUMA nodes is required")
m1_cpu_flavor = flavors[0]
m1_medium = os_conn.nova.flavors.find(name='m1.medium')
m1_large = os_conn.nova.flavors.find(name='m1.large')
vm = self.create_vm(os_conn, hosts[0], m1_cpu_flavor, keypair,
vf_ports[0], ubuntu_image_id)
for flavor in [m1_medium, m1_cpu_flavor, m1_large]:
self.resize(os_conn, vm, flavor)
network_checks.check_ping_from_vm(
env, os_conn, vm, vm_keypair=keypair, vm_login='ubuntu')
Example 4: test_vms_page_size_any_no_hp
def test_vms_page_size_any_no_hp(self, env, os_conn, networks, keypair,
computes_with_mixed_hp, flavors,
security_group, cleanup):
"""This test checks vms with any/large hw:mem_page_size when both 2Mb
and 1Gb huge pages are unavailable
Steps:
1. Create net1 with subnet and router1 with interface to net1
2. Boot vms in order to allocate all huge pages
3. Boot vm with required mem_page_size and check result:
vm should be in error state for 'large', for 'any' mem_page_size
vm is active and 4kb pages are used (i.e. no huge pages)
"""
host = computes_with_mixed_hp[0]
zone = 'nova:{}'.format(host)
self.boot_vms_to_allocate_hp(os_conn, env, host, page_2mb, networks[0])
self.boot_vms_to_allocate_hp(os_conn, env, host, page_1gb, networks[0])
flavors[0].set_keys({'hw:mem_page_size': 'any'})
vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
nics=[{'net-id': networks[0]}],
key_name=keypair.name,
security_groups=[security_group.id],
availability_zone=zone)
assert self.get_instance_page_size(os_conn, vm) is None
network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
Example 5: test_ban_all_l3_agents_and_clear_them
def test_ban_all_l3_agents_and_clear_them(self, router, prepare_openstack):
"""Disable all l3 agents and enable them
Scenario:
1. Create network1, network2
2. Create router1 and connect it with network1, network2 and
external net
3. Boot vm1 in network1
4. Boot vm2 in network2 and associate floating ip
5. Add rules for ping
6. Disable all neutron-l3-agent
        7. Wait until all agents are dead
        8. Enable all neutron-l3-agent
        9. Wait until all agents are alive
10. Check ping vm2 from vm1 by floating ip
"""
server1 = self.os_conn.nova.servers.find(name="server01")
server2 = self.os_conn.nova.servers.find(name="server02")
server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']
agents = self.os_conn.get_l3_for_router(router['router']['id'])
agent_ids = [x['id'] for x in agents['agents']]
controller = self.env.get_nodes_by_role('controller')[0]
with controller.ssh() as remote:
logger.info('disable all l3 agents')
remote.check_call('pcs resource disable neutron-l3-agent')
self.os_conn.wait_agents_down(agent_ids)
logger.info('enable all l3 agents')
remote.check_call('pcs resource enable neutron-l3-agent')
self.os_conn.wait_agents_alive(agent_ids)
network_checks.check_ping_from_vm(
self.env, self.os_conn, vm=server1,
vm_keypair=self.instance_keypair, ip_to_ping=server2_ip)
Example 6: test_vms_page_size_one_type_hps_available_only
def test_vms_page_size_one_type_hps_available_only(
self, env, os_conn, networks, computes_with_mixed_hp, flavors,
security_group, keypair, mem_page_size, vm_page_size,
size_to_allocate, cleanup):
"""This test checks that vms with any/large hw:mem_page_size uses 2Mb
huge pages in case when only 2Mb pages are available
Steps:
1. Create net1 with subnet and router1 with interface to net1
2. Allocate all 1Gb huge pages for each numa node
3. Boot vm with any or large hw:mem_page_size
4. Check that 2Mb huge pages are used for vm
"""
host = computes_with_mixed_hp[0]
self.boot_vms_to_allocate_hp(os_conn, env, host, size_to_allocate,
networks[0])
flavors[0].set_keys({'hw:mem_page_size': mem_page_size})
vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
nics=[{'net-id': networks[0]}],
key_name=keypair.name,
security_groups=[security_group.id],
availability_zone='nova:{}'.format(host))
assert self.get_instance_page_size(os_conn, vm) == vm_page_size
network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
Example 7: test_basic_operation_with_fixed_ips
def test_basic_operation_with_fixed_ips(env, os_conn, instances, keypair, network):
"""Basic operations with fixed IPs on an instance
Scenario:
1. Create net01, net01__subnet
2. Boot instances vm1 and vm2 in net01
3. Check that they ping each other by their fixed IPs
4. Add a fixed IP to vm1
nova add-fixed-ip vm1 $NET_ID
5. Remove old fixed IP from vm1
nova remove-fixed-ip vm1 <old_fixed_ip>
6. Wait some time
7. Check that vm2 can send pings to vm1 by its new fixed IP
"""
for instance1, instance2 in zip(instances, instances[::-1]):
ip = os_conn.get_nova_instance_ips(instance2)["fixed"]
network_checks.check_ping_from_vm(env, os_conn, instance1, vm_keypair=keypair, ip_to_ping=ip)
instance1, instance2 = instances
old_ip = os_conn.get_nova_instance_ips(instance1)["fixed"]
instance1.add_fixed_ip(network["network"]["id"])
instance1.remove_fixed_ip(old_ip)
new_ip = os_conn.get_nova_instance_ips(instance1)["fixed"]
network_checks.check_ping_from_vm(env, os_conn, instance2, vm_keypair=keypair, ip_to_ping=new_ip)
Example 8: test_destroy_primary_controller
def test_destroy_primary_controller(self, router, prepare_openstack,
devops_env):
"""Destroy primary controller (l3 agent on it should be
with ACTIVE ha_state)
Scenario:
1. Create network1, network2
2. Create router1 and connect it with network1, network2 and
external net
3. Boot vm1 in network1
4. Boot vm2 in network2 and associate floating ip
5. Add rules for ping
6. Find node with active ha_state for router
        7. If the node from step 6 isn't the primary controller,
           reschedule router1 to it by banning the l3 agents on all
           other controllers and then clearing them
        8. Destroy the primary controller
        9. Wait some time while the env is unstable
10. Check ping
"""
router_id = router['router']['id']
agents = self.get_active_l3_agents_for_router(router_id)
l3_agent_controller = self.env.find_node_by_fqdn(agents[0]['host'])
primary_controller = self.env.primary_controller
other_controllers = [x for x
in self.env.get_nodes_by_role('controller')
if x != primary_controller]
        # Reschedule the active l3 agent to the primary controller if needed
if primary_controller != l3_agent_controller:
with primary_controller.ssh() as remote:
for node in other_controllers:
remote.check_call(
'pcs resource ban neutron-l3-agent {}'.format(
node.data['fqdn']))
from_node = l3_agent_controller.data['fqdn']
self.wait_router_rescheduled(router_id=router_id,
from_node=from_node,
timeout_seconds=5 * 60)
for node in other_controllers:
remote.check_call(
'pcs resource clear neutron-l3-agent {}'.format(
node.data['fqdn']))
server1 = self.os_conn.nova.servers.find(name="server01")
server2 = self.os_conn.nova.servers.find(name="server02")
server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']
logger.info("Destroy primary controller {}".format(
primary_controller.data['fqdn']))
devops_node = devops_env.get_node_by_fuel_node(primary_controller)
devops_node.destroy()
        self.wait_router_rescheduled(router_id=router_id,
from_node=primary_controller.data['fqdn'],
timeout_seconds=5 * 60)
network_checks.check_ping_from_vm(
self.env, self.os_conn, vm=server1,
vm_keypair=self.instance_keypair, ip_to_ping=server2_ip)
Example 9: _prepare_openstack
def _prepare_openstack(self):
"""Prepare OpenStack for scenarios run
Steps:
1. Update default security group
2. Create networks net01: net01__subnet, 192.168.1.0/24
        3. Launch vm1 and vm2 in the net01 network on a single compute
4. Go to vm1 console and send pings to vm2
"""
self.instance_keypair = self.os_conn.create_key(key_name='instancekey')
zone = self.os_conn.nova.availability_zones.find(zoneName="nova")
        host = list(zone.hosts.keys())[0]
self.setup_rules_for_default_sec_group()
# create 1 network and 2 instances
net, subnet = self.create_internal_network_with_subnet()
self.os_conn.create_server(
name='server01',
availability_zone='{}:{}'.format(zone.zoneName, host),
key_name=self.instance_keypair.name,
nics=[{'net-id': net['network']['id']}],
max_count=2,
wait_for_avaliable=False,
wait_for_active=False)
self.server1 = self.os_conn.nova.servers.find(name="server01-1")
server2 = self.os_conn.nova.servers.find(name="server01-2")
servers = [self.server1, server2]
self.os_conn.wait_servers_active(servers)
self.os_conn.wait_servers_ssh_ready(servers)
# check pings
self.server2_ip = self.os_conn.get_nova_instance_ips(server2)['fixed']
network_checks.check_ping_from_vm(env=self.env,
os_conn=self.os_conn,
vm=self.server1,
vm_keypair=self.instance_keypair,
ip_to_ping=self.server2_ip,
timeout=3 * 60,
vm_login='cirros',
vm_password='cubswin:)')
# make a list of all ovs agent ids
self.ovs_agent_ids = [
agt['id'] for agt in
self.os_conn.neutron.list_agents(
binary='neutron-openvswitch-agent')['agents']]
        # make a list of ovs agents that reside only on controllers
controllers = [node.data['fqdn']
for node in self.env.get_nodes_by_role('controller')]
ovs_agts = self.os_conn.neutron.list_agents(
binary='neutron-openvswitch-agent')['agents']
self.ovs_conroller_agents = [agt['id'] for agt in ovs_agts
if agt['host'] in controllers]
Example 10: test_restore_deleted_instance
def test_restore_deleted_instance(
self, set_recl_inst_interv, instances, volumes):
"""Restore previously deleted instance.
Actions:
1. Update '/etc/nova/nova.conf' with 'reclaim_instance_interval=86400'
and restart Nova on all nodes;
2. Create net and subnet;
3. Create and run two instances (vm1, vm2) inside same net;
        4. Check that pings are successful between the vms;
        5. Create a volume and attach it to instance vm1;
        6. Delete instance vm1 and check that it's in 'SOFT_DELETED' state;
        7. Restore the vm1 instance and check that it's in 'ACTIVE' state;
        8. Check that pings are successful between the vms;
"""
        timeout = 60  # (sec) timeout to wait for an instance status change
# Create two vms
vm1, vm2 = instances
# Ping one vm from another
        vm1_ip = list(self.os_conn.get_nova_instance_ips(vm1).values())[0]
        vm2_ip = list(self.os_conn.get_nova_instance_ips(vm2).values())[0]
network_checks.check_ping_from_vm(
self.env, self.os_conn, vm1, ip_to_ping=vm2_ip, timeout=60)
# Create a volume and attach it to an instance vm1
volume = common_functions.create_volume(
self.os_conn.cinder, image_id=None)
self.os_conn.nova.volumes.create_server_volume(
server_id=vm1.id, volume_id=volume.id, device='/dev/vdb')
volumes.append(volume)
# Delete instance vm1 and check that it's in "SOFT_DELETED" state
common_functions.delete_instance(self.os_conn.nova, vm1.id)
assert vm1 not in self.os_conn.get_servers()
common_functions.wait(
lambda: self.os_conn.server_status_is(vm1, 'SOFT_DELETED'),
timeout_seconds=timeout, sleep_seconds=5,
waiting_for='instance {0} changes status to SOFT_DELETED'.format(
vm1.name))
# Restore vm1 instance and check that it's in "ACTIVE" state now
resp = self.os_conn.nova.servers.restore(vm1.id)
assert resp[0].ok
common_functions.wait(
lambda: self.os_conn.is_server_active(vm1.id),
timeout_seconds=timeout, sleep_seconds=5,
waiting_for='instance {0} changes status to ACTIVE'.format(
vm1.name))
# Ping one vm from another
network_checks.check_ping_from_vm(
self.env, self.os_conn, vm2, ip_to_ping=vm1_ip, timeout=60)
Example 11: test_ovs_restart_pcs_disable_enable
def test_ovs_restart_pcs_disable_enable(self, count):
"""Restart openvswitch-agents with pcs disable/enable on controllers
Steps:
1. Update default security group
2. Create router01, create networks net01: net01__subnet,
192.168.1.0/24, net02: net02__subnet, 192.168.2.0/24 and
attach them to router01.
3. Launch vm1 in net01 network and vm2 in net02 network
on different computes
4. Go to vm1 console and send pings to vm2
5. Disable ovs-agents on a controller, restart service
neutron-plugin-openvswitch-agent on all computes, and enable
them back. To do this, launch the script against master node.
6. Wait 30 seconds, send pings from vm1 to vm2 and check that
it is successful.
        7. Repeat steps 5-6 'count' times
Duration 10m
"""
self._prepare_openstack()
for _ in range(count):
# Check that all ovs agents are alive
self.os_conn.wait_agents_alive(self.ovs_agent_ids)
# Disable ovs agent on a controller
common.disable_ovs_agents_on_controller(self.env)
# Then check that all ovs went down
self.os_conn.wait_agents_down(self.ovs_conroller_agents)
# Restart ovs agent service on all computes
common.restart_ovs_agents_on_computes(self.env)
# Enable ovs agent on a controller
common.enable_ovs_agents_on_controllers(self.env)
# Then check that all ovs agents are alive
self.os_conn.wait_agents_alive(self.ovs_agent_ids)
# sleep is used to check that system will be stable for some time
# after restarting service
time.sleep(30)
network_checks.check_ping_from_vm(
self.env, self.os_conn, self.server1, self.instance_keypair,
self.server2_ip, timeout=10 * 60)
# check all agents are alive
assert all([agt['alive'] for agt in
self.os_conn.neutron.list_agents()['agents']])
Example 12: test_destroy_non_primary_controller
def test_destroy_non_primary_controller(self, router,
prepare_openstack, devops_env):
"""Reset primary controller (l3 agent on it should be
with ACTIVE ha_state)
Scenario:
1. Create network1, network2
2. Create router1 and connect it with network1, network2 and
external net
3. Boot vm1 in network1
4. Boot vm2 in network2 and associate floating ip
5. Add rules for ping
6. Find node with active ha_state for router
7. If node from step 6 isn't primary controller,
reschedule router1 to primary by banning all another
and then clear them
8. Destroy primary controller
9. Wait time while env is unstable
10. Check ping
11. One agent has ACTIVE ha_state, others (2) has STAND BY ha_state
"""
router_id = router['router']['id']
agents = self.get_active_l3_agents_for_router(router_id)
l3_agent_controller = self.env.find_node_by_fqdn(agents[0]['host'])
controller = self.env.non_primary_controllers[0]
server1 = self.os_conn.nova.servers.find(name="server01")
server2 = self.os_conn.nova.servers.find(name="server02")
server2_ip = self.os_conn.get_nova_instance_ips(server2)['floating']
# Reschedule active l3 agent to the non primary if needed
self.reschedule_active_l3_agt(router_id, controller,
l3_agent_controller)
logger.info("Destroy non primary controller {}".format(
controller.data['fqdn']))
devops_node = devops_env.get_node_by_fuel_node(controller)
self.env.destroy_nodes([devops_node])
# To ensure that the l3 agt is moved from the affected controller
self.wait_router_rescheduled(router_id=router_id,
from_node=controller.data['fqdn'],
timeout_seconds=5 * 60)
network_checks.check_ping_from_vm(
self.env, self.os_conn, vm=server1,
vm_keypair=self.instance_keypair, ip_to_ping=server2_ip)
self.check_l3_ha_agent_states(router_id)
Example 13: test_ovs_restart_pcs_vms_on_single_compute_in_single_network
def test_ovs_restart_pcs_vms_on_single_compute_in_single_network(self):
"""Check connectivity for instances scheduled on a single compute in
a single private network
Steps:
1. Update default security group
2. Create networks net01: net01__subnet, 192.168.1.0/24
        3. Launch vm1 and vm2 in the net01 network on a single compute
4. Go to vm1 console and send pings to vm2
5. Disable ovs-agents on all controllers, restart service
neutron-plugin-openvswitch-agent on all computes, and enable
them back. To do this, launch the script against master node.
6. Wait 30 seconds, send pings from vm1 to vm2 and check that
it is successful.
Duration 10m
"""
self._prepare_openstack()
# Check that all ovs agents are alive
self.os_conn.wait_agents_alive(self.ovs_agent_ids)
# Disable ovs agent on all controllers
common.disable_ovs_agents_on_controller(self.env)
# Then check that all ovs went down
self.os_conn.wait_agents_down(self.ovs_conroller_agents)
# Restart ovs agent service on all computes
common.restart_ovs_agents_on_computes(self.env)
# Enable ovs agent on all controllers
common.enable_ovs_agents_on_controllers(self.env)
# Then check that all ovs agents are alive
self.os_conn.wait_agents_alive(self.ovs_agent_ids)
# sleep is used to check that system will be stable for some time
# after restarting service
time.sleep(30)
network_checks.check_ping_from_vm(
self.env, self.os_conn, self.server1, self.instance_keypair,
self.server2_ip, timeout=3 * 60)
# check all agents are alive
assert all([agt['alive'] for agt in
self.os_conn.neutron.list_agents()['agents']])
Example 14: test_ovs_restart_pcs_ban_clear
def test_ovs_restart_pcs_ban_clear(self):
"""Restart openvswitch-agents with pcs ban/clear on controllers
Steps:
1. Update default security group
2. Create router01, create networks.
3. Launch vm1 in net01 network and vm2 in net02 network
on different computes.
4. Go to vm1 console and send pings to vm2
5. Ban ovs-agents on all controllers, clear them and restart
service neutron-plugin-openvswitch-agent on all computes.
To do this, launch the script against master node.
6. Wait 30 seconds, send pings from vm1 to vm2 and
check that it is successful.
Duration 10m
"""
self._prepare_openstack()
# Check that all ovs agents are alive
self.os_conn.wait_agents_alive(self.ovs_agent_ids)
# Ban ovs agents on all controllers
common.ban_ovs_agents_controllers(self.env)
# Then check that all ovs went down
self.os_conn.wait_agents_down(self.ovs_conroller_agents)
        # Clear ovs agents on all controllers
common.clear_ovs_agents_controllers(self.env)
# Restart ovs agent service on all computes
common.restart_ovs_agents_on_computes(self.env)
# Then check that all ovs agents are alive
self.os_conn.wait_agents_alive(self.ovs_agent_ids)
# sleep is used to check that system will be stable for some time
# after restarting service
time.sleep(30)
network_checks.check_ping_from_vm(
self.env, self.os_conn, self.server1, self.instance_keypair,
self.server2_ip, timeout=3 * 60)
# check all agents are alive
assert all([agt['alive'] for agt in
self.os_conn.neutron.list_agents()['agents']])
Example 15: check_no_ping_from_vm
def check_no_ping_from_vm(self,
vm,
vm_keypair=None,
ip_to_ping=None,
timeout=None,
vm_login='cirros',
vm_password='cubswin:)'):
logger.info('Expecting that ping from VM should fail')
# Get ping results
with pytest.raises(AssertionError):
network_checks.check_ping_from_vm(env=self.env,
os_conn=self.os_conn,
vm=vm,
vm_keypair=vm_keypair,
ip_to_ping=ip_to_ping,
timeout=timeout)
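As a usage sketch (hypothetical, assuming a test class that provides server1, server2_ip and self.instance_keypair as in the earlier examples), the helper above would be invoked like this, and it passes only when the ping fails:

self.check_no_ping_from_vm(server1,
                           vm_keypair=self.instance_keypair,
                           ip_to_ping=server2_ip)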