This page collects typical usage examples of the Python function mos_tests.functions.common.wait. If you are wondering how the wait function is used in practice, the curated code examples here may help.
Fifteen code examples of the wait function are shown below, sorted by popularity by default.
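Judging from the call sites in the examples below, wait takes a zero-argument callable (a predicate) and keyword arguments such as timeout_seconds, sleep_seconds (a fixed interval or a back-off tuple) and waiting_for (a label used when a timeout is reported). The exact signature is not documented on this page, so the minimal sketch below is only an assumption inferred from those calls; my_resource_is_ready is a hypothetical predicate.

from mos_tests.functions import common


def my_resource_is_ready():
    # Hypothetical predicate: return True once the awaited condition holds.
    # wait() keeps re-evaluating it until then or until the timeout expires.
    return True


common.wait(my_resource_is_ready,
            timeout_seconds=5 * 60,          # give up after 5 minutes
            sleep_seconds=10,                # poll every 10 seconds
            waiting_for='resource to be ready')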
Example 1: delete_instances
def delete_instances(os_conn, instances):
    instances_ids = [x.id for x in instances]

    # Stop instances (to prevent errors during deletion)
    for instance_id in instances_ids:
        os_conn.nova.servers.stop(instance_id)

    def instances_shutdowned():
        instances = [x
                     for x in os_conn.nova.servers.list()
                     if x.id in instances_ids]
        if any([x.status == 'ERROR' for x in instances]):
            raise Exception(
                'Some server(s) went to ERROR state after stop')
        return all([x.status == 'SHUTOFF' for x in instances])

    common.wait(instances_shutdowned, timeout_seconds=10 * 60)

    # Delete instances
    for instance_id in instances_ids:
        os_conn.nova.servers.delete(instance_id)

    def instances_deleted():
        not_deleted = [x
                       for x in os_conn.nova.servers.list()
                       if x.id in instances_ids]
        if len(not_deleted) == 0:
            return True
        if any([x.status == 'ERROR' for x in not_deleted]):
            raise Exception(
                'Some server(s) went to ERROR state after deletion')
        return False

    common.wait(instances_deleted, timeout_seconds=2 * 60)
Example 2: assign_floating_ip
def assign_floating_ip(self, srv, use_neutron=False):
    if use_neutron:
        # Find external net id for tenant
        nets = self.neutron.list_networks()['networks']
        err_msg = "Active external network not found in nets:{}"
        ext_net_ids = [
            net['id'] for net in nets
            if net['router:external'] and net['status'] == "ACTIVE"]
        assert ext_net_ids, err_msg.format(nets)
        net_id = ext_net_ids[0]

        # Find instance port
        ports = self.neutron.list_ports(device_id=srv.id)['ports']
        err_msg = "No active ports found for instance:{}"
        assert ports, err_msg.format(srv.id)
        port = ports[0]

        # Create floating IP
        body = {'floatingip': {'floating_network_id': net_id,
                               'port_id': port['id']}}
        flip = self.neutron.create_floatingip(body)

        # Wait for the port to reach ACTIVE state
        port_id = flip['floatingip']['port_id']
        wait(lambda:
             self.neutron.show_port(port_id)['port']['status'] == "ACTIVE",
             timeout_seconds=60,
             waiting_for="floating_ip port is active")
        return flip['floatingip']

    fl_ips_pool = self.nova.floating_ip_pools.list()
    if fl_ips_pool:
        floating_ip = self.nova.floating_ips.create(
            pool=fl_ips_pool[0].name)
        self.nova.servers.add_floating_ip(srv, floating_ip)
        return floating_ip
Example 3: check_vm_is_available
def check_vm_is_available(self, vm,
                          username=None, password=None, pkeys=None):
    """Check that the instance is reachable from its compute node.

    :param vm: instance to ping from its compute node
    :param username: username to log in to the instance
    :param password: password to connect to the instance
    :param pkeys: private keys to connect to the instance
    """
    vm = self.os_conn.get_instance_detail(vm)
    srv_host = self.env.find_node_by_fqdn(
        self.os_conn.get_srv_hypervisor_name(vm)).data['ip']
    vm_ip = self.os_conn.get_nova_instance_ips(vm)['floating']
    with self.env.get_ssh_to_node(srv_host) as remote:
        cmd = "ping -c1 {0}".format(vm_ip)
        waiting_for_msg = (
            'instance with ip {0} to have '
            'connectivity from node with ip {1}').format(vm_ip, srv_host)
        wait(lambda: remote.execute(cmd)['exit_code'] == 0,
             sleep_seconds=10, timeout_seconds=3 * 60,
             waiting_for=waiting_for_msg)
    return self.check_vm_is_accessible_with_ssh(
        vm_ip, username=username, password=password, pkeys=pkeys)
Example 4: _prepare_neutron_server_and_env
def _prepare_neutron_server_and_env(self, net_count):
    """Set the network count handled by the neutron dhcp agent and prepare the env.

    :param net_count: how many networks the dhcp agent must handle
    """
    def _check_neutron_restart():
        try:
            self.os_conn.list_networks()['networks']
        except Exception as e:
            logger.debug(e)
            return False
        return True

    all_controllers = self.env.get_nodes_by_role('controller')
    for controller in all_controllers:
        with controller.ssh() as remote:
            self._apply_new_neutron_param_value(remote, net_count)

    wait(
        lambda: _check_neutron_restart(),
        timeout_seconds=60 * 3,
        sleep_seconds=(1, 60, 5),
        waiting_for='neutron to be up')
    self._prepare_openstack_state()
Example 5: clear_l3_agent
def clear_l3_agent(self, _ip, router_name, node, wait_for_alive=False):
    """Clear the L3 agent ban and wait until the router moves to this node.

    Clear a previously banned L3 agent on the node and wait until the
    router moves to this node.

    :param _ip: ip of the server on which to execute the clear command
    :param router_name: name of the router to wait for
    :param node: name of the node to clear
    :param wait_for_alive: whether to wait until the L3 agent is alive
    """
    router = self.os_conn.neutron.list_routers(
        name=router_name)['routers'][0]
    with self.env.get_ssh_to_node(_ip) as remote:
        remote.check_call(
            "pcs resource clear neutron-l3-agent {0}".format(node))
        logger.info("Clear L3 agent on node {0}".format(node))

    # wait for the l3 agent to become alive
    if wait_for_alive:
        wait(
            lambda: self.os_conn.get_l3_for_router(
                router['id'])['agents'][0]['alive'] is True,
            timeout_seconds=60 * 3, waiting_for="L3 agent is alive",
            sleep_seconds=(1, 60)
        )
Example 6: delete_env
def delete_env(self, timeout=2):
    self.env.reset()
    wait(lambda: self.env.status == 'new',
         timeout_seconds=60 * timeout,
         sleep_seconds=20,
         waiting_for="Env reset finish")
    self.env.delete()
Example 7: test_destroy_master_rabbit_node
def test_destroy_master_rabbit_node(self):
    """Destroy one of the RabbitMQ nodes.

    Actions:
    1. Power off the master standalone rabbitmq node;
    2. Wait some time for the rabbitmq cluster to recover;
    3. Check RabbitMQ health with rabbitmqctl;
    4. Check that the old master is offline;
    5. Check that the new master != old master.
    """
    timeout = 5  # minutes, wait for rabbit to recover

    # Get the master standalone rabbit node for disabling
    old_master = self.rabbit_node('master')
    old_master_fqdn = old_master.data['fqdn']

    # Disable the master rabbit node
    logger.debug("Disabling RabbitMQ master node")
    self.disable_node(old_master)

    # Wait for the rabbit cluster to recover
    logger.debug("Sleeping for %s minutes" % timeout)
    sleep(60 * timeout)

    # Check rabbit status
    wait(lambda: self.is_rabbit_cluster_ok(),
         timeout_seconds=60 * timeout,
         sleep_seconds=30,
         waiting_for="RabbitMQ to become online")

    # Check that the old master is now offline
    assert self.rabbit_nodes_statuses()[old_master_fqdn] == 'offline'

    # Check that we now have a new master
    assert old_master_fqdn not in self.rabbit_nodes_roles()
Example 8: test_image_size_attributes
def test_image_size_attributes(instance, os_conn):
    """Check the OS-EXT-IMG-SIZE:size extended attribute

    Scenario:
        1. Create net and subnet
        2. Check that the TestVM image has an OS-EXT-IMG-SIZE:size attribute
        3. Boot an instance with the TestVM image on the net
        4. Wait for the instance to reach ACTIVE status
        5. Create a new image as a snapshot of the instance
        6. Check that the created snapshot has an
           OS-EXT-IMG-SIZE:size attribute
    """
    attr = 'OS-EXT-IMG-SIZE:size'
    test_vm_image = os_conn.nova.images.find(name='TestVM')
    assert hasattr(test_vm_image, attr)

    common.wait(lambda: os_conn.is_server_active(instance),
                timeout_seconds=60,
                waiting_for='instance to become ACTIVE')
    instance = os_conn.nova.servers.get(instance.id)
    snapshot_id = instance.create_image('snap1')
    snapshot = os_conn.nova.images.get(snapshot_id)
    assert hasattr(snapshot, attr)
Example 9: test_os_instance_attributes
def test_os_instance_attributes(request, error_instance, os_conn, attrs):
    """Check instance extended attributes

    Scenario:
        1. Create net and subnet
        2. Boot an instance on the net
        3. Check that the attributes `attrs` are visible in instance attributes
        4. Wait for the instance to reach ACTIVE status
        5. Check that the attributes `attrs` are visible in instance attributes
        6. Boot an instance in ERROR status
        7. Check that the attributes `attrs` are visible in instance attributes
    """
    instance = request.getfuncargvalue('instance')
    common.wait(lambda: os_conn.server_status_is(instance, 'BUILD'),
                timeout_seconds=60,
                waiting_for='instance to reach BUILD status')
    instance = os_conn.nova.servers.get(instance.id)
    for attr in attrs:
        assert hasattr(instance, attr)

    common.wait(lambda: os_conn.is_server_active(instance),
                timeout_seconds=60,
                waiting_for='instance to become ACTIVE')
    instance = os_conn.nova.servers.get(instance.id)
    for attr in attrs:
        assert hasattr(instance, attr)

    error_instance = os_conn.nova.servers.get(error_instance.id)
    for attr in attrs:
        assert hasattr(error_instance, attr)
Example 10: reset_computes
def reset_computes(self, hostnames, env_name):

    def get_hypervisors():
        return [x for x in self.os_conn.nova.hypervisors.list()
                if x.hypervisor_hostname in hostnames]

    node_states = defaultdict(list)

    def is_nodes_started():
        for hypervisor in get_hypervisors():
            state = hypervisor.state
            prev_states = node_states[hypervisor.hypervisor_hostname]
            if len(prev_states) == 0 or state != prev_states[-1]:
                prev_states.append(state)
        return all(x[-2:] == ['down', 'up'] for x in node_states.values())

    logger.info('Resetting computes {}'.format(hostnames))
    for hostname in hostnames:
        node = self.env.find_node_by_fqdn(hostname)
        devops_node = DevopsClient.get_node_by_mac(env_name=env_name,
                                                   mac=node.data['mac'])
        devops_node.reset()

    wait(is_nodes_started, timeout_seconds=10 * 60)
Example 11: boot_instance
def boot_instance(self, image, flavor, keypair, **kwargs):
    """Boot and return an ironic instance

    :param image: image to boot the instance with
    :type image: warlock.core.image
    :param flavor: baremetal flavor
    :type flavor: novaclient.v2.flavors.Flavor
    :param keypair: SSH keypair for the instance
    :type keypair: novaclient.v2.keypairs.Keypair
    :return: created instance
    :rtype: novaclient.v2.servers.Server
    """
    common.wait(self.all_nodes_provisioned,
                timeout_seconds=3 * 60,
                sleep_seconds=15,
                waiting_for='ironic nodes to be provisioned')
    baremetal_net = self.os_conn.nova.networks.find(label='baremetal')
    return self.os_conn.create_server('ironic-server',
                                      image_id=image.id,
                                      flavor=flavor.id,
                                      key_name=keypair.name,
                                      nics=[{'net-id': baremetal_net.id}],
                                      timeout=60 * 10,
                                      **kwargs)
Example 12: test_image_create_delete_from_url
def test_image_create_delete_from_url(glance, suffix, option):
    """Check image creation and deletion from URL

    Scenario:
        1. Create image from URL
        2. Wait until image has active `status`
        3. Delete image
        4. Check that image is deleted
    """
    name = 'Test_{}'.format(suffix)
    image_url = settings.GLANCE_IMAGE_URL
    cmd = ('image-create --name {name} --container-format bare '
           '--disk-format qcow2 {option} {image_url} --progress'.format(
               name=name,
               option=option,
               image_url=image_url))
    image = parser.details(glance(cmd))

    def is_image_active():
        image_data = parser.details(glance('image-show {id}'.format(**image)))
        return image_data['status'] == 'active'

    wait(is_image_active, timeout_seconds=60, waiting_for='image is active')
    glance('image-delete {id}'.format(**image))
    check_image_not_in_list(glance, image)
Example 13: _prepare_neutron_server_and_env
def _prepare_neutron_server_and_env(self, net_count):
    """Set the network count handled by the neutron dhcp agent and prepare the env.

    :param net_count: how many networks the dhcp agent must handle
    """
    def _check_neutron_restart():
        try:
            self.os_conn.list_networks()['networks']
        except Exception as e:
            logger.debug(e)
            return False
        return True

    all_controllers = self.env.get_nodes_by_role('controller')
    for controller in all_controllers:
        with controller.ssh() as remote:
            res = self._apply_new_neutron_param_value(remote, net_count)
            error_msg = (
                'Neutron service restart with new value failed, '
                'exit code {exit_code}, '
                'stdout {stdout}, stderr {stderr}').format(**res)
            assert 0 == res['exit_code'], error_msg

    wait_msg = "neutron to be up"
    wait(
        lambda: _check_neutron_restart(),
        timeout_seconds=60 * 3,
        sleep_seconds=(1, 60, 5),
        waiting_for=wait_msg)
    self._prepare_openstack_state()
Example 14: test_reboot_conductor
def test_reboot_conductor(env, ironic, os_conn, ironic_nodes, make_image,
                          flavors, keypair, devops_env, boot_instance_before):
    """Check ironic state after restart of the conductor node

    Scenario:
        1. Boot Ironic instance (if `boot_instance_before`)
        2. Reboot Ironic conductor.
        3. Wait 5-10 minutes.
        4. Run network verification.
        5. Run OSTF including Ironic tests.
        6. Verify that CLI ironicclient can list nodes, ports, chassis, drivers
        7. Boot new Ironic instance (if not `boot_instance_before`).
    """
    flavor, ironic_node = zip(flavors, ironic_nodes)[0]
    image = make_image(node_driver=ironic_node.driver)

    if boot_instance_before:
        instance = ironic.boot_instance(image=image,
                                        flavor=flavor,
                                        keypair=keypair)

    conductor = env.get_nodes_by_role('ironic')[0]
    devops_node = devops_env.get_node_by_fuel_node(conductor)
    devops_node.reset()

    time.sleep(10)
    common.wait(conductor.is_ssh_avaliable,
                timeout_seconds=60 * 10,
                sleep_seconds=20,
                waiting_for='ironic conductor node to reboot')

    def is_ironic_available():
        try:
            ironic.client.driver.list()
            return True
        except Exception:
            return False

    common.wait(is_ironic_available,
                timeout_seconds=60 * 5,
                sleep_seconds=20,
                waiting_for='ironic conductor service to start')

    result = env.wait_network_verification()
    assert result.status == 'ready', 'Result data:\n{0}'.format(result.data)

    env.wait_for_ostf_pass(['sanity'], timeout_seconds=60 * 5)

    with env.get_nodes_by_role('controller')[0].ssh() as remote:
        ironic_cli = os_cli.Ironic(remote)
        for cmd in ['node-list', 'port-list', 'chassis-list', 'driver-list']:
            ironic_cli(cmd)

    if not boot_instance_before:
        instance = ironic.boot_instance(image=image,
                                        flavor=flavor,
                                        keypair=keypair)

    assert os_conn.nova.servers.get(instance.id).status == 'ACTIVE'
Example 15: test_force_delete_inst_before_deferred_cleanup
def test_force_delete_inst_before_deferred_cleanup(
        self, set_recl_inst_interv, instances, volumes):
    """Force delete of an instance before deferred cleanup

    Actions:
    1. Update '/etc/nova/nova.conf' with a long 'reclaim_instance_interval'
       and restart Nova on all nodes;
    2. Create net and subnet;
    3. Create and run two instances (vm1, vm2) inside the same net;
    4. Create a volume and attach it to instance vm1;
    5. Delete instance vm1 and check that it is in 'SOFT_DELETED' state;
    6. Delete instance vm1 with the 'force' option and check that it is not
       present;
    7. Check that the volume is released and has the 'available' state;
    8. Attach the volume to the vm2 instance to ensure that reusing the
       volume does not cause any errors.
    """
    timeout = 60  # (sec) timeout to wait for an instance status change

    # Create two vms
    vm1, vm2 = instances

    # Create a volume and attach it to instance vm1
    volume = common_functions.create_volume(
        self.os_conn.cinder, image_id=None)
    self.os_conn.nova.volumes.create_server_volume(
        server_id=vm1.id, volume_id=volume.id, device='/dev/vdb')
    volumes.append(volume)

    # Delete instance vm1 and check that it's in "SOFT_DELETED" state
    common_functions.delete_instance(self.os_conn.nova, vm1.id)
    assert vm1 not in self.os_conn.get_servers()
    common_functions.wait(
        lambda: self.os_conn.server_status_is(vm1, 'SOFT_DELETED'),
        timeout_seconds=timeout, sleep_seconds=5,
        waiting_for='instance {0} to change status to SOFT_DELETED'.format(
            vm1.name))

    # Force delete and check vm1 is not present
    common_functions.delete_instance(self.os_conn.nova, vm1.id, force=True)
    common_functions.wait(
        lambda: self.os_conn.is_server_deleted(vm1.id),
        timeout_seconds=timeout, sleep_seconds=5,
        waiting_for='instance {0} to be force deleted'.format(vm1.name))

    # Check that the volume is released and has the 'available' state
    assert common_functions.check_volume_status(
        self.os_conn.cinder, volume.id, 'available', 1)

    # Check the volume is not attached
    assert self.os_conn.cinder.volumes.get(volume.id).attachments == []

    # Attach the volume to the vm2 instance
    self.os_conn.nova.volumes.create_server_volume(
        server_id=vm2.id, volume_id=volume.id, device='/dev/vdb')

    # Check the volume status is 'in-use' after re-attach
    assert common_functions.check_volume_status(
        self.os_conn.cinder, volume.id, 'in-use', 1)

    # Check that the volume has the correct server id
    volume = self.os_conn.cinder.volumes.get(volume.id)
    assert volume.attachments[0]['server_id'] == vm2.id