本文整理汇总了Python中tcutils.util.get_random_name函数的典型用法代码示例。如果您正苦于以下问题:Python get_random_name函数的具体用法?Python get_random_name怎么用?Python get_random_name使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_random_name函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_domain_user_group
def test_domain_user_group(self):
    ''' Test user group within a domain
        1) Create project
        2) Create user
        3) Create user_group and attach user to it
        4) Attach user_group to domain and project with admin roles
        5) Get project connections with user and create projects; it should be allowed
        6) Verify user_group by creating vn and vms
    '''
    # Random names avoid collisions with leftovers from earlier runs.
    username = get_random_name('TestUser-1')
    password = get_random_name('TestUser-1')
    project_name = get_random_name('TestProject-1')
    domain_name = self.connections.domain_name
    user_group = get_random_name('TestGroup-1')
    project_fix = self.create_project(
        domain_name,project_name,username,password)
    # Create the user and the group, then make the user a group member.
    self.admin_connections.auth.create_user(
        user=username, password=password,
        tenant_name=project_name, domain_name=domain_name)
    self.admin_connections.auth.create_user_group(
        group=user_group, domain_name=domain_name)
    self.admin_connections.auth.add_user_to_group(
        user=username, group=user_group)
    # Grant the group the admin role at both domain and project scope.
    self.admin_connections.auth.add_group_to_domain(
        group=user_group,role='admin', domain=domain_name)
    self.admin_connections.auth.add_group_to_tenant(
        project=project_name, group=user_group,role='admin')
    # Connect as the group member and verify common objects (VNs/VMs)
    # can be created under the project.
    proj_conn = project_fix.get_project_connections()
    ret = self.setup_common_objects(
        connections=proj_conn, project_fix=project_fix)
    assert ret,'Failed to setup and test common objects'
示例2: create_2_legs
def create_2_legs(self):
    """Set up two virtual networks with a traffic VM on the first leg
    and a vSRX VM spanning both legs (for BGPaaS testing).

    Returns a dict with keys 'vn1_fixture', 'vn2_fixture', 'test_vm'
    and 'bgpaas_vm1'.
    """
    left_vn = self.create_vn(get_random_name('bgpaas_vn'), [get_random_cidr()])
    traffic_vm = self.create_vm(left_vn, 'test_vm',
                                image_name='ubuntu-traffic')
    assert traffic_vm.wait_till_vm_is_up()
    right_vn = self.create_vn(get_random_name('bgpaas_vn'), [get_random_cidr()])
    # The vSRX VM gets one interface in each of the two VNs.
    vsrx_vm = self.useFixture(
        VMFixture(
            project_name=self.inputs.project_name,
            connections=self.connections,
            vn_objs=[left_vn.obj, right_vn.obj],
            vm_name='bgpaas_vm1',
            node_name=None,
            image_name='vsrx'))
    assert vsrx_vm.wait_till_vm_is_up()
    return {
        'vn1_fixture': left_vn,
        'vn2_fixture': right_vn,
        'test_vm': traffic_vm,
        'bgpaas_vm1': vsrx_vm,
    }
示例3: config_svc_chain
def config_svc_chain(self, rules, vn_list, heat_objs, stack_name='svc_chain'):
    """Create a service-chain heat stack and bind the generated network
    policy to the source/destination VNs.

    :param rules: policy rules appended to the heat template
    :param vn_list: VN fixtures; vn_list[1]/vn_list[2] are src/dst
    :param heat_objs: heat stack objects whose 'policys' lists are updated
    :param stack_name: base name for the stack (randomized before use)
    :return: the heat stack object, or None for heat API v1
    """
    res_name = 'svc_chain'
    if self.heat_api_version == 2:
        res_name += '_v2'
    template = self.get_template(res_name)
    env = self.get_env(res_name)
    env['parameters']['policy_name'] = get_random_name('sc')
    if self.heat_api_version == 2:
        template['resources']['policy']['properties']['network_policy_entries']['network_policy_entries_policy_rule'].extend(rules)
    else:
        # Fix: the original re-randomized policy_name here although it was
        # already set above; only the VN wiring is version-specific.
        env['parameters']['src_vn_id'] = vn_list[1].uuid
        env['parameters']['dst_vn_id'] = vn_list[2].uuid
        template['resources']['private_policy']['properties']['entries']['policy_rule'].extend(rules)
    stack_name = get_random_name(stack_name)
    svc_hs_obj = self.config_heat_obj(stack_name, template, env)
    if self.heat_api_version != 2:
        return
    # Pull the created policy's id/fq-name out of the stack outputs.
    op = svc_hs_obj.heat_client_obj.stacks.get(stack_name).outputs
    for output in op:
        if output['output_key'] == 'policy_id':
            policy_id = output['output_value']  # currently unused downstream
        if output['output_key'] == 'policy_fqname':
            policy_fqname = output['output_value']
    policy_fqname = ':'.join(policy_fqname)
    # Hack, policy association doesn't work through heat, rewrite after bug fix
    heat_objs[0].policys = getattr(heat_objs[0], 'policys', [])
    heat_objs[1].policys = getattr(heat_objs[1], 'policys', [])
    heat_objs[0].policys.append(policy_fqname.split(':'))
    heat_objs[1].policys.append(policy_fqname.split(':'))
    vn_list[1].bind_policies(heat_objs[0].policys, vn_list[1].uuid)
    vn_list[2].bind_policies(heat_objs[1].policys, vn_list[2].uuid)
    svc_hs_obj.addCleanup(vn_list[1].unbind_policies, vn_list[1].uuid, [policy_fqname.split(':')])
    svc_hs_obj.addCleanup(vn_list[2].unbind_policies, vn_list[2].uuid, [policy_fqname.split(':')])
    return svc_hs_obj
示例4: setup_namespaces_pods_for_fabric_restart
def setup_namespaces_pods_for_fabric_restart(self, isolation=False,ip_fabric_forwarding=False):
    """ common routine to create the namespaces and the pods by enabling fabric forwarding
        1.create 2 namespaces (ns1, ns2) with the given isolation /
          ip-fabric-forwarding flags
        2.create pods and verify (ns1: pod1, pod2; ns2: pod1; default namespace: pod1)

    Returns a 3-tuple of client lists:
        ([ns1 pod1, ns1 pod2, ns1 fixture],
         [ns2 pod1, ns2 fixture],
         [default-namespace pod])
    """
    namespace1_name = get_random_name("ns1")
    namespace2_name = get_random_name("ns2")
    namespace1 = self.setup_namespace(name = namespace1_name, isolation = isolation,
                                      ip_fabric_forwarding = ip_fabric_forwarding)
    namespace2 = self.setup_namespace(name = namespace2_name, isolation = isolation,
                                      ip_fabric_forwarding = ip_fabric_forwarding)
    assert namespace1.verify_on_setup()
    assert namespace2.verify_on_setup()
    label = "fabric"
    #create a pod in default namespaces
    pod1_in_default_ns = self.setup_ubuntuapp_pod()
    #create a two pods in fabric forwarding enabled namespace
    pod1_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name,
                                           labels={'app': label})
    pod2_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name,
                                           labels={'app': label})
    #create a pod in fabric and ip fabric enabled namespace
    pod1_in_ns2 = self.setup_ubuntuapp_pod(namespace=namespace2_name,
                                           labels={'app': label})
    assert pod1_in_default_ns.verify_on_setup()
    assert pod1_in_ns1.verify_on_setup()
    assert pod2_in_ns1.verify_on_setup()
    assert pod1_in_ns2.verify_on_setup()
    # Group the fixtures per namespace for the caller's traffic checks.
    client1 = [pod1_in_ns1, pod2_in_ns1, namespace1]
    client2 = [pod1_in_ns2, namespace2]
    client3 = [pod1_in_default_ns]
    return (client1, client2, client3)
示例5: config_svc_instance
def config_svc_instance(self, stack_name, st_fix, vn_list, max_inst=1):
    """Create a service instance via heat for the given service template.

    :param stack_name: base name for the stack / SI names (randomized)
    :param st_fix: service template fixture; drives template selection
    :param vn_list: [left_vn, right_vn] fixtures
    :param max_inst: maximum SI instances (ignored for port-tuple SIs)
    :return: tuple (si_fixture, heat_stack_object)
    """
    # Pick the heat template variant from the template's interface list,
    # port-tuple usage, heat API version and address family.
    res_name = 'svc_inst'
    if st_fix.if_list == ['left', 'right']:
        res_name += '_nomgmt'
    if self.pt_based_svc:
        res_name += '_pt'
    if self.heat_api_version == 2:
        if self.inputs.get_af() == 'v6':
            res_name += '_dual'
        res_name += '_v2'
    template = self.get_template(res_name)
    env = self.get_env(res_name)
    env['parameters']['service_template_fq_name'] = ':'.join(st_fix.st_fq_name)
    env['parameters']['service_instance_name'] = get_random_name(stack_name)
    # Only some template variants expose an svm_name parameter.
    if env['parameters'].get('svm_name', None):
        env['parameters']['svm_name'] = get_random_name(stack_name)
    if st_fix.svc_mode != 'transparent':
        env['parameters']['right_net_id'] = vn_list[1].vn_fq_name
        env['parameters']['left_net_id'] = vn_list[0].vn_fq_name
    else:
        # Transparent mode attaches no VNs; note this also overrides the
        # service_instance_name set above with a fresh 'svc_inst' name.
        env['parameters']['right_net_id'] = ''
        env['parameters']['left_net_id'] = ''
        env['parameters'][
            'service_instance_name'] = get_random_name('svc_inst')
    if not self.pt_based_svc:
        env['parameters']['max_instances'] = max_inst
    si_hs_obj = self.config_heat_obj(stack_name, template, env)
    si_name = env['parameters']['service_instance_name']
    si_fix = self.verify_si(si_hs_obj.heat_client_obj, stack_name, si_name, st_fix, max_inst, st_fix.svc_mode, st_fix.image_name)
    return si_fix, si_hs_obj
示例6: __init__
def __init__(self, inputs, project_name=None, input_file=None, logger=None,
             username=None, password=None, domain_name=None):
    """Resolve domain/project/user credentials from the isolation flags
    carried by ``inputs``, falling back to the stack defaults."""
    self.inputs = inputs
    self.input_file = input_file
    self.logger = logger
    # Domain: randomize only when domain isolation is requested.
    if inputs.domain_isolation:
        self.domain_name = get_random_name(domain_name)
    else:
        self.domain_name = domain_name
    # Project: randomize under tenant isolation, else use the given name
    # or the configured stack tenant.
    if inputs.tenant_isolation:
        self.project_name = get_random_name(project_name)
    else:
        self.project_name = project_name or inputs.stack_tenant
    # Credentials: isolated users are named after their project.
    if inputs.tenant_isolation and inputs.user_isolation:
        self.username = self.project_name
        self.password = self.project_name
    else:
        self.username = username or inputs.stack_user
        self.password = password or inputs.stack_password
    # vCenter always uses the stack credentials/tenant.
    if self.inputs.orchestrator == 'vcenter':
        self.project_name = self.inputs.stack_tenant
        self.username = self.inputs.stack_user
        self.password = self.inputs.stack_password
        if self.inputs.vcenter_gw_setup:  # Fixing tenant as vCenter for vcenter gw setup
            self.project_name = 'vCenter'
            self.username = self.inputs.stack_user
            self.password = self.inputs.stack_password
示例7: test_perms_with_same_user_in_diff_projects
def test_perms_with_same_user_in_diff_projects(self):
    ''' Test user roles across projects in the same domain
        1) Create project1 and project2
        2) Create and attach user1 to project1 with admin role and as _member_ role to project2
        3) Create VN1 under project1
        4) Reading VN1 using project2 credentials should not be allowed'''
    username1 = get_random_name('TestUser-1')
    password1 = get_random_name('TestUser-1')
    project_name1 = get_random_name('TestProject-1')
    project_name2 = get_random_name('TestProject-2')
    domain_name = self.connections.domain_name
    # Both projects live in the same domain; the same user/password is
    # registered for connections to each.
    project_fix1 = self.create_project(
        domain_name,project_name1,username1,password1)
    project_fix2 = self.create_project(
        domain_name,project_name2,username1,password1)
    self.admin_connections.auth.create_user(user=username1, password=password1,
                                            tenant_name=project_name1, domain_name=domain_name)
    # admin on the domain and project1, but only _member_ on project2.
    self.admin_connections.auth.add_user_to_domain(username1,'admin',domain_name)
    self.admin_connections.auth.add_user_to_project(username1,project_name1,'admin')
    self.admin_connections.auth.add_user_to_project(username1,project_name2,'_member_')
    proj_conn1 = project_fix1.get_project_connections()
    proj_conn2 = project_fix2.get_project_connections()
    vn1_fixture = self.useFixture(
        VNFixture(
            project_name=project_fix1.project_name,
            connections=proj_conn1,
            vn_name='p1-vn1',
            subnets=['10.2.2.0/24']))
    # project2's connections must NOT be able to read project1's VN.
    assert not self.read_vn(proj_conn2,vn1_fixture.uuid)
示例8: start_tcpdump
def start_tcpdump(self, session, tap_intf, vlan=None, vm_fixtures=None, pcap_on_vm=False, no_header=False):
    """Start tcpdump to capture mirrored packets.

    If ``pcap_on_vm`` is False, tcpdump runs over ``session`` on
    ``tap_intf`` and the pcap file path is returned. Otherwise tcpdump
    is started as a daemon on each VM in ``vm_fixtures`` and a list of
    (vm_fixture, pcap_path, pidfile) tuples is returned.

    :param vlan: when set, use a packet-size filter instead of the UDP
        port filter (mirrored traffic is encapsulated)
    :param vm_fixtures: VMs to capture on when pcap_on_vm is True
    :param no_header: skip the default 'udp port 8099' capture filter
    """
    # Bug fix: the default for vm_fixtures was a mutable list ([]),
    # which is shared across calls in Python; use a None sentinel.
    vm_fixtures = vm_fixtures if vm_fixtures is not None else []
    filt_str = ''
    if not no_header:
        filt_str = 'udp port 8099'
    if not pcap_on_vm:
        pcap = '/tmp/mirror-%s_%s.pcap' % (tap_intf, get_random_name())
        # Remove any stale capture file before starting.
        cmd = 'rm -f %s' % pcap
        execute_cmd(session, cmd, self.logger)
        assert check_pcap_file_exists(session, pcap, expect=False),'pcap file still exists'
        if vlan:
            filt_str = 'greater 1200'
        cmd = "sudo tcpdump -ni %s -U %s -w %s" % (tap_intf, filt_str, pcap)
        self.logger.info("Starting tcpdump to capture the mirrored packets.")
        execute_cmd(session, cmd, self.logger)
        assert check_pcap_file_exists(session, pcap),'pcap file does not exist'
        return pcap
    else:
        pcap = '/tmp/%s.pcap' % (get_random_name())
        cmd_to_tcpdump = [ 'tcpdump -ni %s %s -w %s 1>/dev/null 2>/dev/null' % (tap_intf, filt_str, pcap) ]
        pidfile = pcap + '.pid'
        vm_fix_pcap_pid_files = []
        for vm_fixture in vm_fixtures:
            vm_fixture.run_cmd_on_vm(cmds=cmd_to_tcpdump, as_daemon=True, pidfile=pidfile, as_sudo=True)
            vm_fix_pcap_pid_files.append((vm_fixture, pcap, pidfile))
        return vm_fix_pcap_pid_files
示例9: test_deployment_with_replica_update_for_fabric_fwd
def test_deployment_with_replica_update_for_fabric_fwd(self):
    '''
    Verifies fabric forwarding is enabled through a deployment object
    1.Create a deployment with n replicas with ip fabric forwarding enabled
    2.verify the replicas are able to reach the public network
    3.update the pod replicas
    4.should be able to reach public network from each pod
    '''
    labels = {'app': 'test'}
    # Start with one replica per compute node, then scale to two.
    replicas = len(self.inputs.compute_ips)*1
    new_replicas = len(self.inputs.compute_ips)*2
    namespace1_name = get_random_name("ns1")
    namespace1 = self.setup_namespace(name=namespace1_name, isolation=True,
                                      ip_fabric_forwarding=True)
    assert namespace1.verify_on_setup()
    metadata = {}
    spec = {}
    name = get_random_name('ubuntu-dep')
    template_metadata = {}
    template_metadata['labels'] = labels
    # Long-sleeping ubuntu container so the pods stay up for pings.
    template_spec = {
        'containers': [
            {'image': 'ubuntu-upstart',
             "name": "c1",
             'command': ['sleep', '1000000'],
             'image_pull_policy': 'IfNotPresent',
             }
        ]
    }
    spec.update({
        'replicas': replicas,
        'template': {
            'metadata': template_metadata,
            'spec': template_spec
        }
    })
    dep_1 = self.setup_deployment(name=name, namespace=namespace1_name,
                                  metadata=metadata, spec=spec)
    assert dep_1.verify_on_setup()
    for x in dep_1.get_pods_list():
        s_pod_fixture = self.setup_ubuntuapp_pod(name=x.metadata.name,
                                                 namespace=namespace1_name)
        # Bug fix: the result of verify_on_setup() was silently dropped
        # here (the identical loop below asserts it); assert it now.
        assert s_pod_fixture.verify_on_setup()
        assert s_pod_fixture.ping_to_ip(self.ip_to_ping)
    # Scale the deployment and re-verify every pod can reach the fabric.
    dep_1.set_replicas(new_replicas)
    assert dep_1.verify_on_setup()
    for x in dep_1.get_pods_list():
        s_pod_fixture = self.setup_ubuntuapp_pod(name=x.metadata.name,
                                                 namespace=namespace1_name)
        assert s_pod_fixture.verify_on_setup()
        assert s_pod_fixture.ping_to_ip(self.ip_to_ping)
示例10: setup_common_namespaces_pods
def setup_common_namespaces_pods(self, prov_service = False):
    """Create two namespaces (ns2 custom-isolated on VN 'TestVNNamespace'),
    nginx/busybox pods in each — some attached to the custom pod VN
    'TestVNPod' — and optionally an HTTP service per namespace.

    :param prov_service: when True, also create one http service per namespace
    :return: (client1, client2) where each list is
        [nginx1, nginx2, busybox, service_or_None, namespace, custom-VN pods...]
    """
    service_ns1 = None
    service_ns2 = None
    # VN used for namespace-level custom isolation (ns2).
    vn_for_namespace = self.setup_vn(vn_name = "TestVNNamespace")
    vn_dict_for_namespace = {"domain": vn_for_namespace.domain_name,
                             "project" : vn_for_namespace.project_name[0],
                             "name": vn_for_namespace.vn_name}
    # VN used for pod-level custom isolation.
    vn_for_pod = self.setup_vn(vn_name = "TestVNPod")
    vn_dict_for_pod = {"domain": vn_for_pod.domain_name,
                       "project" : vn_for_pod.project_name[0],
                       "name": vn_for_pod.vn_name}
    namespace1_name = get_random_name("ns1")
    namespace2_name = get_random_name("ns2")
    namespace1 = self.setup_namespace(name = namespace1_name)
    namespace2 = self.setup_namespace(name = namespace2_name, custom_isolation = True,
                                      fq_network_name= vn_dict_for_namespace)
    assert namespace1.verify_on_setup()
    assert namespace2.verify_on_setup()
    ns_1_label = "namespace1"
    ns_2_label = "namespace2"
    client1_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                       labels={'app': ns_1_label})
    client2_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                       labels={'app': ns_1_label})
    client3_ns1 = self.setup_busybox_pod(namespace=namespace1_name)
    # Two ns1 busybox pods placed directly on the custom pod VN.
    client4_ns1 = self.setup_busybox_pod(namespace=namespace1_name,
                                         custom_isolation = True,
                                         fq_network_name= vn_dict_for_pod)
    client5_ns1 = self.setup_busybox_pod(namespace=namespace1_name,
                                         custom_isolation = True,
                                         fq_network_name= vn_dict_for_pod)
    client1_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                       labels={'app': ns_2_label})
    client2_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                       labels={'app': ns_2_label})
    client3_ns2 = self.setup_busybox_pod(namespace=namespace2_name)
    client4_ns2 = self.setup_busybox_pod(namespace=namespace2_name,
                                         custom_isolation = True,
                                         fq_network_name= vn_dict_for_pod)
    assert self.verify_nginx_pod(client1_ns1)
    assert self.verify_nginx_pod(client2_ns1)
    assert client3_ns1.verify_on_setup()
    assert client4_ns1.verify_on_setup()
    assert client5_ns1.verify_on_setup()
    assert self.verify_nginx_pod(client1_ns2)
    assert self.verify_nginx_pod(client2_ns2)
    assert client3_ns2.verify_on_setup()
    assert client4_ns2.verify_on_setup()
    if prov_service == True:
        service_ns1 = self.setup_http_service(namespace=namespace1.name,
                                              labels={'app': ns_1_label})
        service_ns2 = self.setup_http_service(namespace=namespace2.name,
                                              labels={'app': ns_2_label})
    client1 = [client1_ns1, client2_ns1, client3_ns1, service_ns1,\
               namespace1, client4_ns1, client5_ns1]
    client2 = [client1_ns2, client2_ns2, client3_ns2, service_ns2,\
               namespace2, client4_ns2]
    return (client1, client2)
示例11: test_remove_policy_with_ref
def test_remove_policy_with_ref(self):
    ''' This tests the following scenarios.
       1. Test to validate that policy removal will fail when it referenced with VN.
       2. validate vn_policy data in api-s against quantum-vn data, when created and unbind policy from VN thru quantum APIs.
       3. validate policy data in api-s against quantum-policy data, when created and deleted thru quantum APIs.
    '''
    vn1_name = get_random_name('vn4')
    vn1_subnets = ['10.1.1.0/24']
    policy_name = get_random_name('policy1')
    # Single allow-all ICMP rule within the VN.
    rules = [
        {
            'direction': '<>', 'simple_action': 'pass',
            'protocol': 'icmp',
            'source_network': vn1_name,
            'dest_network': vn1_name,
        },
    ]
    policy_fixture = self.useFixture(
        PolicyFixture(
            policy_name=policy_name,
            rules_list=rules,
            inputs=self.inputs,
            connections=self.connections))
    # The VN is created with the policy attached, establishing the ref.
    vn1_fixture = self.useFixture(
        VNFixture(
            project_name=self.inputs.project_name,
            connections=self.connections,
            vn_name=vn1_name,
            inputs=self.inputs,
            subnets=vn1_subnets,
            policy_objs=[
                policy_fixture.policy_obj]))
    assert vn1_fixture.verify_on_setup()
    ret = policy_fixture.verify_on_setup()
    if ret['result'] == False:
        self.logger.error(
            "Policy %s verification failed after setup" % policy_name)
    assert ret['result'], ret['msg']
    self.logger.info(
        "Done with setup and verification, moving onto test ..")
    # try to remove policy which was referenced with VN.
    # Deletion goes through quantum when available, else the VNC API;
    # either way it is expected to fail because the VN still refers to it.
    policy_removal = True
    pol_id = None
    if self.quantum_h:
        policy_removal = self.quantum_h.delete_policy(policy_fixture.get_id())
    else:
        try:
            self.vnc_lib.network_policy_delete(id=policy_fixture.get_id())
        except Exception as e:
            policy_removal = False
    self.assertFalse(
        policy_removal,
        'Policy removal succeed as not expected since policy is referenced with VN')
    #assert vn1_fixture.verify_on_setup()
    # policy_fixture.verify_policy_in_api_server()
    return True
示例12: config_fip_pool
def config_fip_pool(self, vn):
    """Bring up a floating-IP-pool heat stack on the given VN and
    return the heat stack object."""
    stack = get_random_name('fip_pool')
    template = self.get_template('fip_pool')
    env = self.get_env('fip_pool')
    params = env['parameters']
    # Randomize the pool name from the template default, then point the
    # stack at the target VN.
    params['floating_pool'] = get_random_name(params['floating_pool'])
    params['vn'] = vn.get_vn_fq_name()
    return self.config_heat_obj(stack, template, env)
示例13: config_svc_mirroring
def config_svc_mirroring(self, service_mode='transparent', *args, **kwargs):
    """Validate the service chaining datapath
    Test steps:
        1. Create the SI/ST in svc_mode specified.
        2. Create vn11/vm1, vn21/vm2
        3. Create the policy rule for ICMP/UDP and attach to vn's
        4. Send the traffic from vm1 to vm2 and verify if the packets gets mirrored to the analyzer
        5. If its a single analyzer only ICMP(5 pkts) will be sent else ICMP and UDP traffic will be sent.
    Pass criteria :
        count = sent
        single node : Pkts mirrored to the analyzer should be equal to 'count'
        multinode :Pkts mirrored to the analyzer should be equal to '2xcount'

    :return: dict from verify_svc_chain(), with 'policy_fixture' updated
        to carry the mirror rules.
    """
    # Cleanup: removed unused locals (ci, st_name, action_list, si_prefix,
    # policy_name) that were assigned but never read.
    create_svms = kwargs.get('create_svms', True)
    # Two VNs (left/right) with random subnets in the configured AF.
    vn1_subnets = [get_random_cidr(af=self.inputs.get_af())]
    vn2_subnets = [get_random_cidr(af=self.inputs.get_af())]
    vn1_name = get_random_name('left')
    vn2_name = get_random_name('right')
    service_type = 'analyzer'
    vn1_fixture = self.config_vn(vn1_name, vn1_subnets)
    vn2_fixture = self.config_vn(vn2_name, vn2_subnets)
    ret_dict = self.verify_svc_chain(service_mode=service_mode,
                                     service_type=service_type,
                                     left_vn_fixture=vn1_fixture,
                                     right_vn_fixture=vn2_fixture,
                                     create_svms=create_svms, **kwargs)
    si_fixture = ret_dict['si_fixture']
    policy_fixture = ret_dict['policy_fixture']
    si_fq_name = si_fixture.fq_name_str
    # Mirror ICMP and ICMPv6 between the two VNs to the analyzer SI.
    rules = [{'direction': '<>',
              'protocol': 'icmp',
              'source_network': vn1_fixture.vn_fq_name,
              'src_ports': [0, 65535],
              'dest_network': vn2_fixture.vn_fq_name,
              'dst_ports': [0, 65535],
              'action_list': {'simple_action': 'pass',
                              'mirror_to': {'analyzer_name': si_fq_name}}
              },
             {'direction': '<>',
              'protocol': 'icmp6',
              'source_network': vn1_fixture.vn_fq_name,
              'src_ports': [0, 65535],
              'dest_network': vn2_fixture.vn_fq_name,
              'dst_ports': [0, 65535],
              'action_list': {'simple_action': 'pass',
                              'mirror_to': {'analyzer_name': si_fq_name}}
              }]
    policy_fixture.update_policy_api(rules)
    ret_dict['policy_fixture'] = policy_fixture
    return ret_dict
示例14: setup_common_namespaces_pods
def setup_common_namespaces_pods(self, prov_service = False,
                                 prov_ingress = False,
                                 isolation = False):
    """Create two namespaces (ns2 optionally isolated), nginx/busybox
    pods in each, and optionally per-namespace HTTP services and nginx
    ingresses, after toggling the cluster-project setting.

    :param prov_service: also create one http service per namespace
    :param prov_ingress: also create an nginx ingress per namespace
        (the ns2 service then skips the LoadBalancer type)
    :param isolation: namespace isolation flag for ns2
    :return: (client1, client2), each
        [nginx1, nginx2, busybox, service_or_None, namespace, ingress_or_None]
    """
    # modify_cluster_project() flips the cluster-project config and tells
    # us whether it was "reset" (project isolation expected) or set.
    operation = self.modify_cluster_project()
    service_ns1, ingress_ns1 = None, None
    service_ns2, ingress_ns2 = None, None
    namespace1_name = get_random_name("ns1")
    namespace2_name = get_random_name("ns2")
    namespace1 = self.setup_namespace(name = namespace1_name)
    namespace2 = self.setup_namespace(name = namespace2_name, isolation = isolation)
    assert namespace1.verify_on_setup()
    assert namespace2.verify_on_setup()
    # Project isolation must match the cluster-project operation above.
    if operation=="reset":
        assert namespace1.project_isolation
        assert namespace2.project_isolation
    else:
        assert (namespace1.project_isolation == False)
        assert (namespace2.project_isolation == False)
    ns_1_label = "namespace1"
    ns_2_label = "namespace2"
    client1_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                       labels={'app': ns_1_label})
    client2_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                       labels={'app': ns_1_label})
    client3_ns1 = self.setup_busybox_pod(namespace=namespace1_name)
    client1_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                       labels={'app': ns_2_label})
    client2_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                       labels={'app': ns_2_label})
    client3_ns2 = self.setup_busybox_pod(namespace=namespace2_name)
    assert self.verify_nginx_pod(client1_ns1)
    assert self.verify_nginx_pod(client2_ns1)
    assert client3_ns1.verify_on_setup()
    assert self.verify_nginx_pod(client1_ns2)
    assert self.verify_nginx_pod(client2_ns2)
    assert client3_ns2.verify_on_setup()
    if prov_service == True:
        service_ns1 = self.setup_http_service(namespace=namespace1.name,
                                              labels={'app': ns_1_label})
        # Ingress fronting makes the LoadBalancer type unnecessary.
        type = "LoadBalancer" if prov_ingress == False else None
        service_ns2 = self.setup_http_service(namespace=namespace2.name,
                                              labels={'app': ns_2_label},
                                              type=type)
    if prov_ingress == True:
        ingress_ns1 = self.setup_simple_nginx_ingress(service_ns1.name,
                                                      namespace=namespace1.name)
        ingress_ns2 = self.setup_simple_nginx_ingress(service_ns2.name,
                                                      namespace=namespace2.name)
        assert ingress_ns1.verify_on_setup()
        assert ingress_ns2.verify_on_setup()
    client1 = [client1_ns1, client2_ns1, client3_ns1, service_ns1,\
               namespace1, ingress_ns1]
    client2 = [client1_ns2, client2_ns2, client3_ns2, service_ns2,\
               namespace2, ingress_ns2]
    return (client1, client2)
示例15: config_sec_groups
def config_sec_groups(self):
    """Create two security groups — one allowing TCP, one allowing UDP —
    each with symmetric rules covering 10.1.1.0/24 and 20.1.1.0/24 on
    all ports, and store the fixtures on self (sg1_*/sg2_*)."""
    # The TCP and UDP rule sets were identical except for the protocol;
    # build them via a shared helper instead of duplicating the literals.
    self.sg1_name = "test_tcp_sec_group" + "_" + get_random_name()
    self.sg1_fix = self.config_sec_group(name=self.sg1_name,
                                         entries=self._build_symmetric_rules("tcp"))
    self.sg2_name = "test_udp_sec_group" + "_" + get_random_name()
    self.sg2_fix = self.config_sec_group(name=self.sg2_name,
                                         entries=self._build_symmetric_rules("udp"))

def _build_symmetric_rules(self, protocol):
    """Return a pair of bidirectional rules for *protocol*: one with the
    test subnets as destinations, one with them as sources, both with
    the local security group on the other side and all ports open."""
    def subnets():
        # Fresh objects per rule, mirroring the original literals.
        return [
            {"subnet": {"ip_prefix": "10.1.1.0", "ip_prefix_len": 24}},
            {"subnet": {"ip_prefix": "20.1.1.0", "ip_prefix_len": 24}},
        ]
    def all_ports():
        return [{"start_port": 0, "end_port": -1}]
    return [
        {
            "direction": "<>",
            "protocol": protocol,
            "dst_addresses": subnets(),
            "dst_ports": all_ports(),
            "src_ports": all_ports(),
            "src_addresses": [{"security_group": "local"}],
        },
        {
            "direction": "<>",
            "protocol": protocol,
            "src_addresses": subnets(),
            "src_ports": all_ports(),
            "dst_ports": all_ports(),
            "dst_addresses": [{"security_group": "local"}],
        },
    ]