

Python test_utils.assertEqual Function Code Examples

This article collects typical usage examples of the Python function tcutils.test_lib.test_utils.assertEqual. If you are wondering what assertEqual does, how to use it, or want examples of it in action, the curated code samples below may help.


The following presents 15 code examples of the assertEqual function, sorted by popularity by default.
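For reference, assertEqual here is a standalone helper in tcutils.test_lib.test_utils rather than the unittest.TestCase method. Below is a minimal sketch of what such a helper plausibly looks like, shown together with the {'result': ..., 'msg': ..., 'data': ...} dict convention that the setup fixtures in these examples return; the helper body is an assumption and the real contrail-test implementation may differ:

    def assertEqual(output, expected_output, errmsg):
        # Hypothetical sketch: raise AssertionError carrying errmsg when the
        # observed value differs from the expected one.
        assert output == expected_output, errmsg

    # Typical call pattern seen throughout the examples below:
    out = {'result': True, 'msg': '', 'data': None}  # placeholder setup result
    assertEqual(out['result'], True, out['msg'])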

Example 1: test_policy

 def test_policy(self):
     """ Configure policies based on topology and run policy related verifications.
     """
     result = True
     #
     # Get config for test from topology
     topology_class_name = sdn_basic_topology.sdn_basic_config
     self.logger.info("Scenario for the test used is: %s" % (topology_class_name))
     # set project name
     try:
         # provided by wrapper module if run in parallel test env
         topo = topology_class_name(
             project=self.project.project_name,
             username=self.project.project_username,
             password=self.project.project_user_password,
         )
     except NameError:
         topo = topology_class_name()
     #
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(sdnTopoSetupFixture(self.connections, topo))
     out = setup_obj.topo_setup()
     assertEqual(out["result"], True, out["msg"])
     if out["result"]:
         topo, config_topo = out["data"]
     #
     # Verify [and assert on fail] after setup
     # Calling system policy verification, pick any policy fixture to
     # access fixture verification
     policy_name = topo.policy_list[0]
     system_vna_verify_policy(self, config_topo["policy"][policy_name], topo, "setup")
     return True
Developer: Juniper, Project: contrail-test-ci, Lines: 35, Source: test_policy_basic.py

Example 2: test_flow_single_project

    def test_flow_single_project(self):
        """Tests related to flow setup rate and flow table stability accross various triggers for verification
           accross VN's within a single project"""
        result = True
        #self.agent_objs = {}
        #self.set_flow_tear_time()
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        topology_class_name = flow_test_topo.systest_topo_single_project
        # mini topo for testing script
        # topology_class_name = mini_flow_test_topo.systest_topo_single_project
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))

        topo = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data'][0], out['data'][1]
        proj = list(topo.keys())[0]

        # Get the vrouter build version for logging purposes.
        BuildTag = get_OS_Release_BuildVersion(self)

        # Create traffic profile with all details like IP addresses, port
        # numbers and no of flows, from the profile defined in the topology.
        traffic_profiles = self.create_traffic_profiles(
            topo[proj],
            config_topo)

        self.topo, self.config_topo = topo, config_topo
        for each_profile in traffic_profiles:
            result = self.generate_udp_flows_and_do_verification(
                traffic_profiles[each_profile], str(BuildTag))
            # verify_system_parameters(self, out)
            self.delete_agent_flows()
            if not result:
                return False

        return True
Developer: alokkumar223, Project: contrail-test, Lines: 60, Source: test_system_flows.py

Example 3: all_policy_verify

def all_policy_verify(
        self,
        config_topo,
        topo,
        state='unspecified',
        fixture_only='no'):
    '''Call all policy-related verifications.
    Useful for debugging failures; call this on failure.
    Verifies and asserts on failure.'''
    self.logger.debug("Starting Verifications after %s" % (state))
    # calling policy fixture verifications
    for policy_name, policy_fixt in config_topo['policy'].items():
        ret = policy_fixt.verify_on_setup()
        assertEqual(ret['result'], True, ret['msg'])
    # calling vn-policy verification
    for vn_name, vn_fixt in config_topo['vn'].items():
        ret = vn_fixt.verify_vn_policy_in_api_server()
        assertEqual(ret['result'], True, ret['msg'])
    if fixture_only == 'no':
        # This is not a fixture-only verification; it requires the runtime
        # [config_topo] & user-defined [topo] topologies to be in sync to verify
        # calling vna-acl verification
        # pick any policy configured
        policy_fixt = config_topo['policy'][str(topo.policy_list[0])]
        system_vna_verify_policy(self, policy_fixt, topo, state)
Developer: Ankitja, Project: contrail-test, Lines: 25, Source: system_verification.py

Example 4: test_flow_scaling_interNode_interVN

    def test_flow_scaling_interNode_interVN(self):
        """Basic systest with single project with many features & traffic..
        """
        result = False
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        topology_class_name = flow_scale_topo.FlowScaleTopology
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))

        try:
            topo = topology_class_name(
                compute_node_list=self.inputs.compute_ips,
                project=self.project.project_name,
                username=self.project.username,
                password=self.project.password)
        except NameError:
            topo = topology_class_name(
                compute_node_list=self.inputs.compute_ips)

        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup(VmToNodeMapping=topo.vm_node_map)
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data']

        self.traffic_scenarios = self.build_traffic_scenarios(
                                     topo,
                                     config_topo)

        src_vm_obj = self.traffic_scenarios['1to2'][0]
        vn_fq_name = config_topo['vn']['vnet1'].vn_fq_name
        src_vm_intf_id = src_vm_obj.cs_vmi_obj[
            vn_fq_name]['virtual-machine-interface']['uuid']
        src_vm_obj.provision_static_route(
            prefix='111.1.0.0/16',
            virtual_machine_interface_id=src_vm_intf_id)

        result = self.create_scaled_flows()
        self.delete_agent_flows()
        return result
Developer: Ankitja, Project: contrail-test, Lines: 59, Source: test_flow_scaling.py

Example 5: test_repeated_policy_modify

 def test_repeated_policy_modify(self):
     """ Configure policies based on topology; Replace VN's existing policy [same policy name but with different rule set] multiple times and verify.
     """
     ###
     # Get config for test from topology
     # very simple topo will do, one vn, one vm, multiple policies with n
     # rules
     topology_class_name = sdn_single_vm_multiple_policy_topology.sdn_single_vm_multiple_policy_config
     self.logger.info("Scenario for the test used is: %s" % (topology_class_name))
     # set project name
     try:
         # provided by wrapper module if run in parallel test env
         topo = topology_class_name(
             project=self.project.project_name, username=self.project.username, password=self.project.password
         )
     except NameError:
         topo = topology_class_name()
     ###
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(sdnTopoSetupFixture(self.connections, topo))
     out = setup_obj.topo_setup()
     assertEqual(out["result"], True, out["msg"])
     if out["result"]:
         topo, config_topo = out["data"]
     ###
     # Verify [and assert on fail] after setup
     # Calling system policy verification, pick any policy fixture to
     # access fixture verification
     policy_name = topo.policy_list[0]
     system_vna_verify_policy(self, config_topo["policy"][policy_name], topo, "setup")
     ###
     # Test procedure:
     # Test repeated update of a policy attached to a VM
     test_vm = topo.vmc_list[0]
     test_vn = topo.vn_of_vm[test_vm]
     test_vn_fix = config_topo["vn"][test_vn]
     test_vn_id = test_vn_fix.vn_id
     for policy in topo.policy_list:
         # set new policy for test_vn to policy
         test_policy_fq_names = []
         name = config_topo["policy"][policy].policy_fq_name
         test_policy_fq_names.append(name)
         state = "policy for %s updated to %s" % (test_vn, policy)
         test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
         # wait for tables update before checking after making changes to
         # system
         time.sleep(5)
         self.logger.info("new policy list of vn %s is %s" % (test_vn, policy))
         # update expected topology with this new info for verification
         updated_topo = policy_test_utils.update_topo(topo, test_vn, policy)
         system_vna_verify_policy(self, config_topo["policy"][policy], updated_topo, state)
     return True
Developer: Juniper, Project: contrail-test-ci, Lines: 55, Source: test_policy_basic.py

Example 6: config_topo_single_proj

def config_topo_single_proj(class_instance, topology_class_name):
    """Initialize and Setup configurations for single project related flow
       system tests.
    """
    #self.agent_objs = {}
    #self.set_flow_tear_time()
    #
    # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
    # else report that a minimum of 2 compute nodes is needed for this test and
    # exit.
    #if len(self.inputs.compute_ips) < 2:
    if len(class_instance.inputs.compute_ips) < 2:
        class_instance.logger.warn(
            "Minimum 2 compute nodes are needed for this test to run")
        class_instance.logger.warn(
            "Exiting since this test can't be run on single compute node")
        return True
    #
    # Get config for test from topology
    #topology_class_name = system_test_topo.systest_topo_single_project
    # For testing script, use mini topology
    # topology_class_name =
    # mini_system_test_topo.systest_topo_single_project
    class_instance.logger.info(
        "Scenario for the test used is: %s" %
        (str(topology_class_name)))

    topo = topology_class_name(
        compute_node_list=class_instance.inputs.compute_ips)
    #
    # Test setup: Configure policy, VN, & VM
    # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
    # Returned topo is of following format:
    # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
    # vm_fixture}
    setup_obj = class_instance.useFixture(
        sdnTopoSetupFixture(class_instance.connections, topo))
    out = setup_obj.sdn_topo_setup()
    assertEqual(out['result'], True, out['msg'])
    if out['result']:
        topo, config_topo = out['data'][0], out['data'][1]
    proj = list(topo.keys())[0]

    # Get the vrouter build version for logging purposes.
    class_instance.BuildTag = get_OS_Release_BuildVersion(class_instance)

    # Create traffic profile with all details like IP addresses, port
    # numbers and no of flows, from the profile defined in the topology.
    class_instance.traffic_profiles = create_traffic_profiles(
        topo[proj],
        config_topo)

    class_instance.topo, class_instance.config_topo = topo, config_topo
Developer: ritamganguly, Project: contrail-test, Lines: 53, Source: system_flows_config.py

Example 7: test_policy

    def test_policy(self):
        """ Configure policies based on topology and run policy related verifications.
        """
        result = True
        #
        # Get config for test from topology
        topology_class_name = sdn_basic_topology.sdn_basic_config
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))
        # set project name
        try:
            # provided by wrapper module if run in parallel test env
            topo = topology_class_name(
                project=self.project.project_name,
                username=self.project.project_username,
                password=self.project.project_user_password)
        except NameError:
            topo = topology_class_name()
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup()
        assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data']
        #
        # Verify [and assert on fail] after setup
        # Calling system policy verification, pick any policy fixture to
        # access fixture verification
        policy_name = topo.policy_list[0]
        system_vna_verify_policy(
            self,
            config_topo['policy'][policy_name],
            topo,
            'setup')

        # Verify ICMP traffic between the two VM's.
        if not config_topo['vm'][topo.vmc_list[0]].ping_with_certainty(
            expectation=True,
            dst_vm_fixture=config_topo['vm'][topo.vmc_list[1]]):
            self.logger.error(
                'Ping from %s to %s failed,expected it to pass' %
                (config_topo['vm'][topo.vmc_list[0]].vm_name,
                 config_topo['vm'][topo.vmc_list[1]].vm_name))
            return False

        return True
Developer: Ankitja, Project: contrail-test, Lines: 52, Source: test_policy_basic.py

Example 8: inventory_tests

def inventory_tests(self, node_name=None):
    if node_name is None:
        self.logger.error("ERROR :: Target node name has to be specified to test inventory information.")
        return False
    self.logger.info("------------INVENTORY TEST FOR NODE %s------------" % node_name)
    local("server-manager-client display inventory --server_id %s > working_db.txt" % node_name)
    fd=open('working_db.txt','r')
    lines=fd.readlines()
    fd.close()
    fd=open('working_db.json','w')
    for i in range(1,len(lines)-1):
        fd.write(lines[i])
    fd.close()
    fd=open('working_db.json','r')
    inventory_data=json.load(fd)
    fd.close()

    node_ip=self.smgr_fixture.get_ip_using_server_id(node_name)
    node_pswd=self.smgr_fixture.get_pswd_using_server_id(node_name)

    #Check for cpu details in inventory.
    with settings(host_string='root@' + node_ip, password=node_pswd, warn_only=True):
        cpu_cores=run('cat /proc/cpuinfo | grep "cpu cores" | head -n 1 |cut -d ":" -f2')
        clock_speed=run('cat /proc/cpuinfo | grep "cpu MHz" | head -n 1 |cut -d ":" -f2')
        model=run('cat /proc/cpuinfo | grep "model name" | head -n 1 |cut -d ":" -f2')

    assertEqual(int(cpu_cores), inventory_data['ServerInventoryInfo']['cpu_cores_count'],
        "cpu_cores_count mismatch for node %s = inventory_data - %s, proc-cpuinfo data - %s" % (node_name,inventory_data['ServerInventoryInfo']['cpu_cores_count'],cpu_cores))
    assertEqual(float(clock_speed), float(inventory_data['ServerInventoryInfo']['cpu_info_state']['clock_speed_MHz']),
        "clock_speed mismatch for node %s = inventory_data - %s, proc-cpuinfo data - %s"
            % (node_name,float(inventory_data['ServerInventoryInfo']['cpu_info_state']['clock_speed_MHz']),float(clock_speed)))
    assertEqual(model, inventory_data['ServerInventoryInfo']['cpu_info_state']['model'],
        "model mismatch for node %s = inventory_data - %s, proc-cpuinfo data - %s"
            % (node_name,inventory_data['ServerInventoryInfo']['cpu_info_state']['model'],model))

    #Check for interface details in inventory; both physical and virtual interfaces should be listed.
    with settings(host_string='root@' + node_ip, password=node_pswd, warn_only=True):
        intf_names=run("ifconfig -a | grep 'Link encap' | awk '{print $1}'")
        intf_list=intf_names.split('\r\n')

    # Filter out interface names containing '-' (deleting entries while
    # indexing over the list's original length can raise IndexError).
    track_intf = [intf for intf in intf_list if '-' not in intf]

    for intf_data in inventory_data['ServerInventoryInfo']['interface_infos']:
        if '_' in intf_data['interface_name']:
            continue
        if intf_data['interface_name'] in track_intf:
            if (intf_data['ip_addr'] and intf_data['ip_addr'] != 'N/A'):
                with settings(host_string='root@' + node_ip, password=node_pswd, warn_only=True):
                    ip_addr=run("ifconfig " + intf_data['interface_name'] + " | grep inet | awk '{print $2}' | cut -d ':' -f 2")
                assertEqual(ip_addr, intf_data['ip_addr'], "ip address mis-match for interface %s on node %s. inventory data - %s, ifconfig data %s"
                    % (intf_data['interface_name'],node_name,intf_data['ip_addr'],ip_addr))

            if (intf_data['macaddress'] and intf_data['macaddress'] != 'N/A'):
                with settings(host_string='root@' + node_ip, password=node_pswd, warn_only=True):
                    mac_addr=run("cat /sys/class/net/" + intf_data['interface_name'] + "/address")
                assertEqual(mac_addr.lower(), intf_data['macaddress'].lower(), "mac address mis-match for interface %s on node %s. inventory data - %s, ifconfig data %s"
                    % (intf_data['interface_name'],node_name,intf_data['macaddress'].lower(),mac_addr.lower()))

            if (intf_data['netmask'] and intf_data['netmask'] != 'N/A'):
                with settings(host_string='root@' + node_ip, password=node_pswd, warn_only=True):
                    mask=run("ifconfig " + intf_data['interface_name'] + " | grep Mask | awk '{print $4}' | cut -d ':' -f 2")
                assertEqual(mask, intf_data['netmask'], "netmask mis-match for interface %s on node %s. inventory data - %s, ifconfig data %s"
                    % (intf_data['interface_name'],node_name,intf_data['netmask'],mask))

        else:
            self.logger.error("ERROR :: Interface not found in inventory but there as part of the system info")
            self.logger.error("ERROR :: Inventory interface information %s" % intf_data)
            self.logger.error("ERROR :: System interface information %s" % track_intf)
            return False

    #Check for memory state and number of disks.
    with settings(host_string='root@' + node_ip, password=node_pswd, warn_only=True):
        dimm_size_mb=run("dmidecode -t 17 | grep Size | head -n 1 | awk '{print $2}'")
        mem_speed_MHz=run("dmidecode -t 17 | grep Speed | head -n 1 | awk '{print $2}'")
        mem_type=run("dmidecode -t 17 | grep Type | head -n 1 | awk '{print $2}'")
        num_of_dimms=run("dmidecode -t 17 | grep 'Memory Device' | wc -l")
        swap_size_mb=run("vmstat -s -S M | grep 'total swap' | awk '{print $1}'")
        total_mem_mb=run("vmstat -s -S M | grep 'total memory' | awk '{print $1}'")

    assertEqual(int(dimm_size_mb), inventory_data['ServerInventoryInfo']['mem_state']['dimm_size_mb'],
        "dimm_size_mb mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['dimm_size_mb'],int(dimm_size_mb)))
    assertEqual(int(mem_speed_MHz), inventory_data['ServerInventoryInfo']['mem_state']['mem_speed_MHz'],
        "mem_speed_MHz mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['mem_speed_MHz'],int(mem_speed_MHz)))
    assertEqual(mem_type, inventory_data['ServerInventoryInfo']['mem_state']['mem_type'],
        "mem_type mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['mem_type'],mem_type))
    assertEqual(int(num_of_dimms), inventory_data['ServerInventoryInfo']['mem_state']['num_of_dimms'],
        "num_of_dimms mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['num_of_dimms'],int(num_of_dimms)))

    if (float(swap_size_mb)*0.98 <= float(inventory_data['ServerInventoryInfo']['mem_state']['swap_size_mb']) <= float(swap_size_mb)*1.02):
        self.logger.info("swap_size_mb matched inventory data.")
    else:
        self.logger.error("swap_size_mb for node %s = inventory_data - %s, vmstat data - %s --- Not in range 98% to 102%"
            % (node_name,float(inventory_data['ServerInventoryInfo']['mem_state']['swap_size_mb']),float(swap_size_mb)))
        return False

    if (float(total_mem_mb)*0.98 <= float(inventory_data['ServerInventoryInfo']['mem_state']['total_mem_mb']) <= float(total_mem_mb)*1.02):
        self.logger.info("total_mem_mb matched inventory data.")
#......... part of the code omitted .........
Developer: Ankitja, Project: contrail-test, Lines: 101, Source: smgr_inventory_monitoring_tests.py

Example 9: test_policy_modify_vn_policy

    def test_policy_modify_vn_policy(self):
        """ Configure policies based on topology;
        """
        ###
        # Get config for test from topology
        # very simple topo will do, one vn, one vm, one policy, 3 rules
        topology_class_name = sdn_single_vm_policy_topology.sdn_single_vm_policy_config

        self.logger.info("Scenario for the test used is: %s" % (topology_class_name))
        # set project name
        try:
            # provided by wrapper module if run in parallel test env
            topo = topology_class_name(
                project=self.project.project_name, username=self.project.username, password=self.project.password
            )
        except NameError:
            topo = topology_class_name()
        ###
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        setup_obj = self.useFixture(sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup()
        assertEqual(out["result"], True, out["msg"])
        if out["result"]:
            topo, config_topo = out["data"]
        ###
        # Verify [and assert on fail] after setup
        # Calling system policy verification, pick any policy fixture to
        # access fixture verification
        policy_name = topo.policy_list[0]
        system_vna_verify_policy(self, config_topo["policy"][policy_name], topo, "setup")
        ###
        # Test procedure:
        # Test adding a new policy to the VN's existing policy list
        state = "add policy: "
        test_vm = topo.vmc_list[0]
        test_vn = topo.vn_of_vm[test_vm]
        # Init test data, take backup of current topology
        initial_vn_policy_list = copy.copy(topo.vn_policy[test_vn])
        new_policy_to_add = policy_test_utils.get_policy_not_in_vn(initial_vn_policy_list, topo.policy_list)
        if not new_policy_to_add:
            result = False
            msg = "Test cannot be run as the required config is not available in the topology; aborting test"
            self.logger.info(msg)
            assertEqual(result, True, msg)
        initial_policy_vn_list = copy.copy(topo.policy_vn[new_policy_to_add])
        new_vn_policy_list = copy.copy(initial_vn_policy_list)
        new_policy_vn_list = copy.copy(initial_policy_vn_list)
        new_vn_policy_list.append(new_policy_to_add)
        new_policy_vn_list.append(test_vn)
        test_vn_fix = config_topo["vn"][test_vn]
        test_vn_id = test_vn_fix.vn_id
        # configure new policy
        config_topo["policy"][new_policy_to_add] = self.useFixture(
            PolicyFixture(
                policy_name=new_policy_to_add,
                rules_list=topo.rules[new_policy_to_add],
                inputs=self.inputs,
                connections=self.connections,
            )
        )
        # get new policy_set to be pushed for the vn
        test_policy_fq_names = []
        for policy in new_vn_policy_list:
            name = config_topo["policy"][policy].policy_fq_name
            test_policy_fq_names.append(name)
        self.logger.info("adding policy %s to vn %s" % (new_policy_to_add, test_vn))
        test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
        # wait for tables update before checking after making changes to system
        time.sleep(5)
        self.logger.info("New policy list of VN %s is %s" % (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        topo.policy_vn[new_policy_to_add] = new_policy_vn_list
        system_vna_verify_policy(self, config_topo["policy"][new_policy_to_add], topo, state)
        # Test unbinding all policies from VN
        state = "unbinding all policies"
        test_vn_fix.unbind_policies(test_vn_id)
        # wait for tables update before checking after making changes to system
        time.sleep(5)
        current_vn_policy_list = new_vn_policy_list
        new_vn_policy_list = []
        self.logger.info("New policy list of VN %s is %s" % (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        for policy in current_vn_policy_list:
            topo.policy_vn[policy].remove(test_vn)
        system_vna_verify_policy(self, config_topo["policy"][new_policy_to_add], topo, state)
        return True
Developer: Juniper, Project: contrail-test-ci, Lines: 91, Source: test_policy_basic.py

Example 10: test_policy_with_multi_vn_in_vm


#......... part of the code omitted .........
     policy2_fixture = self.useFixture(
         PolicyFixture(
             policy_name=policy2_name,
             rules_list=rules2,
             inputs=self.inputs,
             connections=self.connections))
     vn1_fixture = self.useFixture(
         VNFixture(
             project_name=self.inputs.project_name,
             connections=self.connections,
             vn_name=vn1_name,
             inputs=self.inputs,
             subnets=vn1_subnets,
             policy_objs=[
                 policy1_fixture.policy_obj]))
     vn2_fixture = self.useFixture(
         VNFixture(
             project_name=self.inputs.project_name,
             connections=self.connections,
             vn_name=vn2_name,
             inputs=self.inputs,
             subnets=vn2_subnets,
             disable_gateway=True,
             policy_objs=[
                 policy2_fixture.policy_obj]))
     vn3_fixture = self.useFixture(
         VNFixture(
             project_name=self.inputs.project_name,
             connections=self.connections,
             vn_name=vn3_name,
             inputs=self.inputs,
             subnets=vn3_subnets,
             policy_objs=[
                 policy2_fixture.policy_obj]))
     assert vn1_fixture.verify_on_setup()
     assert vn2_fixture.verify_on_setup()
     assert vn3_fixture.verify_on_setup()
     assert vn1_fixture.verify_vn_policy_in_api_server()
     assert vn2_fixture.verify_vn_policy_in_api_server()
     assert vn3_fixture.verify_vn_policy_in_api_server()
     vm1_fixture = self.useFixture(
         VMFixture(
             connections=self.connections,
             vn_objs=[
                 vn1_fixture.obj,
                 vn2_fixture.obj],
             vm_name=vm1_name,
             project_name=self.inputs.project_name))
     vm2_fixture = self.useFixture(
         VMFixture(
             connections=self.connections,
             vn_objs=[
                 vn3_fixture.obj],
             vm_name=vm2_name,
             project_name=self.inputs.project_name))
     assert vm1_fixture.verify_on_setup()
     assert vm2_fixture.verify_on_setup()
     self.nova_h.wait_till_vm_is_up(vm1_fixture.vm_obj)
     self.nova_h.wait_till_vm_is_up(vm2_fixture.vm_obj)
     # For multi-vn vm, configure ip address for 2nd interface
     multivn_vm_ip_list = vm1_fixture.vm_ips
     intf_conf_cmd = "ifconfig eth1 %s netmask 255.255.255.0" % multivn_vm_ip_list[
         1]
     vm_cmds = (intf_conf_cmd, 'ifconfig -a')
     for cmd in vm_cmds:
         cmd_to_output = [cmd]
         vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
         output = vm1_fixture.return_output_cmd_dict[cmd]
     for ip in multivn_vm_ip_list:
         if ip not in output:
             self.logger.error(
                 "IP %s not assigned to any eth intf of %s" %
                 (ip, vm1_fixture.vm_name))
             assert False
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw of VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with no allow rule in the VN where default gw is part of, traffic should fail")
     result = vm1_fixture.ping_with_certainty(
         vm2_fixture.vm_ip,
         expectation=False)
     assertEqual(result, True, "ping passed which is not expected")
     # Configure VM to reroute traffic to interface belonging to different
     # VN
     self.logger.info(
         "Direct traffic to gw which is part of VN with allow policy to destination VN, traffic should pass now")
     i = ' route add -net %s netmask 255.255.255.0 gw %s dev eth1' % (
         vn3_subnets[0].split('/')[0], multivn_vm_ip_list[1])
     cmd_to_output = [i]
     vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
     output = vm1_fixture.return_output_cmd_dict[i]
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw for VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with allow rule in the VN where network gw is part of, traffic should pass")
     result = vm1_fixture.ping_with_certainty(
         vm2_fixture.vm_ip,
         expectation=True)
     assertEqual(result, True, "ping failed which is not expected")
     return True
Developer: smurugap, Project: contrail-test, Lines: 101, Source: test_policy.py

Example 11: repeated_policy_update_test_with_ping

 def repeated_policy_update_test_with_ping(self, topo):
     """ Pick 2 VM's for testing, test with ping; modify policy of one VN [in which VM is
     present] and verify the rule functionality with ping.
     """
     result = True
     msg = []
     #
     # Test setup: Configure policy, VN, & VM
     # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
     # Returned topo is of following format:
     # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
     setup_obj = self.useFixture(
         sdnTopoSetupFixture(self.connections, topo))
     out = setup_obj.topo_setup()
     #out= setup_obj.topo_setup(vm_verify='yes', skip_cleanup='yes')
     self.logger.info("Setup completed with result %s" % (out['result']))
     assertEqual(out['result'], True, out['msg'])
     if out['result']:
         topo, config_topo = out['data']
     # 1. Define Traffic Params
     test_vm1 = topo.vmc_list[0]  # 'vmc0'
     test_vm2 = topo.vmc_list[1]  # 'vmc1'
     test_vm1_fixture = config_topo['vm'][test_vm1]
     test_vm2_fixture = config_topo['vm'][test_vm2]
     test_vn = topo.vn_of_vm[test_vm1]  # 'vnet0'
     test_vn_fix = config_topo['vn'][test_vn]
     test_vn_id = test_vn_fix.vn_id
     test_proto = 'icmp'
     for policy in topo.policy_test_order:
         # 2. set new policy for test_vn to policy
         test_policy_fq_names = []
         name = config_topo['policy'][policy].policy_fq_name
         test_policy_fq_names.append(name)
         state = "policy for " + test_vn + " updated to " + policy
         test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
         # wait for tables update before checking after making changes to
         # system
         time.sleep(5)
         self.logger.info("new policy list of vn %s is %s" %
                          (test_vn, policy))
         # update expected topology with this new info for verification
         updated_topo = policy_test_utils.update_topo(topo, test_vn, policy)
         self.logger.info("Starting Verifications after %s" % (state))
         policy_info = "policy in effect is : %s" % (topo.rules[policy])
         self.logger.info(policy_info)
         # 3. set expectation to verify..
         matching_rule_action = {}
         # Topology guide: There is only one policy assigned to test_vn and there is one rule affecting traffic proto.
         # For ping test, set expected result based on action - pass or deny
         # if action = 'pass', expectedResult= True, else Fail;
         num_rules = len(topo.rules[policy])
         for i in range(num_rules):
             proto = topo.rules[policy][i]['protocol']
             matching_rule_action[proto] = topo.rules[
                 policy][i]['simple_action']
         if num_rules == 0:
             matching_rule_action[test_proto] = 'deny'
         self.logger.info("matching_rule_action: %s" %
                          matching_rule_action)
         # 4. Test with ping
         expectedResult = True if matching_rule_action[
             test_proto] == 'pass' else False
         ret = test_vm1_fixture.ping_with_certainty(
             test_vm2_fixture.vm_ip, expectation=expectedResult,
             dst_vm_fixture=test_vm2_fixture)
         result_msg = "vm ping test result after %s is: %s" % (state, ret)
         self.logger.info(result_msg)
         if not ret:
             result = False
             msg.extend([result_msg, policy_info])
             all_policy_verify(
                 self, config_topo, updated_topo, state, fixture_only='yes')
     assertEqual(result, True, msg)
     test_vn_fix.unbind_policies(test_vn_id)
     return result
Developer: chhandakm, Project: contrail-test, Lines: 75, Source: test_policy_detailed.py

Example 12: test_policy_rules_scaling_with_ping

    def test_policy_rules_scaling_with_ping(self):
        ''' Test to validate scaling of policy and rules
        '''
        result = True
        msg = []
        vn1_name = 'vn1'
        vn2_name = 'vn2'
        vn1_subnets = ['10.1.1.0/24']
        vn2_subnets = ['20.1.1.0/24']
        number_of_policy = 10
        # Workaround: use a smaller number of rules until bugs 1006 and 1184
        # are fixed
        number_of_dummy_rules = 148
        valid_rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'icmp', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'udp', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
        ]

        self.logger.info(
            'Creating %d policy and %d rules to test policy scalability' %
            (number_of_policy, number_of_dummy_rules + len(valid_rules)))
        # for now we are creating limited number of policy and rules
        policy_objs_list = policy_test_helper._create_n_policy_n_rules(
            self, number_of_policy, valid_rules, number_of_dummy_rules)
        time.sleep(5)
        self.logger.info('Create VN and associate %d policy' %
                         (number_of_policy))
        vn1_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn1_name,
                inputs=self.inputs,
                subnets=vn1_subnets,
                policy_objs=policy_objs_list))
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn2_name,
                inputs=self.inputs,
                subnets=vn2_subnets,
                policy_objs=policy_objs_list))
        assert vn2_fixture.verify_on_setup()
        vn1_vm1_name = 'vm1'
        vn1_vm2_name = 'vm2'
        vm1_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn1_fixture.obj,
                vm_name=vn1_vm1_name))
        assert vm1_fixture.verify_on_setup()
        vm2_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn2_fixture.obj,
                vm_name=vn1_vm2_name))
        assert vm2_fixture.verify_on_setup()
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        self.logger.info("Verify ping to vm %s" % (vn1_vm2_name))
        ret = vm1_fixture.ping_with_certainty(
            vm2_fixture.vm_ip, expectation=True,
            dst_vm_fixture=vm2_fixture)
        result_msg = "vm ping test result to vm %s is: %s" % (
            vn1_vm2_name, ret)
        self.logger.info(result_msg)
        if not ret:
            result = False
            msg.extend(
                ["ping failure with scaled policy and rules:", result_msg])
        assertEqual(result, True, msg)
        return True
Developer: chhandakm, Project: contrail-test, Lines: 88, Source: test_policy_detailed.py

Example 13: test_one_policy_rules_scaling_with_ping

    def test_one_policy_rules_scaling_with_ping(self):
        ''' Test to validate scaling of policies and rules,
            specifically rule scaling on a single policy.
            The policy is attached to two VNs, and a VM is
            spawned in each of the VNs to verify that the
            exact number of ACLs is created in the agent
            introspect.
            Expected ace ids = 1 policy * 1498 distinct rules
            + 2 valid rules + 2 default rules = 1502 ace ids.
        '''
        result = True
        msg = []
        vn1_name = 'vn1'
        vn2_name = 'vn2'
        vn1_subnets = ['10.1.1.0/24']
        vn2_subnets = ['20.1.1.0/24']
        number_of_policy = 1
        number_of_dummy_rules = 1498
        number_of_valid_rules = 2
        number_of_default_rules = 2
        total_number_of_rules = number_of_dummy_rules + number_of_valid_rules + number_of_default_rules
        no_of_rules_exp = total_number_of_rules
        valid_rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'icmp', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'udp', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
        ]

        self.logger.info(
            'Creating %d policy and %d rules to test policy scalability' %
            (number_of_policy, number_of_dummy_rules + len(valid_rules)))
        policy_objs_list = policy_test_helper._create_n_policy_n_rules(
            self, number_of_policy, valid_rules, number_of_dummy_rules)
        time.sleep(5)
        self.logger.info('Create VN and associate %d policy' %
                         (number_of_policy))
        vn1_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn1_name,
                inputs=self.inputs,
                subnets=vn1_subnets,
                policy_objs=policy_objs_list))
        assert vn1_fixture.verify_on_setup()
        vn2_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn2_name,
                inputs=self.inputs,
                subnets=vn2_subnets,
                policy_objs=policy_objs_list))
        assert vn2_fixture.verify_on_setup()
        vn1_vm1_name = 'vm1'
        vn1_vm2_name = 'vm2'
        vm1_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn1_fixture.obj,
                vm_name=vn1_vm1_name))
        assert vm1_fixture.verify_on_setup()
        vm2_fixture = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_obj=vn2_fixture.obj,
                vm_name=vn1_vm2_name))
        assert vm2_fixture.verify_on_setup()
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        self.logger.info("Verify ping to vm %s" % (vn1_vm2_name))
        ret = vm1_fixture.ping_with_certainty(
            vm2_fixture.vm_ip, expectation=True,
            dst_vm_fixture=vm2_fixture)
        result_msg = "vm ping test result to vm %s is: %s" % (
            vn1_vm2_name, ret)
        self.logger.info(result_msg)
        if not ret:
            result = False
            msg.extend(
                ["ping failure with scaled policy and rules:", result_msg])
        assertEqual(result, True, msg)
        if self.inputs.get_af() == af_test:
            #In the v6 test, a new rule is added for proto 58 (ICMPv6) corresponding to the v4 icmp rule,
            #so the expected no. of rules should be incremented by 1
            no_of_rules_exp = total_number_of_rules + 1

#......... part of the code omitted .........
Developer: Juniper, Project: contrail-test, Lines: 101, Source: test_policy_detailed.py

Example 14: test_flow_multi_projects

    def test_flow_multi_projects(self):
        """Tests related to flow setup rate and flow table stability accross various triggers for verification
           accross VN's and accross multiple projects"""
        result = True
        self.comp_node_fixt = {}
        for cmp_node in self.inputs.compute_ips:
            self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
                self.connections, cmp_node))
        #
        # Check if there are enough nodes, i.e. at least 2 compute nodes, to run this test;
        # else report that a minimum of 2 compute nodes is needed for this test and
        # exit.
        if len(self.inputs.compute_ips) < 2:
            self.logger.warn(
                "Minimum 2 compute nodes are needed for this test to run")
            self.logger.warn(
                "Exiting since this test can't be run on single compute node")
            return True
        #
        # Get config for test from topology
        msg = []
        topology_class_name = sdn_flow_test_topo_multiple_projects.multi_project_topo

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IP's and pass it to topo if you want to pin
        # a vm to a particular node
        topo = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # 1. Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm':
        # vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.sdn_topo_setup()
        assertEqual(out['result'], True, out['msg'])
        self.topo, self.config_topo = out['data'][0], out['data'][1]
        self.proj = list(self.topo.keys())[0]
        # 2. Start Traffic
        for profile, details in self.topo[self.proj].traffic_profile.items():
            self.logger.info("Profile under test: %s, details: %s" %(profile, details))
            self.src_vm = details['src_vm']
            self.dst_vm = details['dst_vm']
            self.src_proj = details['src_proj']
            self.dst_proj = details['dst_proj']
            # Not a flow-scaling test, so limit num_flows to a low number.
            num_flows = 15000
            self.generated_flows = 2*num_flows
            self.flow_gen_rate = 1000
            src_vm_fixture = self.config_topo[self.src_proj]['vm'][self.src_vm]
            src_vm_vn = src_vm_fixture.vn_names[0]
            src_vm_vn_fix = self.config_topo[self.src_proj]['vn'][src_vm_vn]
            dst_vm_fixture = self.config_topo[self.dst_proj]['vm'][self.dst_vm]
            self.proto = 'udp'
            self.cmp_node = src_vm_fixture.vm_node_ip
            self.comp_node_fixt[self.cmp_node].get_config_per_vm_flow_limit()
            self.comp_node_fixt[self.cmp_node].get_config_flow_aging_time()
            self.max_vm_flows = self.comp_node_fixt[self.cmp_node].max_vm_flows
            self.flow_cache_timeout = self.comp_node_fixt[self.cmp_node].flow_cache_timeout
            self.traffic_obj = self.useFixture(
                traffic_tests.trafficTestFixture(self.connections))
            # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None,
            # stream_proto= 'udp', start_sport= 8000,
            # total_single_instance_streams= 20):
            startStatus = self.traffic_obj.startTraffic(
                total_single_instance_streams=num_flows,
                pps=self.flow_gen_rate,
                start_sport=1000,
                cfg_profile='ContinuousSportRange',
                tx_vm_fixture=src_vm_fixture,
                rx_vm_fixture=dst_vm_fixture,
                stream_proto=self.proto)
            msg1 = "Status of start traffic : %s, %s, %s" % (
                self.proto, src_vm_fixture.vm_ip, startStatus['status'])
            self.logger.info(msg1)
            assert startStatus['status'], msg1
            # 3. Poll live traffic & verify VM flow count
            self.verify_node_flow_setup()
            # 4. Stop Traffic
            self.logger.info("Proceed to stop traffic..")
            self.traffic_obj.stopTraffic(wait_for_stop=False)
            start_time = time.time()
            # 5. Verify flow ageing
            self.logger.info(
                "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing")
            sleep(self.flow_cache_timeout)
            while True:
                begin_flow_count = self.comp_node_fixt[
                    self.cmp_node].get_vrouter_matching_flow_count(
                    self.flow_data)
                self.logger.debug('begin_flow_count: %s' %(begin_flow_count))
                if begin_flow_count['all'] == 0:
                    break
                flow_teardown_time = math.ceil(flow_test_utils.get_max_flow_removal_time(begin_flow_count['all'], self.flow_cache_timeout))
                # flow_teardown_time is not the actual time to remove flows
                # Based on flow_count at this time, teardown_time is calculated to the value
#......... part of the code omitted .........
Developer: alokkumar223, Project: contrail-test, Lines: 101, Source: test_system_flows.py

Example 15: system_vna_verify_policy

def system_vna_verify_policy(self, policy_fixt, topo, state):
    # Verify all policies on all compute nodes.
    self.logger.debug("Starting Verifications after %s" % (state))
    ret = policy_fixt.verify_policy_in_vna(topo)
    assertEqual(ret['result'], True, ret['msg'])
Developer: Ankitja, Project: contrail-test, Lines: 5, Source: system_verification.py


Note: The tcutils.test_lib.test_utils.assertEqual examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; the source code copyright belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.