当前位置: 首页>>代码示例>>Python>>正文


Python DCNetwork.addLink方法代码示例

本文整理汇总了Python中emuvim.dcemulator.net.DCNetwork.addLink方法的典型用法代码示例。如果您正苦于以下问题:Python DCNetwork.addLink方法的具体用法?Python DCNetwork.addLink怎么用?Python DCNetwork.addLink使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在emuvim.dcemulator.net.DCNetwork的用法示例。


在下文中一共展示了DCNetwork.addLink方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: create_topology1

# 需要导入模块: from emuvim.dcemulator.net import DCNetwork [as 别名]
# 或者: from emuvim.dcemulator.net.DCNetwork import addLink [as 别名]
def create_topology1():
    """Two-PoP demo topology: dc1/dc2 behind one core switch, with a REST
    API endpoint and a SONATA dummy gatekeeper attached to both PoPs.
    Blocks in the Mininet CLI until the operator types 'exit'."""
    # Emulated network: two datacenters linked through a single switch.
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    pop_a = net.addDatacenter("dc1")
    pop_b = net.addDatacenter("dc2")
    core = net.addSwitch("s1")
    net.addLink(pop_a, core, delay="10ms")
    net.addLink(pop_b, core, delay="20ms")

    # REST API endpoint: controls both PoPs and the network (chaining/monitoring).
    rest_api = RestApiEndpoint("0.0.0.0", 5001)
    rest_api.connectDCNetwork(net)
    rest_api.connectDatacenter(pop_a)
    rest_api.connectDatacenter(pop_b)
    rest_api.start()  # served from a background thread, does not block

    # SONATA dummy gatekeeper, attached to both PoPs.
    gatekeeper = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
    gatekeeper.connectDatacenter(pop_a)
    gatekeeper.connectDatacenter(pop_b)
    gatekeeper.start()  # background thread as well

    # Run the emulation; CLI() blocks until the user exits.
    net.start()
    net.CLI()
    rest_api.stop()
    net.stop()
开发者ID:stevenvanrossem,项目名称:son-emu,代码行数:31,代码来源:sonata_y1_demo_topology_1.py

示例2: create_topology1

# 需要导入模块: from emuvim.dcemulator.net import DCNetwork [as 别名]
# 或者: from emuvim.dcemulator.net.DCNetwork import addLink [as 别名]
def create_topology1():
    """Resource-model demo: two DCs behind one switch, each with its own
    resource model and CPU cap; starts six VNFs, stops two, then drops
    into the Mininet CLI (blocks until the CLI is exited)."""
    cleanup()
    # create topology
    # use a maximum of 50% cpu time for containers added to data centers
    net = DCNetwork(dc_emulation_max_cpu=0.5, controller=Controller)
    # add some data centers and create a topology
    dc1 = net.addDatacenter("dc1", resource_log_path=RESOURCE_LOG_PATH)
    dc2 = net.addDatacenter("dc2", resource_log_path=RESOURCE_LOG_PATH)
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")

    # create and assign resource models for each DC
    rm1 = UpbSimpleCloudDcRM(max_cu=4, max_mu=1024)
    rm2 = UpbOverprovisioningCloudDcRM(max_cu=4)
    dc1.assignResourceModel(rm1)
    dc2.assignResourceModel(rm2)

    # add the command line interface endpoint to each DC
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    # start the emulation platform
    net.start()
    # NOTE: print() with a single argument behaves identically on Python 2
    # and 3; the old statement form was a Python 3 syntax error.
    print("Wait a moment and allocate some compute start some compute resources...")
    time.sleep(2)
    dc1.startCompute("vnf1")
    dc1.startCompute("vnf2", flavor_name="tiny")
    dc1.startCompute("vnf3", flavor_name="small")
    dc2.startCompute("vnf4", flavor_name="medium")
    dc2.startCompute("vnf5", flavor_name="medium")
    dc2.startCompute("vnf6", flavor_name="medium")
    print("... done.")
    time.sleep(5)
    print("Removing instances ...")
    dc1.stopCompute("vnf1")
    dc2.stopCompute("vnf4")
    print("... done")
    net.CLI()
    net.stop()
开发者ID:felipevicens,项目名称:son-emu,代码行数:45,代码来源:resource_model_demo_topology.py

示例3: create_topology1

# 需要导入模块: from emuvim.dcemulator.net import DCNetwork [as 别名]
# 或者: from emuvim.dcemulator.net.DCNetwork import addLink [as 别名]
def create_topology1():
    """Three-PoP star topology with monitoring enabled, a REST endpoint for
    all PoPs, and an auto-deploying SONATA dummy gatekeeper that installs a
    custom SAP VNFD. Blocks in the Mininet CLI until exit."""
    # Star topology: three datacenters around one switch, no artificial delay.
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
    pops = [net.addDatacenter(label) for label in ("dc1", "dc2", "dc3")]
    hub = net.addSwitch("s1")
    for pop in pops:
        net.addLink(pop, hub)

    # REST API endpoint covering the network and every PoP.
    rest_api = RestApiEndpoint("0.0.0.0", 5001)
    rest_api.connectDCNetwork(net)
    for pop in pops:
        rest_api.connectDatacenter(pop)
    rest_api.start()  # background thread, does not block


    # Resolve the custom SAP VNFD shipped next to this script.
    sap_vnfd = 'custom_sap_vnfd.yml'
    dir_path = os.path.dirname(__file__)
    sap_vnfd_path = os.path.join(dir_path, sap_vnfd)
    # sap_vnfd_path = None  # (fall back to the gatekeeper's default SAP)
    # Dummy gatekeeper with SAP deployment and automatic (un)deployment.
    gatekeeper = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True, auto_deploy=True,
                                               docker_management=True, auto_delete=True,
                                               sap_vnfd_path=sap_vnfd_path)
    for pop in pops:
        gatekeeper.connectDatacenter(pop)
    gatekeeper.start()  # background thread as well

    # Run the emulation; CLI() blocks until the user exits.
    net.start()
    net.CLI()
    net.stop()
开发者ID:stevenvanrossem,项目名称:son-emu,代码行数:42,代码来源:demo_topo_3pop.py

示例4: create_topology1

# 需要导入模块: from emuvim.dcemulator.net import DCNetwork [as 别名]
# 或者: from emuvim.dcemulator.net.DCNetwork import addLink [as 别名]
def create_topology1():
    """Headless two-PoP monitoring topology: no CLI (cannot run interactive
    containers under docker compose); instead spins until the module-level
    `exit` flag is set by a SIGTERM/SIGINT handler elsewhere in the file."""

    # Module-level shutdown flag, flipped by the signal handler.
    # (NOTE: the name shadows the `exit` builtin — kept for compatibility.)
    global exit

    # Two datacenters joined through one switch with asymmetric delays.
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=False)
    pop_a = net.addDatacenter("dc1")
    pop_b = net.addDatacenter("dc2")
    core = net.addSwitch("s1")
    net.addLink(pop_a, core, delay="10ms")
    net.addLink(pop_b, core, delay="20ms")

    # REST API endpoint for both PoPs...
    rest_api = RestApiEndpoint("0.0.0.0", 5001)
    rest_api.connectDatacenter(pop_a)
    rest_api.connectDatacenter(pop_b)
    # ...and for the whole network (required for chaining and monitoring).
    rest_api.connectDCNetwork(net)
    rest_api.start()  # background thread, does not block

    # SONATA dummy gatekeeper with SAP deployment enabled.
    gatekeeper = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    gatekeeper.connectDatacenter(pop_a)
    gatekeeper.connectDatacenter(pop_b)
    gatekeeper.start()  # background thread as well

    # Bring the emulation up.
    net.start()

    # does not work from docker compose (cannot start container in interactive mode)
    # cli = net.CLI()
    # instead wait here:
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    net.stop()
开发者ID:CN-UPB,项目名称:son-emu,代码行数:41,代码来源:son-monitor_test_topo.py

示例5: create_topology

# 需要导入模块: from emuvim.dcemulator.net import DCNetwork [as 别名]
# 或者: from emuvim.dcemulator.net.DCNetwork import addLink [as 别名]
def create_topology():
    """Minimal two-PoP topology (direct dc1--dc2 link, 20ms delay) with a
    REST endpoint and the 5GTANGO lightweight life cycle manager (LLCM).
    Blocks in the Mininet CLI until the operator types 'exit'."""
    net = DCNetwork(monitor=False, enable_learning=True)
    # Two datacenters, directly interconnected.
    pop_a = net.addDatacenter("dc1")
    pop_b = net.addDatacenter("dc2")
    net.addLink(pop_a, pop_b, delay="20ms")
    # REST API endpoint for the network and both PoPs.
    rest_api = RestApiEndpoint("0.0.0.0", 5001)
    rest_api.connectDCNetwork(net)
    rest_api.connectDatacenter(pop_a)
    rest_api.connectDatacenter(pop_b)
    rest_api.start()
    # 5GTANGO lightweight life cycle manager (LLCM) on both PoPs.
    llcm = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
    llcm.connectDatacenter(pop_a)
    llcm.connectDatacenter(pop_b)
    llcm.start()  # runs in another thread, does not block
    # Start the emulation and hand control to the interactive CLI;
    # when the user exits the CLI, tear the emulator down.
    net.start()
    net.CLI()
    net.stop()
开发者ID:mpeuster,项目名称:son-emu,代码行数:26,代码来源:tango_default_cli_topology_2_pop.py

示例6: create_topology1

# 需要导入模块: from emuvim.dcemulator.net import DCNetwork [as 别名]
# 或者: from emuvim.dcemulator.net.DCNetwork import addLink [as 别名]
def create_topology1():
    """Monitoring demo topology.

    Builds a DCNetwork with monitoring enabled, two datacenters joined via
    one SDN switch, a network-wide monitoring API (zerorpc, port 5151) and
    a compute API endpoint (zerorpc, port 4242). Blocks in the Mininet CLI
    until the user types 'exit'.
    """
    # 1. Data center network object with monitoring enabled.
    net = DCNetwork(monitor=True, enable_learning=False)

    # 1b. Network-wide endpoint API: lets an external controller set up
    #     forwarding paths between VNFs (Docker containers) across the
    #     switches and datacenters of the emulated topology.
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()

    # 2. Logical data centers (each one a "bigswitch" in this prototype).
    pop_a = net.addDatacenter("datacenter1")
    pop_b = net.addDatacenter("datacenter2")
    # Further DCs could be added, optionally with arbitrary metadata, e.g.:
    #   net.addDatacenter("datacenter4", metadata={"mydata": "..."})

    # 3. Extra SDN switch used to interconnect the data centers.
    core = net.addSwitch("s1")

    # 4. Links between the data centers and the switch. Links accept
    #    Mininet's parameters to limit bw or add delay/jitter, e.g.
    #    net.addLink(dc1, dc2, delay="10ms").
    net.addLink(pop_a, core)
    net.addLink(core, pop_b)

    # 5. API endpoint to control compute resources (start/stop/list VNFs)
    #    from the outside, e.g. by an orchestrator. One endpoint may serve
    #    several data centers.
    compute_api = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    compute_api.connectDatacenter(pop_a)
    compute_api.connectDatacenter(pop_b)
    compute_api.start()  # served from another thread, does not block

    # 5.1. The design also supports one endpoint *per* data center, which
    #      mimics a real multi-PoP deployment (one OpenStack-like REST API
    #      per PoP) from an orchestrator's point of view:
    #   extra_api = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    #   extra_api.connectDatacenter(...)
    #   extra_api.start()

    # 6. Start the emulator and enter the interactive Mininet CLI; when the
    #    user types exit in the CLI, stop the emulator.
    net.start()
    net.CLI()
    net.stop()
开发者ID:felipevicens,项目名称:son-emu,代码行数:96,代码来源:monitoring_demo_topology.py

示例7: SimpleTestTopology

# 需要导入模块: from emuvim.dcemulator.net import DCNetwork [as 别名]
# 或者: from emuvim.dcemulator.net.DCNetwork import addLink [as 别名]
class SimpleTestTopology(unittest.TestCase):
    """
    Helper base class for basic emulator test setups.

    Builds chain topologies of the form: s1 -- s2 -- s3 -- ... -- sN
    with optional data centers, hosts and docker containers attached.
    """

    def __init__(self, *args, **kwargs):
        # Bookkeeping for nodes created by createNet().
        self.net = None
        self.s = []   # list of switches
        self.h = []   # list of hosts
        self.d = []   # list of docker containers
        self.dc = []  # list of data centers
        self.docker_cli = None  # lazily created docker client
        super(SimpleTestTopology, self).__init__(*args, **kwargs)

    def createNet(
            self,
            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
            autolinkswitches=False, controller=Controller, **kwargs):
        """
        Create a Mininet instance and populate it with the requested
        number of switches, data centers, hosts and docker containers.

        Attention: tests should stick with Mininet's default controller;
        pass another controller only to test controller-specific behavior.
        """
        self.net = DCNetwork(controller=controller, **kwargs)

        # Switches are numbered from 1: ovs rejects dpid 0, and mininet
        # derives the dpid from the name's number.
        for idx in range(1, nswitches + 1):
            self.s.append(self.net.addSwitch('s%d' % idx))
        # Optionally chain all switches into a line.
        if autolinkswitches:
            for left, right in zip(self.s, self.s[1:]):
                self.net.addLink(left, right)
        # Data centers, tagged with their index for the unit tests.
        for idx in range(ndatacenter):
            dc = self.net.addDatacenter(
                'datacenter%d' % idx,
                metadata={"unittest_dc": idx})
            self.dc.append(dc)
        # Plain hosts.
        for idx in range(nhosts):
            self.h.append(self.net.addHost('h%d' % idx))
        # Docker container hosts.
        for idx in range(ndockers):
            self.d.append(self.net.addDocker('d%d' % idx, dimage="ubuntu:trusty"))

    def startNet(self):
        # Bring the emulated network up.
        self.net.start()

    def stopNet(self):
        # Tear the emulated network down.
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with the local docker instance (cached).
        """
        if self.docker_cli is None:
            self.docker_cli = docker.Client(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet.
        """
        cli = self.getDockerCli()
        return cli.containers(filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass

    @staticmethod
    def tearDown():
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
开发者ID:hadik3r,项目名称:son-emu,代码行数:88,代码来源:base.py


注:本文中的emuvim.dcemulator.net.DCNetwork.addLink方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。