This page collects typical usage examples of the Python class emuvim.dcemulator.net.DCNetwork. If you are unsure what DCNetwork is for or how to use it, the curated class examples below should help.
The following 13 code examples of the DCNetwork class are shown, ordered by popularity by default.
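The snippets below are excerpts from larger topology scripts, so their import statements are omitted. A typical import block looks roughly like the following sketch; the exact module paths follow the usual emuvim (son-emu) package layout and may differ between versions, so treat them as assumptions:

# Sketch of the imports assumed by the examples below (module paths may vary per emuvim version)
from mininet.node import Controller, RemoteController
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint                 # REST control API
from emuvim.api.sonata import SonataDummyGatekeeperEndpoint                   # SONATA dummy gatekeeper
from emuvim.api.openstack.openstack_api_endpoint import OpenstackApiEndpoint  # OpenStack-like APIs
from emuvim.api.tango import TangoLLCMEndpoint                                # 5GTANGO lightweight LCM (LLCM)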
Example 1: create_topology1
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")
    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()
    # start the emulation platform
    net.start()
    net.CLI()
    rapi1.stop()
    net.stop()
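Topology scripts like this one are usually run directly (as root, since the emulator manipulates network namespaces). A minimal entry point, not part of the excerpt above and added here only as an illustration, could be:

from mininet.log import setLogLevel

if __name__ == '__main__':
    setLogLevel('info')   # Mininet log verbosity
    create_topology1()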
Example 2: create_topology1
def create_topology1():
    net = DCNetwork(monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    heatapi1 = OpenstackApiEndpoint("0.0.0.0", 5001)
    # connect data center to this endpoint
    heatapi1.connect_datacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    heatapi1.start()
    heatapi1.connect_dc_network(net)
    net.start()
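Note that this snippet ends right after net.start(): unlike the other examples it neither enters the CLI nor tears the emulator down, presumably because that happens elsewhere in the original script. An interactive run would typically continue as in Example 1, for instance:

    # assumed continuation, mirroring the other examples
    net.CLI()    # interactive Mininet CLI; type "exit" to leave
    net.stop()   # tear down containers, switches and links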
Example 3: __init__
def __init__(self):
    GracefulKiller(self)
    # create topology
    self.net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=False)
    self.dc = self.net.addDatacenter("dc1")
    # add the command line interface endpoint to each DC (REST API)
    self.rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    self.rapi1.connectDCNetwork(self.net)
    self.rapi1.connectDatacenter(self.dc)
    # run API endpoint server (in another thread, don't block)
    self.rapi1.start()
    # add the SONATA dummy gatekeeper to each DC
    self.sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=False)
    self.sdkg1.connectDatacenter(self.dc)
    # run the dummy gatekeeper (in another thread, don't block)
    self.sdkg1.start()
    self.net.start()
    LOG.info("Started topology")
    while not self.stop_now:
        sleep(1)
    self.net.stop()
    LOG.info("Stopped topology")
Example 4: createNet
def createNet(
        self,
        nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
        autolinkswitches=False, controller=Controller, **kwargs):
    """
    Creates a Mininet instance and automatically adds some
    nodes to it.
    Attention, we should always use Mininet's default controller
    for our tests. Only use other controllers if you want to test
    specific controller functionality.
    """
    self.net = DCNetwork(controller=controller, **kwargs)
    # add some switches
    # start from s1 because ovs does not like to have dpid = 0
    # and switch name-number is being used by mininet to set the dpid
    for i in range(1, nswitches + 1):
        self.s.append(self.net.addSwitch('s%d' % i))
    # if specified, chain all switches
    if autolinkswitches:
        for i in range(0, len(self.s) - 1):
            self.net.addLink(self.s[i], self.s[i + 1])
    # add some data centers
    for i in range(0, ndatacenter):
        self.dc.append(
            self.net.addDatacenter(
                'datacenter%d' % i,
                metadata={"unittest_dc": i}))
    # add some hosts
    for i in range(0, nhosts):
        self.h.append(self.net.addHost('h%d' % i))
    # add some dockers
    for i in range(0, ndockers):
        self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))
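This helper is a method of the SimpleTestTopology unit-test base class shown in full in Example 13. As an illustration (the parameters are made up), calling it like this from a test method builds the switch chain s1 -- s2 -- s3 plus two data centers and two Docker containers that the test can then wire up and start:

        # illustrative call from inside a test method of SimpleTestTopology
        self.createNet(nswitches=3, ndatacenter=2, nhosts=0, ndockers=2,
                       autolinkswitches=True)
        self.startNet()
        # self.s == [s1, s2, s3], self.dc holds the two data centers, self.d the two containers
        self.stopNet()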
Example 5: create_and_start_topology
def create_and_start_topology(lock, restart_lock):
    _LOGGER.info("Creating and starting the topology")
    net = DCNetwork(controller=RemoteController,
                    monitor=True,
                    enable_learning=True)
    restart_lock.acquire()
    setup_topology(net)
    try:
        net.start()  # non blocking call
        _LOGGER.info("Waiting for the barrier to stop the topology")
        lock.acquire()
        _LOGGER.info("Stopping the topology")
        net.stop()
        lock.release()
    except Exception as e:
        _LOGGER.error("Ignoring exception in thread: {!s}".format(e))
        restart_lock.release()
        exit(1)
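create_and_start_topology blocks until the first lock is released, so it is meant to run in a worker thread while another thread decides when to shut the topology down. A possible driver (an assumption about the surrounding code, shown only to clarify the locking protocol):

import threading

stop_lock = threading.Lock()
restart_lock = threading.Lock()
stop_lock.acquire()                      # held until we want the topology to stop

worker = threading.Thread(target=create_and_start_topology,
                          args=(stop_lock, restart_lock))
worker.start()
# ... run experiments against the emulated network here ...
stop_lock.release()                      # lets the worker call net.stop()
worker.join()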
Example 6: create_topology1
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()
    # specify a vnfd file to be deployed as internal SAP:
    sap_vnfd = 'custom_sap_vnfd.yml'
    dir_path = os.path.dirname(__file__)
    sap_vnfd_path = os.path.join(dir_path, sap_vnfd)
    sap_vnfd_path = None  # note: the path computed above is overridden with None here
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True, auto_deploy=True,
                                          docker_management=True, auto_delete=True,
                                          sap_vnfd_path=sap_vnfd_path)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()
    # start the emulation platform
    net.start()
    net.CLI()
    net.stop()
Example 7: create_topology1
def create_topology1():
    cleanup()
    # create topology
    # use a maximum of 50% cpu time for containers added to data centers
    net = DCNetwork(dc_emulation_max_cpu=0.5, controller=Controller)
    # add some data centers and create a topology
    dc1 = net.addDatacenter("dc1", resource_log_path=RESOURCE_LOG_PATH)
    dc2 = net.addDatacenter("dc2", resource_log_path=RESOURCE_LOG_PATH)
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")
    # create and assign resource models for each DC
    rm1 = UpbSimpleCloudDcRM(max_cu=4, max_mu=1024)
    rm2 = UpbOverprovisioningCloudDcRM(max_cu=4)
    dc1.assignResourceModel(rm1)
    dc2.assignResourceModel(rm2)
    # add the command line interface endpoint to each DC
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()
    # start the emulation platform
    net.start()
    print("Wait a moment and allocate some compute resources ...")
    time.sleep(2)
    dc1.startCompute("vnf1")
    dc1.startCompute("vnf2", flavor_name="tiny")
    dc1.startCompute("vnf3", flavor_name="small")
    dc2.startCompute("vnf4", flavor_name="medium")
    dc2.startCompute("vnf5", flavor_name="medium")
    dc2.startCompute("vnf6", flavor_name="medium")
    print("... done.")
    time.sleep(5)
    print("Removing instances ...")
    dc1.stopCompute("vnf1")
    dc2.stopCompute("vnf4")
    print("... done")
    net.CLI()
    net.stop()
Example 8: create_topology1
def create_topology1():
    global exit
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    s1 = net.addSwitch("s1")
    net.addLink(dc1, s1, delay="10ms")
    net.addLink(dc2, s1, delay="20ms")
    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    # also connect the whole network, needed for chaining and monitoring
    rapi1.connectDCNetwork(net)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()
    # add the SONATA dummy gatekeeper to each DC
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    sdkg1.connectDatacenter(dc1)
    sdkg1.connectDatacenter(dc2)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()
    # start the emulation platform
    net.start()
    # net.CLI() does not work from docker compose (cannot start the container in interactive mode)
    # cli = net.CLI()
    # instead wait here:
    logging.info("waiting for SIGTERM or SIGINT signal")
    while not exit:
        time.sleep(1)
    logging.info("got SIG signal")
    net.stop()
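The global exit flag polled above is presumably set by signal handlers registered elsewhere in the script. A minimal sketch of such handlers (an assumption, not part of the excerpt):

import signal

exit = False  # module-level flag polled by create_topology1(); shadows the builtin, as in the excerpt

def _handle_signal(signum, frame):
    """Hypothetical handler: request a clean shutdown on SIGTERM/SIGINT."""
    global exit
    exit = True

signal.signal(signal.SIGTERM, _handle_signal)
signal.signal(signal.SIGINT, _handle_signal)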
Example 9: create_topology1
def create_topology1():
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()
    # add the SONATA dummy gatekeeper to each DC
    # sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    # sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    # sdkg1.start()
    # start the emulation platform
    net.start()  # here the docker host default ip is configured
    # topology must be started before hosts are added
    cache = dc1.startCompute('cache', image="squid-vnf",
                             network=[{"ip": "10.10.0.1/24", "id": "client", 'mac': "aa:aa:aa:00:00:01"},
                                      {"ip": "10.20.0.1/24", "id": "server", "mac": "aa:aa:aa:00:00:02"}])
    client = dc1.startCompute('client', image='vcdn-client', network=[{"ip": "10.10.0.2/24", "id": "client"}])
    server = dc1.startCompute('server', image='webserver', network=[{"ip": "10.20.0.2/24", "id": "server"}])
    # client = net.addDocker('client', ip='10.10.0.1/24', dimage="vcdn-client")
    # cache = net.addDocker('cache', dimage="squid-vnf")
    # server = net.addDocker('server', ip='10.20.0.1/24', dimage="webserver")
    # net.addLink(dc1, client, intfName1='dc-cl', intfName2='client')
    # net.addLink(dc1, server, intfName1='dc-sv', intfName2='server')
    # net.addLink(dc1, cache, intfName1='dc-ccl', intfName2='client', params1={'ip': '10.10.0.2/24'})
    # net.addLink(dc1, cache, intfName1='dc-csv', intfName2='server', params1={'ip': '10.20.0.2/24'})
    # initialise VNFs
    cache.cmd("./start.sh", detach=True)
    client.cmd("./start.sh", detach=True)
    server.cmd('./start.sh', detach=True)
    # startup script hangs if we use another startup command
    # command="./start.sh"
    net.CLI()
    net.stop()
    while not net.exit:
        pass
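Because the three VNFs are ordinary Containernet hosts, their startup can also be verified from the script itself before entering the CLI, e.g. by running extra commands inside them. An illustrative check (not part of the excerpt):

    print(cache.cmd("ip a"))                   # both the client- and server-side interfaces should be up
    print(client.cmd("ping -c 2 10.10.0.1"))   # reach the cache over the "client" network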
Example 10: create_topology
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=True)
    # create two data centers
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # interconnect data centers
    net.addLink(dc1, dc2, delay="20ms")
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.connectDatacenter(dc2)
    rapi1.start()
    # add the 5GTANGO lightweight life cycle manager (LLCM) to the topology
    llcm1 = TangoLLCMEndpoint("0.0.0.0", 5000, deploy_sap=False)
    llcm1.connectDatacenter(dc1)
    llcm1.connectDatacenter(dc2)
    # run the LLCM (in another thread, don't block)
    llcm1.start()
    # start the emulation and enter interactive CLI
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example 11: create_topology
def create_topology():
    net = DCNetwork(monitor=False, enable_learning=False)
    dc1 = net.addDatacenter("dc1")
    # add OpenStack-like APIs to the emulated DC
    api1 = OpenstackApiEndpoint("0.0.0.0", 6001)
    api1.connect_datacenter(dc1)
    api1.start()
    api1.connect_dc_network(net)
    # add the command line interface endpoint to the emulated DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    rapi1.start()
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
Example 12: create_topology1
def create_topology1():
    """
    1. Create a data center network object (DCNetwork) with monitoring enabled
    """
    net = DCNetwork(monitor=True, enable_learning=False)

    """
    1b. Add endpoint APIs for the whole DCNetwork,
        to access and control the networking from outside.
        e.g., to setup forwarding paths between compute
        instances aka. VNFs (represented by Docker containers), passing through
        different switches and datacenters of the emulated topology
    """
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()

    """
    2. Add (logical) data centers to the topology
       (each data center is one "bigswitch" in our simplified
       first prototype)
    """
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    # dc3 = net.addDatacenter("long_data_center_name3")
    # dc4 = net.addDatacenter(
    #     "datacenter4",
    #     metadata={"mydata": "we can also add arbitrary metadata to each DC"})

    """
    3. You can add additional SDN switches for data center
       interconnections to the network.
    """
    s1 = net.addSwitch("s1")

    """
    4. Add links between your data centers and additional switches
       to define your topology.
       These links can use Mininet's features to limit bw, add delay or jitter.
    """
    # net.addLink(dc1, dc2, delay="10ms")
    # net.addLink(dc1, dc2)
    net.addLink(dc1, s1)
    net.addLink(s1, dc2)
    # net.addLink("datacenter1", s1, delay="20ms")
    # net.addLink(s1, dc3)
    # net.addLink(s1, "datacenter4")

    """
    5. We want to access and control our data centers from the outside,
       e.g., we want to connect an orchestrator to start/stop compute
       resources aka. VNFs (represented by Docker containers in the emulated network).
       So we need to instantiate API endpoints (e.g. a zerorpc or REST
       interface). Depending on the endpoint implementations, we can connect
       one or more data centers to it, which can then be controlled through
       this API, e.g., start/stop/list compute instances.
    """
    # create a new instance of an endpoint implementation
    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
    # connect data centers to this endpoint
    zapi1.connectDatacenter(dc1)
    zapi1.connectDatacenter(dc2)
    # zapi1.connectDatacenter(dc3)
    # zapi1.connectDatacenter(dc4)
    # run API endpoint server (in another thread, don't block)
    zapi1.start()

    """
    5.1. For our example, we create a second endpoint to illustrate that
         this is supported by our design. This feature allows us to have
         one API endpoint for each data center. This makes the emulation
         environment more realistic because you can easily create one
         OpenStack-like REST API endpoint for *each* data center.
         This will look like a real-world multi PoP/data center deployment
         from the perspective of an orchestrator.
    """
    # zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
    # zapi2.connectDatacenter(dc3)
    # zapi2.connectDatacenter(dc4)
    # zapi2.start()

    """
    6. Finally we are done and can start our network (the emulator).
       We can also enter the Mininet CLI to interactively interact
       with our compute resources (just like in default Mininet).
       But we can also implement fully automated experiments that
       can be executed again and again.
    """
    net.start()
    net.CLI()
    # when the user types exit in the CLI, we stop the emulator
    net.stop()
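Step 6 of the walkthrough mentions that fully automated experiments can replace the interactive CLI. A sketch of such an automated variant (illustrative; it reuses startCompute/stopCompute as in Example 7 and Mininet's ping helper, and the VNF names are made up):

    # instead of net.CLI(), drive the data centers programmatically
    vnf1 = dc1.startCompute("vnf1")
    vnf2 = dc2.startCompute("vnf2")
    print(net.ping([vnf1, vnf2]))   # packet loss in percent between the two VNFs
    dc1.stopCompute("vnf1")
    dc2.stopCompute("vnf2")
    net.stop()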
Example 13: SimpleTestTopology
class SimpleTestTopology(unittest.TestCase):
    """
    Helper class to do basic test setups.
    s1 -- s2 -- s3 -- ... -- sN
    """

    def __init__(self, *args, **kwargs):
        self.net = None
        self.s = []    # list of switches
        self.h = []    # list of hosts
        self.d = []    # list of docker containers
        self.dc = []   # list of data centers
        self.docker_cli = None
        super(SimpleTestTopology, self).__init__(*args, **kwargs)

    def createNet(
            self,
            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
            autolinkswitches=False, controller=Controller, **kwargs):
        """
        Creates a Mininet instance and automatically adds some
        nodes to it.
        Attention, we should always use Mininet's default controller
        for our tests. Only use other controllers if you want to test
        specific controller functionality.
        """
        self.net = DCNetwork(controller=controller, **kwargs)
        # add some switches
        # start from s1 because ovs does not like to have dpid = 0
        # and switch name-number is being used by mininet to set the dpid
        for i in range(1, nswitches + 1):
            self.s.append(self.net.addSwitch('s%d' % i))
        # if specified, chain all switches
        if autolinkswitches:
            for i in range(0, len(self.s) - 1):
                self.net.addLink(self.s[i], self.s[i + 1])
        # add some data centers
        for i in range(0, ndatacenter):
            self.dc.append(
                self.net.addDatacenter(
                    'datacenter%d' % i,
                    metadata={"unittest_dc": i}))
        # add some hosts
        for i in range(0, nhosts):
            self.h.append(self.net.addHost('h%d' % i))
        # add some dockers
        for i in range(0, ndockers):
            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))

    def startNet(self):
        self.net.start()

    def stopNet(self):
        self.net.stop()

    def getDockerCli(self):
        """
        Helper to interact with local docker instance.
        """
        if self.docker_cli is None:
            self.docker_cli = docker.Client(
                base_url='unix://var/run/docker.sock')
        return self.docker_cli

    def getContainernetContainers(self):
        """
        List the containers managed by containernet
        """
        return self.getDockerCli().containers(filters={"label": "com.containernet"})

    @staticmethod
    def setUp():
        pass

    @staticmethod
    def tearDown():
        cleanup()
        # make sure that all pending docker containers are killed
        with open(os.devnull, 'w') as devnull:
            subprocess.call(
                "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                stdout=devnull,
                stderr=devnull,
                shell=True)
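A concrete test case built on this helper class could look like the following sketch (illustrative; it is not taken from the project's test suite):

class DCNetworkSmokeTest(SimpleTestTopology):
    """Illustrative test case built on SimpleTestTopology."""

    def test_start_compute_in_two_datacenters(self):
        # one switch between two data centers, no plain hosts or extra docker containers
        self.createNet(nswitches=1, ndatacenter=2, nhosts=0, ndockers=0)
        self.net.addLink(self.dc[0], self.s[0])
        self.net.addLink(self.dc[1], self.s[0])
        self.startNet()
        self.dc[0].startCompute("vnf1")
        self.dc[1].startCompute("vnf2")
        # both VNFs should show up as containernet-managed docker containers
        self.assertEqual(len(self.getContainernetContainers()), 2)
        self.dc[0].stopCompute("vnf1")
        self.dc[1].stopCompute("vnf2")
        self.stopNet()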