本文整理汇总了Python中mininet.net.Containernet类的典型用法代码示例。如果您正苦于以下问题:Python Containernet类的具体用法?Python Containernet怎么用?Python Containernet使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Containernet类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: topology
def topology():
    """Create a network with some docker containers acting as hosts."""
    # Input files produced by an earlier solver/placement step.
    edgefile = os.path.join(RESULTS_FOLDER, "./substrate.edges.empty.data")
    nodesfile = os.path.join(RESULTS_FOLDER, "./substrate.nodes.data")
    CDNfile = os.path.join(RESULTS_FOLDER, "CDN.nodes.data")
    startersFile = os.path.join(RESULTS_FOLDER, "starters.nodes.data")
    solutionsFile = os.path.join(RESULTS_FOLDER, "solutions.data")
    service_edges = os.path.join(RESULTS_FOLDER, "./service.edges.data")

    # OpenFlow 1.3 switches, topology from the solver output, remote controller.
    switch = partial(OVSSwitch, protocols='OpenFlow13')
    topo = loadTopo(edgefile, nodesfile, CDNfile, startersFile,
                    solutionsFile, service_edges)
    c = RemoteController('c', '0.0.0.0', 6633)
    # topodock= loaddocker(os.path.join(RESULTS_FOLDER, "./substrate.edges.data"), os.path.join(RESULTS_FOLDER, "./substrate.nodes.data"))

    info('*** Start Containernet\n')
    net = Containernet(topo=topo, controller=c, link=TCLink, switch=switch)

    # Fire the per-host bootstrap commands recorded by the topology loader.
    for host in net.hosts:
        for cmd in topo._cmd.get(host.name, []):
            print("send cmd")
            print((host.sendCmd(cmd)))

    info('*** Starting network\n')
    net.start()
    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network')
    net.stop()
示例2: stop
def stop(self):
    """Tear down the emulation: monitor agent, network, then the controller."""
    # the monitor agent is optional — only stop it if one was started
    agent = self.monitor_agent
    if agent is not None:
        agent.stop()
    # stop the emulated network itself
    Containernet.stop(self)
    # finally shut down the Ryu controller process
    self.stopRyu()
示例3: __init__
def __init__(self, controller=RemoteController, monitor=False,
             enable_learning = True,  # in case of RemoteController (Ryu), learning switch behavior can be turned off/on
             dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
             dc_emulation_max_mem=512,  # emulation max mem in MB
             **kwargs):
    """
    Create an extended version of a Containernet network.

    :param controller: controller class; if RemoteController, a Ryu
        controller process is started and attached
    :param monitor: if True, attach a DCNetworkMonitor agent to this network
    :param enable_learning: forwarded to the Ryu startup as learning_switch
    :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
    :param dc_emulation_max_mem: max. memory (MB) for the emulation
    :param kwargs: path through for Mininet parameters
    :return:
    """
    # registry of data centers attached to this network (label -> DC object)
    self.dcs = {}
    # make sure any remaining Ryu processes are killed
    self.killRyu()
    # make sure no containers are left over from a previous emulator run.
    self.removeLeftoverContainers()
    # call original Docker.__init__ and setup default controller
    Containernet.__init__(
        self, switch=OVSKernelSwitch, controller=controller, **kwargs)
    # Ryu management: process handle is None until/unless Ryu is started
    self.ryu_process = None
    if controller == RemoteController:
        # start Ryu controller
        self.startRyu(learning_switch=enable_learning)
    # add the specified controller
    self.addController('c0', controller=controller)
    # graph of the complete DC network (directed multigraph: one edge per
    # direction per link, so per-direction port attributes can be stored)
    self.DCNetwork_graph = nx.MultiDiGraph()
    # initialize pool of vlan tags to setup the SDN paths
    # reversed so the highest tags are handed out first (pop() from the end)
    self.vlans = range(4096)[::-1]
    # link to Ryu REST_API (Ryu's rest_* apps listen on 8080 by default)
    ryu_ip = '0.0.0.0'
    ryu_port = '8080'
    self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)
    # monitoring agent (optional)
    if monitor:
        self.monitor_agent = DCNetworkMonitor(self)
    else:
        self.monitor_agent = None
    # initialize resource model registrar with the emulation-wide CPU/mem caps
    self.rm_registrar = ResourceModelRegistrar(
        dc_emulation_max_cpu, dc_emulation_max_mem)
示例4: stop
def stop(self):
    """Stop monitoring, the emulated network and Ryu, then flag shutdown."""
    # optional monitor agent goes first
    if self.monitor_agent is not None:
        self.monitor_agent.stop()
    # bring down the emulated network before killing the controller
    Containernet.stop(self)
    self.killRyu()
    # flag to indicate the topology has been stopped
    self.exit = True
示例5: addDocker
def addDocker(self, label, **params):
    """
    Wrapper for addDocker: mirrors the container in the network graph and
    forces the custom EmulatorCompute container class.
    """
    node_type = params.get('type', 'docker')
    self.DCNetwork_graph.add_node(label, type=node_type)
    return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
示例6: addSwitch
def addSwitch(self, name, add_to_graph=True, **params):
    """
    Wrapper for addSwitch that also records the switch in the graph.
    """
    if add_to_graph:
        self.DCNetwork_graph.add_node(name)
    # advertise all OpenFlow versions the emulator supports
    protocols = 'OpenFlow10,OpenFlow12,OpenFlow13'
    return Containernet.addSwitch(self, name, protocols=protocols, **params)
示例7: addExtSAP
def addExtSAP(self, sap_name, sap_ip, **params):
    """
    Wrapper for addExtSAP that also records the SAP in the graph.
    """
    # default the node type to 'sap_ext' and keep it in params for callees
    sap_type = params.setdefault('type', 'sap_ext')
    self.DCNetwork_graph.add_node(sap_name, type=sap_type)
    return Containernet.addExtSAP(self, sap_name, sap_ip, **params)
示例8: removeLink
def removeLink(self, link=None, node1=None, node2=None):
    """
    Remove the link from the Containernet and the networkx graph.

    :param link: link object; if given, its endpoints override node1/node2
    :param node1: first endpoint (required if link is None)
    :param node2: second endpoint (required if link is None)
    """
    if link is not None:
        node1 = link.intf1.node
        node2 = link.intf2.node
    assert node1 is not None
    assert node2 is not None
    Containernet.removeLink(self, link=link, node1=node1, node2=node2)
    # Remove both directed graph edges; a missing edge is only logged.
    # NOTE: was a bare `except:` which also swallowed SystemExit /
    # KeyboardInterrupt — narrowed to Exception.
    # TODO we might decrease the loglevel to debug:
    for src, dst in ((node2.name, node1.name), (node1.name, node2.name)):
        try:
            self.DCNetwork_graph.remove_edge(src, dst)
        except Exception:
            LOG.warning("%s, %s not found in DCNetwork_graph." % (src, dst))
示例9: addSwitch
def addSwitch(self, name, add_to_graph=True, **params):
    """
    Wrapper for addSwitch that also records the switch in the graph.

    :param name: switch name
    :param add_to_graph: if True, mirror the switch in DCNetwork_graph
    :param params: passed through to Containernet.addSwitch
    :return: the created switch
    """
    # add this switch to the global topology overview
    if add_to_graph:
        self.DCNetwork_graph.add_node(name, type=params.get('type', 'switch'))
    # set the learning switch behavior; pop() removes the key from params so
    # failMode is not passed twice to Containernet.addSwitch (the original
    # `failMode=failMode, **params` raised TypeError when callers set it)
    failMode = params.pop('failMode', self.failMode)
    s = Containernet.addSwitch(
        self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13',
        failMode=failMode, **params)
    return s
示例10: topology
def topology():
    """Create a network with some docker containers acting as hosts."""
    net = Containernet(controller=Controller)

    info('*** Adding controller\n')
    net.addController('c0')

    info('*** Adding docker containers\n')
    # stress-test container pinned to CPUs 0 and 1
    d1 = net.addDocker('d1', ip='10.0.0.251', dimage="mpeuster/stress",
                       cpuset_cpus="0,1")
    d1.sendCmd("./start.sh")

    info('*** Starting network\n')
    net.start()
    info('*** Running CLI\n')
    CLI(net)
    info('*** Stopping network')
    net.stop()
示例11: addSwitch
def addSwitch(self, name, add_to_graph=True, **params):
    """
    Wrapper for addSwitch that also records the switch in the graph.

    :param name: switch name
    :param add_to_graph: if True, mirror the switch in DCNetwork_graph
    :param params: passed through to Containernet.addSwitch
    :return: the created switch
    """
    # add this switch to the global topology overview
    if add_to_graph:
        self.DCNetwork_graph.add_node(name)
    # set the learning switch behavior; pop() removes the key from params so
    # failMode is not passed twice to Containernet.addSwitch (the original
    # `failMode=failMode, **params` raised TypeError when callers set it)
    failMode = params.pop('failMode', self.failMode)
    s = Containernet.addSwitch(
        self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13',
        failMode=failMode, **params)
    # NOTE: a flow entry enabling learning-switch behavior (for E-LAN
    # functionality) could be installed here for failMode == 'standalone',
    # e.g. s.dpctl('add-flow', 'actions=NORMAL')
    return s
示例12: createNet
def createNet(
        self,
        nswitches=1, nhosts=0, ndockers=0,
        autolinkswitches=False):
    """
    Creates a Mininet instance and automatically adds some
    nodes to it.
    """
    self.net = Containernet(controller=Controller)
    self.net.addController('c0')

    # add the requested number of switches
    for idx in range(nswitches):
        self.s.append(self.net.addSwitch('s%d' % idx))

    # if specified, chain all switches pairwise
    if autolinkswitches:
        for left, right in zip(self.s, self.s[1:]):
            self.net.addLink(left, right)

    # add the requested number of plain hosts
    for idx in range(nhosts):
        self.h.append(self.net.addHost('h%d' % idx))

    # add the requested number of docker containers
    for idx in range(ndockers):
        self.d.append(self.net.addDocker('d%d' % idx, dimage="ubuntu:trusty"))
示例13: tfTopo
def tfTopo():
    # Build a 10-switch topology with 10 plain hosts and 6 Docker-based
    # "PoP" hosts (click routers), wired with TCLink delay/bw/loss settings.
    # NOTE(review): this snippet uses Python-2 print statements and the
    # source it was scraped from is truncated (see marker at the end).
    net = Containernet( topo=None, controller=RemoteController, switch=OVSKernelSwitch )
    net.addController( 'c0', RemoteController, ip="127.0.0.1", port=6633 )

    #Arguments
    # command-line options select the number of flows and DoS streams
    opts, args = getopt.getopt(sys.argv[1:], "", ["flows=", "dos="])
    for o, a in opts:
        if o == "--flows":
            number_of_flows=int(a)
            print "Flows: ",a
        elif o in ("--dos"):
            number_of_dos=int(a)
            print "DoS: ",a

    # Hosts
    h1 = net.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01')
    h2 = net.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02')
    h3 = net.addHost('h3', ip='10.0.0.3', mac='00:00:00:00:00:03')
    h4 = net.addHost('h4', ip='10.0.0.4', mac='00:00:00:00:00:04')
    h5 = net.addHost('h5', ip='10.0.0.5', mac='00:00:00:00:00:05')
    h6 = net.addHost('h6', ip='10.0.0.6', mac='00:00:00:00:00:06')
    h7 = net.addHost('h7', ip='10.0.0.7', mac='00:00:00:00:00:07')
    h8 = net.addHost('h8', ip='10.0.0.8', mac='00:00:00:00:00:08')
    h9 = net.addHost('h9', ip='10.0.0.9', mac='00:00:00:00:00:09')
    h10 = net.addHost('h10', ip='10.0.0.10', mac='00:00:00:00:00:10')
    # PoP hosts: Docker containers running the click router image, with a
    # 10 MB memory cap and a CPU quota derived from pop_cpu_percentage
    p1 = net.addHost('p1', ip='10.0.1.1', mac='00:00:00:00:01:01', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p2 = net.addHost('p2', ip='10.0.1.2', mac='00:00:00:00:01:02', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p3 = net.addHost('p3', ip='10.0.1.3', mac='00:00:00:00:01:03', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p4 = net.addHost('p4', ip='10.0.1.4', mac='00:00:00:00:01:04', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p5 = net.addHost('p5', ip='10.0.1.5', mac='00:00:00:00:01:05', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)
    p6 = net.addHost('p6', ip='10.0.1.6', mac='00:00:00:00:01:06', cls=Docker, dimage='gmiotto/click',mem_limit=1024*1024*10, cpu_quota=pop_cpu_percentage*100,cpu_period=10000)

    #Switches
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')
    s5 = net.addSwitch('s5')
    s6 = net.addSwitch('s6')
    s7 = net.addSwitch('s7')
    s8 = net.addSwitch('s8')
    s9 = net.addSwitch('s9')
    s10 = net.addSwitch('s10')

    #PoP Hosts
    # each PoP gets two links to its switch: one shaped TCLink + one plain
    net.addLink(p1,s1, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p1,s1)
    net.addLink(p2,s2, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p2,s2)
    net.addLink(p3,s3, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p3,s3)
    net.addLink(p4,s4, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p4,s4)
    net.addLink(p5,s5, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p5,s5)
    net.addLink(p6,s6, cls=TCLink, delay=pop_link_delay,bw=pop_link_bw,loss=pop_link_loss)
    net.addLink(p6,s6)

    #Normal Hosts
    # host hN attaches to switch sN with the host<->switch shaping parameters
    net.addLink(h1,s1, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h2,s2, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h3,s3, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h4,s4, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h5,s5, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h6,s6, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h7,s7, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h8,s8, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h9,s9, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)
    net.addLink(h10,s10, cls=TCLink, delay=host_switch_delay,bw=host_switch_bw,loss=host_switch_loss)

    # inter-switch mesh with the inter-switch shaping parameters
    net.addLink(s7, s1, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss) #s7-s1
    net.addLink(s7, s2, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s1, s2, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s1, s8, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s1, s3, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s1, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s8, s3, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s2, s5, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s2, s4, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s3, s5, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s3, s4, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s4, s9, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s4, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s5, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s5, s10, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s9, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)
    net.addLink(s10, s6, cls=TCLink, delay=inter_switch_delay,bw=inter_switch_bw,loss=inter_switch_loss)

    net.start()
    # disable TCP segmentation offload on the plain hosts so traffic shaping
    # measurements are not skewed by the NIC
    for host in net.hosts:
        if "h" in host.name:
            host.cmd('ethtool -K %s-eth0 tso off' % host.name)
    #.........这里部分代码省略.........
示例14: start
def start(self):
    """Start all attached data centers, then the Containernet itself."""
    # data centers must be running before the network comes up
    for datacenter in self.dcs.itervalues():
        datacenter.start()
    Containernet.start(self)
示例15: addLink
def addLink(self, node1, node2, **params):
    """
    Able to handle Datacenter objects as link end points.

    Resolves Datacenter endpoints (given as object or registered name) to
    their switches, assigns default IPs to Docker endpoints, creates the
    link and mirrors it as a pair of directed edges (with per-direction
    port info and TCLink weight metrics) in DCNetwork_graph.

    :param node1: endpoint (node object, Datacenter, or dc name string)
    :param node2: endpoint (node object, Datacenter, or dc name string)
    :param params: passed through to Containernet.addLink
    :return: the created link object
    """
    assert node1 is not None
    assert node2 is not None
    LOG.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
    # ensure type of node1: resolve dc-name strings and Datacenter objects
    if isinstance( node1, basestring ):
        if node1 in self.dcs:
            node1 = self.dcs[node1].switch
    if isinstance( node1, Datacenter ):
        node1 = node1.switch
    # ensure type of node2
    if isinstance( node2, basestring ):
        if node2 in self.dcs:
            node2 = self.dcs[node2].switch
    if isinstance( node2, Datacenter ):
        node2 = node2.switch
    # try to give containers a default IP (only when none was supplied)
    if isinstance( node1, Docker ):
        if "params1" not in params:
            params["params1"] = {}
        if "ip" not in params["params1"]:
            params["params1"]["ip"] = self.getNextIp()
    if isinstance( node2, Docker ):
        if "params2" not in params:
            params["params2"] = {}
        if "ip" not in params["params2"]:
            params["params2"]["ip"] = self.getNextIp()
    # ensure that we allow TCLinks between data centers
    # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
    # see Containernet issue: https://github.com/mpeuster/containernet/issues/3
    if "cls" not in params:
        params["cls"] = TCLink

    link = Containernet.addLink(self, node1, node2, **params)

    # try to give container interfaces a default id
    node1_port_id = node1.ports[link.intf1]
    if isinstance(node1, Docker):
        if "id" in params["params1"]:
            node1_port_id = params["params1"]["id"]
    node1_port_name = link.intf1.name

    node2_port_id = node2.ports[link.intf2]
    if isinstance(node2, Docker):
        if "id" in params["params2"]:
            node2_port_id = params["params2"]["id"]
    node2_port_name = link.intf2.name

    # add edge and assigned port number to graph in both directions between node1 and node2
    # port_id: id given in descriptor (if available, otherwise same as port)
    # port: portnumber assigned by Containernet
    attr_dict = {}
    # possible weight metrics allowed by TClink class:
    weight_metrics = ['bw', 'delay', 'jitter', 'loss']
    edge_attributes = [p for p in params if p in weight_metrics]
    for attr in edge_attributes:
        # if delay: strip ms (need number as weight in graph)
        # str() guards against numeric values (e.g. bw=10) — re.search
        # would raise TypeError on a non-string argument otherwise
        match = re.search(r'([0-9]*\.?[0-9]+)', str(params[attr]))
        if match:
            attr_number = match.group(1)
        else:
            attr_number = None
        attr_dict[attr] = attr_number

    # forward direction: node1 -> node2
    attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
                  'src_port_name': node1_port_name,
                  'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
                  'dst_port_name': node2_port_name}
    attr_dict2.update(attr_dict)
    self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)

    # reverse direction: node2 -> node1
    attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
                  'src_port_name': node2_port_name,
                  'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
                  'dst_port_name': node1_port_name}
    attr_dict2.update(attr_dict)
    self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)

    return link