This article collects typical usage examples of the New function from the Golang package k8s.io/kubernetes/pkg/util/dbus. If you have been wondering what exactly New does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
A total of 15 code examples of the New function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
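Before diving into the examples, here is a minimal, self-contained sketch of the pattern that every example below repeats: dbus.New supplies the D-Bus connection wrapper that the iptables helper uses to watch for firewalld restarts and re-apply its rules. This is only an illustrative sketch, assuming a Kubernetes source tree of the same vintage as these examples (where k8s.io/kubernetes/pkg/util/dbus, .../exec, and .../iptables are importable); the helper name newIPTablesInterface and the chain name EXAMPLE-CHAIN are made up for the sketch and do not appear in any example on this page.

package main

import (
	"fmt"

	utildbus "k8s.io/kubernetes/pkg/util/dbus"
	utilexec "k8s.io/kubernetes/pkg/util/exec"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)

// newIPTablesInterface (hypothetical helper) wires a D-Bus connection wrapper
// (utildbus.New) and a command runner (utilexec.New) into the iptables
// utility, exactly as the examples below do inline.
func newIPTablesInterface() utiliptables.Interface {
	execer := utilexec.New()
	dbus := utildbus.New() // D-Bus wrapper; the iptables helper uses it to notice firewalld restarts
	return utiliptables.New(execer, dbus, utiliptables.ProtocolIpv4)
}

func main() {
	ipt := newIPTablesInterface()
	// EnsureChain is one of the calls the examples build on; it typically
	// fails unless the process runs as root.
	if _, err := ipt.EnsureChain(utiliptables.TableNAT, utiliptables.Chain("EXAMPLE-CHAIN")); err != nil {
		fmt.Printf("ensure chain: %v\n", err)
	}
}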
Example 1: newNodeIPTables
func newNodeIPTables(clusterNetworkCIDR string, syncPeriod time.Duration) *NodeIPTables {
	return &NodeIPTables{
		ipt:                iptables.New(kexec.New(), utildbus.New(), iptables.ProtocolIpv4),
		clusterNetworkCIDR: clusterNetworkCIDR,
		syncPeriod:         syncPeriod,
	}
}
Example 2: StartNode
func (oc *OvsController) StartNode(mtu uint) error {
	// Assume we are working with IPv4
	clusterNetworkCIDR, err := oc.Registry.GetClusterNetworkCIDR()
	if err != nil {
		log.Errorf("Failed to obtain ClusterNetwork: %v", err)
		return err
	}
	ipt := iptables.New(kexec.New(), utildbus.New(), iptables.ProtocolIpv4)
	if err := SetupIptables(ipt, clusterNetworkCIDR); err != nil {
		return fmt.Errorf("Failed to set up iptables: %v", err)
	}
	ipt.AddReloadFunc(func() {
		err := SetupIptables(ipt, clusterNetworkCIDR)
		if err != nil {
			log.Errorf("Error reloading iptables: %v\n", err)
		}
	})
	if err := oc.pluginHooks.PluginStartNode(mtu); err != nil {
		return fmt.Errorf("Failed to start plugin: %v", err)
	}
	oc.markPodNetworkReady()
	return nil
}
Example 3: SetupIptables
func SetupIptables(fw *firewalld.Interface, clusterNetworkCIDR string) error {
	if fw.IsRunning() {
		rules := []FirewallRule{
			{firewalld.IPv4, "nat", "POSTROUTING", 0, []string{"-s", clusterNetworkCIDR, "!", "-d", clusterNetworkCIDR, "-j", "MASQUERADE"}},
			{firewalld.IPv4, "filter", "INPUT", 0, []string{"-p", "udp", "-m", "multiport", "--dports", "4789", "-m", "comment", "--comment", "001 vxlan incoming", "-j", "ACCEPT"}},
			{firewalld.IPv4, "filter", "INPUT", 0, []string{"-i", "tun0", "-m", "comment", "--comment", "traffic from docker for internet", "-j", "ACCEPT"}},
			{firewalld.IPv4, "filter", "FORWARD", 0, []string{"-d", clusterNetworkCIDR, "-j", "ACCEPT"}},
			{firewalld.IPv4, "filter", "FORWARD", 0, []string{"-s", clusterNetworkCIDR, "-j", "ACCEPT"}},
		}
		for _, rule := range rules {
			err := fw.EnsureRule(rule.ipv, rule.table, rule.chain, rule.priority, rule.args)
			if err != nil {
				return err
			}
		}
	} else {
		dbus := utildbus.New()
		ipt := iptables.New(kexec.New(), dbus, iptables.ProtocolIpv4)
		_, err := ipt.EnsureRule(iptables.Append, iptables.TableNAT, iptables.ChainPostrouting, "-s", clusterNetworkCIDR, "!", "-d", clusterNetworkCIDR, "-j", "MASQUERADE")
		if err != nil {
			return err
		}
	}
	return nil
}
Example 4: RunProxy
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy() {
	// initialize kube proxy
	serviceConfig := pconfig.NewServiceConfig()
	endpointsConfig := pconfig.NewEndpointsConfig()
	host, _, err := net.SplitHostPort(c.BindAddress)
	if err != nil {
		glog.Fatalf("The provided value to bind to must be an ip:port %q", c.BindAddress)
	}
	ip := net.ParseIP(host)
	if ip == nil {
		glog.Fatalf("The provided value to bind to must be an ip:port: %q", c.BindAddress)
	}
	protocol := iptables.ProtocolIpv4
	if ip.To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	syncPeriod, err := time.ParseDuration(c.IPTablesSyncPeriod)
	if err != nil {
		glog.Fatalf("Cannot parse the provided ip-tables sync period (%s) : %v", c.IPTablesSyncPeriod, err)
	}
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(c.Client.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "kube-proxy", Host: c.KubeletConfig.NodeName})
	nodeRef := &kapi.ObjectReference{
		Kind: "Node",
		Name: c.KubeletConfig.NodeName,
	}
	exec := kexec.New()
	dbus := utildbus.New()
	iptables := iptables.New(exec, dbus, protocol)
	proxier, err := proxy.NewProxier(iptables, exec, syncPeriod, false)
	if err != nil {
		// This should be fatal, but that would break the integration tests
		glog.Warningf("WARNING: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
		return
	}
	iptables.AddReloadFunc(proxier.Sync)
	pconfig.NewSourceAPI(
		c.Client,
		10*time.Minute,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"))
	serviceConfig.RegisterHandler(proxier)
	if c.FilteringEndpointsHandler == nil {
		endpointsConfig.RegisterHandler(proxier)
	} else {
		c.FilteringEndpointsHandler.SetBaseEndpointsHandler(proxier)
		endpointsConfig.RegisterHandler(c.FilteringEndpointsHandler)
	}
	recorder.Eventf(nodeRef, kapi.EventTypeNormal, "Starting", "Starting kube-proxy.")
	glog.Infof("Started Kubernetes Proxy on %s", host)
}
Example 5: NewHostportHandler
func NewHostportHandler() HostportHandler {
	iptInterface := utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4)
	return &handler{
		hostPortMap: make(map[hostport]closeable),
		iptables:    iptInterface,
		portOpener:  openLocalPort,
	}
}
Example 6: newIPVSController
// newIPVSController creates a new controller from the given config.
func newIPVSController(kubeClient *unversioned.Client, namespace string, useUnicast bool, configMapName string) *ipvsControllerController {
	ipvsc := ipvsControllerController{
		client:            kubeClient,
		reloadRateLimiter: util.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
		ruCfg:             []vip{},
		configMapName:     configMapName,
	}
	clusterNodes := getClusterNodesIP(kubeClient)
	nodeInfo, err := getNodeInfo(clusterNodes)
	if err != nil {
		glog.Fatalf("Error getting local IP from nodes in the cluster: %v", err)
	}
	neighbors := getNodeNeighbors(nodeInfo, clusterNodes)
	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, utiliptables.ProtocolIpv4)
	ipvsc.keepalived = &keepalived{
		iface:      nodeInfo.iface,
		ip:         nodeInfo.ip,
		netmask:    nodeInfo.netmask,
		nodes:      clusterNodes,
		neighbors:  neighbors,
		priority:   getNodePriority(nodeInfo.ip, clusterNodes),
		useUnicast: useUnicast,
		ipt:        iptInterface,
	}
	err = ipvsc.keepalived.loadTemplate()
	if err != nil {
		glog.Fatalf("Error loading keepalived template: %v", err)
	}
	eventHandlers := framework.ResourceEventHandlerFuncs{}
	ipvsc.svcLister.Store, ipvsc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)
	ipvsc.epLister.Store, ipvsc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)
	return &ipvsc
}
Example 7: NewPlugin
func NewPlugin() network.NetworkPlugin {
	protocol := utiliptables.ProtocolIpv4
	execer := utilexec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)
	return &kubenetNetworkPlugin{
		podIPs:      make(map[kubecontainer.ContainerID]string),
		hostPortMap: make(map[hostport]closeable),
		MTU:         1460, //TODO: don't hardcode this
		execer:      utilexec.New(),
		iptables:    iptInterface,
	}
}
Example 8: NewPlugin
func NewPlugin(networkPluginDir string) network.NetworkPlugin {
	protocol := utiliptables.ProtocolIpv4
	execer := utilexec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)
	return &kubenetNetworkPlugin{
		podIPs:            make(map[kubecontainer.ContainerID]string),
		MTU:               1460, //TODO: don't hardcode this
		execer:            utilexec.New(),
		iptables:          iptInterface,
		vendorDir:         networkPluginDir,
		hostportHandler:   hostport.NewHostportHandler(),
		nonMasqueradeCIDR: "10.0.0.0/8",
	}
}
Example 9: newIPVSController
// newIPVSController creates a new controller from the given config.
func newIPVSController(kubeClient *unversioned.Client, namespace string, useUnicast bool, configMapName string) *ipvsControllerController {
	ipvsc := ipvsControllerController{
		client:            kubeClient,
		reloadRateLimiter: flowcontrol.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
		ruCfg:             []vip{},
		configMapName:     configMapName,
		stopCh:            make(chan struct{}),
	}
	podInfo, err := getPodDetails(kubeClient)
	if err != nil {
		glog.Fatalf("Error getting POD information: %v", err)
	}
	pod, err := kubeClient.Pods(podInfo.PodNamespace).Get(podInfo.PodName)
	if err != nil {
		glog.Fatalf("Error getting %v: %v", podInfo.PodName, err)
	}
	selector := parseNodeSelector(pod.Spec.NodeSelector)
	clusterNodes := getClusterNodesIP(kubeClient, selector)
	nodeInfo, err := getNetworkInfo(podInfo.NodeIP)
	if err != nil {
		glog.Fatalf("Error getting local IP from nodes in the cluster: %v", err)
	}
	neighbors := getNodeNeighbors(nodeInfo, clusterNodes)
	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, utiliptables.ProtocolIpv4)
	ipvsc.keepalived = &keepalived{
		iface:      nodeInfo.iface,
		ip:         nodeInfo.ip,
		netmask:    nodeInfo.netmask,
		nodes:      clusterNodes,
		neighbors:  neighbors,
		priority:   getNodePriority(nodeInfo.ip, clusterNodes),
		useUnicast: useUnicast,
		ipt:        iptInterface,
	}
	ipvsc.syncQueue = NewTaskQueue(ipvsc.sync)
	err = ipvsc.keepalived.loadTemplate()
	if err != nil {
		glog.Fatalf("Error loading keepalived template: %v", err)
	}
	eventHandlers := cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			ipvsc.syncQueue.enqueue(obj)
		},
		DeleteFunc: func(obj interface{}) {
			ipvsc.syncQueue.enqueue(obj)
		},
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				ipvsc.syncQueue.enqueue(cur)
			}
		},
	}
	ipvsc.svcLister.Indexer, ipvsc.svcController = cache.NewIndexerInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "services", namespace, fields.Everything()),
		&api.Service{},
		resyncPeriod,
		eventHandlers,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	ipvsc.epLister.Store, ipvsc.epController = cache.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)
	return &ipvsc
}
Example 10: NewProxyServerDefault
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(config.KubeProxyConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	protocol := utiliptables.ProtocolIpv4
	if net.ParseIP(config.BindAddress).To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}
	// Create an iptables util.
	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)
	// We omit creation of pretty much everything if we run in cleanup mode
	if config.CleanupAndExit {
		return &ProxyServer{
			Config:       config,
			IptInterface: iptInterface,
		}, nil
	}
	// TODO(vmarmol): Use container config for this.
	var oomAdjuster *oom.OOMAdjuster
	if config.OOMScoreAdj != nil {
		oomAdjuster = oom.NewOOMAdjuster()
		if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*config.OOMScoreAdj)); err != nil {
			glog.V(2).Info(err)
		}
	}
	if config.ResourceContainer != "" {
		// Run in its own container.
		if err := resourcecontainer.RunInResourceContainer(config.ResourceContainer); err != nil {
			glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
		} else {
			glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
		}
	}
	// Create a Kube Client
	// define api config source
	if config.Kubeconfig == "" && config.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
	if err != nil {
		return nil, err
	}
	kubeconfig.ContentType = config.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = config.KubeAPIQPS
	kubeconfig.Burst = int(config.KubeAPIBurst)
	client, err := clientset.NewForConfig(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	// Create event recorder
	hostname := nodeutil.GetHostname(config.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
	var proxier proxy.ProxyProvider
	var endpointsHandler proxyconfig.EndpointsConfigHandler
	proxyMode := getProxyMode(string(config.Mode), client.Core().Nodes(), hostname, iptInterface, iptables.LinuxKernelCompatTester{})
	if proxyMode == proxyModeIPTables {
		glog.V(0).Info("Using iptables Proxier.")
		if config.IPTablesMasqueradeBit == nil {
			// IPTablesMasqueradeBit must be specified or defaulted.
			return nil, fmt.Errorf("Unable to read IPTablesMasqueradeBit from config")
		}
		proxierIPTables, err := iptables.NewProxier(iptInterface, utilsysctl.New(), execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll, int(*config.IPTablesMasqueradeBit), config.ClusterCIDR, hostname, getNodeIP(client, hostname))
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIPTables
		endpointsHandler = proxierIPTables
		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
		glog.V(0).Info("Tearing down userspace rules.")
		userspace.CleanupLeftovers(iptInterface)
	} else {
		glog.V(0).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer
		proxierUserspace, err := userspace.NewProxier(
//......... some of the code is omitted here .........
Example 11: RunProxy
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy(endpointsFilterer FilteringEndpointsConfigHandler) {
	// initialize kube proxy
	serviceConfig := pconfig.NewServiceConfig()
	endpointsConfig := pconfig.NewEndpointsConfig()
	loadBalancer := proxy.NewLoadBalancerRR()
	if endpointsFilterer == nil {
		endpointsConfig.RegisterHandler(loadBalancer)
	} else {
		endpointsFilterer.SetBaseEndpointsHandler(loadBalancer)
		endpointsConfig.RegisterHandler(endpointsFilterer)
	}
	host, _, err := net.SplitHostPort(c.BindAddress)
	if err != nil {
		glog.Fatalf("The provided value to bind to must be an ip:port %q", c.BindAddress)
	}
	ip := net.ParseIP(host)
	if ip == nil {
		glog.Fatalf("The provided value to bind to must be an ip:port: %q", c.BindAddress)
	}
	protocol := iptables.ProtocolIpv4
	if ip.To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	syncPeriod, err := time.ParseDuration(c.IPTablesSyncPeriod)
	if err != nil {
		glog.Fatalf("Cannot parse the provided ip-tables sync period (%s) : %v", c.IPTablesSyncPeriod, err)
	}
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(c.Client.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "kube-proxy", Host: c.KubeletConfig.NodeName})
	nodeRef := &kapi.ObjectReference{
		Kind: "Node",
		Name: c.KubeletConfig.NodeName,
	}
	go util.Forever(func() {
		dbus := utildbus.New()
		iptables := iptables.New(kexec.New(), dbus, protocol)
		proxier, err := proxy.NewProxier(loadBalancer, ip, iptables, util.PortRange{}, syncPeriod)
		if err != nil {
			switch {
			// conflicting use of iptables, retry
			case proxy.IsProxyLocked(err):
				glog.Errorf("Unable to start proxy, will retry: %v", err)
				return
			// on a system without iptables
			case strings.Contains(err.Error(), "executable file not found in path"):
				glog.V(4).Infof("kube-proxy initialization error: %v", err)
				glog.Warningf("WARNING: Could not find the iptables command. The service proxy requires iptables and will be disabled.")
			case err == proxy.ErrProxyOnLocalhost:
				glog.Warningf("WARNING: The service proxy cannot bind to localhost and will be disabled.")
			case strings.Contains(err.Error(), "you must be root"):
				glog.Warningf("WARNING: Could not modify iptables. You must run this process as root to use the service proxy.")
			default:
				glog.Warningf("WARNING: Could not modify iptables. You must run this process as root to use the service proxy: %v", err)
			}
			select {}
		}
		// Register the reload hook only once the proxier has been created successfully.
		iptables.AddReloadFunc(proxier.Sync)
		pconfig.NewSourceAPI(
			c.Client,
			10*time.Minute,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"))
		serviceConfig.RegisterHandler(proxier)
		recorder.Eventf(nodeRef, "Starting", "Starting kube-proxy.")
		glog.Infof("Started Kubernetes Proxy on %s", host)
		select {}
	}, 5*time.Second)
}
Example 12: NewProxyServerDefault
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *ProxyServerConfig) (*ProxyServer, error) {
	protocol := utiliptables.ProtocolIpv4
	if config.BindAddress.To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}
	// Create an iptables util.
	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)
	// We omit creation of pretty much everything if we run in cleanup mode
	if config.CleanupAndExit {
		return &ProxyServer{
			Config:       config,
			IptInterface: iptInterface,
		}, nil
	}
	// TODO(vmarmol): Use container config for this.
	var oomAdjuster *oom.OOMAdjuster
	if config.OOMScoreAdj != 0 {
		oomAdjuster = oom.NewOOMAdjuster()
		if err := oomAdjuster.ApplyOOMScoreAdj(0, config.OOMScoreAdj); err != nil {
			glog.V(2).Info(err)
		}
	}
	if config.ResourceContainer != "" {
		// Run in its own container.
		if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
			glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
		} else {
			glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
		}
	}
	// Create a Kube Client
	// define api config source
	if config.Kubeconfig == "" && config.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
	if err != nil {
		return nil, err
	}
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = config.KubeApiQps
	kubeconfig.Burst = config.KubeApiBurst
	client, err := kubeclient.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	// Create event recorder
	hostname := nodeutil.GetHostname(config.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
	eventBroadcaster.StartRecordingToSink(client.Events(""))
	var proxier proxy.ProxyProvider
	var endpointsHandler proxyconfig.EndpointsConfigHandler
	useIptablesProxy := false
	if mayTryIptablesProxy(config.ProxyMode, client.Nodes(), hostname) {
		var err error
		// guaranteed false on error, error only necessary for debugging
		useIptablesProxy, err = iptables.ShouldUseIptablesProxier()
		if err != nil {
			glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
		}
	}
	if useIptablesProxy {
		glog.V(2).Info("Using iptables Proxier.")
		proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IptablesSyncPeriod, config.MasqueradeAll)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIptables
		endpointsHandler = proxierIptables
		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
		glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
		userspace.CleanupLeftovers(iptInterface)
	} else {
		glog.V(2).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer
		proxierUserspace, err := userspace.NewProxier(loadBalancer, config.BindAddress, iptInterface, config.PortRange, config.IptablesSyncPeriod)
//......... some of the code is omitted here .........
Example 13: StartNode
func (oc *OvsController) StartNode(mtu uint) error {
	err := oc.initSelfSubnet()
	if err != nil {
		log.Errorf("Failed to get subnet for this host: %v", err)
		return err
	}
	// Assume we are working with IPv4
	clusterNetworkCIDR, err := oc.subnetRegistry.GetClusterNetworkCIDR()
	if err != nil {
		log.Errorf("Failed to obtain ClusterNetwork: %v", err)
		return err
	}
	servicesNetworkCIDR, err := oc.subnetRegistry.GetServicesNetworkCIDR()
	if err != nil {
		log.Errorf("Failed to obtain ServicesNetwork: %v", err)
		return err
	}
	err = oc.flowController.Setup(oc.localSubnet.SubnetCIDR, clusterNetworkCIDR, servicesNetworkCIDR, mtu)
	if err != nil {
		return err
	}
	ipt := iptables.New(kexec.New(), utildbus.New(), iptables.ProtocolIpv4)
	err = SetupIptables(ipt, clusterNetworkCIDR)
	if err != nil {
		return err
	}
	ipt.AddReloadFunc(func() {
		err := SetupIptables(ipt, clusterNetworkCIDR)
		if err != nil {
			log.Errorf("Error reloading iptables: %v\n", err)
		}
	})
	result, err := oc.watchAndGetResource("HostSubnet")
	if err != nil {
		return err
	}
	subnets := result.([]api.Subnet)
	for _, s := range subnets {
		oc.flowController.AddOFRules(s.NodeIP, s.SubnetCIDR, oc.localIP)
	}
	if oc.isMultitenant() {
		result, err := oc.watchAndGetResource("NetNamespace")
		if err != nil {
			return err
		}
		nslist := result.([]api.NetNamespace)
		for _, ns := range nslist {
			oc.VNIDMap[ns.Name] = ns.NetID
		}
		result, err = oc.watchAndGetResource("Service")
		if err != nil {
			return err
		}
		services := result.([]api.Service)
		for _, svc := range services {
			netid, found := oc.VNIDMap[svc.Namespace]
			if !found {
				return fmt.Errorf("Error fetching Net ID for namespace: %s", svc.Namespace)
			}
			oc.flowController.AddServiceOFRules(netid, svc.IP, svc.Protocol, svc.Port)
		}
		_, err = oc.watchAndGetResource("Pod")
		if err != nil {
			return err
		}
	}
	if oc.ready != nil {
		close(oc.ready)
	}
	return nil
}
Example 14: NewFlannelHelper
// NewFlannelHelper creates a new flannel helper.
func NewFlannelHelper() *FlannelHelper {
	return &FlannelHelper{
		subnetFile:     flannelSubnetFile,
		iptablesHelper: utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4),
	}
}
Example 15: RunProxy
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy() {
	protocol := utiliptables.ProtocolIpv4
	bindAddr := net.ParseIP(c.ProxyConfig.BindAddress)
	if bindAddr.To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}
	portRange := utilnet.ParsePortRangeOrDie(c.ProxyConfig.PortRange)
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(c.Client.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "kube-proxy", Host: c.KubeletConfig.NodeName})
	execer := kexec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)
	var proxier proxy.ProxyProvider
	var endpointsHandler pconfig.EndpointsConfigHandler
	switch c.ProxyConfig.Mode {
	case componentconfig.ProxyModeIPTables:
		glog.V(0).Info("Using iptables Proxier.")
		if c.ProxyConfig.IPTablesMasqueradeBit == nil {
			// IPTablesMasqueradeBit must be specified or defaulted.
			glog.Fatalf("Unable to read IPTablesMasqueradeBit from config")
		}
		proxierIptables, err := iptables.NewProxier(iptInterface, execer, c.ProxyConfig.IPTablesSyncPeriod.Duration, c.ProxyConfig.MasqueradeAll, int(*c.ProxyConfig.IPTablesMasqueradeBit), c.ProxyConfig.ClusterCIDR)
		if err != nil {
			if c.Containerized {
				glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
			} else {
				glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
			}
		}
		proxier = proxierIptables
		endpointsHandler = proxierIptables
		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
		glog.V(0).Info("Tearing down userspace rules.")
		userspace.CleanupLeftovers(iptInterface)
	case componentconfig.ProxyModeUserspace:
		glog.V(0).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer
		proxierUserspace, err := userspace.NewProxier(
			loadBalancer,
			bindAddr,
			iptInterface,
			*portRange,
			c.ProxyConfig.IPTablesSyncPeriod.Duration,
			c.ProxyConfig.UDPIdleTimeout.Duration,
		)
		if err != nil {
			if c.Containerized {
				glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
			} else {
				glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
			}
		}
		proxier = proxierUserspace
		// Remove artifacts from the pure-iptables Proxier.
		glog.V(0).Info("Tearing down pure-iptables proxy rules.")
		iptables.CleanupLeftovers(iptInterface)
	default:
		glog.Fatalf("Unknown proxy mode %q", c.ProxyConfig.Mode)
	}
	// Create configs (i.e. Watches for Services and Endpoints)
	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.
	serviceConfig := pconfig.NewServiceConfig()
	if c.EnableUnidling {
		unidlingLoadBalancer := ouserspace.NewLoadBalancerRR()
		signaler := unidler.NewEventSignaler(recorder)
		unidlingUserspaceProxy, err := unidler.NewUnidlerProxier(unidlingLoadBalancer, bindAddr, iptInterface, execer, *portRange, c.ProxyConfig.IPTablesSyncPeriod.Duration, c.ProxyConfig.UDPIdleTimeout.Duration, signaler)
		if err != nil {
			if c.Containerized {
				glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
			} else {
				glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
			}
		}
		hybridProxier, err := hybrid.NewHybridProxier(unidlingLoadBalancer, unidlingUserspaceProxy, endpointsHandler, proxier, c.ProxyConfig.IPTablesSyncPeriod.Duration, serviceConfig)
		if err != nil {
			if c.Containerized {
				glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
			} else {
				glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
			}
		}
		endpointsHandler = hybridProxier
		iptInterface.AddReloadFunc(hybridProxier.Sync)
//......... some of the code is omitted here .........