This article collects typical usage examples of the Golang function NewContainerManager from k8s.io/kubernetes/pkg/kubelet/cm. If you are wondering what NewContainerManager does and how to call it, the curated code examples below should help.
Seven code examples of the NewContainerManager function are shown below, sorted by popularity by default.
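Before the full examples, here is a minimal sketch of the call shape used in Example 1 (mounter, cAdvisor interface, a cm.NodeConfig, and a fail-swap-on flag); Examples 2, 4 and 6 use an older two-argument form, and Examples 3, 5 and 7 a three-argument form without the flag. The helper function name, its parameters, and the concrete field values are hypothetical placeholders; the import paths and NodeConfig fields follow the release shown in Example 1 and may differ in other Kubernetes versions.
package kubeletsetup

import (
	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/pkg/util/mount"
)

// setupContainerManager shows only the call shape; in the kubelet both arguments
// normally come from the dependency struct (kubeDeps.Mounter, kubeDeps.CAdvisorInterface).
func setupContainerManager(mounter mount.Interface, ca cadvisor.Interface) (cm.ContainerManager, error) {
	return cm.NewContainerManager(
		mounter,
		ca,
		cm.NodeConfig{
			RuntimeCgroupsName: "",       // cgroup for the container runtime; empty leaves it where it is
			SystemCgroupsName:  "",       // cgroup for non-Kubernetes system daemons
			KubeletCgroupsName: "",       // cgroup for the kubelet itself
			ContainerRuntime:   "docker", // placeholder runtime name
			CgroupsPerQOS:      false,    // per-QoS cgroup hierarchy was still experimental in this release
			CgroupRoot:         "",
			CgroupDriver:       "cgroupfs",
		},
		false, // failSwapOn: when true, refuse to start if swap is enabled
	)
}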
Example 1: run
//......... (preceding code omitted) .........
eventClientConfig.Burst = int(s.EventBurst)
eventClient, err = clientset.NewForConfig(&eventClientConfig)
} else {
if s.RequireKubeConfig {
return fmt.Errorf("invalid kubeconfig: %v", err)
}
if standaloneMode {
glog.Warningf("No API client: %v", err)
}
}
kubeDeps, err = UnsecuredKubeletDeps(s)
if err != nil {
return err
}
kubeDeps.Cloud = cloud
kubeDeps.KubeClient = kubeClient
kubeDeps.EventClient = eventClient
}
if kubeDeps.Auth == nil {
nodeName, err := getNodeName(kubeDeps.Cloud, nodeutil.GetHostname(s.HostnameOverride))
if err != nil {
return err
}
auth, err := buildAuth(nodeName, kubeDeps.KubeClient, s.KubeletConfiguration)
if err != nil {
return err
}
kubeDeps.Auth = auth
}
if kubeDeps.CAdvisorInterface == nil {
kubeDeps.CAdvisorInterface, err = cadvisor.New(uint(s.CAdvisorPort), s.ContainerRuntime, s.RootDirectory)
if err != nil {
return err
}
}
if kubeDeps.ContainerManager == nil {
if s.SystemCgroups != "" && s.CgroupRoot == "" {
return fmt.Errorf("invalid configuration: system container was specified and cgroup root was not specified")
}
kubeDeps.ContainerManager, err = cm.NewContainerManager(
kubeDeps.Mounter,
kubeDeps.CAdvisorInterface,
cm.NodeConfig{
RuntimeCgroupsName: s.RuntimeCgroups,
SystemCgroupsName: s.SystemCgroups,
KubeletCgroupsName: s.KubeletCgroups,
ContainerRuntime: s.ContainerRuntime,
CgroupsPerQOS: s.ExperimentalCgroupsPerQOS,
CgroupRoot: s.CgroupRoot,
CgroupDriver: s.CgroupDriver,
ProtectKernelDefaults: s.ProtectKernelDefaults,
EnableCRI: s.EnableCRI,
},
s.ExperimentalFailSwapOn)
if err != nil {
return err
}
}
if err := checkPermissions(); err != nil {
glog.Error(err)
}
utilruntime.ReallyCrash = s.ReallyCrashForTesting
rand.Seed(time.Now().UTC().UnixNano())
// TODO(vmarmol): Do this through container config.
oomAdjuster := kubeDeps.OOMAdjuster
if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil {
glog.Warning(err)
}
if err := RunKubelet(&s.KubeletConfiguration, kubeDeps, s.RunOnce, standaloneMode); err != nil {
return err
}
if s.HealthzPort > 0 {
healthz.DefaultHealthz()
go wait.Until(func() {
err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, wait.NeverStop)
}
if s.RunOnce {
return nil
}
<-done
return nil
}
Example 2: Run
// Run runs the specified KubeletServer for the given KubeletConfig. This should never exit.
// The kcfg argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the KubeletConfig object and all defaults
// will be ignored.
func Run(s *options.KubeletServer, kcfg *KubeletConfig) error {
var err error
if kcfg == nil {
cfg, err := UnsecuredKubeletConfig(s)
if err != nil {
return err
}
kcfg = cfg
clientConfig, err := CreateAPIServerClientConfig(s)
if err == nil {
kcfg.KubeClient, err = client.New(clientConfig)
// make a separate client for events
eventClientConfig := *clientConfig
eventClientConfig.QPS = s.EventRecordQPS
eventClientConfig.Burst = s.EventBurst
kcfg.EventClient, err = client.New(&eventClientConfig)
}
if err != nil && len(s.APIServerList) > 0 {
glog.Warningf("No API client: %v", err)
}
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
return err
}
glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
kcfg.Cloud = cloud
}
if kcfg.CAdvisorInterface == nil {
kcfg.CAdvisorInterface, err = cadvisor.New(s.CAdvisorPort)
if err != nil {
return err
}
}
if kcfg.ContainerManager == nil {
kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, kcfg.CAdvisorInterface)
if err != nil {
return err
}
}
util.ReallyCrash = s.ReallyCrashForTesting
rand.Seed(time.Now().UTC().UnixNano())
credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)
glog.V(2).Infof("Using root directory: %v", s.RootDirectory)
// TODO(vmarmol): Do this through container config.
oomAdjuster := kcfg.OOMAdjuster
if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
glog.Warning(err)
}
if err := RunKubelet(kcfg); err != nil {
return err
}
if s.HealthzPort > 0 {
healthz.DefaultHealthz()
go util.Until(func() {
err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, util.NeverStop)
}
if s.RunOnce {
return nil
}
// run forever
select {}
}
Example 3: run
func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
if s.ExitOnLockContention && s.LockFilePath == "" {
return errors.New("cannot exit on lock file contention: no lock file specified")
}
done := make(chan struct{})
if s.LockFilePath != "" {
glog.Infof("acquiring lock on %q", s.LockFilePath)
if err := flock.Acquire(s.LockFilePath); err != nil {
return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err)
}
if s.ExitOnLockContention {
glog.Infof("watching for inotify events for: %v", s.LockFilePath)
if err := watchForLockfileContention(s.LockFilePath, done); err != nil {
return err
}
}
}
if c, err := configz.New("componentconfig"); err == nil {
c.Set(s.KubeletConfiguration)
} else {
glog.Errorf("unable to register configz: %s", err)
}
if kcfg == nil {
cfg, err := UnsecuredKubeletConfig(s)
if err != nil {
return err
}
kcfg = cfg
clientConfig, err := CreateAPIServerClientConfig(s)
if err == nil {
kcfg.KubeClient, err = clientset.NewForConfig(clientConfig)
// make a separate client for events
eventClientConfig := *clientConfig
eventClientConfig.QPS = float32(s.EventRecordQPS)
eventClientConfig.Burst = int(s.EventBurst)
kcfg.EventClient, err = clientset.NewForConfig(&eventClientConfig)
}
if err != nil && len(s.APIServerList) > 0 {
glog.Warningf("No API client: %v", err)
}
if s.CloudProvider == kubeExternal.AutoDetectCloudProvider {
kcfg.AutoDetectCloudProvider = true
} else {
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
return err
}
if cloud == nil {
glog.V(2).Infof("No cloud provider specified: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
} else {
glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
kcfg.Cloud = cloud
}
}
}
if kcfg.CAdvisorInterface == nil {
kcfg.CAdvisorInterface, err = cadvisor.New(uint(s.CAdvisorPort), kcfg.ContainerRuntime)
if err != nil {
return err
}
}
if kcfg.ContainerManager == nil {
if kcfg.SystemCgroups != "" && kcfg.CgroupRoot == "" {
return fmt.Errorf("invalid configuration: system container was specified and cgroup root was not specified")
}
kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, kcfg.CAdvisorInterface, cm.NodeConfig{
RuntimeCgroupsName: kcfg.RuntimeCgroups,
SystemCgroupsName: kcfg.SystemCgroups,
KubeletCgroupsName: kcfg.KubeletCgroups,
ContainerRuntime: kcfg.ContainerRuntime,
CgroupsPerQOS: kcfg.CgroupsPerQOS,
CgroupRoot: kcfg.CgroupRoot,
})
if err != nil {
return err
}
}
runtime.ReallyCrash = s.ReallyCrashForTesting
rand.Seed(time.Now().UTC().UnixNano())
// TODO(vmarmol): Do this through container config.
oomAdjuster := kcfg.OOMAdjuster
if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil {
glog.Warning(err)
}
if err := RunKubelet(kcfg); err != nil {
return err
}
if s.HealthzPort > 0 {
healthz.DefaultHealthz()
go wait.Until(func() {
//......... (remaining code omitted) .........
Example 4: runKubelet
func (s *KubeletExecutorServer) runKubelet(
nodeInfos <-chan executor.NodeInfo,
kubeletDone chan<- struct{},
staticPodsConfigPath string,
apiclient *clientset.Clientset,
podLW *cache.ListWatch,
registry executor.Registry,
executorDone <-chan struct{},
) (err error) {
defer func() {
if err != nil {
// close the channel here. When Run returns without error, the executorKubelet is
// responsible to do this. If it returns with an error, we are responsible here.
close(kubeletDone)
}
}()
kcfg, err := kubeletapp.UnsecuredKubeletConfig(s.KubeletServer)
if err != nil {
return err
}
// apply Mesos specific settings
kcfg.Builder = func(kc *kubeletapp.KubeletConfig) (kubeletapp.KubeletBootstrap, *kconfig.PodConfig, error) {
k, pc, err := kubeletapp.CreateAndInitKubelet(kc)
if err != nil {
return k, pc, err
}
// decorate kubelet such that it shuts down when the executor does
decorated := &executorKubelet{
Kubelet: k.(*kubelet.Kubelet),
kubeletDone: kubeletDone,
executorDone: executorDone,
}
return decorated, pc, nil
}
kcfg.DockerDaemonContainer = "" // don't move the docker daemon into a cgroup
kcfg.Hostname = kcfg.HostnameOverride
kcfg.KubeClient = apiclient
// taken from KubeletServer#Run(*KubeletConfig)
eventClientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
if err != nil {
return err
}
// make a separate client for events
eventClientConfig.QPS = s.EventRecordQPS
eventClientConfig.Burst = s.EventBurst
kcfg.EventClient, err = clientset.NewForConfig(eventClientConfig)
if err != nil {
return err
}
kcfg.NodeName = kcfg.HostnameOverride
kcfg.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kcfg.Recorder) // override the default pod source
kcfg.StandaloneMode = false
kcfg.SystemContainer = "" // don't take control over other system processes.
if kcfg.Cloud != nil {
// fail early and hard because having the cloud provider loaded would go unnoticed,
// but break bigger clusters because accessing the state.json from every slave kills the master.
panic("cloud provider must not be set")
}
// create a custom cAdvisor interface which returns the resource values that Mesos reports
ni := <-nodeInfos
cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, s.CAdvisorPort)
if err != nil {
return err
}
kcfg.CAdvisorInterface = cAdvisorInterface
kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, cAdvisorInterface)
if err != nil {
return err
}
go func() {
for ni := range nodeInfos {
// TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished
log.V(3).Infof("ignoring updated node resources: %v", ni)
}
}()
// create main pod source, it will stop generating events once executorDone is closed
newSourceMesos(executorDone, kcfg.PodConfig.Channel(mesosSource), podLW, registry)
// create static-pods directory file source
log.V(2).Infof("initializing static pods source factory, configured at path %q", staticPodsConfigPath)
fileSourceUpdates := kcfg.PodConfig.Channel(kubetypes.FileSource)
kconfig.NewSourceFile(staticPodsConfigPath, kcfg.HostnameOverride, kcfg.FileCheckFrequency, fileSourceUpdates)
// run the kubelet
// NOTE: because kcfg != nil holds, the upstream Run function will not
// initialize the cloud provider. We explicitly wouldn't want
// that because then every kubelet instance would query the master
// state.json which does not scale.
err = kubeletapp.Run(s.KubeletServer, kcfg)
//......... (remaining code omitted) .........
Example 5: run
func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
if s.LockFilePath != "" {
glog.Infof("aquiring lock on %q", s.LockFilePath)
if err := flock.Acquire(s.LockFilePath); err != nil {
return fmt.Errorf("unable to aquire file lock on %q: %v", s.LockFilePath, err)
}
}
if c, err := configz.New("componentconfig"); err == nil {
c.Set(s.KubeletConfiguration)
} else {
glog.Errorf("unable to register configz: %s", err)
}
if kcfg == nil {
cfg, err := UnsecuredKubeletConfig(s)
if err != nil {
return err
}
kcfg = cfg
clientConfig, err := CreateAPIServerClientConfig(s)
if err == nil {
kcfg.KubeClient, err = clientset.NewForConfig(clientConfig)
// make a separate client for events
eventClientConfig := *clientConfig
eventClientConfig.QPS = s.EventRecordQPS
eventClientConfig.Burst = s.EventBurst
kcfg.EventClient, err = clientset.NewForConfig(&eventClientConfig)
}
if err != nil && len(s.APIServerList) > 0 {
glog.Warningf("No API client: %v", err)
}
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
return err
}
glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
kcfg.Cloud = cloud
}
if kcfg.CAdvisorInterface == nil {
kcfg.CAdvisorInterface, err = cadvisor.New(s.CAdvisorPort)
if err != nil {
return err
}
}
if kcfg.ContainerManager == nil {
if kcfg.SystemCgroups != "" && kcfg.CgroupRoot == "" {
return fmt.Errorf("invalid configuration: system container was specified and cgroup root was not specified")
}
kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, kcfg.CAdvisorInterface, cm.NodeConfig{
RuntimeCgroupsName: kcfg.RuntimeCgroups,
SystemCgroupsName: kcfg.SystemCgroups,
KubeletCgroupsName: kcfg.KubeletCgroups,
ContainerRuntime: kcfg.ContainerRuntime,
})
if err != nil {
return err
}
}
runtime.ReallyCrash = s.ReallyCrashForTesting
rand.Seed(time.Now().UTC().UnixNano())
credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)
glog.V(2).Infof("Using root directory: %v", s.RootDirectory)
// TODO(vmarmol): Do this through container config.
oomAdjuster := kcfg.OOMAdjuster
if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
glog.Warning(err)
}
if err := RunKubelet(kcfg); err != nil {
return err
}
if s.HealthzPort > 0 {
healthz.DefaultHealthz()
go wait.Until(func() {
err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(s.HealthzPort)), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, wait.NeverStop)
}
if s.RunOnce {
return nil
}
// run forever
select {}
}
Example 6: runKubelet
func (s *KubeletExecutorServer) runKubelet(execUpdates <-chan kubetypes.PodUpdate, nodeInfos <-chan executor.NodeInfo, kubeletDone chan<- struct{},
staticPodsConfigPath string, apiclient *client.Client) error {
kcfg, err := s.UnsecuredKubeletConfig()
if err == nil {
// apply Mesos specific settings
executorDone := make(chan struct{})
kcfg.Builder = func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
k, pc, err := app.CreateAndInitKubelet(kc)
if err != nil {
return k, pc, err
}
klet := k.(*kubelet.Kubelet)
s.kletLock.Lock()
s.klet = klet
s.kletLock.Unlock()
// decorate kubelet such that it shuts down when the executor does
decorated := &executorKubelet{
Kubelet: klet,
kubeletDone: kubeletDone,
executorDone: executorDone,
}
return decorated, pc, nil
}
kcfg.DockerDaemonContainer = "" // don't move the docker daemon into a cgroup
kcfg.Hostname = kcfg.HostnameOverride
kcfg.KubeClient = apiclient
kcfg.NodeName = kcfg.HostnameOverride
kcfg.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kcfg.Recorder) // override the default pod source
kcfg.StandaloneMode = false
kcfg.SystemContainer = "" // don't take control over other system processes.
if kcfg.Cloud != nil {
// fail early and hard because having the cloud provider loaded would go unnoticed,
// but break bigger clusters because accessing the state.json from every slave kills the master.
panic("cloud provider must not be set")
}
// create a custom cAdvisor interface which returns the resource values that Mesos reports
ni := <-nodeInfos
cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, s.CAdvisorPort)
if err != nil {
return err
}
kcfg.CAdvisorInterface = cAdvisorInterface
kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, cAdvisorInterface)
if err != nil {
return err
}
go func() {
for ni := range nodeInfos {
// TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished
log.V(3).Infof("ignoring updated node resources: %v", ni)
}
}()
// create main pod source
updates := kcfg.PodConfig.Channel(MESOS_CFG_SOURCE)
go func() {
// execUpdates will be closed by the executor on shutdown
defer close(executorDone)
for u := range execUpdates {
u.Source = MESOS_CFG_SOURCE
updates <- u
}
}()
// create static-pods directory file source
log.V(2).Infof("initializing static pods source factory, configured at path %q", staticPodsConfigPath)
fileSourceUpdates := kcfg.PodConfig.Channel(kubetypes.FileSource)
kconfig.NewSourceFile(staticPodsConfigPath, kcfg.HostnameOverride, kcfg.FileCheckFrequency, fileSourceUpdates)
// run the kubelet, until execUpdates is closed
// NOTE: because kcfg != nil holds, the upstream Run function will not
// initialize the cloud provider. We explicitly wouldn't want
// that because then every kubelet instance would query the master
// state.json which does not scale.
err = s.KubeletServer.Run(kcfg)
}
if err != nil {
// close the channel here. When Run returns without error, the executorKubelet is
// responsible to do this. If it returns with an error, we are responsible here.
close(kubeletDone)
}
return err
}
Example 7: runKubelet
func (s *KubeletExecutorServer) runKubelet(
nodeInfos <-chan executor.NodeInfo,
kubeletDone chan<- struct{},
staticPodsConfigPath string,
apiclient *clientset.Clientset,
podLW *cache.ListWatch,
registry executor.Registry,
executorDone <-chan struct{},
) (err error) {
defer func() {
if err != nil {
// close the channel here. When Run returns without error, the executorKubelet is
// responsible to do this. If it returns with an error, we are responsible here.
close(kubeletDone)
}
}()
kubeDeps, err := kubeletapp.UnsecuredKubeletDeps(s.KubeletServer)
if err != nil {
return err
}
// apply Mesos specific settings
kubeDeps.Builder = func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, standaloneMode bool) (kubelet.KubeletBootstrap, error) {
k, err := kubeletapp.CreateAndInitKubelet(kubeCfg, kubeDeps, standaloneMode)
if err != nil {
return k, err
}
// decorate kubelet such that it shuts down when the executor does
decorated := &executorKubelet{
Kubelet: k.(*kubelet.Kubelet),
kubeletDone: kubeletDone,
executorDone: executorDone,
}
return decorated, nil
}
s.RuntimeCgroups = "" // don't move the docker daemon into a cgroup
kubeDeps.KubeClient = apiclient
// taken from KubeletServer#Run(*KubeletConfig)
eventClientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
if err != nil {
return err
}
// make a separate client for events
eventClientConfig.QPS = float32(s.EventRecordQPS)
eventClientConfig.Burst = int(s.EventBurst)
kubeDeps.EventClient, err = clientset.NewForConfig(eventClientConfig)
if err != nil {
return err
}
kubeDeps.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kubeDeps.Recorder) // override the default pod source
s.SystemCgroups = "" // don't take control over other system processes.
if kubeDeps.Cloud != nil {
// fail early and hard because having the cloud provider loaded would go unnoticed,
// but break bigger clusters because accessing the state.json from every slave kills the master.
panic("cloud provider must not be set")
}
// create a custom cAdvisor interface which returns the resource values that Mesos reports
ni := <-nodeInfos
cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, uint(s.CAdvisorPort), s.ContainerRuntime)
if err != nil {
return err
}
kubeDeps.CAdvisorInterface = cAdvisorInterface
kubeDeps.ContainerManager, err = cm.NewContainerManager(kubeDeps.Mounter, cAdvisorInterface, cm.NodeConfig{
RuntimeCgroupsName: s.RuntimeCgroups,
SystemCgroupsName: s.SystemCgroups,
KubeletCgroupsName: s.KubeletCgroups,
ContainerRuntime: s.ContainerRuntime,
})
if err != nil {
return err
}
go func() {
for ni := range nodeInfos {
// TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished
log.V(3).Infof("ignoring updated node resources: %v", ni)
}
}()
// create main pod source, it will stop generating events once executorDone is closed
var containerOptions []podsource.Option
if s.containerID != "" {
// tag all pod containers with the containerID so that they can be properly GC'd by Mesos
containerOptions = append(containerOptions, podsource.ContainerEnvOverlay([]api.EnvVar{
{Name: envContainerID, Value: s.containerID},
}))
kubeDeps.ContainerRuntimeOptions = append(kubeDeps.ContainerRuntimeOptions,
dockertools.PodInfraContainerEnv(map[string]string{
envContainerID: s.containerID,
//......... (remaining code omitted) .........