This page collects typical usage examples of the Golang NewOomAdjuster function from k8s.io/kubernetes/pkg/util/oom. If you are wondering what exactly NewOomAdjuster does, how to call it, or where to find working usage, the hand-picked code samples below should help.
Six code examples of NewOomAdjuster are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Golang code samples.
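Every example follows the same two-step pattern: construct the adjuster with NewOomAdjuster(), then call ApplyOomScoreAdj(pid, score), where pid 0 denotes the current process. As a minimal sketch of that pattern (assuming the no-argument constructor and two-argument method exactly as they appear in the samples; the score -999 is only an illustrative value):

package main

import (
    "log"

    "k8s.io/kubernetes/pkg/util/oom"
)

func main() {
    oomAdjuster := oom.NewOomAdjuster()

    // pid 0 means "this process"; negative scores make the kernel's OOM
    // killer less likely to pick it. Callers in the examples only log errors.
    if err := oomAdjuster.ApplyOomScoreAdj(0, -999); err != nil {
        log.Printf("failed to set oom_score_adj: %v", err)
    }
}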
Example 1: ensureDockerInContainer
// Ensures that the Docker daemon is in the desired container.
func ensureDockerInContainer(cadvisor cadvisor.Interface, oomScoreAdj int, manager *fs.Manager) error {
    // What container is Docker in?
    out, err := exec.Command("pidof", "docker").Output()
    if err != nil {
        return fmt.Errorf("failed to find pid of Docker container: %v", err)
    }

    // The output of pidof is a list of pids.
    // Docker may be forking and thus there would be more than one result.
    pids := []int{}
    for _, pidStr := range strings.Split(strings.TrimSpace(string(out)), " ") {
        pid, err := strconv.Atoi(pidStr)
        if err != nil {
            continue
        }
        pids = append(pids, pid)
    }

    // Move if the pid is not already in the desired container.
    errs := []error{}
    for _, pid := range pids {
        cont, err := getContainer(pid)
        if err != nil {
            errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
        }

        if cont != manager.Cgroups.Name {
            err = manager.Apply(pid)
            if err != nil {
                errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q", pid, cont, manager.Cgroups.Name))
            }
        }

        // Also apply oom-score-adj to processes.
        oomAdjuster := oom.NewOomAdjuster()
        if err := oomAdjuster.ApplyOomScoreAdj(pid, oomScoreAdj); err != nil {
            errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
        }
    }
    return errors.NewAggregate(errs)
}
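The getContainer helper is not reproduced on this page. A hypothetical sketch of what such a lookup can do on Linux (an assumption for illustration, not the vendored implementation): parse /proc/<pid>/cgroup, whose lines have the form hierarchy-ID:controllers:path, and return the path of the cpu hierarchy.

import (
    "fmt"
    "io/ioutil"
    "strings"
)

// getContainerSketch is a hypothetical stand-in for getContainer above.
func getContainerSketch(pid int) (string, error) {
    data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
    if err != nil {
        return "", err
    }
    for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
        // Example line: "4:cpu,cpuacct:/docker-daemon"
        parts := strings.SplitN(line, ":", 3)
        if len(parts) == 3 && strings.Contains(","+parts[1]+",", ",cpu,") {
            return parts[2], nil
        }
    }
    return "", fmt.Errorf("cpu cgroup not found for pid %d", pid)
}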
Example 2: Run
// Run runs the specified KubeletServer for the given KubeletConfig. This should never exit.
// The kcfg argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the KubeletConfig object and all defaults
// will be ignored.
func (s *KubeletServer) Run(kcfg *KubeletConfig) error {
    if kcfg == nil {
        cfg, err := s.UnsecuredKubeletConfig()
        if err != nil {
            return err
        }
        kcfg = cfg

        clientConfig, err := s.CreateAPIServerClientConfig()
        if err == nil {
            kcfg.KubeClient, err = client.New(clientConfig)
        }
        if err != nil && len(s.APIServerList) > 0 {
            glog.Warningf("No API client: %v", err)
        }

        cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
        if err != nil {
            return err
        }
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
        kcfg.Cloud = cloud
    }

    if kcfg.CadvisorInterface == nil {
        ca, err := cadvisor.New(s.CadvisorPort)
        if err != nil {
            return err
        }
        kcfg.CadvisorInterface = ca
    }

    util.ReallyCrash = s.ReallyCrashForTesting
    rand.Seed(time.Now().UTC().UnixNano())

    credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)
    glog.V(2).Infof("Using root directory: %v", s.RootDirectory)

    // TODO(vmarmol): Do this through container config.
    oomAdjuster := oom.NewOomAdjuster()
    if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
        glog.Warning(err)
    }

    if err := RunKubelet(kcfg, nil); err != nil {
        return err
    }

    if s.HealthzPort > 0 {
        healthz.DefaultHealthz()
        go util.Until(func() {
            err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
            if err != nil {
                glog.Errorf("Starting health server failed: %v", err)
            }
        }, 5*time.Second, util.NeverStop)
    }

    if s.RunOnce {
        return nil
    }

    // run forever
    select {}
}
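ApplyOomScoreAdj(0, s.OOMScoreAdj) here protects the kubelet process itself. On Linux the underlying mechanism is simply a write to procfs; the sketch below shows that mechanism directly (an assumption about what the oom package does internally, not its verbatim code):

import (
    "fmt"
    "io/ioutil"
    "strconv"
)

// writeOomScoreAdj writes the score the way the oom package plausibly does:
// /proc/<pid>/oom_score_adj accepts an integer in [-1000, 1000].
func writeOomScoreAdj(pid, value int) error {
    if value < -1000 || value > 1000 {
        return fmt.Errorf("oom_score_adj %d out of range [-1000, 1000]", value)
    }
    path := "/proc/self/oom_score_adj"
    if pid != 0 {
        path = fmt.Sprintf("/proc/%d/oom_score_adj", pid)
    }
    return ioutil.WriteFile(path, []byte(strconv.Itoa(value)), 0644)
}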
Example 3: Run
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run(_ []string) error {
    protocol := utiliptables.ProtocolIpv4
    if s.BindAddress.To4() == nil {
        protocol = utiliptables.ProtocolIpv6
    }

    // Remove iptables rules and exit.
    if s.CleanupAndExit {
        execer := exec.New()
        ipt := utiliptables.New(execer, protocol)
        encounteredError := userspace.CleanupLeftovers(ipt)
        encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
        if encounteredError {
            return errors.New("encountered an error while tearing down rules")
        }
        return nil
    }

    // TODO(vmarmol): Use container config for this.
    oomAdjuster := oom.NewOomAdjuster()
    if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
        glog.V(2).Info(err)
    }

    // Run in its own container.
    if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
        glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
    } else {
        glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
    }

    // Define the api config source.
    if s.Kubeconfig == "" && s.Master == "" {
        glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
    }

    // This creates a client, first loading any specified kubeconfig
    // file, and then overriding the Master flag, if non-empty.
    kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
        &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
    if err != nil {
        return err
    }

    client, err := client.New(kubeconfig)
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

    // Add event recorder.
    hostname := nodeutil.GetHostname(s.HostnameOverride)
    eventBroadcaster := record.NewBroadcaster()
    s.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
    eventBroadcaster.StartRecordingToSink(client.Events(""))

    s.nodeRef = &api.ObjectReference{
        Kind:      "Node",
        Name:      hostname,
        UID:       types.UID(hostname),
        Namespace: "",
    }

    // Birth Cry
    s.birthCry()

    serviceConfig := config.NewServiceConfig()
    endpointsConfig := config.NewEndpointsConfig()

    var proxier proxy.ProxyProvider
    var endpointsHandler config.EndpointsConfigHandler

    // Guaranteed false on error; the error is only needed for debugging.
    shouldUseIptables, err := iptables.ShouldUseIptablesProxier()
    if err != nil {
        glog.Errorf("Can't determine whether to use iptables or userspace, using userspace proxier: %v", err)
    }

    if !s.ForceUserspaceProxy && shouldUseIptables {
        glog.V(2).Info("Using iptables Proxier.")
        execer := exec.New()
        ipt := utiliptables.New(execer, protocol)
        proxierIptables, err := iptables.NewProxier(ipt, execer, s.SyncPeriod, s.MasqueradeAll)
        if err != nil {
            glog.Fatalf("Unable to create proxier: %v", err)
        }
        proxier = proxierIptables
        endpointsHandler = proxierIptables
        // No turning back. Remove artifacts that might still exist from the userspace Proxier.
        glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
        userspace.CleanupLeftovers(ipt)
    } else {
        glog.V(2).Info("Using userspace Proxier.")
        // This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
        // our config.EndpointsConfigHandler.
        loadBalancer := userspace.NewLoadBalancerRR()
        // Set EndpointsConfigHandler to our loadBalancer.
        endpointsHandler = loadBalancer
        //.........(remainder of this example omitted).........
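Note the contract spelled out in the comment above ShouldUseIptablesProxier: its boolean is guaranteed to be false on error, so the caller can log the error and fall through to the userspace proxier with no extra branching. A self-contained illustration of that calling convention (a generic sketch, not the vendored function):

package main

import (
    "errors"
    "fmt"
    "log"
)

// chooseProxier mirrors the decision above: because probe() returns false
// whenever it returns an error, an inconclusive probe safely selects userspace.
func chooseProxier(forceUserspace bool, probe func() (bool, error)) string {
    useIptables, err := probe()
    if err != nil {
        log.Printf("can't determine proxy mode, using userspace proxier: %v", err)
    }
    if !forceUserspace && useIptables {
        return "iptables"
    }
    return "userspace"
}

func main() {
    fmt.Println(chooseProxier(false, func() (bool, error) { return true, nil }))                      // iptables
    fmt.Println(chooseProxier(false, func() (bool, error) { return false, errors.New("no probe") })) // userspace
    fmt.Println(chooseProxier(true, func() (bool, error) { return true, nil }))                       // userspace
}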
Example 4: Run
// Run runs the specified KubeletExecutorServer.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
    rand.Seed(time.Now().UTC().UnixNano())

    oomAdjuster := oom.NewOomAdjuster()
    if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
        log.Info(err)
    }

    // Derive the executor cgroup and use it as the docker container cgroup root.
    mesosCgroup := findMesosCgroup(s.cgroupPrefix)
    s.cgroupRoot = mesosCgroup
    log.V(2).Infof("passing cgroup %q to the kubelet as cgroup root", s.cgroupRoot)

    // Empty string for the docker and system containers (= cgroup paths). This
    // stops the kubelet taking any control over other system processes.
    s.SystemContainer = ""
    s.DockerDaemonContainer = ""

    // We set the kubelet container to its own cgroup below the executor cgroup.
    // In contrast to the docker and system container, this has no other
    // undesired side-effects.
    s.ResourceContainer = mesosCgroup + "/kubelet"

    // Create the apiserver client.
    var apiclient *client.Client
    clientConfig, err := s.CreateAPIServerClientConfig()
    if err == nil {
        apiclient, err = client.New(clientConfig)
    }
    if err != nil {
        // Required for k8sm since we need to send api.Binding information
        // back to the apiserver.
        log.Fatalf("No API client: %v", err)
    }

    log.Infof("Using root directory: %v", s.RootDirectory)
    credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

    shutdownCloser, err := s.syncExternalShutdownWatcher()
    if err != nil {
        return err
    }

    cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
    if err != nil {
        return err
    }

    imageGCPolicy := kubelet.ImageGCPolicy{
        HighThresholdPercent: s.ImageGCHighThresholdPercent,
        LowThresholdPercent:  s.ImageGCLowThresholdPercent,
    }

    diskSpacePolicy := kubelet.DiskSpacePolicy{
        DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
        RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
    }

    // TODO(jdef) intentionally NOT initializing a cloud provider here since:
    // (a) the kubelet doesn't actually use it
    // (b) we don't need to create N-kubelet connections to zookeeper for no good reason
    // cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
    // log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

    hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
    if err != nil {
        return err
    }

    tlsOptions, err := s.InitializeTLS()
    if err != nil {
        return err
    }

    mounter := mount.New()
    if s.Containerized {
        log.V(2).Info("Running kubelet in containerized mode (experimental)")
        mounter = &mount.NsenterMounter{}
    }

    var dockerExecHandler dockertools.ExecHandler
    switch s.DockerExecHandlerName {
    case "native":
        dockerExecHandler = &dockertools.NativeExecHandler{}
    case "nsenter":
        dockerExecHandler = &dockertools.NsenterExecHandler{}
    default:
        log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
        dockerExecHandler = &dockertools.NativeExecHandler{}
    }

    kcfg := app.KubeletConfig{
        Address:            s.Address,
        AllowPrivileged:    s.AllowPrivileged,
        HostNetworkSources: hostNetworkSources,
        HostnameOverride:   s.HostnameOverride,
        RootDirectory:      s.RootDirectory,
        // ConfigFile: ""
        // ManifestURL: ""
        FileCheckFrequency: s.FileCheckFrequency,
        //.........(remainder of this example omitted).........
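findMesosCgroup is likewise not shown on this page. A hypothetical sketch of what it presumably does (an assumption for illustration only): read /proc/self/cgroup, since the executor runs inside the cgroup that Mesos created for it, and return the first path under the configured prefix.

import (
    "io/ioutil"
    "strings"
)

// findMesosCgroupSketch is a hypothetical stand-in for findMesosCgroup above.
func findMesosCgroupSketch(prefix string) string {
    data, err := ioutil.ReadFile("/proc/self/cgroup")
    if err != nil {
        return ""
    }
    for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
        // Example line: "6:memory:/mesos/8f2e.../executors/..."
        parts := strings.SplitN(line, ":", 3)
        if len(parts) == 3 && strings.HasPrefix(parts[2], "/"+prefix+"/") {
            return parts[2]
        }
    }
    return ""
}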
Example 5: NewProxyServerDefault
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *ProxyServerConfig) (*ProxyServer, error) {
    protocol := utiliptables.ProtocolIpv4
    if config.BindAddress.To4() == nil {
        protocol = utiliptables.ProtocolIpv6
    }

    // We omit creation of pretty much everything if we run in cleanup mode.
    if config.CleanupAndExit {
        execer := exec.New()
        dbus := utildbus.New()
        iptInterface := utiliptables.New(execer, dbus, protocol)
        return &ProxyServer{
            Config:       config,
            IptInterface: iptInterface,
        }, nil
    }

    // TODO(vmarmol): Use container config for this.
    var oomAdjuster *oom.OomAdjuster
    if config.OOMScoreAdj != 0 {
        // Plain assignment here: ':=' would declare a new variable and leave
        // the outer oomAdjuster shadowed and nil.
        oomAdjuster = oom.NewOomAdjuster()
        if err := oomAdjuster.ApplyOomScoreAdj(0, config.OOMScoreAdj); err != nil {
            glog.V(2).Info(err)
        }
    }

    if config.ResourceContainer != "" {
        // Run in its own container.
        if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
            glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
        } else {
            glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
        }
    }

    // Create a Kube Client: define the api config source.
    if config.Kubeconfig == "" && config.Master == "" {
        glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
    }
    // This creates a client, first loading any specified kubeconfig
    // file, and then overriding the Master flag, if non-empty.
    kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
        &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
    if err != nil {
        return nil, err
    }
    client, err := kubeclient.New(kubeconfig)
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

    // Create the event recorder.
    hostname := nodeutil.GetHostname(config.HostnameOverride)
    eventBroadcaster := record.NewBroadcaster()
    recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
    eventBroadcaster.StartRecordingToSink(client.Events(""))

    // Create the iptables util.
    execer := exec.New()
    dbus := utildbus.New()
    iptInterface := utiliptables.New(execer, dbus, protocol)

    var proxier proxy.ProxyProvider
    var endpointsHandler proxyconfig.EndpointsConfigHandler

    useIptablesProxy := false
    if mayTryIptablesProxy(config.ProxyMode, client.Nodes(), hostname) {
        var err error
        // Guaranteed false on error; the error is only needed for debugging.
        useIptablesProxy, err = iptables.ShouldUseIptablesProxier()
        if err != nil {
            glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
        }
    }

    if useIptablesProxy {
        glog.V(2).Info("Using iptables Proxier.")
        execer := exec.New()
        proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.SyncPeriod, config.MasqueradeAll)
        if err != nil {
            glog.Fatalf("Unable to create proxier: %v", err)
        }
        proxier = proxierIptables
        endpointsHandler = proxierIptables
        // No turning back. Remove artifacts that might still exist from the userspace Proxier.
        glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
        userspace.CleanupLeftovers(iptInterface)
    } else {
        glog.V(2).Info("Using userspace Proxier.")
        // This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
        // our config.EndpointsConfigHandler.
        loadBalancer := userspace.NewLoadBalancerRR()
        // Set EndpointsConfigHandler to our loadBalancer.
        endpointsHandler = loadBalancer
        proxierUserspace, err := userspace.NewProxier(loadBalancer, config.BindAddress, iptInterface, config.PortRange, config.SyncPeriod, config.UDPIdleTimeout)
        if err != nil {
            //.........(remainder of this example omitted).........
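The oomAdjuster block above uses plain assignment (=) inside the if on purpose: writing oomAdjuster := oom.NewOomAdjuster() there would declare a new variable scoped to the block and leave the outer oomAdjuster nil. A minimal demonstration of this Go shadowing pitfall:

package main

import "fmt"

func main() {
    var s *string
    if true {
        s := new(string) // ':=' declares a NEW 's' scoped to this block...
        *s = "inner"
    }
    fmt.Println(s == nil) // ...so the outer 's' is still nil: prints "true"

    if true {
        s = new(string) // '=' assigns to the outer 's'
        *s = "inner"
    }
    fmt.Println(s == nil) // now prints "false"
}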
Example 6: Run
// Run runs the specified ProxyServer. This should never exit.
func (s *ProxyServer) Run(_ []string) error {
    // TODO(vmarmol): Use container config for this.
    oomAdjuster := oom.NewOomAdjuster()
    if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
        glog.V(2).Info(err)
    }

    // Run in its own container.
    if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
        glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
    } else {
        glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
    }

    serviceConfig := config.NewServiceConfig()
    endpointsConfig := config.NewEndpointsConfig()

    protocol := iptables.ProtocolIpv4
    if s.BindAddress.To4() == nil {
        protocol = iptables.ProtocolIpv6
    }

    loadBalancer := userspace.NewLoadBalancerRR()
    proxier, err := userspace.NewProxier(loadBalancer, s.BindAddress, iptables.New(exec.New(), protocol), s.PortRange)
    if err != nil {
        glog.Fatalf("Unable to create proxier: %v", err)
    }

    // Wire proxier to handle changes to services.
    serviceConfig.RegisterHandler(proxier)
    // And wire loadBalancer to handle changes to endpoints to services.
    endpointsConfig.RegisterHandler(loadBalancer)

    // Note: RegisterHandler() calls need to happen before creation of Sources because sources
    // only notify on changes, and the initial update (on process start) may be lost if no handlers
    // are registered yet.

    // Define the api config source.
    if s.Kubeconfig == "" && s.Master == "" {
        glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
    }

    // This creates a client, first loading any specified kubeconfig
    // file, and then overriding the Master flag, if non-empty.
    kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
        &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
    if err != nil {
        return err
    }

    client, err := client.New(kubeconfig)
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

    config.NewSourceAPI(
        client,
        30*time.Second,
        serviceConfig.Channel("api"),
        endpointsConfig.Channel("api"),
    )

    if s.HealthzPort > 0 {
        go util.Forever(func() {
            // net.JoinHostPort brackets IPv6 literals; plain concatenation would not.
            err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
            if err != nil {
                glog.Errorf("Starting health server failed: %v", err)
            }
        }, 5*time.Second)
    }

    // Just loop forever for now...
    proxier.SyncLoop()
    return nil
}
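The health endpoint address above is built with net.JoinHostPort (as in Example 2) rather than by concatenating strings, because IPv6 literals must be bracketed in host:port notation. A short demonstration of the difference:

package main

import (
    "fmt"
    "net"
)

func main() {
    // IPv4: both forms agree.
    fmt.Println(net.JoinHostPort("127.0.0.1", "10249")) // 127.0.0.1:10249

    // IPv6: the literal must be bracketed, or the colons inside the address
    // are indistinguishable from the host/port separator.
    fmt.Println(net.JoinHostPort("::1", "10249")) // [::1]:10249
    fmt.Println("::1" + ":" + "10249")            // ::1:10249 (ambiguous/invalid)
}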