This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/kubelet/cm.NewStubContainerManager. If you are wrestling with questions such as what exactly NewStubContainerManager does, how to use it, or where to find usage examples, then the curated function code examples here may help.
The following presents 8 code examples of NewStubContainerManager, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
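Before diving into the examples, here is a minimal sketch of the pattern they all share. NewStubContainerManager returns a no-op implementation of the kubelet's ContainerManager interface (the import path follows the examples below; the standalone snippet itself is illustrative, not taken from any example):

import (
	"k8s.io/kubernetes/pkg/kubelet/cm"
)

// The stub satisfies cm.ContainerManager with no-op methods, so test
// kubelets and hollow nodes can run without real cgroups or a real
// container runtime behind them.
var containerManager cm.ContainerManager = cm.NewStubContainerManager()

Because the stub is a drop-in replacement for the real container manager, each example below simply injects it wherever a component asks for a ContainerManager.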
Example 1: main
func main() {
	config := HollowNodeConfig{}
	config.addFlags(pflag.CommandLine)
	flag.InitFlags()
	if !knownMorphs.Has(config.Morph) {
		glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
	}
	// create a client to communicate with API server.
	clientConfig, err := config.createClientConfigFromFile()
	if err != nil {
		glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err)
	}
	cl, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
	}
	clientset, err := internalclientset.NewForConfig(clientConfig)
	if err != nil {
		glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
	}
	if config.Morph == "kubelet" {
		cadvisorInterface := new(cadvisortest.Fake)
		containerManager := cm.NewStubContainerManager()
		fakeDockerClient := dockertools.NewFakeDockerClient()
		fakeDockerClient.EnableSleep = true
		hollowKubelet := kubemark.NewHollowKubelet(
			config.NodeName,
			clientset,
			cadvisorInterface,
			fakeDockerClient,
			config.KubeletPort,
			config.KubeletReadOnlyPort,
			containerManager,
			maxPods,
			podsPerCore,
		)
		hollowKubelet.Run()
	}
	if config.Morph == "proxy" {
		eventBroadcaster := record.NewBroadcaster()
		recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})
		iptInterface := fakeiptables.NewFake()
		serviceConfig := proxyconfig.NewServiceConfig()
		serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})
		endpointsConfig := proxyconfig.NewEndpointsConfig()
		endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})
		hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
		hollowProxy.Run()
	}
}
Example 2: TestRunOnce
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	podManager := kubepod.NewBasicPodManager(kubepod.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}
	basePath, err := ioutil.TempDir(os.TempDir(), "kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
	}
	kb.containerManager = cm.NewStubContainerManager()
	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}
Example 3: main
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	config := HollowNodeConfig{}
	config.addFlags(pflag.CommandLine)
	util.InitFlags()
	if !knownMorphs.Has(config.Morph) {
		glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
	}
	// create a client to communicate with API server.
	cl, err := createClientFromFile(config.KubeconfigPath)
	if err != nil {
		glog.Fatal("Failed to create a Client. Exiting.")
	}
	clientset := clientset.FromUnversionedClient(cl)
	if config.Morph == "kubelet" {
		cadvisorInterface := new(cadvisortest.Fake)
		containerManager := cm.NewStubContainerManager()
		fakeDockerClient := dockertools.NewFakeDockerClient()
		fakeDockerClient.VersionInfo = docker.Env{"Version=1.1.3", "ApiVersion=1.18"}
		fakeDockerClient.EnableSleep = true
		hollowKubelet := kubemark.NewHollowKubelet(
			config.NodeName,
			clientset,
			cadvisorInterface,
			fakeDockerClient,
			config.KubeletPort,
			config.KubeletReadOnlyPort,
			containerManager,
			maxPods,
		)
		hollowKubelet.Run()
	}
	if config.Morph == "proxy" {
		eventBroadcaster := record.NewBroadcaster()
		recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})
		iptInterface := fakeiptables.NewFake()
		serviceConfig := proxyconfig.NewServiceConfig()
		serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})
		endpointsConfig := proxyconfig.NewEndpointsConfig()
		endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})
		hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
		hollowProxy.Run()
	}
}
Example 4: SetFakeContainerManagerInterfaceForIntegrationTest
// SetFakeContainerManagerInterfaceForIntegrationTest sets a fake container manager implementation to allow the node to run in integration tests.
func SetFakeContainerManagerInterfaceForIntegrationTest() {
	defaultContainerManagerInterface = cm.NewStubContainerManager()
}
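Example 4 shows the injection pattern at its smallest: a package-level default is swapped for the stub so integration tests can run a node without touching real cgroups. A hypothetical caller might wire this into a test entry point as in the sketch below (TestMain and its imports are assumptions for illustration, not part of the example, and assume the same package as the setter):

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	// Swap in the no-op stub before any node code runs, so these
	// integration tests need no cgroup or container-runtime access.
	SetFakeContainerManagerInterfaceForIntegrationTest()
	os.Exit(m.Run())
}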
Example 5: TestRunOnce
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisortest.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	cadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400 * mb,
		Capacity:  1000 * mb,
		Available: 600 * mb,
	}, nil)
	cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9 * mb,
		Capacity: 10 * mb,
	}, nil)
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &containertest.FakeRuntime{}
	basePath, err := utiltesting.MkTmpdir("kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  &containertest.FakeOS{},
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
		reasonCache:         NewReasonCache(),
		clock:               util.RealClock{},
		kubeClient:          &fake.Clientset{},
		hostname:            testKubeletHostname,
		nodeName:            testKubeletHostname,
	}
	kb.containerManager = cm.NewStubContainerManager()
	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	kb.volumePluginMgr, err = NewInitializedVolumePluginMgr(kb, []volume.VolumePlugin{plug})
	if err != nil {
		t.Fatalf("failed to initialize VolumePluginMgr: %v", err)
	}
	kb.volumeManager, err = kubeletvolume.NewVolumeManager(
		true,
		kb.hostname,
		kb.podManager,
		kb.kubeClient,
		kb.volumePluginMgr,
		fakeRuntime)
	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kb.nonMasqueradeCIDR)
	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
	volumeStatsAggPeriod := time.Second * 10
	kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)
	nodeRef := &api.ObjectReference{
		Kind:      "Node",
		Name:      kb.nodeName,
		UID:       types.UID(kb.nodeName),
		Namespace: "",
	}
	fakeKillPodFunc := func(pod *api.Pod, podStatus api.PodStatus, gracePeriodOverride *int64) error {
		return nil
	}
	evictionManager, evictionAdmitHandler, err := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, kb.recorder, nodeRef, kb.clock)
	if err != nil {
		t.Fatalf("failed to initialize eviction manager: %v", err)
	}
	kb.evictionManager = evictionManager
	kb.AddPodAdmitHandler(evictionAdmitHandler)
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	// The original test here was meaningless: fakeRuntime always returned an empty podStatus, and
	// the original isPodRunning logic happened to return true for an empty podStatus, so the test
	// always passed. The isPodRunning logic has since changed, so to keep the test passing we set
	// the podStatus directly in the fake runtime. The test remains meaningless, because isPodRunning
	// will still always return true after this. However, since runonce is no longer used in
	// kubernetes, the cleanup work is deprioritized.
	// TODO(random-liu) Fix the test, make it meaningful.
	fakeRuntime.PodStatus = kubecontainer.PodStatus{
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			//......... some code omitted here .........
Example 6: startComponents
//......... some code omitted here .........
	masterConfig.ReadWritePort = portNumber
	masterConfig.PublicAddress = hostIP
	masterConfig.CacheTimeout = 2 * time.Second
	// Create a master and install handlers into mux.
	m := master.New(masterConfig)
	handler.delegate = m.Handler
	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl, api.DefaultSchedulerName)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(cl.Events(""))
	scheduler.New(schedulerConfig).Run()
	// ensure the service endpoints are sync'd several times within the window that the integration tests wait
	go endpointcontroller.NewEndpointController(clientset, controller.NoResyncPeriodFunc).
		Run(3, util.NeverStop)
	// TODO: Write an integration test for the replication controllers watch.
	go replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas).
		Run(3, util.NeverStop)
	nodeController := nodecontroller.NewNodeController(nil, clientset, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),
		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
	nodeController.Run(5 * time.Second)
	cadvisorInterface := new(cadvisor.Fake)
	// Kubelet (localhost)
	testRootDir := integration.MakeTempDirOrDie("kubelet_integ_1.", "")
	configFilePath := integration.MakeTempDirOrDie("config", testRootDir)
	glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
	cm := cm.NewStubContainerManager()
	kcfg := kubeletapp.SimpleKubelet(
		clientset,
		fakeDocker1,
		"localhost",
		testRootDir,
		firstManifestURL,
		"127.0.0.1",
		10250, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		configFilePath,
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second, /* SyncFrequency */
		10*time.Second, /* OutOfDiskTransitionFrequency */
		40,             /* MaxPods */
		cm, net.ParseIP("127.0.0.1"))
	kubeletapp.RunKubelet(kcfg)
	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	testRootDir = integration.MakeTempDirOrDie("kubelet_integ_2.", "")
	glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
	kcfg = kubeletapp.SimpleKubelet(
		clientset,
		fakeDocker2,
		"127.0.0.1",
		testRootDir,
		secondManifestURL,
		"127.0.0.1",
		10251, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		"",
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second, /* SyncFrequency */
		10*time.Second, /* OutOfDiskTransitionFrequency */
		40,             /* MaxPods */
		cm,
		net.ParseIP("127.0.0.1"))
	kubeletapp.RunKubelet(kcfg)
	return apiServer.URL, configFilePath
}
Example 7: startComponents
//......... some code omitted here .........
		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admit.NewAlwaysAdmit(),
		ReadWritePort:    portNumber,
		PublicAddress:    hostIP,
		CacheTimeout:     2 * time.Second,
		StorageVersions:  storageVersions,
	})
	handler.delegate = m.Handler
	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl, nil)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(cl.Events(""))
	scheduler.New(schedulerConfig).Run()
	// ensure the service endpoints are sync'd several times within the window that the integration tests wait
	go endpointcontroller.NewEndpointController(cl, controller.NoResyncPeriodFunc).
		Run(3, util.NeverStop)
	// TODO: Write an integration test for the replication controllers watch.
	go replicationcontroller.NewReplicationManager(cl, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas).
		Run(3, util.NeverStop)
	nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),
		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
	nodeController.Run(5 * time.Second)
	cadvisorInterface := new(cadvisor.Fake)
	// Kubelet (localhost)
	testRootDir := integration.MakeTempDirOrDie("kubelet_integ_1.", "")
	configFilePath := integration.MakeTempDirOrDie("config", testRootDir)
	glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
	fakeDocker1.VersionInfo = docker.Env{"ApiVersion=1.20"}
	cm := cm.NewStubContainerManager()
	kcfg := kubeletapp.SimpleKubelet(
		cl,
		fakeDocker1,
		"localhost",
		testRootDir,
		firstManifestURL,
		"127.0.0.1",
		10250, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		configFilePath,
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second, /* SyncFrequency */
		40,             /* MaxPods */
		cm)
	kubeletapp.RunKubelet(kcfg)
	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	testRootDir = integration.MakeTempDirOrDie("kubelet_integ_2.", "")
	glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
	fakeDocker2.VersionInfo = docker.Env{"ApiVersion=1.20"}
	kcfg = kubeletapp.SimpleKubelet(
		cl,
		fakeDocker2,
		"127.0.0.1",
		testRootDir,
		secondManifestURL,
		"127.0.0.1",
		10251, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		"",
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second, /* SyncFrequency */
		40,             /* MaxPods */
		cm)
	kubeletapp.RunKubelet(kcfg)
	return apiServer.URL, configFilePath
}
Example 8: TestRunOnce
func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisor.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	cadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400 * mb,
		Capacity:  1000 * mb,
		Available: 600 * mb,
	}, nil)
	cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9 * mb,
		Capacity: 10 * mb,
	}, nil)
	podManager := kubepod.NewBasicPodManager(kubepod.NewFakeMirrorClient())
	diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{})
	fakeRuntime := &kubecontainer.FakeRuntime{}
	basePath, err := utiltesting.MkTmpdir("kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:       basePath,
		recorder:            &record.FakeRecorder{},
		cadvisor:            cadvisor,
		nodeLister:          testNodeLister{},
		nodeInfo:            testNodeInfo{},
		statusManager:       status.NewManager(nil, podManager),
		containerRefManager: kubecontainer.NewRefManager(),
		podManager:          podManager,
		os:                  kubecontainer.FakeOS{},
		volumeManager:       newVolumeManager(),
		diskSpaceManager:    diskSpaceManager,
		containerRuntime:    fakeRuntime,
		reasonCache:         NewReasonCache(),
		clock:               util.RealClock{},
	}
	kb.containerManager = cm.NewStubContainerManager()
	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}
	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	// The original test here was meaningless: fakeRuntime always returned an empty podStatus, and
	// the original isPodRunning logic happened to return true for an empty podStatus, so the test
	// always passed. The isPodRunning logic has since changed, so to keep the test passing we set
	// the podStatus directly in the fake runtime. The test remains meaningless, because isPodRunning
	// will still always return true after this. However, since runonce is no longer used in
	// kubernetes, the cleanup work is deprioritized.
	// TODO(random-liu) Fix the test, make it meaningful.
	fakeRuntime.PodStatus = kubecontainer.PodStatus{
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name:  "bar",
				State: kubecontainer.ContainerStateRunning,
			},
		},
	}
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}