Golang framework.GetPauseImageName Function Code Examples

This article collects typical usage examples of the Golang function GetPauseImageName from the k8s.io/kubernetes/test/e2e/framework package. If you are wondering what GetPauseImageName does, how to call it, or what real-world usage looks like, the curated examples below should help.


The 15 GetPauseImageName code examples below are ordered roughly by popularity.
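
To orient the reader, here is a minimal sketch of the pattern most of the examples share: GetPauseImageName returns the name of the pause container image appropriate for the connected cluster, and tests use that image as a lightweight placeholder workload. The sketch assumes the same pre-1.6 client and api packages imported by the examples below; the function and pod names are illustrative, not part of the framework.

// createPlaceholderPod is an illustrative helper, not part of the framework:
// it creates a no-op "pause" pod so a test can occupy a node slot without
// running a real workload.
func createPlaceholderPod(c *client.Client, ns string) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "placeholder"},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "pause",
					// Ask the framework for the pause image that matches
					// the target cluster rather than hard-coding a name.
					Image: framework.GetPauseImageName(c),
				},
			},
		},
	}
	_, err := c.Pods(ns).Create(pod)
	framework.ExpectNoError(err)
}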

Example 1: createOutOfDiskPod

// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) {
	podClient := c.Pods(ns)

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "pause",
					Image: framework.GetPauseImageName(c),
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							// Request enough CPU to fit only two pods on a given node.
							api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	_, err := podClient.Create(pod)
	framework.ExpectNoError(err)
}
Author: CodeJuan | Project: kubernetes | Lines: 27 | Source: nodeoutofdisk.go

Example 2: runServiceLatencies

func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
	cfg := testutils.RCConfig{
		Client:       f.Client,
		Image:        framework.GetPauseImageName(f.Client),
		Name:         "svc-latency-rc",
		Namespace:    f.Namespace.Name,
		Replicas:     1,
		PollInterval: time.Second,
	}
	if err := framework.RunRC(cfg); err != nil {
		return nil, err
	}

	// Run a single watcher, to reduce the number of API calls we have to
	// make; this is to minimize the timing error. It's how kube-proxy
	// consumes the endpoints data, so it seems like the right thing to
	// test.
	endpointQueries := newQuerier()
	startEndpointWatcher(f, endpointQueries)
	defer close(endpointQueries.stop)

	// run one test and throw it away-- this is to make sure that the pod's
	// ready status has propagated.
	singleServiceLatency(f, cfg.Name, endpointQueries)

	// These channels are never closed, and each attempt sends on exactly
	// one of these channels, so the sum of the things sent over them will
	// be exactly total.
	errs := make(chan error, total)
	durations := make(chan time.Duration, total)

	blocker := make(chan struct{}, inParallel)
	for i := 0; i < total; i++ {
		go func() {
			defer GinkgoRecover()
			blocker <- struct{}{}
			defer func() { <-blocker }()
			if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil {
				errs <- err
			} else {
				durations <- d
			}
		}()
	}

	errCount := 0
	for i := 0; i < total; i++ {
		select {
		case e := <-errs:
			framework.Logf("Got error: %v", e)
			errCount++
		case d := <-durations:
			output = append(output, d)
		}
	}
	if errCount != 0 {
		return output, fmt.Errorf("got %v errors", errCount)
	}
	return output, nil
}
Author: miminar | Project: kubernetes | Lines: 60 | Source: service_latency.go

Example 3: initPausePod

func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
	if conf.Affinity != "" {
		if conf.Annotations == nil {
			conf.Annotations = map[string]string{
				api.AffinityAnnotationKey: conf.Affinity,
			}
		} else {
			conf.Annotations[api.AffinityAnnotationKey] = conf.Affinity
		}
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:        conf.Name,
			Labels:      conf.Labels,
			Annotations: conf.Annotations,
		},
		Spec: api.PodSpec{
			NodeSelector: conf.NodeSelector,
			Containers: []api.Container{
				{
					Name:  podName,
					Image: framework.GetPauseImageName(f.Client),
				},
			},
		},
	}
	if conf.Resources != nil {
		pod.Spec.Containers[0].Resources = *conf.Resources
	}
	return pod
}
Author: Random-Liu | Project: kubernetes | Lines: 31 | Source: scheduler_predicates.go
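
A hypothetical call site for initPausePod, using the pausePodConfig fields visible in the example above; the pod name, labels, and node selector values are made up for illustration.

// Illustrative usage: build a pause pod that only schedules onto nodes
// carrying the (hypothetical) "disktype=ssd" label.
pod := initPausePod(f, pausePodConfig{
	Name:         "pause-on-ssd",
	Labels:       map[string]string{"purpose": "scheduling-test"},
	NodeSelector: map[string]string{"disktype": "ssd"},
})
_, err := f.Client.Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)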

Example 4: runSchedulerNoPhantomPodsTest

func runSchedulerNoPhantomPodsTest(client *client.Client) {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "c1",
					Image: e2e.GetPauseImageName(client),
					Ports: []api.ContainerPort{
						{ContainerPort: 1234, HostPort: 9999},
					},
					ImagePullPolicy: api.PullIfNotPresent,
				},
			},
		},
	}

	// Assuming we only have two kubelets, the third pod here won't schedule
	// if the scheduler doesn't correctly handle the delete for the second
	// pod.
	pod.ObjectMeta.Name = "phantom.foo"
	foo, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, longTestTimeout, podRunning(client, foo.Namespace, foo.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}

	pod.ObjectMeta.Name = "phantom.bar"
	bar, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, longTestTimeout, podRunning(client, bar.Namespace, bar.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}

	// Delete a pod to free up room.
	glog.Infof("Deleting pod %v", bar.Name)
	err = client.Pods(api.NamespaceDefault).Delete(bar.Name, api.NewDeleteOptions(0))
	if err != nil {
		glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
	}

	pod.ObjectMeta.Name = "phantom.baz"
	baz, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, longTestTimeout, podRunning(client, baz.Namespace, baz.Name)); err != nil {
		if pod, perr := client.Pods(api.NamespaceDefault).Get("phantom.bar"); perr == nil {
			glog.Fatalf("FAILED: 'phantom.bar' was never deleted: %#v, err: %v", pod, err)
		} else {
			glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: err: %v, perr: %v", err, perr)
		}
	}

	glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
Author: ncdc | Project: origin | Lines: 59 | Source: integration.go

Example 5: createPod

func createPod(client clientset.Interface, name string, annotation map[string]string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: name, Annotations: annotation},
		Spec: api.PodSpec{
			Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
		},
	}
}
Author: neujie | Project: kubernetes | Lines: 8 | Source: scheduler_test.go

Example 6: runResourceTrackingTest

func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
	expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(framework.RunRC(testutils.RCConfig{
		Client:         f.ClientSet,
		InternalClient: f.InternalClientset,
		Name:           rcName,
		Namespace:      f.Namespace.Name,
		Image:          framework.GetPauseImageName(f.ClientSet),
		Replicas:       totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	rm.LogLatest()
	rm.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(f.ClientSet, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(f.ClientSet, nodeNames.List())
	usageSummary, err := rm.GetLatest()
	Expect(err).NotTo(HaveOccurred())
	// TODO(random-liu): Remove the original log when we migrate to new perfdash
	framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
	// Log perf result
	framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
	verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)

	cpuSummary := rm.GetCPUSummary()
	framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
	// Log perf result
	framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
	verifyCPULimits(expectedCPU, cpuSummary)

	By("Deleting the RC")
	framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}
Author: jonboulle | Project: kubernetes | Lines: 58 | Source: kubelet_perf.go

Example 7: createPod

func createPod(client clientset.Interface, name string, scheduler string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			Containers:    []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
			SchedulerName: scheduler,
		},
	}
}
Author: kubernetes | Project: kubernetes | Lines: 9 | Source: scheduler_test.go
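
A hedged sketch of how this createPod variant might be driven from a test: the pod is handed to a non-default scheduler by name. The clientSet and t identifiers, the scheduler name, and the namespace are assumptions for illustration; "my-scheduler" would have to match the name of a running custom scheduler.

// Illustrative only: request placement by a custom scheduler.
pod := createPod(clientSet, "custom-scheduled-pod", "my-scheduler")
if _, err := clientSet.Core().Pods("default").Create(pod); err != nil {
	t.Fatalf("Failed to create pod: %v", err)
}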

Example 8: SpreadServiceOrFail

// Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
	// First create the service
	serviceName := "test-service"
	serviceSpec := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: f.Namespace.Name,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{
				"service": serviceName,
			},
			Ports: []api.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(80),
			}},
		},
	}
	_, err := f.Client.Services(f.Namespace.Name).Create(serviceSpec)
	Expect(err).NotTo(HaveOccurred())

	// Now create some pods behind the service
	podSpec := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   serviceName,
			Labels: map[string]string{"service": serviceName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "test",
					Image: framework.GetPauseImageName(f.Client),
				},
			},
		},
	}

	// Caution: StartPods requires at least one pod to replicate.
	// Based on the callers, replicas is always a positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
	// Thus, there is no need to test for it here. If the precondition ever changes to allow zero
	// replicas, add a check for replicaCount > 0; otherwise StartPods panics.
	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)

	// Wait for all of them to be scheduled
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
	pods, err := framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())

	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}
Author: shmurata | Project: kubernetes | Lines: 54 | Source: ubernetes_lite.go

Example 9: ensurePodsAreRemovedWhenNamespaceIsDeleted

func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
	var err error

	By("Creating a test namespace")
	namespace, err := f.CreateNamespace("nsdeletetest", nil)
	Expect(err).NotTo(HaveOccurred())

	By("Waiting for a default service account to be provisioned in namespace")
	err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
	Expect(err).NotTo(HaveOccurred())

	By("Creating a pod in the namespace")
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name: "test-pod",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "nginx",
					Image: framework.GetPauseImageName(f.ClientSet),
				},
			},
		},
	}
	pod, err = f.ClientSet.Core().Pods(namespace.Name).Create(pod)
	Expect(err).NotTo(HaveOccurred())

	By("Waiting for the pod to have running status")
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))

	By("Deleting the namespace")
	err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil)
	Expect(err).NotTo(HaveOccurred())

	By("Waiting for the namespace to be removed.")
	maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
	framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
		func() (bool, error) {
			_, err = f.ClientSet.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{})
			if err != nil && errors.IsNotFound(err) {
				return true, nil
			}
			return false, nil
		}))

	By("Verifying there is no pod in the namespace")
	_, err = f.ClientSet.Core().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
	Expect(err).To(HaveOccurred())
}
Author: alex-mohr | Project: kubernetes | Lines: 50 | Source: namespace.go

Example 10: ReserveCpu

func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
	request := int64(millicores / replicas)
	config := &testutils.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    defaultTimeout,
		Image:      framework.GetPauseImageName(f.Client),
		Replicas:   replicas,
		CpuRequest: request,
	}
	framework.ExpectNoError(framework.RunRC(*config))
}
Author: Random-Liu | Project: kubernetes | Lines: 14 | Source: cluster_size_autoscaling.go
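
ReserveCpu splits the requested millicores evenly across the replicas, so a call like the following (illustrative values) produces four pods that each request 250m of CPU.

// Illustrative: reserve 1000 millicores in total across 4 replicas,
// i.e. a 250m CPU request per pod.
ReserveCpu(f, "cpu-reservation", 4, 1000)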

Example 11: DoTestPodScheduling

func DoTestPodScheduling(t *testing.T, restClient *client.Client) {
	goodCondition := api.NodeCondition{
		Type:              api.NodeReady,
		Status:            api.ConditionTrue,
		Reason:            "schedulable condition",
		LastHeartbeatTime: unversioned.Time{Time: time.Now()},
	}
	node := &api.Node{
		Spec: api.NodeSpec{Unschedulable: false},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
			},
			Conditions: []api.NodeCondition{goodCondition},
		},
	}

	for ii := 0; ii < 5; ii++ {
		node.Name = fmt.Sprintf("machine%d", ii+1)
		if _, err := restClient.Nodes().Create(node); err != nil {
			t.Fatalf("Failed to create nodes: %v", err)
		}
	}

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"},
		Spec: api.PodSpec{
			Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}},
		},
	}

	myPod, err := restClient.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		t.Fatalf("Failed to create pod: %v", err)
	}

	err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(restClient, myPod.Namespace, myPod.Name))
	if err != nil {
		t.Fatalf("Failed to schedule pod: %v", err)
	}

	if myPod, err := restClient.Pods(api.NamespaceDefault).Get(myPod.Name); err != nil {
		t.Fatalf("Failed to get pod: %v", err)
	} else if myPod.Spec.NodeName != "machine3" {
		t.Fatalf("Failed to schedule using extender, expected machine3, got %v", myPod.Spec.NodeName)
	}
	t.Logf("Scheduled pod using extenders")
}
Author: XbinZh | Project: kubernetes | Lines: 48 | Source: extender_test.go

Example 12: CreateHostPortPods

func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves host port"))
	config := &testutils.RCConfig{
		Client:    f.Client,
		Name:      id,
		Namespace: f.Namespace.Name,
		Timeout:   defaultTimeout,
		Image:     framework.GetPauseImageName(f.Client),
		Replicas:  replicas,
		HostPorts: map[string]int{"port1": 4321},
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
Author: Random-Liu | Project: kubernetes | Lines: 16 | Source: cluster_size_autoscaling.go

Example 13: ReserveMemory

func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
	request := int64(1024 * 1024 * megabytes / replicas)
	config := &testutils.RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    defaultTimeout,
		Image:      framework.GetPauseImageName(f.Client),
		Replicas:   replicas,
		MemRequest: request,
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
Author: Random-Liu | Project: kubernetes | Lines: 17 | Source: cluster_size_autoscaling.go

Example 14: CreateNodeSelectorPods

func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))

	config := &testutils.RCConfig{
		Client:       f.Client,
		Name:         "node-selector",
		Namespace:    f.Namespace.Name,
		Timeout:      defaultTimeout,
		Image:        framework.GetPauseImageName(f.Client),
		Replicas:     replicas,
		HostPorts:    map[string]int{"port1": 4321},
		NodeSelector: map[string]string{"cluster-autoscaling-test.special-node": "true"},
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}
Author: Random-Liu | Project: kubernetes | Lines: 18 | Source: cluster_size_autoscaling.go

Example 15: newTestPod

// newTestPod returns a pod that has the specified requests and limits
func newTestPod(f *framework.Framework, name string, requests api.ResourceList, limits api.ResourceList) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "pause",
					Image: framework.GetPauseImageName(f.ClientSet),
					Resources: api.ResourceRequirements{
						Requests: requests,
						Limits:   limits,
					},
				},
			},
		},
	}
}
Author: eljefedelrodeodeljefe | Project: kubernetes | Lines: 20 | Source: limit_range.go
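
A hedged sketch of driving newTestPod: resource.MustParse builds the request and limit lists from canonical quantity strings. The quantities and pod name are illustrative, and the creation call assumes the same internal clientset vintage and imports as the example above.

// Illustrative values: request 0.1 CPU / 64Mi, cap at 0.2 CPU / 128Mi.
requests := api.ResourceList{
	api.ResourceCPU:    resource.MustParse("100m"),
	api.ResourceMemory: resource.MustParse("64Mi"),
}
limits := api.ResourceList{
	api.ResourceCPU:    resource.MustParse("200m"),
	api.ResourceMemory: resource.MustParse("128Mi"),
}
pod := newTestPod(f, "limit-range-test-pod", requests, limits)
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)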


Note: The k8s.io/kubernetes/test/e2e/framework.GetPauseImageName examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's license before redistributing or reusing the code, and do not republish without permission.