

Golang framework.GetPauseImageNameForHostArch Function Code Examples

This article collects typical usage examples of the Golang function GetPauseImageNameForHostArch from k8s.io/kubernetes/test/e2e/framework. If you are wondering what GetPauseImageNameForHostArch does, how to use it, or where to find examples of it, the hand-picked code samples below should help.


Seven code examples of the GetPauseImageNameForHostArch function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
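Before the full examples, here is a minimal sketch of the typical pattern: the function returns the name of the pause container image appropriate for the host architecture as a string, which is then plugged into the Image field of a container spec. The newPausePod helper and its import paths are illustrative assumptions modeled on Example 2 below, not part of the framework package.

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"
)

// newPausePod is a hypothetical helper that builds a minimal pod running only the
// architecture-appropriate pause image, so it stays idle and consumes almost no resources.
func newPausePod(name string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyNever,
			Containers: []api.Container{
				{
					Name:  name,
					Image: framework.GetPauseImageNameForHostArch(),
				},
			},
		},
	}
}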

Example 1: runDensitySeqTest

// runDensitySeqTest runs the density sequential pod creation test
func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string) (time.Duration, []framework.PodLatencyData) {
	const (
		podType               = "density_test_pod"
		sleepBeforeCreatePods = 30 * time.Second
	)
	bgPods := newTestPods(testArg.bgPodsNr, framework.GetPauseImageNameForHostArch(), "background_pod")
	testPods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)

	By("Creating a batch of background pods")

	// CreateBatch is synchronous; all pods are running when it returns
	f.PodClient().CreateBatch(bgPods)

	time.Sleep(sleepBeforeCreatePods)

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out
	defer deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
	defer rc.Stop()

	// Create pods sequentially (back-to-back). e2eLags have been sorted.
	batchlag, e2eLags := createBatchPodSequential(f, testPods)

	// Log throughput data.
	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo)

	return batchlag, e2eLags
}
Developer: alex-mohr, Project: kubernetes, Lines: 29, Source: density_test.go

Example 2: createIdlePod

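// createIdlePod creates a pod that runs only the pause image and never restarts, so it sits idle on the node.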
func createIdlePod(podName string, podClient *framework.PodClient) {
	podClient.Create(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyNever,
			Containers: []api.Container{
				{
					Image: framework.GetPauseImageNameForHostArch(),
					Name:  podName,
				},
			},
		},
	})
}
Developer: Q-Lee, Project: kubernetes, Lines: 16, Source: disk_eviction_test.go

Example 3: makePodSpec

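// makePodSpec returns a PodSpec with a single pause container whose CPU and memory requests equal its limits (100m CPU, 500Mi memory).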
func makePodSpec() api.PodSpec {
	return api.PodSpec{
		Containers: []api.Container{{
			Name:  "pause",
			Image: e2e.GetPauseImageNameForHostArch(),
			Ports: []api.ContainerPort{{ContainerPort: 80}},
			Resources: api.ResourceRequirements{
				Limits: api.ResourceList{
					api.ResourceCPU:    resource.MustParse("100m"),
					api.ResourceMemory: resource.MustParse("500Mi"),
				},
				Requests: api.ResourceList{
					api.ResourceCPU:    resource.MustParse("100m"),
					api.ResourceMemory: resource.MustParse("500Mi"),
				},
			},
		}},
	}
}
Developer: olegshaldybin, Project: kubernetes, Lines: 19, Source: util.go

Example 4:

	// This is a very simple test that exercises the Kubelet's mounter code path.
	// If the mount fails, the pod will not be able to run, and CreateSync will time out.
	It("should be able to mount an emptydir on a container", func() {
		pod := &v1.Pod{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Pod",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "simple-mount-pod",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name:  "simple-mount-container",
						Image: framework.GetPauseImageNameForHostArch(),
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "simply-mounted-volume",
								MountPath: "/opt/",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "simply-mounted-volume",
						VolumeSource: v1.VolumeSource{
							EmptyDir: &v1.EmptyDirVolumeSource{
								Medium: "Memory",
							},
Developer: kubernetes, Project: kubernetes, Lines: 31, Source: simple_mount.go

Example 5:

		podCount            = 100
		podCreationInterval = 100 * time.Millisecond
		recoverTimeout      = 5 * time.Minute
		startTimeout        = 3 * time.Minute
		// restartCount is chosen so even with minPods we exhaust the default
		// allocation of a /24.
		minPods      = 50
		restartCount = 6
	)

	f := framework.NewDefaultFramework("restart-test")
	Context("Docker Daemon", func() {
		Context("Network", func() {
			It("should recover from ip leak", func() {

				pods := newTestPods(podCount, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
				By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
				createBatchPodWithRateControl(f, pods, podCreationInterval)
				defer deletePodsSync(f, pods)

				// Give the node some time to stabilize, assume pods that enter RunningReady within
				// startTimeout fit on the node and the node is now saturated.
				runningPods := waitForPods(f, podCount, startTimeout)
				if len(runningPods) < minPods {
					framework.Failf("Failed to start %d pods, cannot test that restarting docker doesn't leak IPs", minPods)
				}

				for i := 0; i < restartCount; i += 1 {
					By(fmt.Sprintf("Restarting Docker Daemon iteration %d", i))

					// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494
Developer: alex-mohr, Project: kubernetes, Lines: 31, Source: restart_test.go

Example 6: runDensityBatchTest

// runDensityBatchTest runs the density batch pod creation test
func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string,
	isLogTimeSeries bool) (time.Duration, []framework.PodLatencyData) {
	const (
		podType               = "density_test_pod"
		sleepBeforeCreatePods = 30 * time.Second
	)
	var (
		mutex      = &sync.Mutex{}
		watchTimes = make(map[string]metav1.Time, 0)
		stopCh     = make(chan struct{})
	)

	// create test pod data structure
	pods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)

	// the controller watches the change of pod status
	controller := newInformerWatchPod(f, mutex, watchTimes, podType)
	go controller.Run(stopCh)
	defer close(stopCh)

	// TODO(coufon): in the test we found that the kubelet starts while it is still busy with something else, so
	// 'syncLoop' does not respond to pod creation immediately. Creating the first pod therefore has a delay of around 5s.
	// The node status has already been 'ready', so waiting for and checking node readiness does not help here.
	// Wait here for a grace period to let 'syncLoop' become ready.
	time.Sleep(sleepBeforeCreatePods)

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out
	defer deletePodsSync(f, append(pods, getCadvisorPod()))
	defer rc.Stop()

	By("Creating a batch of pods")
	// It returns a map from pod name to creation timestamp.
	createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)

	By("Waiting for all Pods to be observed by the watch...")

	Eventually(func() bool {
		return len(watchTimes) == testArg.podsNr
	}, 10*time.Minute, 10*time.Second).Should(BeTrue())

	if len(watchTimes) < testArg.podsNr {
		framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
	}

	// Analyze results
	var (
		firstCreate metav1.Time
		lastRunning metav1.Time
		init        = true
		e2eLags     = make([]framework.PodLatencyData, 0)
	)

	for name, create := range createTimes {
		watch, ok := watchTimes[name]
		Expect(ok).To(Equal(true))

		e2eLags = append(e2eLags,
			framework.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})

		if !init {
			if firstCreate.Time.After(create.Time) {
				firstCreate = create
			}
			if lastRunning.Time.Before(watch.Time) {
				lastRunning = watch
			}
		} else {
			init = false
			firstCreate, lastRunning = create, watch
		}
	}

	sort.Sort(framework.LatencySlice(e2eLags))
	batchLag := lastRunning.Time.Sub(firstCreate.Time)

	// Log time series data.
	if isLogTimeSeries {
		logDensityTimeSeries(rc, createTimes, watchTimes, testInfo)
	}
	// Log throughput data.
	logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr, testInfo)

	return batchLag, e2eLags
}
Developer: alex-mohr, Project: kubernetes, Lines: 86, Source: density_test.go

Example 7: runEvictionTest


//......... part of the code omitted here .........
							priorityPod = p
						}
					}
					Expect(priorityPod).NotTo(BeNil())

					// Check eviction ordering.
					// Note: it is alright for a priority 1 and priority 2 pod (for example) to fail in the same round
					for _, lowPriorityPodSpec := range podTestSpecs {
						var lowPriorityPod v1.Pod
						for _, p := range updatedPods {
							if p.Name == lowPriorityPodSpec.pod.Name {
								lowPriorityPod = p
							}
						}
						Expect(lowPriorityPod).NotTo(BeNil())
						if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
							Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
								fmt.Sprintf("%s pod failed before %s pod", priorityPodSpec.pod.Name, lowPriorityPodSpec.pod.Name))
						}
					}

					// EvictionPriority 0 pods should not fail
					if priorityPodSpec.evictionPriority == 0 {
						Expect(priorityPod.Status.Phase).NotTo(Equal(v1.PodFailed),
							fmt.Sprintf("%s pod failed (and shouldn't have failed)", priorityPod.Name))
					}

					// If a pod that is not evictionPriority 0 has not been evicted, we are not done
					if priorityPodSpec.evictionPriority != 0 && priorityPod.Status.Phase != v1.PodFailed {
						done = false
					}
				}
				if done {
					return nil
				}
				return fmt.Errorf("pods that caused %s have not been evicted.", testCondition)
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())

			// We observe pressure from the API server. The eviction manager observes pressure from the kubelet's internal stats.
			// This means the eviction manager will observe pressure before we do, creating a delay between when the eviction manager
			// evicts a pod and when we observe the pressure by querying the API server. Add a delay here to account for this.
			By("making sure pressure from test has surfaced before continuing")
			time.Sleep(pressureDelay)

			By("making sure conditions eventually return to normal")
			Eventually(func() error {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				if hasPressure {
					return fmt.Errorf("Conditions havent returned to normal, we still have %s", testCondition)
				}
				return nil
			}, evictionTestTimeout, evictionPollInterval).Should(BeNil())

			By("making sure conditions do not return")
			Consistently(func() error {
				hasPressure, err := hasPressureCondition(f, testCondition)
				framework.ExpectNoError(err, fmt.Sprintf("checking if we have %s", testCondition))
				if hasPressure {
					return fmt.Errorf("%s dissappeared and then reappeared", testCondition)
				}
				return nil
			}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil())

			By("making sure we can start a new pod after the test")
			podName := "test-admit-pod"
			f.PodClient().CreateSync(&v1.Pod{
				ObjectMeta: v1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image: framework.GetPauseImageNameForHostArch(),
							Name:  podName,
						},
					},
				},
			})
		})

		AfterEach(func() {
			By("deleting pods")
			for _, spec := range podTestSpecs {
				By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
				f.PodClient().DeleteSync(spec.pod.Name, &v1.DeleteOptions{}, podDisappearTimeout)
			}

			if CurrentGinkgoTestDescription().Failed {
				if framework.TestContext.DumpLogsOnFailure {
					logPodEvents(f)
					logNodeEvents(f)
				}
				By("sleeping to allow for cleanup of test")
				time.Sleep(postTestConditionMonitoringPeriod)
			}
		})
	})
}
Developer: alex-mohr, Project: kubernetes, Lines: 101, Source: inode_eviction_test.go


Note: The k8s.io/kubernetes/test/e2e/framework.GetPauseImageNameForHostArch function examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.