This page collects typical usage examples of the Golang function k8s.io/kubernetes/test/e2e/framework.WaitForPodNotPending. If you are unsure what WaitForPodNotPending does, how to use it, or where to find examples of it in use, the curated code samples below should help.
Four code examples of the WaitForPodNotPending function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
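Before the examples, here is a minimal sketch of a typical call. The framework instance `f`, the pod, and the error message are placeholders, and the function's signature varies between Kubernetes releases, as the examples below show:

// Minimal sketch, assuming an e2e test with a framework instance `f` and a
// pod that has already been created. Older Kubernetes trees use the shorter
// WaitForPodNotPending(c, ns, podName) form seen in Examples 3 and 4.
err := framework.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, pod.Name, pod.ResourceVersion)
framework.ExpectNoError(err, fmt.Sprintf("pod %s never left the Pending phase", pod.Name))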
Example 1: runLivenessTest
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
    podClient := f.PodClient()
    ns := f.Namespace.Name
    Expect(pod.Spec.Containers).NotTo(BeEmpty())
    containerName := pod.Spec.Containers[0].Name
    // At the end of the test, clean up by removing the pod.
    defer func() {
        By("deleting the pod")
        podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
    }()
    By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
    podClient.Create(pod)
    // Wait until the pod is no longer pending. (We check that the phase is something
    // other than 'Pending' rather than waiting for 'Running', because a failing pod
    // goes to 'Terminated', and waiting for 'Running' would then block indefinitely.)
    framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
        fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
    framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
    // Check the pod's current state and verify that restartCount is present.
    By("checking the pod's current state and verifying that restartCount is present")
    pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
    framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
    initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
    framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
    // Wait for the restart count to reach the desired state.
    deadline := time.Now().Add(timeout)
    lastRestartCount := initialRestartCount
    observedRestarts := int32(0)
    for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
        pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
        framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
        restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
        if restartCount != lastRestartCount {
            framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
                ns, pod.Name, restartCount, time.Since(start))
            if restartCount < lastRestartCount {
                framework.Failf("Restart count should increment monotonically: restart count of pod %s/%s changed from %d to %d",
                    ns, pod.Name, lastRestartCount, restartCount)
            }
        }
        observedRestarts = restartCount - initialRestartCount
        if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
            // Stop once we have observed at least expectNumRestarts restarts.
            break
        }
        lastRestartCount = restartCount
    }
    // If we expected 0 restarts, fail if any restart was observed.
    // If we expected n restarts (n > 0), fail if we observed fewer than n restarts.
    if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
        int(observedRestarts) < expectNumRestarts) {
        framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
            ns, pod.Name, expectNumRestarts, observedRestarts)
    }
}
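As a usage note, runLivenessTest is meant to be driven with a pod whose container carries a liveness probe. The following invocation is a hypothetical sketch in the style of the Kubernetes e2e liveness tests; the pod name, image, and probe command are illustrative assumptions, not code from the example above:

// Hypothetical caller sketch: a busybox container whose liveness probe starts
// failing after ten seconds, so the kubelet should restart it at least once.
pod := &v1.Pod{
    ObjectMeta: metav1.ObjectMeta{Name: "liveness-exec"}, // illustrative name
    Spec: v1.PodSpec{
        Containers: []v1.Container{{
            Name:    "liveness",
            Image:   "busybox",
            Command: []string{"/bin/sh", "-c", "touch /tmp/health; sleep 10; rm -f /tmp/health; sleep 600"},
            LivenessProbe: &v1.Probe{
                Handler: v1.Handler{
                    Exec: &v1.ExecAction{Command: []string{"cat", "/tmp/health"}},
                },
                InitialDelaySeconds: 5,
            },
        }},
    },
}
// Expect at least one restart within the two-minute window.
runLivenessTest(f, pod, 1, 2*time.Minute)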
Example 2:
By("Trying to relaunch the pod, now with labels.")
labelPodName := "with-labels"
pod := createPausePod(f, pausePodConfig{
Name: labelPodName,
NodeSelector: map[string]string{
"kubernetes.io/hostname": nodeName,
k: v,
},
})
// check that pod got scheduled. We intentionally DO NOT check that the
// pod is running because this will create a race condition with the
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion))
labelPod, err := c.Pods(ns).Get(labelPodName)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
// non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
It("validates that NodeAffinity is respected if not matching", func() {
By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"
framework.WaitForStableCluster(c, masterNodes)
createPausePod(f, pausePodConfig{
Name: podName,
Example 3:
        },
        NodeSelector: map[string]string{
            "kubernetes.io/hostname": nodeName,
            k: v,
        },
    },
})
framework.ExpectNoError(err)
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))

// Check that the pod got scheduled. We intentionally do NOT check that the
// pod is running, because that would create a race condition between the
// kubelet and the scheduler: the scheduler might already have scheduled a pod
// while the kubelet does not yet know about its new label. The kubelet would
// then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName))
labelPod, err := c.Pods(ns).Get(labelPodName)
framework.ExpectNoError(err)
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})

// Test nodes do not have any labels, hence it should be impossible to schedule
// a Pod with a non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
It("validates that NodeAffinity is respected if not matching", func() {
    By("Trying to schedule Pod with nonempty NodeSelector.")
    podName := "restricted-pod"
    waitForStableCluster(c)
    _, err := c.Pods(ns).Create(&api.Pod{
        TypeMeta: unversioned.TypeMeta{
Example 4: getNsCmdFlag
    queryDns := fmt.Sprintf(queryDnsPythonTemplate, backendSvcName+"."+namespaces[0].Name)
    _, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
    Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")

    updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.cluster.local", fmt.Sprintf("dns-backend.%s.svc.cluster.local", namespaces[0].Name))

    // Create a pod in each namespace.
    for _, ns := range namespaces {
        framework.NewKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie()
    }

    // Wait until the pods have been scheduled, i.e. are no longer Pending. Note
    // that we cannot wait for the pods to be running, because our pods terminate
    // by themselves.
    for _, ns := range namespaces {
        err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName)
        framework.ExpectNoError(err)
    }

    // Wait for the pods to print their result.
    for _, ns := range namespaces {
        _, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
        Expect(err).NotTo(HaveOccurred())
    }
})
})

func getNsCmdFlag(ns *api.Namespace) string {
    return fmt.Sprintf("--namespace=%v", ns.Name)
}
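Finally, to make clear what all four examples are waiting for: WaitForPodNotPending's behavior can be approximated by polling the pod's phase until it leaves Pending. The sketch below is a conceptual approximation, not the framework's actual implementation; the interval, timeout, and import paths in the comment are assumptions tied to the Kubernetes tree of the same era as the examples:

// Conceptual sketch only. Assumes the clientset and helper packages used
// elsewhere on this page, roughly:
//
//     import (
//         metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//         "k8s.io/apimachinery/pkg/util/wait"
//         "k8s.io/kubernetes/pkg/api/v1"
//         clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
//     )
func waitForPodNotPendingSketch(c clientset.Interface, ns, podName string) error {
    // Poll every two seconds until the pod leaves Pending or the timeout fires;
    // both values here are illustrative, not the framework's defaults.
    return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
        pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        // Any phase other than Pending (Running, Succeeded, Failed, Unknown)
        // means the scheduler has acted, which is all the examples above need.
        return pod.Status.Phase != v1.PodPending, nil
    })
}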