This article collects typical usage examples of the Golang method k8s/io/kubernetes/test/e2e/framework.Framework.PodClient. If you are wondering what Framework.PodClient does, how to use it, or want to see it in context, the curated code samples below may help. You can also read more about the enclosing type, k8s/io/kubernetes/test/e2e/framework.Framework.
The following shows 15 code examples of Framework.PodClient, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang examples.
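Before the examples, here is a minimal sketch of the typical PodClient workflow inside a Ginkgo test body. The describe/test names, the pause image, and the zero grace period are illustrative assumptions; CreateSync, Get, and Delete are the same PodClient methods used in the examples below (imports from the e2e framework, pkg/api/v1, and metav1 are assumed). Note that some examples below use the older pkg/api types instead of v1, depending on the Kubernetes release they were taken from.
var _ = framework.KubeDescribe("PodClient basics [Illustrative]", func() {
	f := framework.NewDefaultFramework("podclient-example")

	It("creates, inspects and deletes a pod", func() {
		// CreateSync creates the pod in the framework's namespace and
		// blocks until it is running (failing the test on timeout).
		pod := f.PodClient().CreateSync(&v1.Pod{
			ObjectMeta: metav1.ObjectMeta{GenerateName: "podclient-example-"},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "pause",
					Image: "gcr.io/google_containers/pause-amd64:3.0", // assumed pause image
				}},
			},
		})

		// Get returns the live object, e.g. to inspect its status.
		p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		framework.Logf("pod %s is in phase %v", p.Name, p.Status.Phase)

		// Delete with a zero grace period to clean up quickly.
		f.PodClient().Delete(pod.Name, v1.NewDeleteOptions(0))
	})
})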
Example 1: runDensitySeqTest
// runDensitySeqTest runs the density sequential pod creation test.
func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg densityTest) (time.Duration, []framework.PodLatencyData) {
	const (
		podType               = "density_test_pod"
		sleepBeforeCreatePods = 30 * time.Second
	)
	bgPods := newTestPods(testArg.bgPodsNr, ImageRegistry[pauseImage], "background_pod")
	testPods := newTestPods(testArg.podsNr, ImageRegistry[pauseImage], podType)

	By("Creating a batch of background pods")
	// CreateBatch is synchronized: all pods are running when it returns.
	f.PodClient().CreateBatch(bgPods)

	time.Sleep(sleepBeforeCreatePods)

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out.
	defer deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
	defer rc.Stop()

	// Create pods sequentially (back-to-back). e2eLags have been sorted.
	batchlag, e2eLags := createBatchPodSequential(f, testPods)

	// Log throughput data.
	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testArg.getTestName())

	return batchlag, e2eLags
}
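A hedged sketch of how a density test case might invoke this helper. The densityTest field values, the surrounding It block, and the availability of f and rc in scope are illustrative assumptions; only podsNr, bgPodsNr, and getTestName come from the code above.
// Illustrative caller; the concrete pod counts are assumptions, not from the source.
It("should create a sequence of pods with measurable e2e latency [Benchmark]", func() {
	itArg := densityTest{
		podsNr:   10, // pods created back-to-back and measured
		bgPodsNr: 50, // background pods created first via CreateBatch
	}
	batchLag, e2eLags := runDensitySeqTest(f, rc, itArg)
	// e2eLags is sorted ascending, so the last entry is the slowest pod.
	framework.Logf("%s: batch lag %v, slowest pod e2e lag %v",
		itArg.getTestName(), batchLag, e2eLags[len(e2eLags)-1].Latency)
})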
Example 2: volumeTestCleanup
// volumeTestCleanup cleans up both the server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))
	defer GinkgoRecover()

	podClient := f.PodClient()

	err := podClient.Delete(config.prefix+"-client", nil)
	if err != nil {
		// Log the error before failing the test: if the test has already failed,
		// framework.ExpectNoError() won't print anything to the logs!
		glog.Warningf("Failed to delete client pod: %v", err)
		framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
	}

	if config.serverImage != "" {
		if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to wait for client pod to terminate: %v", err)
		}
		// See issue #24100.
		// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
		By("sleeping a bit so client can stop and unmount")
		time.Sleep(20 * time.Second)

		err = podClient.Delete(config.prefix+"-server", nil)
		if err != nil {
			glog.Warningf("Failed to delete server pod: %v", err)
			framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
		}
	}
}
Example 3: reportLogsFromFluentdPod
func reportLogsFromFluentdPod(f *framework.Framework) error {
	synthLoggerPod, err := f.PodClient().Get(synthLoggerPodName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("Failed to get synth logger pod due to %v", err)
	}

	synthLoggerNodeName := synthLoggerPod.Spec.NodeName
	if synthLoggerNodeName == "" {
		return errors.New("Synthlogger pod is not assigned to the node")
	}

	label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"}))
	options := v1.ListOptions{LabelSelector: label.String()}
	fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
	if err != nil {
		return fmt.Errorf("Failed to list fluentd pods due to %v", err)
	}

	for _, fluentdPod := range fluentdPods.Items {
		if fluentdPod.Spec.NodeName == synthLoggerNodeName {
			containerName := fluentdPod.Spec.Containers[0].Name
			logs, err := framework.GetPodLogs(f.ClientSet, api.NamespaceSystem, fluentdPod.Name, containerName)
			if err != nil {
				return fmt.Errorf("Failed to get logs from fluentd pod %s due to %v", fluentdPod.Name, err)
			}
			framework.Logf("Logs from fluentd pod %s:\n%s", fluentdPod.Name, logs)
			return nil
		}
	}

	return fmt.Errorf("Failed to find fluentd pod running on node %s", synthLoggerNodeName)
}
Example 4: runAppArmorTest
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	framework.ExpectNoError(f.WaitForPodNoLongerRunning(pod.Name))
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example 5: runAppArmorTest
func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	if shouldRun {
		// The pod needs to start before it stops, so wait for the longer start timeout.
		framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
			f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	} else {
		// The pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
		w, err := f.PodClient().Watch(v1.SingleObject(metav1.ObjectMeta{Name: pod.Name}))
		framework.ExpectNoError(err)
		_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
			switch e.Type {
			case watch.Deleted:
				return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
			}
			switch t := e.Object.(type) {
			case *v1.Pod:
				if t.Status.Reason == "AppArmor" {
					return true, nil
				}
			}
			return false, nil
		})
		framework.ExpectNoError(err)
	}
	p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return p.Status
}
Example 6: runLivenessTest
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
	podClient := f.PodClient()
	ns := f.Namespace.Name
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	containerName := pod.Spec.Containers[0].Name

	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()

	By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
	podClient.Create(pod)

	// Wait until the pod is no longer pending. (We check for a state other than
	// 'Pending' rather than waiting for 'Running', since on failure the pod goes to
	// 'Terminated', which would block indefinitely.)
	framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
		fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
	framework.Logf("Started pod %s in namespace %s", pod.Name, ns)

	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
	initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
	framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)

	// Wait for the restart state to be as desired.
	deadline := time.Now().Add(timeout)
	lastRestartCount := initialRestartCount
	observedRestarts := int32(0)
	for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
		pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
		restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
		if restartCount != lastRestartCount {
			framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
				ns, pod.Name, restartCount, time.Since(start))
			if restartCount < lastRestartCount {
				framework.Failf("Restart count should increment monotonically: restart count of pod %s/%s changed from %d to %d",
					ns, pod.Name, lastRestartCount, restartCount)
			}
		}
		observedRestarts = restartCount - initialRestartCount
		if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
			// Stop if we have observed at least expectNumRestarts restarts.
			break
		}
		lastRestartCount = restartCount
	}

	// If we expected 0 restarts, fail if we observed any restart.
	// If we expected n restarts (n > 0), fail if we observed fewer than n restarts.
	if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
		int(observedRestarts) < expectNumRestarts) {
		framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
			ns, pod.Name, expectNumRestarts, observedRestarts)
	}
}
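A hedged usage sketch for runLivenessTest: a busybox pod whose exec liveness probe starts failing once the health file is removed, asserted to restart at least once. The pod spec, probe settings, and timeout are illustrative assumptions; only the helper's signature comes from the example above.
// Illustrative caller; the pod spec and timeout values are assumptions.
It("should restart the container when the liveness probe fails", func() {
	runLivenessTest(f, &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "liveness-exec"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "liveness",
				Image:   "gcr.io/google_containers/busybox:1.24",
				Command: []string{"/bin/sh", "-c", "touch /tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
				LivenessProbe: &v1.Probe{
					Handler: v1.Handler{
						// The probe succeeds while /tmp/health exists and fails after it is removed.
						Exec: &v1.ExecAction{Command: []string{"cat", "/tmp/health"}},
					},
					InitialDelaySeconds: 5,
					FailureThreshold:    1,
				},
			}},
		},
	}, 1, 2*time.Minute) // expect at least one restart within two minutes
})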
Example 7: runAppArmorTest
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	// The pod needs to start before it stops, so wait for the longer start timeout.
	framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
		f.Client, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example 8: createBatchPodWithRateControl
// createBatchPodWithRateControl creates a batch of pods concurrently, using one
// goroutine per creation; the interval between creations provides throughput control.
func createBatchPodWithRateControl(f *framework.Framework, pods []*api.Pod, interval time.Duration) map[string]unversioned.Time {
	createTimes := make(map[string]unversioned.Time)
	for _, pod := range pods {
		createTimes[pod.ObjectMeta.Name] = unversioned.Now()
		go f.PodClient().Create(pod)
		time.Sleep(interval)
	}
	return createTimes
}
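Create here runs in its own goroutine, so the returned map only records submission times; actual startup must be awaited separately. Below is a hedged sketch of how a caller might turn those submission times into per-pod e2e latencies, assuming pods is the same slice passed to the helper; the interval, the use of Framework.WaitForPodRunning, and the latency bookkeeping are illustrative assumptions.
// Illustrative caller; interval and latency bookkeeping are assumptions.
createTimes := createBatchPodWithRateControl(f, pods, 200*time.Millisecond)

e2eLags := make([]framework.PodLatencyData, 0, len(pods))
for _, pod := range pods {
	// Wait until each pod is running, then record the elapsed time since submission.
	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
	e2eLags = append(e2eLags, framework.PodLatencyData{
		Name:    pod.Name,
		Latency: unversioned.Now().Time.Sub(createTimes[pod.Name].Time),
	})
}
sort.Sort(framework.LatencySlice(e2eLags))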
Example 9: createBatchPodSequential
// createBatchPodSequential creates pods back-to-back in sequence.
func createBatchPodSequential(f *framework.Framework, pods []*api.Pod) (time.Duration, []framework.PodLatencyData) {
	batchStartTime := unversioned.Now()
	e2eLags := make([]framework.PodLatencyData, 0)
	for _, pod := range pods {
		create := unversioned.Now()
		f.PodClient().CreateSync(pod)
		e2eLags = append(e2eLags,
			framework.PodLatencyData{Name: pod.Name, Latency: unversioned.Now().Time.Sub(create.Time)})
	}
	batchLag := unversioned.Now().Time.Sub(batchStartTime.Time)
	sort.Sort(framework.LatencySlice(e2eLags))
	return batchLag, e2eLags
}
Example 10: createMemhogPod
func createMemhogPod(f *framework.Framework, genName string, ctnName string, res api.ResourceRequirements) *api.Pod {
	env := []api.EnvVar{
		{
			Name: "MEMORY_LIMIT",
			ValueFrom: &api.EnvVarSource{
				ResourceFieldRef: &api.ResourceFieldSelector{
					Resource: "limits.memory",
				},
			},
		},
	}

	// If a limit is specified, pass 80% of it for -mem-total; otherwise use the downward API
	// to pass limits.memory, which will be the total memory available.
	// This helps prevent a guaranteed pod from triggering an OOM kill due to its low memory limit,
	// which would cause the test to fail inappropriately.
	var memLimit string
	if limit, ok := res.Limits["memory"]; ok {
		memLimit = strconv.Itoa(int(
			float64(limit.Value()) * 0.8))
	} else {
		memLimit = "$(MEMORY_LIMIT)"
	}

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			GenerateName: genName,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyNever,
			Containers: []api.Container{
				{
					Name:            ctnName,
					Image:           "gcr.io/google-containers/stress:v1",
					ImagePullPolicy: "Always",
					Env:             env,
					// 60 min timeout * 60s / tick per 10s = 360 ticks before timeout => ~11.11Mi/tick
					// to fill ~4Gi of memory, so initial ballpark 12Mi/tick.
					// We might see flakes due to timeout if the total memory on the nodes increases.
					Args:      []string{"-mem-alloc-size", "12Mi", "-mem-alloc-sleep", "10s", "-mem-total", memLimit},
					Resources: res,
				},
			},
		},
	}

	// The generated pod.Name will be on the pod spec returned by CreateSync.
	pod = f.PodClient().CreateSync(pod)
	glog.Infof("pod created with name: %s", pod.Name)
	return pod
}
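A hedged usage sketch: building an api.ResourceRequirements with equal memory requests and limits (guaranteed QoS) and handing it to the helper; the 100Mi quantity and the GenerateName/container-name arguments are illustrative assumptions.
// Illustrative caller; the quantities and names are assumptions.
res := api.ResourceRequirements{
	Requests: api.ResourceList{
		api.ResourceMemory: resource.MustParse("100Mi"),
	},
	Limits: api.ResourceList{
		api.ResourceMemory: resource.MustParse("100Mi"),
	},
}
// With a memory limit set, the helper above tells the stress container to
// allocate only 80% of it, so the pod approaches but does not exceed its limit.
memhogPod := createMemhogPod(f, "memhog-guaranteed-", "memhog", res)
glog.Infof("created memhog pod %s", memhogPod.Name)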
Example 11: runResourceUsageTest
// runResourceUsageTest runs the resource usage test.
func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg resourceTest) {
	const (
		// The monitoring time for one test.
		monitoringTime = 10 * time.Minute
		// The periodic reporting period.
		reportingPeriod = 5 * time.Minute
		// Sleep for an interval here to measure steady data.
		sleepAfterCreatePods = 10 * time.Second
	)
	pods := newTestPods(testArg.podsNr, ImageRegistry[pauseImage], "test_pod")

	rc.Start()
	// Explicitly delete pods to prevent the namespace controller cleanup from timing out.
	defer deletePodsSync(f, append(pods, getCadvisorPod()))
	defer rc.Stop()

	By("Creating a batch of Pods")
	f.PodClient().CreateBatch(pods)

	// Wait for a while to let the node become steady.
	time.Sleep(sleepAfterCreatePods)

	// Log once and flush the stats.
	rc.LogLatest()
	rc.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the CPU summary until the deadline is met.
	// Note that without calling framework.ResourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		framework.Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPods(f.Client)
	}

	By("Reporting overall resource usage")
	logPods(f.Client)
}
Example 12: deletePodsSync
// deletePodsSync deletes a list of pods and blocks until all of them have disappeared.
func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		go func(pod *v1.Pod) {
			defer wg.Done()

			err := f.PodClient().Delete(pod.ObjectMeta.Name, v1.NewDeleteOptions(30))
			Expect(err).NotTo(HaveOccurred())

			Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
				30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
		}(pod)
	}
	wg.Wait()
}
Example 13: createPodWithAppArmor
func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)),
			Annotations: map[string]string{
				apparmor.ContainerAnnotationKeyPrefix + "test": profile,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "test",
				Image:   "gcr.io/google_containers/busybox:1.24",
				Command: []string{"touch", "foo"},
			}},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	return f.PodClient().Create(pod)
}
Example 14: createPodWithAppArmor
func createPodWithAppArmor(f *framework.Framework, profile string) *api.Pod {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)),
			Annotations: map[string]string{
				"container.apparmor.security.alpha.kubernetes.io/test": profile,
			},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name:    "test",
				Image:   ImageRegistry[busyBoxImage],
				Command: []string{"touch", "foo"},
			}},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	return f.PodClient().Create(pod)
}
Example 15: createSynthLogger
func createSynthLogger(f *framework.Framework, linesCount int) {
	f.PodClient().Create(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      synthLoggerPodName,
			Namespace: f.Namespace.Name,
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyOnFailure,
			Containers: []v1.Container{
				{
					Name:  synthLoggerPodName,
					Image: "gcr.io/google_containers/busybox:1.24",
					// notice: the subshell syntax is escaped with `$$`
					Command: []string{"/bin/sh", "-c", fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo $i; i=`expr $i + 1`; done", linesCount)},
				},
			},
		},
	})
}
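A hedged sketch combining this helper with reportLogsFromFluentdPod from Example 3: create the synthetic logger, wait for it to run to completion, and dump fluentd logs for debugging if the wait fails. The line count, the zero grace-period delete, and the use of framework.WaitForPodSuccessInNamespace are illustrative assumptions.
// Illustrative caller; line count and wait helper are assumptions.
const expectedLinesCount = 100
createSynthLogger(f, expectedLinesCount)
defer f.PodClient().Delete(synthLoggerPodName, v1.NewDeleteOptions(0))

// The logger pod uses RestartPolicyOnFailure, so it is retried until it exits 0.
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
if err != nil {
	// On failure, surface the fluentd logs from the same node to aid debugging.
	if reportErr := reportLogsFromFluentdPod(f); reportErr != nil {
		framework.Logf("Failed to report logs from fluentd pod due to %v", reportErr)
	}
}
framework.ExpectNoError(err)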