This page collects typical usage examples of the Golang method Client.Pods from github.com/qingyuancloud/QingYuan/pkg/client. If you have been wondering what Client.Pods does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the enclosing type, github.com/qingyuancloud/QingYuan/pkg/client.Client.
The following shows 15 code examples of Client.Pods, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang examples.
Example 1: StartPods
// StartPods checks for numPods in TestNS. If they already exist, it no-ops; otherwise it
// starts a temporary rc, scales it to match numPods, then deletes the rc, leaving the pods behind.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)
	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host
	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
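For reference, a minimal hypothetical caller. The master address, pod count, and host are illustrative assumptions; TestNS, TestRCManifest, and StartRC come from the surrounding test harness, and the client constructor is assumed to mirror the upstream client package this fork is based on:

// Sketch only: point a client at a local test master and seed 30 pods on one host.
restClient := client.NewOrDie(&client.Config{Host: "http://127.0.0.1:8080"})
if err := StartPods(30, "127.0.0.1", restClient); err != nil {
	glog.Fatalf("StartPods failed: %v", err)
}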
Example 2: testHostIP
// testHostIP tests that a pod gets a host IP.
func testHostIP(c *client.Client, pod *api.Pod) {
	ns := "e2e-test-" + string(util.NewUUID())
	_, err := createNamespaceIfDoesNotExist(c, ns)
	expectNoError(err, fmt.Sprintf("creating namespace %s", ns))
	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name, nil)
	_, err = podClient.Create(pod)
	if err != nil {
		Fail(fmt.Sprintf("Failed to create pod: %v", err))
	}
	By("ensuring that pod is running and has a hostIP")
	// Wait for the pod to enter the running state. The wait loops until the pod
	// is running, so a pod that never runs causes this test to time out.
	err = waitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())
	// Try to make sure we get a hostIP for the pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			Failf("Gave up waiting for hostIP of pod %s after %v seconds",
				p.Name, time.Since(t).Seconds())
		}
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
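A hypothetical invocation for context; the pod name and pause image are illustrative assumptions, and c is an already-configured *client.Client:

// Sketch only: exercise testHostIP with a one-container pause pod.
testHostIP(c, &api.Pod{
	ObjectMeta: api.ObjectMeta{Name: "pod-hostip-" + string(util.NewUUID())},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{Name: "test", Image: "qingyuan/pause"},
		},
	},
})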
Example 3: verifyExpectedRcsExistAndGetExpectedPods
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers we want
	// to check. rcLabels contains the values for the k8s-app key that identify
	// those replication controllers. A label query is preferred over an explicit
	// name because names typically carry a version suffix, e.g. heapster-monitoring-v1,
	// which changes after a rolling update, e.g. to heapster-monitoring-v2. A label
	// query also catches the situation where both a heapster-monitoring-v1 and a
	// heapster-monitoring-v2 replication controller are running (an error except
	// during a rolling update).
	for _, rcLabel := range rcLabels {
		rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
		if err != nil {
			return nil, err
		}
		if len(rcList.Items) != 1 {
			return nil, fmt.Errorf("expected to find one replication controller with label %s but got %d",
				rcLabel, len(rcList.Items))
		}
		for _, rc := range rcList.Items {
			podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
Example 4: runSchedulerNoPhantomPodsTest
func runSchedulerNoPhantomPodsTest(client *client.Client) {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "c1",
					Image: "qingyuan/pause",
					Ports: []api.ContainerPort{
						{ContainerPort: 1234, HostPort: 9999},
					},
					ImagePullPolicy: api.PullIfNotPresent,
				},
			},
		},
	}
	// Assuming we only have two kubelets, the third pod here won't schedule
	// if the scheduler doesn't correctly handle the delete for the second
	// pod.
	pod.ObjectMeta.Name = "phantom.foo"
	foo, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, foo.Namespace, foo.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}
	pod.ObjectMeta.Name = "phantom.bar"
	bar, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, bar.Namespace, bar.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}
	// Delete a pod to free up room.
	glog.Infof("Deleting pod %v", bar.Name)
	err = client.Pods(api.NamespaceDefault).Delete(bar.Name, nil)
	if err != nil {
		glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
	}
	pod.ObjectMeta.Name = "phantom.baz"
	baz, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
		glog.Fatalf("FAILED: (scheduler probably didn't process deletion of 'phantom.bar') pod never started running: %v", err)
	}
	glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
Example 5: translatePodNameToIpOrFail
func translatePodNameToIpOrFail(c *client.Client, ns string, expectedEndpoints map[string][]int) map[string][]int {
	portsByIp := make(map[string][]int)
	for name, portList := range expectedEndpoints {
		pod, err := c.Pods(ns).Get(name)
		if err != nil {
			Failf("failed to get pod %s, validation failed: %v", name, err)
		}
		portsByIp[pod.Status.PodIP] = portList
	}
	By(fmt.Sprintf("successfully translated pod names to ips: %v -> %v on namespace %s", expectedEndpoints, portsByIp, ns))
	return portsByIp
}
Example 6: doServiceAccountAPIRequests
func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, authenticated bool, canRead bool, canWrite bool) {
	testSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Name: "testSecret"},
		Data:       map[string][]byte{"test": []byte("data")},
	}
	readOps := []testOperation{
		func() error { _, err := c.Secrets(ns).List(labels.Everything(), fields.Everything()); return err },
		func() error { _, err := c.Pods(ns).List(labels.Everything(), fields.Everything()); return err },
	}
	writeOps := []testOperation{
		func() error { _, err := c.Secrets(ns).Create(testSecret); return err },
		func() error { return c.Secrets(ns).Delete(testSecret.Name) },
	}
	for _, op := range readOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)
		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canRead && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canRead && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
	for _, op := range writeOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)
		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canWrite && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canWrite && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
}
Example 7: podScheduled
func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(podNamespace).Get(podName)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error, so we want to retry.
			return false, nil
		}
		if pod.Spec.NodeName == "" {
			return false, nil
		}
		return true, nil
	}
}
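Because podScheduled returns a wait.ConditionFunc rather than performing the check itself, it is meant to be handed to a poller such as wait.Poll, as Example 4 does with podRunning. A minimal sketch; the interval, timeout, namespace, and pod name are illustrative assumptions, and c is an existing *client.Client:

// Sketch only: poll every second, for up to 30 seconds, until the pod is bound to a node.
if err := wait.Poll(time.Second, 30*time.Second, podScheduled(c, api.NamespaceDefault, "phantom.foo")); err != nil {
	glog.Fatalf("pod was never scheduled: %v", err)
}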
Example 8: podRunning
func podRunning(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(podNamespace).Get(podName)
		if apierrors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error, so we want to retry, but log the error.
			glog.Errorf("Error when reading pod %q: %v", podName, err)
			return false, nil
		}
		if pod.Status.Phase != api.PodRunning {
			return false, nil
		}
		return true, nil
	}
}
Example 9: startVolumeServer
// startVolumeServer starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create the appropriate VolumeSource.
func startVolumeServer(client *client.Client, config VolumeTestConfig) *api.Pod {
	podClient := client.Pods(config.namespace)
	portCount := len(config.serverPorts)
	serverPodPorts := make([]api.ContainerPort, portCount)
	for i := 0; i < portCount; i++ {
		portName := fmt.Sprintf("%s-%d", config.prefix, i)
		serverPodPorts[i] = api.ContainerPort{
			Name:          portName,
			ContainerPort: config.serverPorts[i],
			Protocol:      api.ProtocolTCP,
		}
	}
	By(fmt.Sprint("creating ", config.prefix, " server pod"))
	privileged := new(bool)
	*privileged = true
	serverPod := &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: api.ObjectMeta{
			Name: config.prefix + "-server",
			Labels: map[string]string{
				"role": config.prefix + "-server",
			},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  config.prefix + "-server",
					Image: config.serverImage,
					SecurityContext: &api.SecurityContext{
						Privileged: privileged,
					},
					Ports: serverPodPorts,
				},
			},
		},
	}
	_, err := podClient.Create(serverPod)
	expectNoError(err, "Failed to create %s pod: %v", serverPod.Name, err)
	expectNoError(waitForPodRunningInNamespace(client, serverPod.Name, config.namespace))
	By("locating the server pod")
	pod, err := podClient.Get(serverPod.Name)
	expectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err)
	By("sleeping a bit to give the server time to start")
	time.Sleep(20 * time.Second)
	return pod
}
Example 10: volumeTestCleanup
// volumeTestCleanup cleans up both the server and client pods.
func volumeTestCleanup(client *client.Client, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))
	defer GinkgoRecover()
	podClient := client.Pods(config.namespace)
	// Ignore all errors: the pods may not even have been created.
	podClient.Delete(config.prefix+"-client", nil)
	podClient.Delete(config.prefix+"-server", nil)
}
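Examples 9 and 10 are designed to pair up: start the server, defer the cleanup, then read the server IP off the returned pod. A sketch of the intended flow; the VolumeTestConfig field values and the server image name are assumptions, not taken from this page:

config := VolumeTestConfig{
	namespace:   "e2e-volume",
	prefix:      "nfs",
	serverImage: "qingyuan/volume-nfs", // hypothetical image name
	serverPorts: []int{2049},
}
defer volumeTestCleanup(client, config)            // runs even if the test fails mid-way
serverPod := startVolumeServer(client, config)
serverIP := serverPod.Status.PodIP                 // used when building the VolumeSource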
Example 11: addEndpointPodOrFail
func addEndpointPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
	By(fmt.Sprintf("Adding pod %v in namespace %v", name, ns))
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "test",
					Image: "qingyuan/pause",
					Ports: containerPorts,
				},
			},
		},
	}
	_, err := c.Pods(ns).Create(pod)
	Expect(err).NotTo(HaveOccurred())
}
Example 12: runLivenessTest
func runLivenessTest(c *client.Client, podDescr *api.Pod, expectRestart bool) {
	ns := "e2e-test-" + string(util.NewUUID())
	_, err := createNamespaceIfDoesNotExist(c, ns)
	expectNoError(err, fmt.Sprintf("creating namespace %s", ns))
	By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns))
	_, err = c.Pods(ns).Create(podDescr)
	expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		c.Pods(ns).Delete(podDescr.Name, nil)
	}()
	// Wait until the pod is not pending. (We check for a state other than 'Pending'
	// rather than waiting for 'Running': when failures occur the pod goes to
	// 'Terminated', and waiting for 'Running' would block indefinitely.)
	expectNoError(waitForPodNotPending(c, ns, podDescr.Name),
		fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns))
	By(fmt.Sprintf("Started pod %s in namespace %s", podDescr.Name, ns))
	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := c.Pods(ns).Get(podDescr.Name)
	expectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns))
	initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
	By(fmt.Sprintf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount))
	// Wait at most 48 * 5s = 240s = 4 minutes for restartCount to be incremented.
	restarts := false
	for i := 0; i < 48; i++ {
		time.Sleep(5 * time.Second)
		pod, err = c.Pods(ns).Get(podDescr.Name)
		expectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name))
		restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
		By(fmt.Sprintf("Restart count of pod %s in namespace %s is now %d", podDescr.Name, ns, restartCount))
		if restartCount > initialRestartCount {
			By(fmt.Sprintf("Restart count of pod %s in namespace %s increased from %d to %d during the test", podDescr.Name, ns, initialRestartCount, restartCount))
			restarts = true
			break
		}
	}
	if restarts != expectRestart {
		Fail(fmt.Sprintf("pod %s in namespace %s - expected restarts: %v, found restarts: %v", podDescr.Name, ns, expectRestart, restarts))
	}
}
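A hypothetical call for orientation: runLivenessTest looks up a container named "liveness", so the pod spec must use that container name. The image, probe command, and timings below are illustrative assumptions written against the old-style api types this page uses:

runLivenessTest(c, &api.Pod{
	ObjectMeta: api.ObjectMeta{
		Name:   "liveness-exec",
		Labels: map[string]string{"test": "liveness"},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{
				// The container must be named "liveness"; see GetExistingContainerStatus above.
				Name:    "liveness",
				Image:   "qingyuan/busybox", // hypothetical image
				Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
				LivenessProbe: &api.Probe{
					Handler: api.Handler{
						Exec: &api.ExecAction{Command: []string{"cat", "/tmp/health"}},
					},
					InitialDelaySeconds: 15,
				},
			},
		},
	},
}, true) // expectRestart: the failing probe should force at least one restart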
Example 13: podsOnMinions
// podsOnMinions returns a condition that holds when all of the selected pods
// are bound to a node and running.
func podsOnMinions(c *client.Client, podNamespace string, labelSelector labels.Selector) wait.ConditionFunc {
	// Wait until all pods are running on their nodes.
	return func() (bool, error) {
		pods, err := c.Pods(podNamespace).List(labelSelector, fields.Everything())
		if err != nil {
			glog.Infof("Unable to list pods: %v", err)
			return false, nil
		}
		for i := range pods.Items {
			pod := pods.Items[i]
			podString := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)
			glog.Infof("Check whether pod %q exists on node %q", podString, pod.Spec.NodeName)
			if len(pod.Spec.NodeName) == 0 {
				glog.Infof("Pod %q is not bound to a host yet", podString)
				return false, nil
			}
			if pod.Status.Phase != api.PodRunning {
				return false, nil
			}
		}
		return true, nil
	}
}
Example 14: createRunningPod
func createRunningPod(wg *sync.WaitGroup, c *client.Client, name, ns, image string, labels map[string]string) {
	defer GinkgoRecover()
	defer wg.Done()
	pod := &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  name,
					Image: image,
				},
			},
		},
	}
	_, err := c.Pods(ns).Create(pod)
	expectNoError(err)
	expectNoError(waitForPodRunningInNamespace(c, name, ns))
}
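The *sync.WaitGroup parameter signals that this helper is meant to be fanned out across goroutines. A sketch of that pattern; the pod count, names, and image are illustrative assumptions, with c and ns supplied by the surrounding test:

var wg sync.WaitGroup
for i := 0; i < 3; i++ {
	wg.Add(1)
	name := fmt.Sprintf("pause-%d", i)
	// Each goroutine creates one pod and waits for it to reach Running.
	go createRunningPod(&wg, c, name, ns, "qingyuan/pause", map[string]string{"name": name})
}
wg.Wait() // block until every pod is running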
Example 15: makePersistentVolume
		err = c.PersistentVolumeClaims(ns).Delete(pvc.Name)
		Expect(err).NotTo(HaveOccurred())
		// Allow the recycler a chance to catch up; it has to perform an NFS scrub,
		// which can be slow in e2e.
		waitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 5*time.Second, 300*time.Second)
		pv, err = c.PersistentVolumes().Get(pv.Name)
		Expect(err).NotTo(HaveOccurred())
		if pv.Spec.ClaimRef != nil {
			Failf("Expected PersistentVolume to be unbound, but found non-nil ClaimRef: %+v", pv)
		}
		// The NFS server pod we're using contains an index.html file.
		// Verify the file was really scrubbed from the volume.
		podTemplate := makeCheckPod(ns, serverIP)
		checkpod, err := c.Pods(ns).Create(podTemplate)
		expectNoError(err, "Failed to create checker pod: %v", err)
		err = waitForPodSuccessInNamespace(c, checkpod.Name, checkpod.Spec.Containers[0].Name, checkpod.Namespace)
		Expect(err).NotTo(HaveOccurred())
	})
})

func makePersistentVolume(serverIP string) *api.PersistentVolume {
	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "nfs-",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("2Gi"),