This article collects typical usage examples of the Golang method vulcan/kubernetes/pkg/client/unversioned.Client.Pods. If you are unsure what Client.Pods does, how to call it, or where it is useful, the curated examples below should help; you can also consult the containing type, vulcan/kubernetes/pkg/client/unversioned.Client, for further context.
The following 15 code examples demonstrate Client.Pods, drawn from real projects and ordered roughly by popularity.
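Before the examples, a quick orientation: Client.Pods(namespace) returns a pod interface scoped to the given namespace, exposing Create, Get, List, Delete, and similar operations. Below is a minimal, hypothetical sketch of constructing the client and listing pods; it assumes the legacy unversioned client API (client.New with client.Config) and the fork's import paths, and the apiserver address is a placeholder. The examples that follow all receive an already-constructed *client.Client.

package main

import (
	"fmt"

	"vulcan/kubernetes/pkg/api"
	client "vulcan/kubernetes/pkg/client/unversioned"
	"vulcan/kubernetes/pkg/fields"
	"vulcan/kubernetes/pkg/labels"
)

func main() {
	// Hypothetical setup: point the client at a local apiserver.
	c, err := client.New(&client.Config{Host: "http://localhost:8080"})
	if err != nil {
		panic(err)
	}
	// List every pod in the default namespace.
	pods, err := c.Pods(api.NamespaceDefault).List(labels.Everything(), fields.Everything())
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Printf("%s/%s phase=%s\n", p.Namespace, p.Name, p.Status.Phase)
	}
}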
Example 1: testHostIP
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	if _, err := podClient.Create(pod); err != nil {
		Failf("Failed to create pod: %v", err)
	}
	By("ensuring that pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	err := waitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())
	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			Failf("Gave up waiting for hostIP of pod %s after %v seconds",
				p.Name, time.Since(t).Seconds())
		}
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
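For illustration, testHostIP might be invoked with a minimal pod spec like the one below (the pod name, namespace, and image are placeholders, not taken from the original test):

pod := &api.Pod{
	ObjectMeta: api.ObjectMeta{Name: "pod-hostip-test"}, // hypothetical name
	Spec: api.PodSpec{
		Containers: []api.Container{
			{Name: "test", Image: "kubernetes/pause"},
		},
	},
}
testHostIP(c, "e2e-tests-pods", pod) // namespace is a placeholder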
Example 2: StartPods
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)
	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host
	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
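A sketch of how this might be called (the host and pod count are placeholders; TestNS, TestRCManifest, StartRC, and RCFromManifest are helpers from the same test utility package):

if err := StartPods(30, "127.0.0.1", restClient); err != nil {
	glog.Fatalf("StartPods failed: %v", err)
}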
Example 3: verifyExpectedRcsExistAndGetExpectedPods
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix, e.g. heapster-monitoring-v1, and this will change after a rolling
	// update, e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
	// are running at the same time (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
		if err != nil {
			return nil, err
		}
		if len(rcList.Items) != 1 {
			return nil, fmt.Errorf("expected to find one replication controller with label %s but got %d",
				rcLabel, len(rcList.Items))
		}
		for _, rc := range rcList.Items {
			podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
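The rcLabels slice is defined elsewhere in the test file; given the comment above, it is a list of k8s-app label values, roughly of this shape (the values here are illustrative, not the original list):

// Illustrative only; the original rcLabels values are not shown in this listing.
var rcLabels = []string{"heapster", "influxGrafana"}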
Example 4: podsCreated
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return nil, err
		}
		created := []api.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp != nil {
				continue
			}
			created = append(created, pod)
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
		if len(created) == replicas {
			pods.Items = created
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
Example 5: waitForPodRunning
func waitForPodRunning(c *client.Client, pod *api.Pod, out io.Writer) (status api.PodPhase, err error) {
	for {
		pod, err := c.Pods(pod.Namespace).Get(pod.Name)
		if err != nil {
			return api.PodUnknown, err
		}
		ready := false
		if pod.Status.Phase == api.PodRunning {
			ready = true
			for _, status := range pod.Status.ContainerStatuses {
				if !status.Ready {
					ready = false
					break
				}
			}
			if ready {
				return api.PodRunning, nil
			}
		}
		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
			return pod.Status.Phase, nil
		}
		fmt.Fprintf(out, "Waiting for pod %s/%s to be running, status is %s, pod ready: %v\n", pod.Namespace, pod.Name, pod.Status.Phase, ready)
		time.Sleep(2 * time.Second)
	}
}
Example 6: runSchedulerNoPhantomPodsTest
func runSchedulerNoPhantomPodsTest(client *client.Client) {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "c1",
					Image: "kubernetes/pause",
					Ports: []api.ContainerPort{
						{ContainerPort: 1234, HostPort: 9999},
					},
					ImagePullPolicy: api.PullIfNotPresent,
				},
			},
		},
	}
	// Assuming we only have two kubelets, the third pod here won't schedule
	// if the scheduler doesn't correctly handle the delete for the second
	// pod.
	pod.ObjectMeta.Name = "phantom.foo"
	foo, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, foo.Namespace, foo.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running: %v", err)
	}
	pod.ObjectMeta.Name = "phantom.bar"
	bar, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, bar.Namespace, bar.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running: %v", err)
	}
	// Delete a pod to free up room.
	glog.Infof("Deleting pod %v", bar.Name)
	err = client.Pods(api.NamespaceDefault).Delete(bar.Name, api.NewDeleteOptions(0))
	if err != nil {
		glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
	}
	pod.ObjectMeta.Name = "phantom.baz"
	baz, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
		if pod, perr := client.Pods(api.NamespaceDefault).Get("phantom.bar"); perr == nil {
			glog.Fatalf("FAILED: 'phantom.bar' was never deleted: %#v", pod)
		} else {
			glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
		}
	}
	glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
Example 7: newPodOnNode
func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error {
	pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
	if err == nil {
		Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
	} else {
		Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
	}
	return err
}
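The podOnNode helper is not shown in this listing. A minimal sketch of what such a helper might look like, assuming it simply pins a single-container pod to the named node (field choices are assumptions, not the original helper):

func podOnNode(podName, nodeName, image string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   podName,
			Labels: map[string]string{"name": podName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: podName, Image: image},
			},
			// Pin the pod to the requested node, bypassing the scheduler.
			NodeName:      nodeName,
			RestartPolicy: api.RestartPolicyNever,
		},
	}
}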
Example 8: cleanupPods
func cleanupPods(c *client.Client, ns string) {
	By("Removing all pods in namespace " + ns)
	pods, err := c.Pods(ns).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	opt := api.NewDeleteOptions(0)
	for _, p := range pods.Items {
		expectNoError(c.Pods(ns).Delete(p.ObjectMeta.Name, opt))
	}
}
Example 9: translatePodNameToUIDOrFail
func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
	portsByUID := make(PortsByPodUID)
	for name, portList := range expectedEndpoints {
		pod, err := c.Pods(ns).Get(name)
		if err != nil {
			Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
		}
		portsByUID[pod.ObjectMeta.UID] = portList
	}
	// Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
	return portsByUID
}
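PortsByPodName and PortsByPodUID are defined elsewhere in the service tests; definitions consistent with how they are used above would be (assuming types.UID comes from the fork's pkg/types package):

type PortsByPodName map[string][]int
type PortsByPodUID map[types.UID][]int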
Example 10: getContainerRestarts
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
// and a list of node names on which these containers restarted.
func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Selector) (int, []string) {
	pods, err := c.Pods(ns).List(labelSelector, fields.Everything())
	expectNoError(err)
	failedContainers := 0
	containerRestartNodes := sets.NewString()
	for _, p := range pods.Items {
		for _, v := range FailedContainers(&p) {
			failedContainers += v.restarts
			containerRestartNodes.Insert(p.Spec.NodeName)
		}
	}
	return failedContainers, containerRestartNodes.List()
}
Example 11: doServiceAccountAPIRequests
func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, authenticated bool, canRead bool, canWrite bool) {
	testSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Name: "testSecret"},
		Data:       map[string][]byte{"test": []byte("data")},
	}
	readOps := []testOperation{
		func() error { _, err := c.Secrets(ns).List(labels.Everything(), fields.Everything()); return err },
		func() error { _, err := c.Pods(ns).List(labels.Everything(), fields.Everything()); return err },
	}
	writeOps := []testOperation{
		func() error { _, err := c.Secrets(ns).Create(testSecret); return err },
		func() error { return c.Secrets(ns).Delete(testSecret.Name) },
	}
	for _, op := range readOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)
		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canRead && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canRead && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
	for _, op := range writeOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)
		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canWrite && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canWrite && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
}
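The testOperation type used for readOps and writeOps is, judging from the function literals above, simply:

type testOperation func() error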
Example 12: handleAttachReplicationController
func handleAttachReplicationController(c *client.Client, controller *api.ReplicationController, opts *AttachOptions) error {
	var pods *api.PodList
	for pods == nil || len(pods.Items) == 0 {
		var err error
		if pods, err = c.Pods(controller.Namespace).List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything()); err != nil {
			return err
		}
		if len(pods.Items) == 0 {
			fmt.Fprint(opts.Out, "Waiting for pod to be scheduled\n")
			time.Sleep(2 * time.Second)
		}
	}
	pod := &pods.Items[0]
	return handleAttachPod(c, pod, opts)
}
Example 13: waitForAllPodsRunning
// Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunning(c *client.Client, ns, jobName string, parallelism int) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
	return wait.Poll(poll, jobTimeout, func() (bool, error) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return false, err
		}
		count := 0
		for _, p := range pods.Items {
			if p.Status.Phase == api.PodRunning {
				count++
			}
		}
		return count == parallelism, nil
	})
}
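poll and jobTimeout are package-level values shared by the job tests; illustrative definitions (the original values may differ):

const (
	poll       = 2 * time.Second
	jobTimeout = 15 * time.Minute
)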
Example 14: podScheduled
func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(podNamespace).Get(podName)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			// This could be a connection error so we want to retry.
			return false, nil
		}
		if pod.Spec.NodeName == "" {
			return false, nil
		}
		return true, nil
	}
}
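Because podScheduled returns a wait.ConditionFunc, it composes directly with wait.Poll, as in this sketch (interval, timeout, and pod name are arbitrary):

if err := wait.Poll(time.Second, 60*time.Second, podScheduled(c, api.NamespaceDefault, "my-pod")); err != nil {
	glog.Fatalf("pod was never scheduled: %v", err)
}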
Example 15: runLivenessTest
func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRestarts int) {
	By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns))
	_, err := c.Pods(ns).Create(podDescr)
	expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		c.Pods(ns).Delete(podDescr.Name, api.NewDeleteOptions(0))
	}()
	// Wait until the pod is not pending. (Here we need to check for something other than
	// 'Pending', rather than checking for 'Running', since when failures occur, we go to
	// 'Terminated', which can cause indefinite blocking.)
	expectNoError(waitForPodNotPending(c, ns, podDescr.Name),
		fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns))
	By(fmt.Sprintf("Started pod %s in namespace %s", podDescr.Name, ns))
	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := c.Pods(ns).Get(podDescr.Name)
	expectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns))
	initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
	By(fmt.Sprintf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount))
	// Wait for the restart state to be as desired.
	deadline := time.Now().Add(2 * time.Minute)
	lastRestartCount := initialRestartCount
	observedRestarts := 0
	for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
		pod, err = c.Pods(ns).Get(podDescr.Name)
		expectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name))
		restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
		if restartCount != lastRestartCount {
			By(fmt.Sprintf("Restart count of pod %s/%s is now %d (%v elapsed)",
				ns, podDescr.Name, restartCount, time.Since(start)))
			if restartCount < lastRestartCount {
				Failf("Restart count should increment monotonically: restart count of pod %s/%s changed from %d to %d",
					ns, podDescr.Name, lastRestartCount, restartCount)
			}
		}
		observedRestarts = restartCount - initialRestartCount
		if expectNumRestarts > 0 && observedRestarts >= expectNumRestarts {
			// Stop if we have observed at least expectNumRestarts restarts.
			break
		}
		lastRestartCount = restartCount
	}
	// If we expected 0 restarts, fail if we observed any restart.
	// If we expected n restarts (n > 0), fail if we observed fewer than n restarts.
	if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
		observedRestarts < expectNumRestarts) {
		Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
			ns, podDescr.Name, expectNumRestarts, observedRestarts)
	}
}
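Note that runLivenessTest reads the status of a container named "liveness", so any pod passed in must contain one. A hedged sketch of a suitable call (image, command, and probe settings are illustrative, not the original test fixture):

runLivenessTest(c, ns, &api.Pod{
	ObjectMeta: api.ObjectMeta{
		Name:   "liveness-exec", // hypothetical pod name
		Labels: map[string]string{"test": "liveness"},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{
				// The container must be named "liveness" for runLivenessTest's lookup.
				Name:    "liveness",
				Image:   "gcr.io/google_containers/busybox",
				Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
				LivenessProbe: &api.Probe{
					Handler: api.Handler{
						Exec: &api.ExecAction{Command: []string{"cat", "/tmp/health"}},
					},
					InitialDelaySeconds: 15,
				},
			},
		},
	},
}, 1) // expect one restart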