This article collects typical usage examples of the Golang method k8s.io/kubernetes/pkg/client.Client.Pods. If you have been wondering what Client.Pods does, how to call it, or what it looks like in practice, the curated examples below should help. You can also read up on the containing type, k8s.io/kubernetes/pkg/client.Client, for more context.
Fifteen code examples of Client.Pods are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site surface better Golang examples.
Example 1: testHostIP
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
podClient := c.Pods(ns)
By("creating pod")
defer podClient.Delete(pod.Name, nil)
_, err := podClient.Create(pod)
if err != nil {
Failf("Failed to create pod: %v", err)
}
By("ensuring that pod is running and has a hostIP")
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
err = waitForPodRunningInNamespace(c, pod.Name, ns)
Expect(err).NotTo(HaveOccurred())
// Try to make sure we get a hostIP for each pod.
hostIPTimeout := 2 * time.Minute
t := time.Now()
for {
p, err := podClient.Get(pod.Name)
Expect(err).NotTo(HaveOccurred())
if p.Status.HostIP != "" {
Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
break
}
if time.Since(t) >= hostIPTimeout {
Failf("Gave up waiting for hostIP of pod %s after %v seconds",
p.Name, time.Since(t).Seconds())
}
Logf("Retrying to get the hostIP of pod %s", p.Name)
time.Sleep(5 * time.Second)
}
}
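A minimal sketch of a call site for testHostIP, assuming the same package and imports as the example above; the pod name and image are illustrative placeholders rather than values from the original suite:

func runHostIPCheck(c *client.Client, ns string) {
	// Build a throwaway single-container pod; "kubernetes/pause" is borrowed
	// from Example 6 as a small, always-available test image.
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "hostip-test"},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: "pause", Image: "kubernetes/pause"},
			},
		},
	}
	testHostIP(c, ns, pod)
}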
Example 2: verifyResult
func verifyResult(c *client.Client, podName string, ns string, oldNotRunning int) {
allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
expectNoError(err)
_, notRunningPods := getPodsNumbers(allPods)
schedEvents, err := c.Events(ns).List(
labels.Everything(),
fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": podName,
"involvedObject.namespace": ns,
"source": "scheduler",
"reason": "FailedScheduling",
}.AsSelector())
expectNoError(err)
printed := false
printOnce := func(msg string) string {
if !printed {
printed = true
return msg
} else {
return ""
}
}
Expect(notRunningPods).To(Equal(1+oldNotRunning), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods)))
Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods)))
}
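getPodsNumbers is called above but not shown on this page. A plausible sketch, inferred from how verifyResult consumes its second return value (a count of pods not yet running); the actual helper in the suite may differ:

func getPodsNumbers(allPods *api.PodList) (runningPods, notRunningPods int) {
	// Assumed behavior: partition the listed pods by whether they have
	// reached the Running phase.
	for _, pod := range allPods.Items {
		if pod.Status.Phase == api.PodRunning {
			runningPods++
		} else {
			notRunningPods++
		}
	}
	return
}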
Example 3: verifyExpectedRcsExistAndGetExpectedPods
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
expectedPods := []string{}
// Iterate over the labels that identify the replication controllers that we
// want to check. rcLabels contains the values for the k8s-app key that
// identify the replication controllers that we want to check. Using a label
// rather than an explicit name is preferred because the names will typically
// have a version suffix, e.g. heapster-monitoring-v1, and this will change
// after a rolling update, e.g. to heapster-monitoring-v2. By using a label
// query we can check for the situation where both a heapster-monitoring-v1
// and a heapster-monitoring-v2 replication controller are running (which
// would be an error except during a rolling update).
for _, rcLabel := range rcLabels {
rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
if err != nil {
return nil, err
}
if len(rcList.Items) != 1 {
return nil, fmt.Errorf("expected to find one replica for RC with label %s but got %d",
rcLabel, len(rcList.Items))
}
for _, rc := range rcList.Items {
podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
if err != nil {
return nil, err
}
for _, pod := range podList.Items {
expectedPods = append(expectedPods, string(pod.UID))
}
}
}
return expectedPods, nil
}
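rcLabels is referenced above but not defined on this page. Given the label query it feeds, it is presumably a slice of k8s-app label values; the entries below are illustrative placeholders only:

// Hypothetical declaration: the k8s-app label values of the system
// replication controllers the test should verify.
var rcLabels = []string{"kube-dns", "monitoring-heapster"}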
Example 4: StartPods
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
start := time.Now()
defer func() {
glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
}()
hostField := fields.OneTermEqualSelector(client.PodHost, host)
pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
if err != nil || len(pods.Items) == numPods {
return err
}
glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
controller := RCFromManifest(TestRCManifest)
// Make the rc unique to the given host.
controller.Spec.Replicas = numPods
controller.Spec.Template.Spec.NodeName = host
controller.Name = controller.Name + host
controller.Spec.Selector["host"] = host
controller.Spec.Template.Labels["host"] = host
if rc, err := StartRC(controller, restClient); err != nil {
return err
} else {
// Delete the rc, otherwise when we restart master components for the next benchmark
// the rc controller will race with the pods controller in the rc manager.
return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
}
}
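A sketch of a call site, assuming the same package as above; the host name and pod count are placeholders:

func primeHost(restClient *client.Client) {
	// Ensure exactly ten pods exist on the target host before a benchmark;
	// StartPods no-ops if they are already running.
	if err := StartPods(10, "127.0.0.1", restClient); err != nil {
		glog.Fatalf("StartPods failed: %v", err)
	}
}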
Example 5: newPodOnNode
func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error {
pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
if err == nil {
Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
} else {
Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
}
return err
}
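podOnNode is called above but not shown here. A plausible shape, inferred from the call in newPodOnNode: it pins a single-container pod to a node via Spec.NodeName. The label scheme is an assumption:

func podOnNode(podName, nodeName, image string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   podName,
			Labels: map[string]string{"name": podName}, // assumed label scheme
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: podName, Image: image},
			},
			// Setting NodeName directly bypasses the scheduler and binds
			// the pod to the given node.
			NodeName: nodeName,
		},
	}
}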
Example 6: runSchedulerNoPhantomPodsTest
func runSchedulerNoPhantomPodsTest(client *client.Client) {
pod := &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "c1",
Image: "kubernetes/pause",
Ports: []api.ContainerPort{
{ContainerPort: 1234, HostPort: 9999},
},
ImagePullPolicy: api.PullIfNotPresent,
},
},
},
}
// Assuming we only have two kubelets, the third pod here won't schedule
// if the scheduler doesn't correctly handle the delete for the second
// pod.
pod.ObjectMeta.Name = "phantom.foo"
foo, err := client.Pods(api.NamespaceDefault).Create(pod)
if err != nil {
glog.Fatalf("Failed to create pod: %v, %v", pod, err)
}
if err := wait.Poll(time.Second, time.Second*30, podRunning(client, foo.Namespace, foo.Name)); err != nil {
glog.Fatalf("FAILED: pod never started running %v", err)
}
pod.ObjectMeta.Name = "phantom.bar"
bar, err := client.Pods(api.NamespaceDefault).Create(pod)
if err != nil {
glog.Fatalf("Failed to create pod: %v, %v", pod, err)
}
if err := wait.Poll(time.Second, time.Second*30, podRunning(client, bar.Namespace, bar.Name)); err != nil {
glog.Fatalf("FAILED: pod never started running %v", err)
}
// Delete a pod to free up room.
glog.Infof("Deleting pod %v", bar.Name)
err = client.Pods(api.NamespaceDefault).Delete(bar.Name, nil)
if err != nil {
glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
}
pod.ObjectMeta.Name = "phantom.baz"
baz, err := client.Pods(api.NamespaceDefault).Create(pod)
if err != nil {
glog.Fatalf("Failed to create pod: %v, %v", pod, err)
}
if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
}
glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
Example 7: getContainerRestarts
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
// and a list of node names on which those containers restarted.
func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Selector) (int, []string) {
pods, err := c.Pods(ns).List(labelSelector, fields.Everything())
expectNoError(err)
failedContainers := 0
containerRestartNodes := util.NewStringSet()
for _, p := range pods.Items {
for _, v := range FailedContainers(&p) {
failedContainers = failedContainers + v.restarts
containerRestartNodes.Insert(p.Spec.NodeName)
}
}
return failedContainers, containerRestartNodes.List()
}
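A sketch of how the two return values might be consumed, assuming the same package as above; the selector value is a placeholder:

func expectNoRestarts(c *client.Client, ns string) {
	selector := labels.Set{"name": "my-app"}.AsSelector() // placeholder label
	restarts, nodes := getContainerRestarts(c, ns, selector)
	if restarts > 0 {
		Failf("found %d container restarts on nodes %v", restarts, nodes)
	}
}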
Example 8: runBuildRunningPodDeleteTest
func runBuildRunningPodDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), "0")
if err != nil {
t.Fatalf("Couldn't subscribe to Builds %v", err)
}
defer buildWatch.Stop()
created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
if err != nil {
t.Fatalf("Couldn't create Build: %v", err)
}
podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), created.ResourceVersion)
if err != nil {
t.Fatalf("Couldn't subscribe to Pods %v", err)
}
defer podWatch.Stop()
// wait for initial build event from the creation of the imagerepo with tag latest
event := waitForWatch(t, "initial build added", buildWatch)
if e, a := watchapi.Added, event.Type; e != a {
t.Fatalf("expected watch event type %s, got %s", e, a)
}
newBuild := event.Object.(*buildapi.Build)
// initial pod creation for build
event = waitForWatch(t, "build pod created", podWatch)
if e, a := watchapi.Added, event.Type; e != a {
t.Fatalf("expected watch event type %s, got %s", e, a)
}
event = waitForWatch(t, "build updated to pending", buildWatch)
if e, a := watchapi.Modified, event.Type; e != a {
t.Fatalf("expected watch event type %s, got %s", e, a)
}
newBuild = event.Object.(*buildapi.Build)
if newBuild.Status.Phase != buildapi.BuildPhasePending {
t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase)
}
clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildutil.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
event = waitForWatch(t, "build updated to error", buildWatch)
if e, a := watchapi.Modified, event.Type; e != a {
t.Fatalf("expected watch event type %s, got %s", e, a)
}
newBuild = event.Object.(*buildapi.Build)
if newBuild.Status.Phase != buildapi.BuildPhaseError {
t.Fatalf("expected build status to be marked error, but was marked %s", newBuild.Status.Phase)
}
}
Example 9: translatePodNameToIpOrFail
func translatePodNameToIpOrFail(c *client.Client, ns string, expectedEndpoints map[string][]int) map[string][]int {
portsByIp := make(map[string][]int)
for name, portList := range expectedEndpoints {
pod, err := c.Pods(ns).Get(name)
if err != nil {
Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
}
portsByIp[pod.Status.PodIP] = portList
By(fmt.Sprintf(""))
}
By(fmt.Sprintf("successfully translated pod names to ips: %v -> %v on namespace %s", expectedEndpoints, portsByIp, ns))
return portsByIp
}
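An illustrative call, assuming the same package; pod names and ports are placeholders:

func resolveExpectedEndpoints(c *client.Client, ns string) map[string][]int {
	// Expected ports keyed by pod name; the helper rekeys them by pod IP.
	expected := map[string][]int{
		"pod-a": {80},
		"pod-b": {80, 8080},
	}
	return translatePodNameToIpOrFail(c, ns, expected)
}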
Example 10: doServiceAccountAPIRequests
func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, authenticated bool, canRead bool, canWrite bool) {
testSecret := &api.Secret{
ObjectMeta: api.ObjectMeta{Name: "testSecret"},
Data: map[string][]byte{"test": []byte("data")},
}
readOps := []testOperation{
func() error { _, err := c.Secrets(ns).List(labels.Everything(), fields.Everything()); return err },
func() error { _, err := c.Pods(ns).List(labels.Everything(), fields.Everything()); return err },
}
writeOps := []testOperation{
func() error { _, err := c.Secrets(ns).Create(testSecret); return err },
func() error { return c.Secrets(ns).Delete(testSecret.Name) },
}
for _, op := range readOps {
err := op()
unauthorizedError := errors.IsUnauthorized(err)
forbiddenError := errors.IsForbidden(err)
switch {
case !authenticated && !unauthorizedError:
t.Fatalf("expected unauthorized error, got %v", err)
case authenticated && unauthorizedError:
t.Fatalf("unexpected unauthorized error: %v", err)
case authenticated && canRead && forbiddenError:
t.Fatalf("unexpected forbidden error: %v", err)
case authenticated && !canRead && !forbiddenError:
t.Fatalf("expected forbidden error, got: %v", err)
}
}
for _, op := range writeOps {
err := op()
unauthorizedError := errors.IsUnauthorized(err)
forbiddenError := errors.IsForbidden(err)
switch {
case !authenticated && !unauthorizedError:
t.Fatalf("expected unauthorized error, got %v", err)
case authenticated && unauthorizedError:
t.Fatalf("unexpected unauthorized error: %v", err)
case authenticated && canWrite && forbiddenError:
t.Fatalf("unexpected forbidden error: %v", err)
case authenticated && !canWrite && !forbiddenError:
t.Fatalf("expected forbidden error, got: %v", err)
}
}
}
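testOperation is used above but not defined on this page; given how readOps and writeOps are built from nullary closures returning error, it is presumably:

// Presumed declaration, matching the closure literals above.
type testOperation func() error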
Example 11: handleAttachReplicationController
func handleAttachReplicationController(c *client.Client, controller *api.ReplicationController, opts *AttachOptions) error {
var pods *api.PodList
for pods == nil || len(pods.Items) == 0 {
var err error
if pods, err = c.Pods(controller.Namespace).List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything()); err != nil {
return err
}
if len(pods.Items) == 0 {
fmt.Fprint(opts.Out, "Waiting for pod to be scheduled\n")
time.Sleep(2 * time.Second)
}
}
pod := &pods.Items[0]
return handleAttachPod(c, pod, opts)
}
Example 12: podScheduled
func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.Pods(podNamespace).Get(podName)
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
}
if pod.Spec.NodeName == "" {
return false, nil
}
return true, nil
}
}
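Typical use mirrors the wait.Poll pattern from Example 6: poll until the condition reports true or the timeout elapses. The wrapper below is a sketch; namespace and name come from the caller:

func waitForScheduled(c *client.Client, ns, name string) error {
	// Poll once a second, for up to 30 seconds, until a node is assigned.
	return wait.Poll(time.Second, 30*time.Second, podScheduled(c, ns, name))
}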
Example 13: podsCreated
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
timeout := time.Minute
// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
pods, err := c.Pods(ns).List(label, fields.Everything())
if err != nil {
return nil, err
}
Logf("Pod name %s: Found %d pods out of %d", name, len(pods.Items), replicas)
if len(pods.Items) == replicas {
return pods, nil
}
}
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
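An illustrative call, assuming the same package as above; the controller name and replica count are placeholders:

func checkReplicasUp(c *client.Client, ns string) {
	pods, err := podsCreated(c, ns, "my-rc", 3)
	expectNoError(err)
	Logf("observed %d pods for my-rc", len(pods.Items))
}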
Example 14: podRunning
func podRunning(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.Pods(podNamespace).Get(podName)
if apierrors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry, but log the error.
glog.Errorf("Error when reading pod %q: %v", podName, err)
return false, nil
}
if pod.Status.Phase != api.PodRunning {
return false, nil
}
return true, nil
}
}
Example 15: runBuildDeleteTest
func runBuildDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), "0")
if err != nil {
t.Fatalf("Couldn't subscribe to Builds %v", err)
}
defer buildWatch.Stop()
created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
if err != nil {
t.Fatalf("Couldn't create Build: %v", err)
}
podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), created.ResourceVersion)
if err != nil {
t.Fatalf("Couldn't subscribe to Pods %v", err)
}
defer podWatch.Stop()
// wait for initial build event from the creation of the imagerepo with tag latest
event := waitForWatch(t, "initial build added", buildWatch)
if e, a := watchapi.Added, event.Type; e != a {
t.Fatalf("expected watch event type %s, got %s", e, a)
}
newBuild := event.Object.(*buildapi.Build)
// initial pod creation for build
event = waitForWatch(t, "build pod created", podWatch)
if e, a := watchapi.Added, event.Type; e != a {
t.Fatalf("expected watch event type %s, got %s", e, a)
}
clusterAdminClient.Builds(testutil.Namespace()).Delete(newBuild.Name)
event = waitForWatch(t, "pod deleted due to build deleted", podWatch)
if e, a := watchapi.Deleted, event.Type; e != a {
t.Fatalf("expected watch event type %s, got %s", e, a)
}
pod := event.Object.(*kapi.Pod)
if expected := buildutil.GetBuildPodName(newBuild); pod.Name != expected {
t.Fatalf("Expected pod %s to be deleted, but pod %s was deleted", expected, pod.Name)
}
}