This article collects typical usage examples of the Client type from github.com/GoogleCloudPlatform/kubernetes/pkg/client in Go. If you have been wondering how the Go Client type is used in practice, or are looking for concrete examples of it, the curated examples here may help.
Below are 15 code examples of the Client type, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Go code examples.
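All of the examples below take an already constructed *client.Client. As a point of reference, here is a minimal sketch of building one against the old GoogleCloudPlatform/kubernetes client package; the apiserver address and API version are placeholder assumptions, and client.New/client.Config are used as they existed in that era of the library.

package main

import (
	"log"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
)

func main() {
	// Placeholder apiserver address and API version; adjust for your cluster.
	c, err := client.New(&client.Config{
		Host:    "http://localhost:8080",
		Version: "v1beta1",
	})
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}

	// The *client.Client can now be passed to functions like those shown below,
	// e.g. DescriberFor("Pod", c) or runMasterServiceTest(c).
	_ = c
}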
Example 1: DescriberFor
// DescriberFor returns the default describe functions for each of the standard
// Kubernetes types.
func DescriberFor(kind string, c *client.Client) (Describer, bool) {
	switch kind {
	case "Pod":
		return &PodDescriber{
			PodClient: func(namespace string) (client.PodInterface, error) {
				return c.Pods(namespace), nil
			},
			ReplicationControllerClient: func(namespace string) (client.ReplicationControllerInterface, error) {
				return c.ReplicationControllers(namespace), nil
			},
		}, true
	case "ReplicationController":
		return &ReplicationControllerDescriber{
			PodClient: func(namespace string) (client.PodInterface, error) {
				return c.Pods(namespace), nil
			},
			ReplicationControllerClient: func(namespace string) (client.ReplicationControllerInterface, error) {
				return c.ReplicationControllers(namespace), nil
			},
		}, true
	case "Service":
		return &ServiceDescriber{
			ServiceClient: func(namespace string) (client.ServiceInterface, error) {
				return c.Services(namespace), nil
			},
		}, true
	}
	return nil, false
}
Example 2: watchNodes
func watchNodes(client *client.Client) {
	nodeList, err := client.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		log.Fatal(err)
	}
	nodes := nodeList.Items
	writeNodeTargetsFile(nodes)

	watcher, err := client.Nodes().Watch(labels.Everything(), fields.Everything(), nodeList.ResourceVersion)
	if err != nil {
		log.Fatal(err)
	}

	for event := range watcher.ResultChan() {
		switch event.Type {
		case watch.Added:
			switch obj := event.Object.(type) {
			case *api.Node:
				nodes = append(nodes, *obj)
			}
			writeNodeTargetsFile(nodes)
		case watch.Deleted:
			switch obj := event.Object.(type) {
			case *api.Node:
				index := findNodeIndexInSlice(nodes, obj)
				nodes = append(nodes[:index], nodes[index+1:]...)
			}
			writeNodeTargetsFile(nodes)
		}
	}
}
Example 3: runMasterServiceTest
func runMasterServiceTest(client *client.Client) {
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		glog.Fatalf("unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := util.StringSet{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("no endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("no RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}
Example 4: verifyExpectedRcsExistAndGetExpectedPods
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		return nil, err
	}
	expectedPods := []string{}
	for _, rc := range rcList.Items {
		if _, ok := expectedRcs[rc.Name]; ok {
			if rc.Status.Replicas != 1 {
				return nil, fmt.Errorf("expected to find only one replica for rc %q, found %d", rc.Name, rc.Status.Replicas)
			}
			expectedRcs[rc.Name] = true
			podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	for rc, found := range expectedRcs {
		if !found {
			return nil, fmt.Errorf("Replication Controller %q not found.", rc)
		}
	}
	return expectedPods, nil
}
Example 5: runStaticPodTest
func runStaticPodTest(c *client.Client, configFilePath string) {
	manifest := `version: v1beta2
id: static-pod
containers:
  - name: static-container
    image: kubernetes/pause`

	manifestFile, err := ioutil.TempFile(configFilePath, "")
	defer os.Remove(manifestFile.Name())
	ioutil.WriteFile(manifestFile.Name(), []byte(manifest), 0600)

	// Wait for the mirror pod to be created.
	hostname, _ := os.Hostname()
	podName := fmt.Sprintf("static-pod-%s", hostname)
	namespace := kubelet.NamespaceDefault
	if err := wait.Poll(time.Second, time.Second*30,
		podRunning(c, namespace, podName)); err != nil {
		glog.Fatalf("FAILED: mirror pod has not been created or is not running: %v", err)
	}

	// Delete the mirror pod, and wait for it to be recreated.
	c.Pods(namespace).Delete(podName)
	if err = wait.Poll(time.Second, time.Second*30,
		podRunning(c, namespace, podName)); err != nil {
		glog.Fatalf("FAILED: mirror pod has not been re-created or is not running: %v", err)
	}

	// Remove the manifest file, and wait for the mirror pod to be deleted.
	os.Remove(manifestFile.Name())
	if err = wait.Poll(time.Second, time.Second*30,
		podNotFound(c, namespace, podName)); err != nil {
		glog.Fatalf("FAILED: mirror pod has not been deleted: %v", err)
	}
}
Example 6: createNamespaceIfDoesNotExist
// createNamespaceIfDoesNotExist ensures that the namespace with the specified name exists, or returns an error.
func createNamespaceIfDoesNotExist(c *client.Client, name string) (*api.Namespace, error) {
	namespace, err := c.Namespaces().Get(name)
	if err != nil {
		namespace, err = c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: name}})
	}
	return namespace, err
}
Example 7: waitForClusterSize
func waitForClusterSize(c *client.Client, size int) error {
	timeout := 4 * time.Minute
	if providerIs("aws") {
		// AWS is not as fast as gce/gke at having nodes come online.
		timeout = 10 * time.Minute
	}
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
		if err != nil {
			Logf("Failed to list nodes: %v", err)
			continue
		}
		// Filter out not-ready nodes.
		filterNodes(nodes, func(node api.Node) bool {
			return isNodeReadySetAsExpected(&node, true)
		})
		if len(nodes.Items) == size {
			Logf("Cluster has reached the desired size %d", size)
			return nil
		}
		Logf("Waiting for cluster size %d, current size %d", size, len(nodes.Items))
	}
	return fmt.Errorf("timeout waiting for cluster size to be %d", size)
}
Example 8: runReplicationControllerTest
func runReplicationControllerTest(c *client.Client) {
	clientAPIVersion := c.APIVersion()
	data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	var controller api.ReplicationController
	if err := api.Scheme.DecodeInto(data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}

	glog.Infof("Creating replication controllers")
	updated, err := c.ReplicationControllers("test").Create(&controller)
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers")

	// Give the controllers some time to actually create the pods.
	if err := wait.Poll(time.Second, time.Second*30, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}

	// Poll until we can retrieve the status of all pods matching the given label selector from their minions.
	// This involves 3 operations:
	//   - The scheduler must assign all pods to a minion.
	//   - The assignment must be reflected in a `List` operation against the apiserver, for labels matching the selector.
	//   - We need to be able to query the kubelet on that minion for information about the pod.
	if err := wait.Poll(
		time.Second, time.Second*30, podsOnMinions(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}

	glog.Infof("Pods created")
}
Example 9: watchOnce
func watchOnce(etcdClient *etcd.Client, kubeClient *kclient.Client) {
	// Start the goroutine to produce update events.
	updates := make(chan serviceUpdate)
	startWatching(kubeClient.Services(kapi.NamespaceAll), updates)

	// This loop will break if the channel closes, which is how the
	// goroutine signals an error.
	for ev := range updates {
		if *verbose {
			log.Printf("Received update event: %#v", ev)
		}
		switch ev.Op {
		case SetServices, AddService:
			for i := range ev.Services {
				s := &ev.Services[i]
				name := buildNameString(s.Name, s.Namespace, *domain)
				mutateEtcdOrDie(func() error { return addDNS(name, s, etcdClient) })
			}
		case RemoveService:
			for i := range ev.Services {
				s := &ev.Services[i]
				name := buildNameString(s.Name, s.Namespace, *domain)
				mutateEtcdOrDie(func() error { return removeDNS(name, etcdClient) })
			}
		}
	}
	// TODO: fully resync periodically.
}
Example 10: verifyExpectedRcsExistAndGetExpectedPods
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify the replication controllers we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix, e.g. heapster-monitoring-v1, and this will change after a rolling
	// update, e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation where both a heapster-monitoring-v1 and a heapster-monitoring-v2 replication
	// controller are running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
		if err != nil {
			return nil, err
		}
		if len(rcList.Items) != 1 {
			return nil, fmt.Errorf("expected to find one replica for RC with label %s but got %d",
				rcLabel, len(rcList.Items))
		}
		for _, rc := range rcList.Items {
			podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
Example 11: StartPods
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)

	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
Example 12: TestKubernetesROService
func TestKubernetesROService(c *client.Client) bool {
	svc := api.ServiceList{}
	err := c.Get().
		Namespace("default").
		AbsPath("/api/v1beta1/proxy/services/kubernetes-ro/api/v1beta1/services").
		Do().
		Into(&svc)
	if err != nil {
		glog.Errorf("unexpected error listing services using ro service: %v", err)
		return false
	}
	var foundRW, foundRO bool
	for i := range svc.Items {
		if svc.Items[i].Name == "kubernetes" {
			foundRW = true
		}
		if svc.Items[i].Name == "kubernetes-ro" {
			foundRO = true
		}
	}
	if !foundRW {
		glog.Error("no RW service found")
	}
	if !foundRO {
		glog.Error("no RO service found")
	}
	if !foundRW || !foundRO {
		return false
	}
	return true
}
Example 13: runReplicationControllerTest
func runReplicationControllerTest(kubeClient *client.Client) {
	data, err := ioutil.ReadFile("api/examples/controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %#v", err)
	}
	var controllerRequest api.ReplicationController
	if err := json.Unmarshal(data, &controllerRequest); err != nil {
		glog.Fatalf("Unexpected error: %#v", err)
	}

	glog.Infof("Creating replication controllers")
	if _, err := kubeClient.CreateReplicationController(controllerRequest); err != nil {
		glog.Fatalf("Unexpected error: %#v", err)
	}
	glog.Infof("Done creating replication controllers")

	// Give the controllers some time to actually create the pods.
	time.Sleep(time.Second * 10)

	// Validate that they're truly up.
	pods, err := kubeClient.ListPods(labels.Set(controllerRequest.DesiredState.ReplicaSelector).AsSelector())
	if err != nil || len(pods.Items) != controllerRequest.DesiredState.Replicas {
		glog.Fatalf("FAILED: %#v", pods.Items)
	}
	glog.Infof("Replication controller produced:\n\n%#v\n\n", pods)
}
Example 14: waitForPodSuccess
// waitForPodSuccess returns true if the pod reached state success, or false if it reached failure or ran too long.
func waitForPodSuccess(c *client.Client, podName string, contName string) bool {
	for i := 0; i < 10; i++ {
		if i > 0 {
			time.Sleep(5 * time.Second)
		}
		pod, err := c.Pods(api.NamespaceDefault).Get(podName)
		if err != nil {
			glog.Warningf("Get pod failed: %v", err)
			continue
		}
		// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632.
		ci, ok := pod.Status.Info[contName]
		if !ok {
			glog.Infof("No Status.Info for container %s in pod %s yet", contName, podName)
		} else {
			if ci.State.Termination != nil {
				if ci.State.Termination.ExitCode == 0 {
					glog.Infof("Saw pod success")
					return true
				} else {
					glog.Infof("Saw pod failure: %+v", ci.State.Termination)
				}
				glog.Infof("Waiting for pod %q status to be success or failure", podName)
			} else {
				glog.Infof("Nil State.Termination for container %s in pod %s so far", contName, podName)
			}
		}
	}
	glog.Warningf("Gave up waiting for pod %q status to be success or failure", podName)
	return false
}
Example 15: CreateNewControllerFromCurrentController
func CreateNewControllerFromCurrentController(c *client.Client, namespace, oldName, newName, image, deploymentKey string) (*api.ReplicationController, error) {
	// Load the old RC into the "new" RC.
	newRc, err := c.ReplicationControllers(namespace).Get(oldName)
	if err != nil {
		return nil, err
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 {
		// TODO: support multi-container image update.
		return nil, goerrors.New("Image update is not supported for multi-container pods")
	}
	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, goerrors.New(fmt.Sprintf("Pod has no containers! (%v)", newRc))
	}
	newRc.Spec.Template.Spec.Containers[0].Image = image

	newHash, err := api.HashObject(newRc, c.Codec)
	if err != nil {
		return nil, err
	}

	if len(newName) == 0 {
		newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = newName

	newRc.Spec.Selector[deploymentKey] = newHash
	newRc.Spec.Template.Labels[deploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}