本文整理汇总了Golang中k8s/io/kubernetes/pkg/client/unversioned.Client类的典型用法代码示例。如果您正苦于以下问题:Golang Client类的具体用法?Golang Client怎么用?Golang Client使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Client类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: addServiceAccount
// addServiceAccount ensures a ServiceAccount with the given name exists in the
// factory's default namespace. If the lookup fails, a new account labelled
// provider=fabric8.io is created. Returns Success or Failure plus any error
// from the create call.
func addServiceAccount(c *k8sclient.Client, f *cmdutil.Factory, name string) (Result, error) {
	ns, _, e := f.DefaultNamespace()
	if e != nil {
		util.Fatal("No default namespace")
		return Failure, e
	}
	accounts := c.ServiceAccounts(ns)
	// An existing account is left untouched.
	if _, err := accounts.Get(name); err == nil {
		return Success, nil
	}
	account := kapi.ServiceAccount{
		ObjectMeta: kapi.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				"provider": "fabric8.io",
			},
		},
	}
	if _, err := accounts.Create(&account); err != nil {
		return Failure, err
	}
	return Success, nil
}
示例2: createRunningPod
// createRunningPod creates a single-container pod with the given name, image
// and labels in namespace ns, then blocks until the pod reaches the Running
// phase. Intended to run as a goroutine: it signals wg on completion and
// recovers Ginkgo panics.
func createRunningPod(wg *sync.WaitGroup, c *client.Client, name, ns, image string, labels map[string]string) {
	defer GinkgoRecover()
	defer wg.Done()
	container := api.Container{
		Name:  name,
		Image: image,
	}
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{Kind: "Pod"},
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{container},
			DNSPolicy:  api.DNSDefault,
		},
	}
	_, err := c.Pods(ns).Create(pod)
	expectNoError(err)
	expectNoError(waitForPodRunningInNamespace(c, name, ns))
}
示例3: StartPods
// StartPods check for numPods in TestNS. If they exist, it no-ops, otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	// No-op when the desired count already exists (or propagate the list error).
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)
	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host
	// Early return instead of the original if/else keeps the happy path
	// left-aligned (idiomatic Go).
	rc, err := StartRC(controller, restClient)
	if err != nil {
		return err
	}
	// Delete the rc, otherwise when we restart master components for the next benchmark
	// the rc controller will race with the pods controller in the rc manager.
	return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
}
示例4: getMetrics
// getMetrics retrieves the raw payload served by the apiserver's /metrics
// endpoint and returns it as a string.
func getMetrics(c *client.Client) (string, error) {
	raw, err := c.Get().AbsPath("/metrics").DoRaw()
	if err != nil {
		return "", err
	}
	return string(raw), nil
}
示例5: runServiceAndRCForResourceConsumer
// runServiceAndRCForResourceConsumer creates a service named `name` in `ns`
// selecting the resource-consumer pods, then runs an RC with the requested
// replica count and CPU/memory requests+limits, and finally sleeps briefly to
// let endpoints propagate.
func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, replicas int, cpuLimitMillis, memLimitMb int64) {
	By(fmt.Sprintf("Running consuming RC %s with %v replicas", name, replicas))
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: util.NewIntOrStringFromInt(targetPort),
			}},
			Selector: map[string]string{
				"name": name,
			},
		},
	}
	_, err := c.Services(ns).Create(svc)
	expectNoError(err)
	memoryBytes := memLimitMb * 1024 * 1024 // MemLimit/MemRequest are in bytes
	rcConfig := RCConfig{
		Client:     c,
		Image:      resourceConsumerImage,
		Name:       name,
		Namespace:  ns,
		Timeout:    timeoutRC,
		Replicas:   replicas,
		CpuRequest: cpuLimitMillis,
		CpuLimit:   cpuLimitMillis,
		MemRequest: memoryBytes,
		MemLimit:   memoryBytes,
	}
	expectNoError(RunRC(rcConfig))
	// Make sure endpoints are propagated.
	// TODO(piosz): replace sleep with endpoints watch.
	time.Sleep(10 * time.Second)
}
示例6: testOne
// testOne attempts to create an Endpoints object in the given namespace with a
// single address of addrType (looked up in exampleAddresses) on TCP port 9999.
// When success is true a creation error fails the test; when success is false
// a successful creation fails the test. Returns whatever the API call returned.
func testOne(t *testing.T, client *kclient.Client, namespace, addrType string, success bool) *kapi.Endpoints {
	endpoints := &kapi.Endpoints{}
	endpoints.GenerateName = "test"
	subset := kapi.EndpointSubset{
		Addresses: []kapi.EndpointAddress{
			{IP: exampleAddresses[addrType]},
		},
		Ports: []kapi.EndpointPort{
			{Port: 9999, Protocol: kapi.ProtocolTCP},
		},
	}
	endpoints.Subsets = []kapi.EndpointSubset{subset}
	created, err := client.Endpoints(namespace).Create(endpoints)
	switch {
	case err != nil && success:
		t.Fatalf("unexpected error creating %s network endpoint: %v", addrType, err)
	case err == nil && !success:
		t.Fatalf("unexpected success creating %s network endpoint", addrType)
	}
	return created
}
示例7: CreateNewControllerFromCurrentController
// CreateNewControllerFromCurrentController loads the RC oldName from namespace,
// swaps its (single) container image for `image`, stamps the deploymentKey
// selector/label with a hash of the resulting RC, and returns the modified copy
// (named newName, or "<oldName>-<hash>" when newName is empty) without creating
// it on the server.
func CreateNewControllerFromCurrentController(c *client.Client, namespace, oldName, newName, image, deploymentKey string) (*api.ReplicationController, error) {
	// load the old RC into the "new" RC
	newRc, err := c.ReplicationControllers(namespace).Get(oldName)
	if err != nil {
		return nil, err
	}
	if len(newRc.Spec.Template.Spec.Containers) > 1 {
		// TODO: support multi-container image update.
		return nil, goerrors.New("Image update is not supported for multi-container pods")
	}
	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		// fmt.Errorf replaces the non-idiomatic goerrors.New(fmt.Sprintf(...)).
		return nil, fmt.Errorf("Pod has no containers! (%v)", newRc)
	}
	newRc.Spec.Template.Spec.Containers[0].Image = image
	newHash, err := api.HashObject(newRc, c.Codec)
	if err != nil {
		return nil, err
	}
	if len(newName) == 0 {
		newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = newName
	// The hash on selector and template labels ties pods to this RC revision.
	newRc.Spec.Selector[deploymentKey] = newHash
	newRc.Spec.Template.Labels[deploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
示例8: updateNodes
// updateNodes lists all kubernetes nodes and returns a map from
// "http://<hostIP>:<cPort>" to the node, plus the slice of those URLs.
// On a list error it logs and returns the (empty) initialized results.
func updateNodes(kubeClient *kube.Client, cPort int) (hostIPtoNodeMap map[string]kubeAPI.Node, nodeIPs []string) {
	hostIPtoNodeMap = make(map[string]kubeAPI.Node, 2)
	nodeIPs = make([]string, 0, 2)
	nodeList, apiErr := kubeClient.Nodes().List(kubeLabels.Everything(), kubeFields.Everything())
	if apiErr != nil {
		glog.Errorf("Failed to list kubernetes nodes. Error: %v\n", apiErr)
		return hostIPtoNodeMap, nodeIPs
	}
	for _, node := range nodeList.Items {
		var hostIP string
		for _, nodeAddress := range node.Status.Addresses {
			// NOTE(review): the original code had a `break` after the
			// InternalIP case, but in Go that only exits the switch (a no-op
			// here), so a later LegacyHostIP address still overwrites an
			// InternalIP. Removing it preserves behavior exactly; if the
			// intent was to prefer InternalIP, the address loop itself should
			// break — confirm before changing.
			switch nodeAddress.Type {
			case kubeAPI.NodeInternalIP:
				hostIP = nodeAddress.Address
			case kubeAPI.NodeLegacyHostIP:
				hostIP = nodeAddress.Address
			}
		}
		if hostIP != "" {
			hostIP = fmt.Sprintf("http://%v:%v", hostIP, cPort)
			nodeIPs = append(nodeIPs, hostIP)
			hostIPtoNodeMap[hostIP] = node
		}
	}
	return hostIPtoNodeMap, nodeIPs
}
示例9: updateNodeLabels
// updates labels of nodes given by nodeNames.
// In case a given label already exists, it overwrites it. If label to remove doesn't exist
// it silently ignores it.
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
func updateNodeLabels(c *client.Client, nodeNames sets.String, toAdd, toRemove map[string]string) {
	const maxRetries = 5
	for nodeName := range nodeNames {
		var node *api.Node
		var err error
		// Retry the get-modify-update cycle to ride out transient API errors
		// and update conflicts.
		for attempt := 0; attempt < maxRetries; attempt++ {
			node, err = c.Nodes().Get(nodeName)
			if err != nil {
				framework.Logf("Error getting node %s: %v", nodeName, err)
				continue
			}
			// Ranging over a nil map is a no-op, so no nil checks are needed.
			for k, v := range toAdd {
				node.ObjectMeta.Labels[k] = v
			}
			for k := range toRemove {
				delete(node.ObjectMeta.Labels, k)
			}
			_, err = c.Nodes().Update(node)
			if err == nil {
				break
			}
			framework.Logf("Error updating node %s: %v", nodeName, err)
		}
		Expect(err).NotTo(HaveOccurred())
	}
}
示例10: processResource
// processResource POSTs the raw resource payload b of the given kind into
// namespace ns, choosing the API path by kind: extensions/v1beta1 for
// Deployments, the OpenShift "oapi" group for OpenShift kinds (namespaced or
// cluster-scoped), the core v1 namespaces endpoint for Namespace, and the
// default namespaced core resource path otherwise. Returns an error unless
// the server answers 201 Created.
func processResource(c *k8sclient.Client, b []byte, ns string, kind string) error {
	util.Infof("Processing resource kind: %s in namespace %s\n", kind, ns)
	req := c.Post().Body(b)
	// A switch replaces the original if/else-if chain (idiomatic Go).
	switch kind {
	case "Deployment":
		req.AbsPath("apis", "extensions/v1beta1", "namespaces", ns, strings.ToLower(kind+"s"))
	case "BuildConfig", "DeploymentConfig", "Template", "PolicyBinding", "Role", "RoleBinding":
		req.AbsPath("oapi", "v1", "namespaces", ns, strings.ToLower(kind+"s"))
	case "OAuthClient", "Project", "ProjectRequest":
		req.AbsPath("oapi", "v1", strings.ToLower(kind+"s"))
	case "Namespace":
		req.AbsPath("api", "v1", "namespaces")
	default:
		req.Namespace(ns).Resource(strings.ToLower(kind + "s"))
	}
	res := req.Do()
	// The original nested a second res.Error() != nil test inside the first,
	// which could never be false; collapsed to a single check.
	if err := res.Error(); err != nil {
		util.Warnf("Failed to create %s: %v", kind, err)
		return err
	}
	var statusCode int
	res.StatusCode(&statusCode)
	if statusCode != http.StatusCreated {
		return fmt.Errorf("Failed to create %s: %d", kind, statusCode)
	}
	return nil
}
示例11: openService
// openService looks up serviceName in namespace ns and either prints the URL
// from its exposeURL annotation (printURL) or opens that URL in a browser.
// With retry set, it first waits (up to 40 attempts, 10s apart) for the
// service endpoint to be finalized and exits the process on timeout.
func openService(ns string, serviceName string, c *k8sclient.Client, printURL bool, retry bool) {
	if retry {
		waitErr := RetryAfter(40, func() error { return CheckService(ns, serviceName, c) }, 10*time.Second)
		if waitErr != nil {
			util.Errorf("Could not find finalized endpoint being pointed to by %s: %v", serviceName, waitErr)
			os.Exit(1)
		}
	}
	svcs, err := c.Services(ns).List(kubeApi.ListOptions{})
	if err != nil {
		util.Errorf("No services found %v\n", err)
	}
	for _, svc := range svcs.Items {
		if svc.Name != serviceName {
			continue
		}
		url := svc.ObjectMeta.Annotations[exposeURLAnnotation]
		if printURL {
			util.Successf("%s\n", url)
		} else {
			util.Successf("\nOpening URL %s\n", url)
			browser.OpenURL(url)
		}
		return
	}
	util.Errorf("No service %s in namespace %s\n", serviceName, ns)
}
示例12: loadTemplateData
// loadTemplateData returns the raw template payload plus its format ("json" or
// "yaml"). Against a plain Kubernetes master it reads the template out of the
// "catalog-<templateName>" ConfigMap, picking the first key containing ".json"
// (json) or ".yml"/".yaml" (yaml); against OpenShift it fetches the Template
// object and marshals it to JSON.
func loadTemplateData(ns string, templateName string, c *k8sclient.Client, oc *oclient.Client) ([]byte, string, error) {
	typeOfMaster := util.TypeOfMaster(c)
	if typeOfMaster == util.Kubernetes {
		catalogName := "catalog-" + templateName
		configMap, err := c.ConfigMaps(ns).Get(catalogName)
		if err != nil {
			return nil, "", err
		}
		// NOTE(review): LastIndex >= 0 is a substring match, not a suffix
		// match, and map iteration order is random — behavior kept as-is.
		for k, v := range configMap.Data {
			if strings.LastIndex(k, ".json") >= 0 {
				return []byte(v), "json", nil
			}
			if strings.LastIndex(k, ".yml") >= 0 || strings.LastIndex(k, ".yaml") >= 0 {
				return []byte(v), "yaml", nil
			}
		}
		return nil, "", fmt.Errorf("Could not find a key for the catalog %s which ends with `.json` or `.yml`", catalogName)
	}
	// OpenShift master: fetch the Template object directly.
	// (The original had an unreachable trailing return after this if/else,
	// flagged by go vet; dropping the else keeps the happy path flat.)
	template, err := oc.Templates(ns).Get(templateName)
	if err != nil {
		return nil, "", err
	}
	data, err := json.Marshal(template)
	return data, "json", err
}
示例13: addIngressInfraLabel
// addIngressInfraLabel ensures some schedulable node carries the
// externalIPLabel so an ingress controller can be pinned to it. If a node is
// already labelled its name is returned; otherwise the first schedulable node
// gets labelled (and updated server-side) and its name is returned. Returns ""
// when no node could be labelled, after warning the user.
func addIngressInfraLabel(c *k8sclient.Client, ns string) string {
	nodeClient := c.Nodes()
	nodes, err := nodeClient.List(api.ListOptions{})
	if err != nil {
		util.Errorf("\nUnable to find any nodes: %s\n", err)
	}
	labelled := false
	hasLabel, labelledNodeName := hasExistingLabel(nodes, externalIPLabel)
	if labelledNodeName != "" {
		return labelledNodeName
	}
	if !hasLabel {
		// Index the slice so we take the address of the stored element, not
		// of a loop copy.
		for i := range nodes.Items {
			node := &nodes.Items[i]
			if node.Spec.Unschedulable {
				continue
			}
			labelled = addLabelIfNotExist(&node.ObjectMeta, externalIPLabel, "true")
			if labelled {
				if _, err = nodeClient.Update(node); err != nil {
					printError("Failed to label node with ", err)
				}
				return node.Name
			}
		}
	}
	if !labelled && !hasLabel {
		util.Warnf("Unable to add label for ingress controller to run on a specific node, please add manually: kubectl label node [your node name] %s=true", externalIPLabel)
	}
	return ""
}
示例14: createMissingPVs
// createMissingPVs finds PersistentVolumeClaims stuck in Pending/Lost in
// namespace ns, creates backing PVs for them, then deletes and recreates each
// affected claim bound to the "<ns>-<claim>" volume with a 1Gi request.
func createMissingPVs(c *k8sclient.Client, ns string) {
	found, pvcs, pendingClaimNames := findPendingPVs(c, ns)
	if !found {
		return
	}
	sshCommand := ""
	createPV(c, ns, pendingClaimNames, sshCommand)
	for _, item := range pvcs.Items {
		status := item.Status.Phase
		if status != api.ClaimPending && status != api.ClaimLost {
			continue
		}
		name := item.ObjectMeta.Name
		if err := c.PersistentVolumeClaims(ns).Delete(name); err != nil {
			util.Infof("Error deleting PVC %s\n", name)
			continue
		}
		util.Infof("Recreating PVC %s\n", name)
		// The original discarded the Create error; surface it so a failed
		// recreation is not silent.
		_, err := c.PersistentVolumeClaims(ns).Create(&api.PersistentVolumeClaim{
			ObjectMeta: api.ObjectMeta{
				Name:      name,
				Namespace: ns,
			},
			Spec: api.PersistentVolumeClaimSpec{
				// Bind the claim to the PV created above by name convention.
				VolumeName:  ns + "-" + name,
				AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
				Resources: api.ResourceRequirements{
					Requests: api.ResourceList{
						api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
					},
				},
			},
		})
		if err != nil {
			util.Warnf("Failed to recreate PVC %s: %v\n", name, err)
		}
	}
}
示例15: podsOnNodes
// podsOnNodes returns true when all of the selected pods exist on a node.
func podsOnNodes(c *client.Client, podNamespace string, labelSelector labels.Selector) wait.ConditionFunc {
	// Wait until all pods are running on the node.
	return func() (bool, error) {
		options := api.ListOptions{LabelSelector: labelSelector}
		pods, err := c.Pods(podNamespace).List(options)
		if err != nil {
			// Transient list failures are not terminal; just report not-ready.
			glog.Infof("Unable to get pods to list: %v", err)
			return false, nil
		}
		// Index to avoid copying each pod struct.
		for i := range pods.Items {
			pod := &pods.Items[i]
			podString := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)
			glog.Infof("Check whether pod %q exists on node %q", podString, pod.Spec.NodeName)
			switch {
			case pod.Spec.NodeName == "":
				glog.Infof("Pod %q is not bound to a host yet", podString)
				return false, nil
			case pod.Status.Phase != api.PodRunning:
				glog.Infof("Pod %q is not running, status: %v", podString, pod.Status.Phase)
				return false, nil
			}
		}
		return true, nil
	}
}