This article collects typical usage examples of the Golang method k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5.Interface.Core. If you are wondering what Interface.Core does, or how to use it, the hand-picked code examples below should help. You can also read further about the containing type, k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5.Interface.
The 15 examples of Interface.Core shown below are sorted by popularity by default. Please upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
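Every example below receives a ready-built clientset.Interface. As context, here is a minimal sketch of how such a client can be constructed — this assumes a vendored Kubernetes 1.5 source tree, and the kubeconfig path is a placeholder:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
)

func main() {
    // Build a rest config from a kubeconfig file (path is a placeholder).
    config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        panic(err)
    }
    // NewForConfig returns a *Clientset, which satisfies clientset.Interface.
    c, err := clientset.NewForConfig(config)
    if err != nil {
        panic(err)
    }
    // Core() exposes the core ("v1") API group: Pods, Nodes, Services, ConfigMaps, and so on.
    pods, err := c.Core().Pods("kube-system").List(v1.ListOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Printf("found %d pods in kube-system\n", len(pods.Items))
}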
Example 1: getMetrics
// getMetrics retrieves the raw text of the apiserver's /metrics endpoint.
func getMetrics(c clientset.Interface) (string, error) {
    body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
    if err != nil {
        return "", err
    }
    return string(body), nil
}
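A hypothetical call site for the helper above, where c is any clientset.Interface and framework.Logf is the e2e framework logger used throughout these examples:

metrics, err := getMetrics(c)
if err != nil {
    framework.Logf("failed to scrape /metrics: %v", err)
} else {
    framework.Logf("scraped %d bytes of metrics text", len(metrics))
}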
Example 2: Query
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
    result, err := c.Core().RESTClient().Get().
        Prefix("proxy").
        Namespace("kube-system").
        Resource("services").
        Name(influxdbService + ":api").
        Suffix("query").
        Param("q", query).
        Param("db", influxdbDatabaseName).
        Param("epoch", "s").
        Do().
        Raw()
    if err != nil {
        return nil, err
    }
    var response influxdb.Response
    dec := json.NewDecoder(bytes.NewReader(result))
    dec.UseNumber()
    err = dec.Decode(&response)
    if err != nil {
        return nil, err
    }
    return &response, nil
}
Example 3: checkMirrorPodDisappear
// checkMirrorPodDisappear returns nil once the mirror pod can no longer be found.
func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
    _, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{})
    if errors.IsNotFound(err) {
        return nil
    }
    return goerrors.New("pod did not disappear")
}
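A hedged sketch of how a check like this is typically driven: poll it until it reports success or a deadline passes, here via wait.Poll from k8s.io/kubernetes/pkg/util/wait (the interval and timeout values are illustrative):

err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
    return checkMirrorPodDisappear(cl, name, namespace) == nil, nil
})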
Example 4: NewResourceQuotaEvaluator
// NewResourceQuotaEvaluator returns an evaluator that can evaluate resource quotas
func NewResourceQuotaEvaluator(kubeClient clientset.Interface) quota.Evaluator {
    allResources := []api.ResourceName{api.ResourceQuotas}
    return &generic.GenericEvaluator{
        Name:              "Evaluator.ResourceQuota",
        InternalGroupKind: api.Kind("ResourceQuota"),
        InternalOperationResources: map[admission.Operation][]api.ResourceName{
            admission.Create: allResources,
        },
        MatchedResourceNames: allResources,
        MatchesScopeFunc:     generic.MatchesNoScopeFunc,
        ConstraintsFunc:      generic.ObjectCountConstraintsFunc(api.ResourceQuotas),
        UsageFunc:            generic.ObjectCountUsageFunc(api.ResourceQuotas),
        ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
            itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options)
            if err != nil {
                return nil, err
            }
            results := make([]runtime.Object, 0, len(itemList.Items))
            for i := range itemList.Items {
                results = append(results, &itemList.Items[i])
            }
            return results, nil
        },
    }
}
Example 5: createOutOfDiskPod
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
    podClient := c.Core().Pods(ns)
    pod := &v1.Pod{
        ObjectMeta: v1.ObjectMeta{
            Name: name,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:  "pause",
                    Image: framework.GetPauseImageName(c),
                    Resources: v1.ResourceRequirements{
                        Requests: v1.ResourceList{
                            // Request enough CPU to fit only two pods on a given node.
                            v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
                        },
                    },
                },
            },
        },
    }
    _, err := podClient.Create(pod)
    framework.ExpectNoError(err)
}
Example 6: updateNodeLabels
// updateNodeLabels updates the labels of the nodes given by nodeNames.
// If a label to add already exists, it is overwritten; if a label to remove
// doesn't exist, it is silently ignored.
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
    const maxRetries = 5
    for nodeName := range nodeNames {
        var node *v1.Node
        var err error
        for i := 0; i < maxRetries; i++ {
            node, err = c.Core().Nodes().Get(nodeName)
            if err != nil {
                framework.Logf("Error getting node %s: %v", nodeName, err)
                continue
            }
            if toAdd != nil {
                for k, v := range toAdd {
                    node.ObjectMeta.Labels[k] = v
                }
            }
            if toRemove != nil {
                for k := range toRemove {
                    delete(node.ObjectMeta.Labels, k)
                }
            }
            _, err = c.Core().Nodes().Update(node)
            if err != nil {
                framework.Logf("Error updating node %s: %v", nodeName, err)
            } else {
                break
            }
        }
        Expect(err).NotTo(HaveOccurred())
    }
}
Example 7: WaitForClusterSizeFunc
// WaitForClusterSizeFunc waits until the number of schedulable, ready nodes satisfies the given size function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
    for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
        nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
            "spec.unschedulable": "false",
        }.AsSelector().String()})
        if err != nil {
            glog.Warningf("Failed to list nodes: %v", err)
            continue
        }
        numNodes := len(nodes.Items)
        // Filter out not-ready nodes.
        framework.FilterNodes(nodes, func(node v1.Node) bool {
            return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
        })
        numReady := len(nodes.Items)
        if numNodes == numReady && sizeFunc(numReady) {
            glog.Infof("Cluster has reached the desired size")
            return nil
        }
        glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
    }
    return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
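For instance, a caller that needs at least three schedulable, ready nodes could pass a size predicate like this (the threshold and timeout are illustrative):

err := WaitForClusterSizeFunc(c, func(size int) bool { return size >= 3 }, 10*time.Minute)
if err != nil {
    framework.Failf("cluster never reached the desired size: %v", err)
}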
Example 8: NewServiceAccountsController
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(saInformer informers.ServiceAccountInformer, nsInformer informers.NamespaceInformer, cl clientset.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
    e := &ServiceAccountsController{
        client:                  cl,
        serviceAccountsToEnsure: options.ServiceAccounts,
        queue:                   workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"),
    }
    if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil {
        metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().RESTClient().GetRateLimiter())
    }
    saInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        DeleteFunc: e.serviceAccountDeleted,
    })
    nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc:    e.namespaceAdded,
        UpdateFunc: e.namespaceUpdated,
    })
    e.saSynced = saInformer.Informer().HasSynced
    e.saLister = saInformer.Lister()
    e.nsSynced = nsInformer.Informer().HasSynced
    e.nsLister = nsInformer.Lister()
    e.syncHandler = e.syncNamespace
    return e
}
Example 9: RemoveLabelOffNode
// RemoveLabelOffNode cleans up labels that were temporarily added to a node;
// it does not fail if a target label has already been removed or never existed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []string) error {
    var node *v1.Node
    var err error
    for attempt := 0; attempt < retries; attempt++ {
        node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
        if err != nil {
            return err
        }
        if node.Labels == nil {
            return nil
        }
        for _, labelKey := range labelKeys {
            if node.Labels == nil || len(node.Labels[labelKey]) == 0 {
                break
            }
            delete(node.Labels, labelKey)
        }
        _, err = c.Core().Nodes().Update(node)
        if err != nil {
            if !apierrs.IsConflict(err) {
                return err
            }
            glog.V(2).Infof("Conflict when trying to remove labels %v from %v", labelKeys, nodeName)
        } else {
            break
        }
        time.Sleep(100 * time.Millisecond)
    }
    return err
}
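A hypothetical cleanup call, paired with whatever added the labels earlier in a test (the label key is invented for illustration):

if err := RemoveLabelOffNode(c, nodeName, []string{"example.com/scheduling-test"}); err != nil {
    framework.Logf("failed to clean up test labels on %s: %v", nodeName, err)
}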
Example 10: StartPods
// StartPods is a simplified version of RunRC: it creates plain pods rather than
// a replication controller, and optionally waits for them to start running
// (if waitForRunning == true). The number of replicas must be non-zero.
func StartPods(c clientset.Interface, replicas int, namespace string, podNamePrefix string,
    pod v1.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
    if replicas < 1 {
        panic("StartPods: number of replicas must be non-zero")
    }
    startPodsID := string(uuid.NewUUID()) // So that we can label and find them
    for i := 0; i < replicas; i++ {
        podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
        pod.ObjectMeta.Name = podName
        pod.ObjectMeta.Labels["name"] = podName
        pod.ObjectMeta.Labels["startPodsID"] = startPodsID
        pod.Spec.Containers[0].Name = podName
        _, err := c.Core().Pods(namespace).Create(&pod)
        if err != nil {
            return err
        }
    }
    if waitForRunning {
        logFunc("Waiting for running...")
        label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
        err := WaitForPodsWithLabelRunning(c, namespace, label)
        if err != nil {
            return fmt.Errorf("error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
        }
    }
    return nil
}
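A hedged example of invoking StartPods with a minimal pause-pod template. The counts and prefix are illustrative; note that the template must carry a non-nil Labels map and at least one container, since StartPods mutates both:

podTemplate := v1.Pod{
    ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}},
    Spec: v1.PodSpec{
        Containers: []v1.Container{{Name: "pause", Image: framework.GetPauseImageName(c)}},
    },
}
err := StartPods(c, 5, ns, "load-test", podTemplate, true, framework.Logf)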
Example 11: createController
func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *v1.Pod) error {
    rc := &v1.ReplicationController{
        ObjectMeta: v1.ObjectMeta{
            Name: controllerName,
        },
        Spec: v1.ReplicationControllerSpec{
            Replicas: func(i int) *int32 { x := int32(i); return &x }(podCount),
            Selector: map[string]string{"name": controllerName},
            Template: &v1.PodTemplateSpec{
                ObjectMeta: v1.ObjectMeta{
                    Labels: map[string]string{"name": controllerName},
                },
                Spec: podTemplate.Spec,
            },
        },
    }
    var err error
    for attempt := 0; attempt < retries; attempt++ {
        // Assign to the outer err (rather than shadowing it with :=) so that the
        // terminal error below reports the last failure instead of always being nil.
        if _, err = client.Core().ReplicationControllers(namespace).Create(rc); err == nil {
            return nil
        }
        glog.Errorf("Error while creating rc, maybe retry: %v", err)
    }
    return fmt.Errorf("terminal error while creating rc, won't retry: %v", err)
}
Example 12: LabelPodsWithHash
// LabelPodsWithHash labels all pods in the given podList with the new hash label.
// The returned bool value can be used to tell if all pods are actually labeled.
func LabelPodsWithHash(podList *v1.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
    allPodsLabeled := true
    for _, pod := range podList.Items {
        // Only label pods that don't already carry the new hash.
        if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
            if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod,
                func(podToUpdate *v1.Pod) error {
                    // Precondition: the pod doesn't contain the new hash in its label.
                    if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
                        return errors.ErrPreconditionViolated
                    }
                    podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
                    return nil
                }); err != nil {
                return false, fmt.Errorf("error in adding template hash label %s to pod %+v: %s", hash, pod, err)
            } else if podUpdated {
                glog.V(4).Infof("Labeled %s %s/%s of %s %s/%s with hash %s.", pod.Kind, pod.Namespace, pod.Name, rs.Kind, rs.Namespace, rs.Name, hash)
            } else {
                // The pod wasn't updated but no error was returned either, which
                // means we hit a "pod not found" or "precondition violated" case,
                // so we can't claim that all pods are labeled.
                allPodsLabeled = false
            }
        }
    }
    return allPodsLabeled, nil
}
Example 13: runKubernetesServiceTestContainer
// runKubernetesServiceTestContainer runs a test container that tries to contact the
// Kubernetes apiserver from a pod, waits for it to flip to Ready, logs its output,
// and deletes it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
    path := "test/images/clusterapi-tester/pod.yaml"
    p, err := podFromManifest(path)
    if err != nil {
        framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
        return
    }
    p.Namespace = ns
    if _, err := c.Core().Pods(ns).Create(p); err != nil {
        framework.Logf("Failed to create %v: %v", p.Name, err)
        return
    }
    defer func() {
        if err := c.Core().Pods(ns).Delete(p.Name, nil); err != nil {
            framework.Logf("Failed to delete pod %v: %v", p.Name, err)
        }
    }()
    timeout := 5 * time.Minute
    if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
        framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
        return
    }
    logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
    if err != nil {
        framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
    } else {
        framework.Logf("Output of clusterapi-tester:\n%v", logs)
    }
}
Example 14: deleteDNSScalingConfigMap
// deleteDNSScalingConfigMap deletes the DNS autoscaler's ConfigMap from the kube-system namespace.
func deleteDNSScalingConfigMap(c clientset.Interface) error {
    if err := c.Core().ConfigMaps(api.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
        return err
    }
    framework.Logf("DNS autoscaling ConfigMap deleted.")
    return nil
}
Example 15: PatchNodeStatus
// PatchNodeStatus patches the status of the given node.
func PatchNodeStatus(c clientset.Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, error) {
    oldData, err := json.Marshal(oldNode)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
    }
    // Reset the spec so that the generated patch only covers Status (or ObjectMeta).
    // ObjectMeta is deliberately not reset, because:
    // 1. this aligns with Nodes().UpdateStatus(), and
    // 2. some components use this call to update node annotations.
    newNode.Spec = oldNode.Spec
    newData, err := json.Marshal(newNode)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNode, nodeName, err)
    }
    patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
    if err != nil {
        return nil, fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
    }
    updatedNode, err := c.Core().Nodes().Patch(string(nodeName), api.StrategicMergePatchType, patchBytes, "status")
    if err != nil {
        return nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err)
    }
    return updatedNode, nil
}
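A hypothetical call site: fetch the node, change only the status on a copy, and patch. The Get form with metav1.GetOptions mirrors the other examples on this page; the node name and phase change are purely illustrative:

node, err := c.Core().Nodes().Get("node-1", metav1.GetOptions{})
if err != nil {
    framework.Logf("failed to get node: %v", err)
    return
}
updated := *node // a shallow copy is enough here: only the Status field is changed
updated.Status.Phase = v1.NodeRunning
patched, err := PatchNodeStatus(c, types.NodeName(node.Name), node, &updated)
if err != nil {
    framework.Logf("failed to patch node status: %v", err)
    return
}
framework.Logf("patched status of node %s", patched.Name)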