This article collects typical usage examples of the Golang Interface type from k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5. If you have been wondering what the Interface type is for, how to use it, or what real usage looks like, the curated examples here should help.
Fifteen code examples of the Interface type are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
Example 1: createOutOfDiskPod
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
    podClient := c.Core().Pods(ns)
    pod := &v1.Pod{
        ObjectMeta: v1.ObjectMeta{
            Name: name,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:  "pause",
                    Image: framework.GetPauseImageName(c),
                    Resources: v1.ResourceRequirements{
                        Requests: v1.ResourceList{
                            // Request enough CPU to fit only two pods on a given node.
                            v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
                        },
                    },
                },
            },
        },
    }
    _, err := podClient.Create(pod)
    framework.ExpectNoError(err)
}
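A minimal call-site sketch, assuming a test body with an e2e framework.Framework value f in scope; the pod names and the 200-milliCPU request are illustrative values, not taken from the original out-of-disk test.
// Hypothetical usage: create two pods that each request 200m of CPU on the target node.
milliCPU := int64(200)
for i := 0; i < 2; i++ {
    name := fmt.Sprintf("cpu-filler-%d", i)
    createOutOfDiskPod(f.ClientSet, f.Namespace.Name, name, milliCPU)
}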
Example 2: LabelPodsWithHash
// LabelPodsWithHash labels all pods in the given podList with the new hash label.
// The returned bool value can be used to tell if all pods are actually labeled.
func LabelPodsWithHash(podList *v1.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
    allPodsLabeled := true
    for _, pod := range podList.Items {
        // Only label pods that don't already carry the new hash.
        if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
            if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod,
                func(podToUpdate *v1.Pod) error {
                    // Precondition: the pod doesn't contain the new hash in its labels.
                    if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
                        return errors.ErrPreconditionViolated
                    }
                    podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
                    return nil
                }); err != nil {
                return false, fmt.Errorf("error in adding template hash label %s to pod %+v: %s", hash, pod, err)
            } else if podUpdated {
                glog.V(4).Infof("Labeled %s %s/%s of %s %s/%s with hash %s.", pod.Kind, pod.Namespace, pod.Name, rs.Kind, rs.Namespace, rs.Name, hash)
            } else {
                // If the pod was not updated but the update call returned no error, we hit a
                // "pod not found" or "precondition violated" error, so we cannot claim that
                // all pods are labeled.
                allPodsLabeled = false
            }
        }
    }
    return allPodsLabeled, nil
}
Example 3: checkMirrorPodDisappear
func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
    _, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{})
    if errors.IsNotFound(err) {
        return nil
    }
    return goerrors.New("pod did not disappear")
}
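A sketch of how such a check is typically driven from a polling loop; the interval and timeout are assumptions, wait refers to Kubernetes' util/wait package, and cl, mirrorPodName, and ns are assumed to be in scope.
// Hypothetical polling loop: retry until the mirror pod is gone or the timeout expires.
err := wait.PollImmediate(time.Second, 2*time.Minute, func() (bool, error) {
    return checkMirrorPodDisappear(cl, mirrorPodName, ns) == nil, nil
})
Expect(err).NotTo(HaveOccurred())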
Example 4: buildAuth
func buildAuth(nodeName types.NodeName, client clientset.Interface, config componentconfig.KubeletConfiguration) (server.AuthInterface, error) {
    // Get clients, if provided
    var (
        tokenClient authenticationclient.TokenReviewInterface
        sarClient   authorizationclient.SubjectAccessReviewInterface
    )
    if client != nil && !reflect.ValueOf(client).IsNil() {
        tokenClient = client.Authentication().TokenReviews()
        sarClient = client.Authorization().SubjectAccessReviews()
    }
    authenticator, err := buildAuthn(tokenClient, config.Authentication)
    if err != nil {
        return nil, err
    }
    attributes := server.NewNodeAuthorizerAttributesGetter(nodeName)
    authorizer, err := buildAuthz(sarClient, config.Authorization)
    if err != nil {
        return nil, err
    }
    return server.NewKubeletAuth(authenticator, attributes, authorizer), nil
}
Example 5: RemoveLabelOffNode
// RemoveLabelOffNode cleans up labels that were temporarily added to a node.
// It does not fail if a target label does not exist or has already been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []string) error {
    var node *v1.Node
    var err error
    for attempt := 0; attempt < retries; attempt++ {
        node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
        if err != nil {
            return err
        }
        if node.Labels == nil {
            return nil
        }
        for _, labelKey := range labelKeys {
            if node.Labels == nil || len(node.Labels[labelKey]) == 0 {
                break
            }
            delete(node.Labels, labelKey)
        }
        _, err = c.Core().Nodes().Update(node)
        if err != nil {
            if !apierrs.IsConflict(err) {
                return err
            } else {
                glog.V(2).Infof("Conflict when trying to remove labels %v from %v", labelKeys, nodeName)
            }
        } else {
            break
        }
        time.Sleep(100 * time.Millisecond)
    }
    return err
}
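A one-line cleanup sketch; the label key is illustrative and assumes it was added to the node earlier in the test.
// Hypothetical cleanup of a temporary test label.
if err := RemoveLabelOffNode(c, nodeName, []string{"example.com/temp-label"}); err != nil {
    glog.Errorf("failed to remove temporary label from %s: %v", nodeName, err)
}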
Example 6: getMetrics
// Retrieves metrics information.
func getMetrics(c clientset.Interface) (string, error) {
    body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
    if err != nil {
        return "", err
    }
    return string(body), nil
}
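A short usage sketch, assuming the e2e framework helpers already shown in the other examples; parsing of the Prometheus text format is omitted.
// Hypothetical usage: fetch the raw /metrics payload and log its size.
metricsText, err := getMetrics(c)
framework.ExpectNoError(err)
framework.Logf("fetched %d bytes of metrics data", len(metricsText))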
Example 7: updateNodeLabels
// updateNodeLabels updates the labels of the nodes named in nodeNames.
// If a label to add already exists, it is overwritten; if a label to remove
// does not exist, it is silently ignored.
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
    const maxRetries = 5
    for nodeName := range nodeNames {
        var node *v1.Node
        var err error
        for i := 0; i < maxRetries; i++ {
            node, err = c.Core().Nodes().Get(nodeName)
            if err != nil {
                framework.Logf("Error getting node %s: %v", nodeName, err)
                continue
            }
            if toAdd != nil {
                for k, v := range toAdd {
                    node.ObjectMeta.Labels[k] = v
                }
            }
            if toRemove != nil {
                for k := range toRemove {
                    delete(node.ObjectMeta.Labels, k)
                }
            }
            _, err = c.Core().Nodes().Update(node)
            if err != nil {
                framework.Logf("Error updating node %s: %v", nodeName, err)
            } else {
                break
            }
        }
        Expect(err).NotTo(HaveOccurred())
    }
}
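A usage sketch; the node names and label key/value are illustrative. Only the keys of toRemove are consulted, so an empty value works for the removal call.
// Hypothetical usage: label two nodes for the duration of a test, then strip the label.
nodeNames := sets.NewString("node-1", "node-2")
updateNodeLabels(c, nodeNames, map[string]string{"test-group": "isolated"}, nil)
defer updateNodeLabels(c, nodeNames, nil, map[string]string{"test-group": ""})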
Example 8: fetchDNSScalingConfigMap
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
    cm, err := c.Core().ConfigMaps(api.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
    if err != nil {
        return nil, err
    }
    return cm, nil
}
Example 9: NewSync
// NewSync returns a Sync for the ConfigMap named `name` in namespace `ns`.
func NewSync(client clientset.Interface, ns string, name string) Sync {
    sync := &kubeSync{
        ns:      ns,
        name:    name,
        client:  client,
        channel: make(chan *Config),
    }
    listWatch := &cache.ListWatch{
        ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
            options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector().String()
            return client.Core().ConfigMaps(ns).List(options)
        },
        WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
            options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector().String()
            return client.Core().ConfigMaps(ns).Watch(options)
        },
    }
    store, controller := cache.NewInformer(
        listWatch,
        &v1.ConfigMap{},
        time.Duration(0),
        cache.ResourceEventHandlerFuncs{
            AddFunc:    sync.onAdd,
            DeleteFunc: sync.onDelete,
            UpdateFunc: sync.onUpdate,
        })
    sync.store = store
    sync.controller = controller
    return sync
}
Example 10: waitOnPVandPVC
// waitOnPVandPVC waits for the pv and pvc to bind to each other.
func waitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
    // Wait for the newly created PVC to bind to the PV.
    framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
    err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
    Expect(err).NotTo(HaveOccurred())
    // Wait for PersistentVolume.Status.Phase to be Bound, which it should be
    // since the PVC is already bound.
    err = framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
    Expect(err).NotTo(HaveOccurred())
    // Re-get the pv object.
    pv, err = c.Core().PersistentVolumes().Get(pv.Name)
    Expect(err).NotTo(HaveOccurred())
    // Re-get the pvc object.
    pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
    Expect(err).NotTo(HaveOccurred())
    // The pv and pvc are both bound, but to each other?
    // Check that the PersistentVolume.ClaimRef matches the PVC.
    Expect(pv.Spec.ClaimRef).NotTo(BeNil())
    Expect(pv.Spec.ClaimRef.Name).To(Equal(pvc.Name))
    Expect(pvc.Spec.VolumeName).To(Equal(pv.Name))
    Expect(pv.Spec.ClaimRef.UID).To(Equal(pvc.UID))
}
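A call-site sketch; createTestPV and createTestPVC are hypothetical helper names standing in for whatever the surrounding test uses to create the objects, and are not part of the original file.
// Hypothetical usage: create a matching PV/PVC pair, then wait for them to bind.
pv := createTestPV(c)       // hypothetical helper returning *v1.PersistentVolume
pvc := createTestPVC(c, ns) // hypothetical helper returning *v1.PersistentVolumeClaim
waitOnPVandPVC(c, ns, pv, pvc)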
Example 11: deleteDNSScalingConfigMap
func deleteDNSScalingConfigMap(c clientset.Interface) error {
    if err := c.Core().ConfigMaps(api.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
        return err
    }
    framework.Logf("DNS autoscaling ConfigMap deleted.")
    return nil
}
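A sketch that chains Examples 8 and 11: read the autoscaler ConfigMap, log its parameters, then remove it. Only functions already shown above are used; the logging helpers assume the e2e framework.
// Hypothetical sequence from a DNS autoscaling test.
cm, err := fetchDNSScalingConfigMap(c)
framework.ExpectNoError(err)
framework.Logf("current DNS autoscaler parameters: %v", cm.Data)
framework.ExpectNoError(deleteDNSScalingConfigMap(c))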
Example 12: createController
func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *v1.Pod) error {
    rc := &v1.ReplicationController{
        ObjectMeta: v1.ObjectMeta{
            Name: controllerName,
        },
        Spec: v1.ReplicationControllerSpec{
            Replicas: func(i int) *int32 { x := int32(i); return &x }(podCount),
            Selector: map[string]string{"name": controllerName},
            Template: &v1.PodTemplateSpec{
                ObjectMeta: v1.ObjectMeta{
                    Labels: map[string]string{"name": controllerName},
                },
                Spec: podTemplate.Spec,
            },
        },
    }
    var err error
    for attempt := 0; attempt < retries; attempt++ {
        // Assign to the outer err so the last failure is logged and returned.
        if _, err = client.Core().ReplicationControllers(namespace).Create(rc); err == nil {
            return nil
        }
        glog.Errorf("Error while creating rc, maybe retry: %v", err)
    }
    return fmt.Errorf("terminal error while creating rc, won't retry: %v", err)
}
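A call-site sketch, assuming a pause-container pod template similar to Example 1; the controller name and replica count are illustrative.
// Hypothetical usage: run 30 pause pods under a single ReplicationController.
podTemplate := &v1.Pod{
    ObjectMeta: v1.ObjectMeta{Name: "pause"},
    Spec: v1.PodSpec{
        Containers: []v1.Container{
            {Name: "pause", Image: framework.GetPauseImageName(client)},
        },
    },
}
if err := createController(client, "load-test-rc", namespace, 30, podTemplate); err != nil {
    glog.Fatalf("creating ReplicationController failed: %v", err)
}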
Example 13: Query
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
    result, err := c.Core().RESTClient().Get().
        Prefix("proxy").
        Namespace("kube-system").
        Resource("services").
        Name(influxdbService + ":api").
        Suffix("query").
        Param("q", query).
        Param("db", influxdbDatabaseName).
        Param("epoch", "s").
        Do().
        Raw()
    if err != nil {
        return nil, err
    }
    var response influxdb.Response
    dec := json.NewDecoder(bytes.NewReader(result))
    dec.UseNumber()
    err = dec.Decode(&response)
    if err != nil {
        return nil, err
    }
    return &response, nil
}
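A usage sketch; the query string is illustrative, and the decoded response is only logged rather than inspected field by field.
// Hypothetical usage: list the measurements stored in the monitoring database.
resp, err := Query(c, "SHOW MEASUREMENTS")
framework.ExpectNoError(err)
framework.Logf("InfluxDB response: %+v", resp)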
Example 14: NewResourceQuotaEvaluator
// NewResourceQuotaEvaluator returns an evaluator that can evaluate resource quotas
func NewResourceQuotaEvaluator(kubeClient clientset.Interface) quota.Evaluator {
    allResources := []api.ResourceName{api.ResourceQuotas}
    return &generic.GenericEvaluator{
        Name:              "Evaluator.ResourceQuota",
        InternalGroupKind: api.Kind("ResourceQuota"),
        InternalOperationResources: map[admission.Operation][]api.ResourceName{
            admission.Create: allResources,
        },
        MatchedResourceNames: allResources,
        MatchesScopeFunc:     generic.MatchesNoScopeFunc,
        ConstraintsFunc:      generic.ObjectCountConstraintsFunc(api.ResourceQuotas),
        UsageFunc:            generic.ObjectCountUsageFunc(api.ResourceQuotas),
        ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
            itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options)
            if err != nil {
                return nil, err
            }
            results := make([]runtime.Object, 0, len(itemList.Items))
            for i := range itemList.Items {
                results = append(results, &itemList.Items[i])
            }
            return results, nil
        },
    }
}
Example 15: NewPodEvaluator
// NewPodEvaluator returns an evaluator that can evaluate pods.
// If the specified shared informer factory is not nil, the evaluator may use it to support listing functions.
func NewPodEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator {
    computeResources := []api.ResourceName{
        api.ResourceCPU,
        api.ResourceMemory,
        api.ResourceRequestsCPU,
        api.ResourceRequestsMemory,
        api.ResourceLimitsCPU,
        api.ResourceLimitsMemory,
    }
    allResources := append(computeResources, api.ResourcePods)
    listFuncByNamespace := listPodsByNamespaceFuncUsingClient(kubeClient)
    if f != nil {
        listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, schema.GroupResource{Resource: "pods"})
    }
    return &generic.GenericEvaluator{
        Name:              "Evaluator.Pod",
        InternalGroupKind: api.Kind("Pod"),
        InternalOperationResources: map[admission.Operation][]api.ResourceName{
            admission.Create: allResources,
            // TODO: the quota system can only charge for deltas on compute resources when pods support updates.
            // admission.Update: computeResources,
        },
        GetFuncByNamespace: func(namespace, name string) (runtime.Object, error) {
            return kubeClient.Core().Pods(namespace).Get(name)
        },
        ConstraintsFunc:      PodConstraintsFunc,
        MatchedResourceNames: allResources,
        MatchesScopeFunc:     PodMatchesScopeFunc,
        UsageFunc:            PodUsageFunc,
        ListFuncByNamespace:  listFuncByNamespace,
    }
}