This page collects typical usage examples of the Golang type k8s.io/kubernetes/pkg/client/clientset_generated/clientset.Interface. If you are unsure what the Interface type is for or how to use it, the curated examples below should help.
A total of 15 code examples of the Interface type are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Golang code examples.
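Before the examples, it may help to see how a clientset.Interface is typically obtained in the first place. The following is a minimal sketch, assuming a client-go style kubeconfig loader; the exact import path of the config helpers varies with the Kubernetes release that vendors this generated clientset, so treat those details as assumptions rather than the canonical setup.
package main

import (
	"flag"

	"k8s.io/client-go/tools/clientcmd" // assumed location of the kubeconfig loader; varies by release
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// newClient is a hypothetical helper that builds a clientset.Interface
// from a kubeconfig file path.
func newClient(kubeconfig string) (clientset.Interface, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	return clientset.NewForConfig(config)
}

func main() {
	kubeconfig := flag.String("kubeconfig", "", "path to a kubeconfig file")
	flag.Parse()
	c, err := newClient(*kubeconfig)
	if err != nil {
		panic(err)
	}
	_ = c // pass c to helpers like the ones shown in the examples below
}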
Example 1: createOutOfDiskPod
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
	podClient := c.Core().Pods(ns)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name: "pause",
					Image: framework.GetPauseImageName(c),
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							// Request enough CPU to fit only two pods on a given node.
							v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
						},
					},
				},
			},
		},
	}
	_, err := podClient.Create(pod)
	framework.ExpectNoError(err)
}
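A hypothetical usage sketch (pod names and CPU amounts are illustrative, not from the original test): two pods with a large CPU request can be used to leave a node with too little CPU for a third pod.
// fillNodeCPU is a hypothetical test step that creates two pods which each
// request 600m of CPU, so that little room is left on the target node.
func fillNodeCPU(c clientset.Interface, ns string) {
	createOutOfDiskPod(c, ns, "cpu-hog-0", 600)
	createOutOfDiskPod(c, ns, "cpu-hog-1", 600)
}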
Example 2: StartPods
// StartPods is a simplified version of RunRC that does not create an RC but creates plain Pods.
// Optionally waits for the pods to start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c clientset.Interface, replicas int, namespace string, podNamePrefix string,
	pod v1.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
	// no pod to start
	if replicas < 1 {
		panic("StartPods: number of replicas must be non-zero")
	}
	startPodsID := string(uuid.NewUUID()) // So that we can label and find them
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
		pod.Spec.Containers[0].Name = podName
		_, err := c.Core().Pods(namespace).Create(&pod)
		if err != nil {
			return err
		}
	}
	logFunc("Waiting for running...")
	if waitForRunning {
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := WaitForPodsWithLabelRunning(c, namespace, label)
		if err != nil {
			return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
		}
	}
	return nil
}
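The pod argument acts as a template and is mutated per replica, so its Labels map must be non-nil and Spec.Containers must have at least one entry. A minimal, hypothetical call might look like the following sketch (image, prefix, and the use of framework.Logf are assumptions):
// startPausePods is a hypothetical wrapper that starts n pause pods from a
// minimal template and waits for them to be running.
func startPausePods(c clientset.Interface, ns string, n int) error {
	template := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{}, // must be non-nil: StartPods writes into it
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "pause", Image: "gcr.io/google_containers/pause-amd64:3.0"}, // assumed pause image
			},
		},
	}
	return StartPods(c, n, ns, "pause", template, true, framework.Logf)
}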
Example 3: createController
func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *v1.Pod) error {
	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name: controllerName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int) *int32 { x := int32(i); return &x }(podCount),
			Selector: map[string]string{"name": controllerName},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"name": controllerName},
				},
				Spec: podTemplate.Spec,
			},
		},
	}
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		// err is assigned (not redeclared) so the retry log and the terminal
		// error below report the most recent failure.
		if _, err = client.Core().ReplicationControllers(namespace).Create(rc); err == nil {
			return nil
		}
		glog.Errorf("Error while creating rc, maybe retry: %v", err)
	}
	return fmt.Errorf("Terminal error while creating rc, won't retry: %v", err)
}
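A hypothetical wrapper showing how the helper might be called with a minimal pause-pod template (controller name, image, and replica count are illustrative):
// runPauseController is a hypothetical example that runs `replicas` copies
// of a minimal pause container under a ReplicationController.
func runPauseController(client clientset.Interface, ns string, replicas int) error {
	template := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "pause", Image: "gcr.io/google_containers/pause-amd64:3.0"}, // assumed image
			},
		},
	}
	return createController(client, "pause-rc", ns, replicas, template)
}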
Example 4: New
func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}
	rc := &RouteController{
		routes: routes,
		kubeClient: kubeClient,
		clusterName: clusterName,
		clusterCIDR: clusterCIDR,
	}
	rc.nodeStore.Store, rc.nodeController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return rc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return rc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&v1.Node{},
		controller.NoResyncPeriodFunc(),
		cache.ResourceEventHandlerFuncs{},
	)
	return rc
}
Example 5: Query
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
	result, err := c.Core().RESTClient().Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService+":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		Do().
		Raw()
	if err != nil {
		return nil, err
	}
	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)
	if err != nil {
		return nil, err
	}
	return &response, nil
}
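A hypothetical call, just to show the shape of a query; the InfluxQL string is illustrative, and resp is assumed to follow the influxdb client's Response layout (a Results slice):
// showMeasurements is a hypothetical helper that lists the measurements
// stored in the monitoring InfluxDB database.
func showMeasurements(c clientset.Interface) error {
	resp, err := Query(c, "SHOW MEASUREMENTS")
	if err != nil {
		return err
	}
	if len(resp.Results) == 0 {
		return fmt.Errorf("empty response from InfluxDB")
	}
	framework.Logf("InfluxDB measurements: %+v", resp.Results[0])
	return nil
}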
Example 6: runKubernetesServiceTestContainer
// runKubernetesServiceTestContainer runs a test container that tries to contact the Kubernetes
// api-server from a pod, waits for it to flip to Ready, logs its output and deletes it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
	path := "test/images/clusterapi-tester/pod.yaml"
	p, err := podFromManifest(path)
	if err != nil {
		framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
		return
	}
	p.Namespace = ns
	if _, err := c.Core().Pods(ns).Create(p); err != nil {
		framework.Logf("Failed to create %v: %v", p.Name, err)
		return
	}
	defer func() {
		if err := c.Core().Pods(ns).Delete(p.Name, nil); err != nil {
			framework.Logf("Failed to delete pod %v: %v", p.Name, err)
		}
	}()
	timeout := 5 * time.Minute
	if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
		framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
		return
	}
	logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
	if err != nil {
		framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
	} else {
		framework.Logf("Output of clusterapi-tester:\n%v", logs)
	}
}
Example 7: NewMetricsGrabber
func NewMetricsGrabber(c clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) {
	registeredMaster := false
	masterName := ""
	nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
	if err != nil {
		return nil, err
	}
	if len(nodeList.Items) < 1 {
		glog.Warning("Can't find any Nodes in the API server to grab metrics from")
	}
	for _, node := range nodeList.Items {
		if system.IsMasterNode(node.Name) {
			registeredMaster = true
			masterName = node.Name
			break
		}
	}
	if !registeredMaster {
		scheduler = false
		controllers = false
		glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler and ControllerManager is disabled.")
	}
	return &MetricsGrabber{
		client: c,
		grabFromApiServer: apiServer,
		grabFromControllerManager: controllers,
		grabFromKubelets: kubelets,
		grabFromScheduler: scheduler,
		masterName: masterName,
		registeredMaster: registeredMaster,
	}, nil
}
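Because the constructor takes four positional booleans, a call reads more clearly with them named first; a hypothetical example:
// newMasterMetricsGrabber is a hypothetical helper that grabs only kubelet
// and API server metrics, leaving scheduler and controller-manager disabled.
func newMasterMetricsGrabber(c clientset.Interface) (*MetricsGrabber, error) {
	kubelets, scheduler, controllers, apiServer := true, false, false, true
	return NewMetricsGrabber(c, kubelets, scheduler, controllers, apiServer)
}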
Example 8: NewServiceAccountsController
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(saInformer informers.ServiceAccountInformer, nsInformer informers.NamespaceInformer, cl clientset.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client: cl,
		serviceAccountsToEnsure: options.ServiceAccounts,
		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"),
	}
	if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().RESTClient().GetRateLimiter())
	}
	saInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: e.serviceAccountDeleted,
	})
	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: e.namespaceAdded,
		UpdateFunc: e.namespaceUpdated,
	})
	e.saSynced = saInformer.Informer().HasSynced
	e.saLister = saInformer.Lister()
	e.nsSynced = nsInformer.Informer().HasSynced
	e.nsLister = nsInformer.Lister()
	e.syncHandler = e.syncNamespace
	return e
}
Example 9: readTransactions
// readTransactions reads the number of transactions from the k8petstore web server endpoint.
// For more details see the source of the k8petstore web server.
func readTransactions(c clientset.Interface, ns string) (error, int) {
	proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
	if errProxy != nil {
		return errProxy, -1
	}
	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
	defer cancel()
	body, err := proxyRequest.Namespace(ns).
		Context(ctx).
		Name("frontend").
		Suffix("llen").
		DoRaw()
	if err != nil {
		if ctx.Err() != nil {
			framework.Failf("Failed to read petstore transactions: %v", err)
		}
		return err, -1
	}
	totalTrans, err := strconv.Atoi(string(body))
	return err, totalTrans
}
Example 10: verifyRemainingDeploymentsAndReplicaSets
// verifyRemainingDeploymentsAndReplicaSets verifies that the numbers of remaining Deployments
// and ReplicaSets are deploymentNum and rsNum. It returns an error if the
// communication with the API server fails.
func verifyRemainingDeploymentsAndReplicaSets(
	f *framework.Framework,
	clientSet clientset.Interface,
	deployment *v1beta1.Deployment,
	deploymentNum, rsNum int,
) (bool, error) {
	var ret = true
	rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(v1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list rs: %v", err)
	}
	if len(rs.Items) != rsNum {
		ret = false
		By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
	}
	deployments, err := clientSet.Extensions().Deployments(f.Namespace.Name).List(v1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("Failed to list deployments: %v", err)
	}
	if len(deployments.Items) != deploymentNum {
		ret = false
		By(fmt.Sprintf("expected %d Deployments, got %d Deployments", deploymentNum, len(deployments.Items)))
	}
	return ret, nil
}
Example 11: Query
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
	defer cancel()
	result, err := c.Core().RESTClient().Get().
		Context(ctx). // bound the request by the timeout created above
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService+":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		Do().
		Raw()
	if err != nil {
		if ctx.Err() != nil {
			framework.Failf("Failed to query influx db: %v", err)
		}
		return nil, err
	}
	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)
	if err != nil {
		return nil, err
	}
	return &response, nil
}
Example 12: RemoveLabelOffNode
// RemoveLabelOffNode cleans up labels that were temporarily added to a node;
// it won't fail if a target label doesn't exist or has already been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []string) error {
	var node *v1.Node
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if node.Labels == nil {
			return nil
		}
		for _, labelKey := range labelKeys {
			if node.Labels == nil || len(node.Labels[labelKey]) == 0 {
				break
			}
			delete(node.Labels, labelKey)
		}
		_, err = c.Core().Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				return err
			} else {
				glog.V(2).Infof("Conflict when trying to remove labels %v from %v", labelKeys, nodeName)
			}
		} else {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	return err
}
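A hypothetical cleanup call, e.g. at the end of a test that labeled a node earlier (the label keys are illustrative):
// cleanupNodeLabels is a hypothetical deferred cleanup step that removes
// the labels a test previously added to the given node.
func cleanupNodeLabels(c clientset.Interface, nodeName string) {
	err := RemoveLabelOffNode(c, nodeName, []string{"e2e-test-group", "e2e-test-owner"})
	framework.ExpectNoError(err)
}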
Example 13: NewSync
// NewSync creates a Sync for the ConfigMap `name` in namespace `ns`.
func NewSync(client clientset.Interface, ns string, name string) Sync {
	sync := &kubeSync{
		ns: ns,
		name: name,
		client: client,
		channel: make(chan *Config),
	}
	listWatch := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector().String()
			return client.Core().ConfigMaps(ns).List(options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector().String()
			return client.Core().ConfigMaps(ns).Watch(options)
		},
	}
	store, controller := cache.NewInformer(
		listWatch,
		&v1.ConfigMap{},
		time.Duration(0),
		cache.ResourceEventHandlerFuncs{
			AddFunc: sync.onAdd,
			DeleteFunc: sync.onDelete,
			UpdateFunc: sync.onUpdate,
		})
	sync.store = store
	sync.controller = controller
	return sync
}
Example 14: buildAuth
func buildAuth(nodeName types.NodeName, client clientset.Interface, config componentconfig.KubeletConfiguration) (server.AuthInterface, error) {
	// Get clients, if provided
	var (
		tokenClient authenticationclient.TokenReviewInterface
		sarClient authorizationclient.SubjectAccessReviewInterface
	)
	if client != nil && !reflect.ValueOf(client).IsNil() {
		tokenClient = client.Authentication().TokenReviews()
		sarClient = client.Authorization().SubjectAccessReviews()
	}
	authenticator, err := buildAuthn(tokenClient, config.Authentication)
	if err != nil {
		return nil, err
	}
	attributes := server.NewNodeAuthorizerAttributesGetter(nodeName)
	authorizer, err := buildAuthz(sarClient, config.Authorization)
	if err != nil {
		return nil, err
	}
	return server.NewKubeletAuth(authenticator, attributes, authorizer), nil
}
Example 15: WaitForClusterSizeFunc
// WaitForClusterSizeFunc waits until the number of schedulable, ready nodes satisfies the given size function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			glog.Warningf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)
		// Filter out not-ready nodes.
		framework.FilterNodes(nodes, func(node v1.Node) bool {
			return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
		})
		numReady := len(nodes.Items)
		if numNodes == numReady && sizeFunc(numReady) {
			glog.Infof("Cluster has reached the desired size")
			return nil
		}
		glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
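A hypothetical wrapper showing the usual closure idiom for the size function (the minimum node count is illustrative):
// waitForAtLeastNodes is a hypothetical wrapper that waits until at least
// minSize schedulable, ready nodes are registered, e.g. after a scale-up.
func waitForAtLeastNodes(c clientset.Interface, minSize int, timeout time.Duration) error {
	return WaitForClusterSizeFunc(c, func(size int) bool { return size >= minSize }, timeout)
}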