This page collects typical usage examples of the Interface type from the Golang package k8s.io/kubernetes/pkg/client/unversioned. If you are wondering what Interface is for, or how to use it in practice, the curated examples below should help.
The sections that follow show 15 code examples of the Interface type, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
Example 1: NewDeploymentConfigController
// NewDeploymentConfigController creates a new DeploymentConfigController.
func NewDeploymentConfigController(dcInformer, rcInformer, podInformer framework.SharedIndexInformer, oc osclient.Interface, kc kclient.Interface, codec runtime.Codec) *DeploymentConfigController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(kc.Events(""))
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})
    c := &DeploymentConfigController{
        dn:       oc,
        rn:       kc,
        queue:    workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        recorder: recorder,
        codec:    codec,
    }
    c.dcStore.Indexer = dcInformer.GetIndexer()
    dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc:    c.addDeploymentConfig,
        UpdateFunc: c.updateDeploymentConfig,
        DeleteFunc: c.deleteDeploymentConfig,
    })
    c.rcStore.Indexer = rcInformer.GetIndexer()
    rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc:    c.addReplicationController,
        UpdateFunc: c.updateReplicationController,
        DeleteFunc: c.deleteReplicationController,
    })
    c.podStore.Indexer = podInformer.GetIndexer()
    c.dcStoreSynced = dcInformer.HasSynced
    c.rcStoreSynced = rcInformer.HasSynced
    c.podStoreSynced = podInformer.HasSynced
    return c
}
Example 2: NewDeployer
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface, oclient client.Interface, out, errOut io.Writer, until string) *Deployer {
    scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
    return &Deployer{
        out:    out,
        errOut: errOut,
        until:  until,
        getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
            return client.ReplicationControllers(namespace).Get(name)
        },
        getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
            return client.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(configName)})
        },
        scaler: scaler,
        strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
            switch config.Spec.Strategy.Type {
            case deployapi.DeploymentStrategyTypeRecreate:
                return recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until), nil
            case deployapi.DeploymentStrategyTypeRolling:
                recreate := recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until)
                return rolling.NewRollingDeploymentStrategy(config.Namespace, client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), recreate, out, errOut, until), nil
            default:
                return nil, fmt.Errorf("unsupported strategy type: %s", config.Spec.Strategy.Type)
            }
        },
    }
}
Example 3: GetSwaggerSchema
// GetSwaggerSchema returns the swagger spec from master
func GetSwaggerSchema(apiVer string, kubeClient client.Interface) (*swagger.ApiDeclaration, error) {
    swaggerSchema, err := kubeClient.SwaggerSchema(apiVer)
    if err != nil {
        return nil, fmt.Errorf("couldn't read swagger schema from server: %v", err)
    }
    return swaggerSchema, nil
}
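A minimal usage sketch, not from the original source: it assumes an already-configured client.Interface, and printSchemaSummary plus the "v1" version string are illustrative names only.

// printSchemaSummary is a hypothetical helper showing one way GetSwaggerSchema might be called.
func printSchemaSummary(kubeClient client.Interface) error {
    schema, err := GetSwaggerSchema("v1", kubeClient) // "v1" is an assumed API version string
    if err != nil {
        return err
    }
    fmt.Printf("swagger spec for v1 declares %d APIs\n", len(schema.Apis))
    return nil
}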
Example 4: New
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
    gcc := &GCController{
        kubeClient: kubeClient,
        podControl: controller.RealPodControl{
            Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "pod-garbage-collector"}),
            KubeClient: kubeClient,
        },
        threshold: threshold,
    }
    terminatedSelector := compileTerminatedPodSelector()
    gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return gcc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), terminatedSelector)
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                return gcc.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), terminatedSelector, rv)
            },
        },
        &api.Pod{},
        resyncPeriod(),
        framework.ResourceEventHandlerFuncs{},
    )
    return gcc
}
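A rough wiring sketch, under stated assumptions: the helper name startPodGC is made up, the resync period and threshold are arbitrary, and the controller is assumed to expose a Run(stop <-chan struct{}) method as in Kubernetes releases of that era.

// Hypothetical wiring of the pod GC controller into a process.
func startPodGC(kubeClient client.Interface) chan struct{} {
    resync := func() time.Duration { return 30 * time.Second } // literal controller.ResyncPeriodFunc
    gcc := New(kubeClient, resync, 100)                        // keep at most 100 terminated pods
    stop := make(chan struct{})
    go gcc.Run(stop) // Run's existence and signature are assumed from that release
    return stop
}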
Example 5: DeleteDaemonSet
// TODO(floreks): This should be transactional to make sure that DS will not be deleted without pods
// Deletes daemon set with given name in given namespace and related pods.
// Also deletes services related to daemon set if deleteServices is true.
func DeleteDaemonSet(client k8sClient.Interface, namespace, name string,
    deleteServices bool) error {
    log.Printf("Deleting %s daemon set from %s namespace", name, namespace)
    if deleteServices {
        if err := DeleteDaemonSetServices(client, namespace, name); err != nil {
            return err
        }
    }
    pods, err := getRawDaemonSetPods(client, namespace, name)
    if err != nil {
        return err
    }
    if err := client.Extensions().DaemonSets(namespace).Delete(name); err != nil {
        return err
    }
    for _, pod := range pods.Items {
        if err := client.Pods(namespace).Delete(pod.Name, &api.DeleteOptions{}); err != nil {
            return err
        }
    }
    log.Printf("Successfully deleted %s daemon set from %s namespace", name, namespace)
    return nil
}
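For context, a call site might look like the sketch below; the namespace, daemon set name, and cleanupDaemonSet helper are illustrative values, not from the original project.

// Hypothetical call site: remove a daemon set, its pods, and any services selecting only it.
func cleanupDaemonSet(client k8sClient.Interface) error {
    return DeleteDaemonSet(client, "monitoring", "node-exporter", true) // names are examples only
}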
Example 6: DeleteDaemonSetServices
// DeleteDaemonSetServices deletes services related to daemon set with given name in given namespace.
func DeleteDaemonSetServices(client k8sClient.Interface, namespace, name string) error {
    log.Printf("Deleting services related to %s daemon set from %s namespace", name,
        namespace)
    daemonSet, err := client.Extensions().DaemonSets(namespace).Get(name)
    if err != nil {
        return err
    }
    labelSelector, err := unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector)
    if err != nil {
        return err
    }
    services, err := getServicesForDSDeletion(client, labelSelector, namespace)
    if err != nil {
        return err
    }
    for _, service := range services {
        if err := client.Services(namespace).Delete(service.Name); err != nil {
            return err
        }
    }
    log.Printf("Successfully deleted services related to %s daemon set from %s namespace",
        name, namespace)
    return nil
}
Example 7: getServicesForDeletion
// Based on the given selector, returns the list of services that are candidates for deletion.
// Services are matched by the replication controllers' label selector and may only be deleted
// if the given label selector targets exactly one replication controller.
func getServicesForDeletion(client client.Interface, labelSelector labels.Selector,
    namespace string) ([]api.Service, error) {
    replicationControllers, err := client.ReplicationControllers(namespace).List(api.ListOptions{
        LabelSelector: labelSelector,
        FieldSelector: fields.Everything(),
    })
    if err != nil {
        return nil, err
    }
    // If the label selector targets exactly one replication controller, the services matched
    // by it can be deleted safely; otherwise no service may be deleted, so return an empty list.
    if len(replicationControllers.Items) != 1 {
        return []api.Service{}, nil
    }
    services, err := client.Services(namespace).List(api.ListOptions{
        LabelSelector: labelSelector,
        FieldSelector: fields.Everything(),
    })
    if err != nil {
        return nil, err
    }
    return services.Items, nil
}
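A sketch of how a caller might build the selector argument: the "app: frontend" label set and the servicesOrphanedByRC name are assumptions for illustration; labels.SelectorFromSet is the standard helper in this client library.

// Hypothetical caller: find services that would be candidates for deletion along with the "frontend" RC.
func servicesOrphanedByRC(client client.Interface, namespace string) ([]api.Service, error) {
    selector := labels.SelectorFromSet(labels.Set{"app": "frontend"}) // illustrative label set
    return getServicesForDeletion(client, selector, namespace)
}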
Developer: FujitsuEnablingSoftwareTechnologyGmbH, project: dashboard, lines of code: 31, source file: replicationcontrollercommon.go
Example 8: getRawDaemonSetWithPods
// Returns structure containing DaemonSet and Pods for the given daemon set.
func getRawDaemonSetWithPods(client client.Interface, namespace, name string) (
    *DaemonSetWithPods, error) {
    daemonSet, err := client.Extensions().DaemonSets(namespace).Get(name)
    if err != nil {
        return nil, err
    }
    labelSelector, err := unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector)
    if err != nil {
        return nil, err
    }
    pods, err := client.Pods(namespace).List(
        api.ListOptions{
            LabelSelector: labelSelector,
            FieldSelector: fields.Everything(),
        })
    if err != nil {
        return nil, err
    }
    daemonSetAndPods := &DaemonSetWithPods{
        DaemonSet: daemonSet,
        Pods:      pods,
    }
    return daemonSetAndPods, nil
}
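The returned struct can then be consumed directly. A small assumed example follows; daemonSetPodCount is a made-up helper, and it relies on Pods being the pod list captured above.

// Hypothetical helper: report how many pods the daemon set currently matches.
func daemonSetPodCount(client client.Interface, namespace, name string) (int, error) {
    withPods, err := getRawDaemonSetWithPods(client, namespace, name)
    if err != nil {
        return 0, err
    }
    return len(withPods.Pods.Items), nil // Pods holds the pod list fetched by the function above
}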
Example 9: NewJobController
func NewJobController(kubeClient client.Interface) *JobController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
    jm := &JobController{
        kubeClient: kubeClient,
        podControl: controller.RealPodControl{
            KubeClient: kubeClient,
            Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job"}),
        },
        expectations: controller.NewControllerExpectations(),
        queue:        workqueue.New(),
    }
    jm.jobStore.Store, jm.jobController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
            },
        },
        &experimental.Job{},
        replicationcontroller.FullControllerResyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: jm.enqueueController,
            UpdateFunc: func(old, cur interface{}) {
                if job := cur.(*experimental.Job); !isJobFinished(job) {
                    jm.enqueueController(job)
                }
            },
            DeleteFunc: jm.enqueueController,
        },
    )
    jm.podStore.Store, jm.podController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return jm.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                return jm.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
            },
        },
        &api.Pod{},
        replicationcontroller.PodRelistPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    jm.addPod,
            UpdateFunc: jm.updatePod,
            DeleteFunc: jm.deletePod,
        },
    )
    jm.updateHandler = jm.updateJobStatus
    jm.syncHandler = jm.syncJob
    jm.podStoreSynced = jm.podController.HasSynced
    return jm
}
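Starting the controller might look like the sketch below. The startJobController helper and the worker count are made up, and the Run(workers, stopCh) signature is assumed from controllers of the same Kubernetes release.

// Hypothetical startup: construct the controller and let it process jobs with 5 workers.
func startJobController(kubeClient client.Interface, stopCh <-chan struct{}) {
    jm := NewJobController(kubeClient)
    go jm.Run(5, stopCh) // Run's signature is an assumption based on that era's controllers
}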
Example 10: formatImageStreamQuota
func formatImageStreamQuota(out *tabwriter.Writer, c client.Interface, kc kclient.Interface, stream *imageapi.ImageStream) {
    quotas, err := kc.ResourceQuotas(stream.Namespace).List(api.ListOptions{})
    if err != nil {
        return
    }
    var limit *resource.Quantity
    for _, item := range quotas.Items {
        // search for smallest ImageStream quota
        if value, ok := item.Spec.Hard[imageapi.ResourceImageStreamSize]; ok {
            if limit == nil || limit.Cmp(value) > 0 {
                limit = &value
            }
        }
    }
    if limit != nil {
        quantity := imagequota.GetImageStreamSize(c, stream, make(map[string]*imageapi.Image))
        scale := mega
        if quantity.Value() >= (1<<giga.scale) || limit.Value() >= (1<<giga.scale) {
            scale = giga
        }
        formatString(out, "Quota Usage", fmt.Sprintf("%s / %s",
            formatQuantity(quantity, scale), formatQuantity(limit, scale)))
    }
}
Example 11: StatusViewerFor
func StatusViewerFor(kind unversioned.GroupKind, c client.Interface) (StatusViewer, error) {
    switch kind {
    case extensions.Kind("Deployment"):
        return &DeploymentStatusViewer{c.Extensions()}, nil
    }
    return nil, fmt.Errorf("no status viewer has been implemented for %v", kind)
}
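A brief sketch of selecting a viewer; only the Deployment kind is handled by the switch above, so every other kind falls through to the error path. The deploymentStatusViewer wrapper is a hypothetical name.

// Hypothetical call: obtain a status viewer for Deployments, surfacing the unsupported-kind error otherwise.
func deploymentStatusViewer(c client.Interface) (StatusViewer, error) {
    viewer, err := StatusViewerFor(extensions.Kind("Deployment"), c)
    if err != nil {
        return nil, fmt.Errorf("cannot watch rollout status: %v", err)
    }
    return viewer, nil
}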
Example 12: DeleteReplicationController
// TODO(floreks): This should be transactional to make sure that RC will not be deleted without pods
// Deletes replication controller with given name in given namespace and related pods.
// Also deletes services related to replication controller if deleteServices is true.
func DeleteReplicationController(client client.Interface, namespace, name string,
    deleteServices bool) error {
    log.Printf("Deleting %s replication controller from %s namespace", name, namespace)
    if deleteServices {
        if err := DeleteReplicationControllerServices(client, namespace, name); err != nil {
            return err
        }
    }
    pods, err := getRawReplicationControllerPods(client, namespace, name)
    if err != nil {
        return err
    }
    if err := client.ReplicationControllers(namespace).Delete(name); err != nil {
        return err
    }
    for _, pod := range pods.Items {
        if err := client.Pods(namespace).Delete(pod.Name, &api.DeleteOptions{}); err != nil {
            return err
        }
    }
    log.Printf("Successfully deleted %s replication controller from %s namespace", name, namespace)
    return nil
}
Example 13: makeNNodes
func makeNNodes(c client.Interface, N int) {
    baseNode := &api.Node{
        ObjectMeta: api.ObjectMeta{
            GenerateName: "scheduler-test-node-",
        },
        Spec: api.NodeSpec{
            ExternalID: "foobar",
        },
        Status: api.NodeStatus{
            Capacity: api.ResourceList{
                api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
                api.ResourceCPU:    resource.MustParse("4"),
                api.ResourceMemory: resource.MustParse("32Gi"),
            },
            Phase: api.NodeRunning,
            Conditions: []api.NodeCondition{
                {Type: api.NodeReady, Status: api.ConditionTrue},
            },
        },
    }
    for i := 0; i < N; i++ {
        if _, err := c.Nodes().Create(baseNode); err != nil {
            panic("error creating node: " + err.Error())
        }
    }
}
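Because the function only needs a client.Interface, it can be exercised against a fake client in a test. This is a sketch under assumptions: testclient.NewSimpleFake and Fake.Actions are taken from the era-appropriate k8s.io/kubernetes/pkg/client/unversioned/testclient package, and the test name is made up.

// Hypothetical test: seed 50 synthetic nodes into a fake client and verify the create calls.
func TestMakeNNodes(t *testing.T) {
    fake := testclient.NewSimpleFake() // the fake client satisfies client.Interface
    makeNNodes(fake, 50)
    if got := len(fake.Actions()); got != 50 {
        t.Fatalf("expected 50 create actions, got %d", got)
    }
}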
Example 14: InstallRegistry
// InstallRegistry checks whether a registry is installed and installs one if not already installed
func (h *Helper) InstallRegistry(kubeClient kclient.Interface, f *clientcmd.Factory, configDir, images string, out io.Writer) error {
    _, err := kubeClient.Services("default").Get(svcDockerRegistry)
    if err == nil {
        // If there's no error, the registry already exists
        return nil
    }
    if !apierrors.IsNotFound(err) {
        return errors.NewError("error retrieving docker registry service").WithCause(err)
    }
    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = images
    cfg := &registry.RegistryConfig{
        Name:           "registry",
        Type:           "docker-registry",
        ImageTemplate:  imageTemplate,
        Ports:          "5000",
        Replicas:       1,
        Labels:         "docker-registry=default",
        Volume:         "/registry",
        ServiceAccount: "registry",
    }
    cmd := registry.NewCmdRegistry(f, "", "registry", out)
    output := &bytes.Buffer{}
    err = registry.RunCmdRegistry(f, cmd, output, cfg, []string{})
    glog.V(4).Infof("Registry command output:\n%s", output.String())
    return err
}
Example 15: RunProjectCache
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
    if pcache != nil {
        return
    }
    store := cache.NewStore(cache.MetaNamespaceKeyFunc)
    reflector := cache.NewReflector(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return c.Namespaces().List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(resourceVersion string) (watch.Interface, error) {
                return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
            },
        },
        &kapi.Namespace{},
        store,
        0,
    )
    reflector.Run()
    pcache = &ProjectCache{
        Client:              c,
        Store:               store,
        DefaultNodeSelector: defaultNodeSelector,
    }
}