Golang Interface.Core Method Code Examples

This article collects typical usage examples of the Interface.Core method from the Go package k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset. If you are wondering what Interface.Core does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing type, k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset.Interface.


The sections below present 15 code examples of the Interface.Core method, ordered by popularity by default.
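
Before the examples, here is a minimal, self-contained sketch of how a clientset satisfying internalclientset.Interface is usually built and how Core() is then used to reach the core API group. The kubeconfig path is a placeholder, and the import paths reflect the in-tree client of this Kubernetes era, so they may differ in other releases.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
)

func main() {
	// Build a client config from a kubeconfig file (the path is a placeholder).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// *internalclientset.Clientset satisfies internalclientset.Interface.
	var client internalclientset.Interface
	client, err = internalclientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Core() exposes the core API group: Pods, Services, Nodes, ConfigMaps, and so on.
	pods, err := client.Core().Pods(api.NamespaceDefault).List(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d pods in namespace %q\n", len(pods.Items), api.NamespaceDefault)
}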

Example 1: GetNewRC

// GetNewRC returns an RC that matches the intent of the given deployment, fetching the RC list through the client interface.
// Returns nil if the new RC doesn't exist yet.
func GetNewRC(deployment extensions.Deployment, c clientset.Interface) (*api.ReplicationController, error) {
	return GetNewRCFromList(deployment, c,
		func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
			rcList, err := c.Core().ReplicationControllers(namespace).List(options)
			return rcList.Items, err
		})
}
Author: initlove | Project: kubernetes | Lines: 9 | Source: deployment.go

Example 2: newPersistentVolumeClaimInformer

func newPersistentVolumeClaimInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				var internalOptions api.ListOptions
				if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil {
					return nil, err
				}
				return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(internalOptions)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				var internalOptions api.ListOptions
				if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil {
					return nil, err
				}
				return client.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(internalOptions)
			},
		},
		&api.PersistentVolumeClaim{},
		resyncPeriod,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	return sharedIndexInformer
}
Author: nak3 | Project: kubernetes | Lines: 25 | Source: persistentvolumeclaim.go
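
For context, below is a hedged sketch of how the informer returned by newPersistentVolumeClaimInformer might be wired up and started. It assumes the same package and era-specific in-tree imports as the example above; the event handler, the 30-second resync period, and the helper name startPVCInformer are illustrative only.

package informers

import (
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// startPVCInformer is a hypothetical helper that attaches a simple event handler to the
// informer built by newPersistentVolumeClaimInformer and starts it; it returns the
// informer so callers can later query its indexed store.
func startPVCInformer(client internalclientset.Interface, stopCh <-chan struct{}) cache.SharedIndexInformer {
	informer := newPersistentVolumeClaimInformer(client, 30*time.Second)
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			pvc := obj.(*api.PersistentVolumeClaim)
			glog.Infof("PVC added: %s/%s", pvc.Namespace, pvc.Name)
		},
	})
	// Run blocks until stopCh is closed, so it is started in its own goroutine.
	go informer.Run(stopCh)
	return informer
}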

Example 3: LabelPodsWithHash

// LabelPodsWithHash labels all pods in the given podList with the new hash label.
// The returned bool value can be used to tell if all pods are actually labeled.
func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
	allPodsLabeled := true
	for _, pod := range podList.Items {
		// Only label the pod that doesn't already have the new hash
		if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
			if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod,
				func(podToUpdate *api.Pod) error {
					// Precondition: the pod doesn't contain the new hash in its label.
					if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
						return errors.ErrPreconditionViolated
					}
					podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
					return nil
				}); err != nil {
				return false, fmt.Errorf("error in adding template hash label %s to pod %+v: %s", hash, pod, err)
			} else if podUpdated {
				glog.V(4).Infof("Labeled %s %s/%s of %s %s/%s with hash %s.", pod.Kind, pod.Namespace, pod.Name, rs.Kind, rs.Namespace, rs.Name, hash)
			} else {
				// If the pod wasn't updated but no error was returned either, we hit a "pod not found" or
				// "precondition violated" error, so we can't claim that all pods are labeled.
				allPodsLabeled = false
			}
		}
	}
	return allPodsLabeled, nil
}
Author: spxtr | Project: contrib | Lines: 28 | Source: deployment_util.go
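
As the doc comment notes, the returned bool reports whether every pod actually received the hash label. A hypothetical call site, assuming the same package and imports as the example above, might react to it like this:

// relabelPods is a hypothetical wrapper showing how callers typically use the bool
// returned by LabelPodsWithHash.
func relabelPods(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) error {
	allLabeled, err := LabelPodsWithHash(podList, rs, c, namespace, hash)
	if err != nil {
		return err
	}
	if !allLabeled {
		// Some pods were deleted or changed while labeling; callers usually
		// re-list the pods and retry instead of treating this as fatal.
		return fmt.Errorf("not all pods in %s were labeled with hash %s; retry needed", namespace, hash)
	}
	return nil
}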

Example 4: NewHollowProxyOrDie

func NewHollowProxyOrDie(
	nodeName string,
	client clientset.Interface,
	endpointsConfig *proxyconfig.EndpointsConfig,
	serviceConfig *proxyconfig.ServiceConfig,
	iptInterface utiliptables.Interface,
	broadcaster record.EventBroadcaster,
	recorder record.EventRecorder,
) *HollowProxy {
	// Create and start Hollow Proxy
	config := options.NewProxyConfig()
	config.OOMScoreAdj = util.Int32Ptr(0)
	config.ResourceContainer = ""
	config.NodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	proxyconfig.NewSourceAPI(
		client.Core().RESTClient(),
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	hollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil, "fake")
	if err != nil {
		glog.Fatalf("Error while creating ProxyServer: %v\n", err)
	}
	return &HollowProxy{
		ProxyServer: hollowProxy,
	}
}
Author: Q-Lee | Project: kubernetes | Lines: 34 | Source: hollow_proxy.go

Example 5: createController

func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *api.Pod) error {
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: controllerName,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: int32(podCount),
			Selector: map[string]string{"name": controllerName},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": controllerName},
				},
				Spec: podTemplate.Spec,
			},
		},
	}
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		if _, err = client.Core().ReplicationControllers(namespace).Create(rc); err == nil {
			return nil
		}
		glog.Errorf("Error while creating rc, maybe retry: %v", err)
	}
	return fmt.Errorf("Terminal error while creating rc, won't retry: %v", err)
}
Author: upmc-enterprises | Project: kubernetes | Lines: 25 | Source: runners.go

Example 6: makeNodes

func makeNodes(c clientset.Interface, nodeCount int) {
	glog.Infof("making %d nodes", nodeCount)
	baseNode := &api.Node{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "scheduler-test-node-",
		},
		Spec: api.NodeSpec{
			ExternalID: "foobar",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
				api.ResourceCPU:    resource.MustParse("4"),
				api.ResourceMemory: resource.MustParse("32Gi"),
			},
			Phase: api.NodeRunning,
			Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionTrue},
			},
		},
	}
	for i := 0; i < nodeCount; i++ {
		if _, err := c.Core().Nodes().Create(baseNode); err != nil {
			panic("error creating node: " + err.Error())
		}
	}
}
Author: Random-Liu | Project: kubernetes | Lines: 27 | Source: util.go

Example 7: Query

// Query sends a query to the InfluxDB service through the API server proxy and returns the Response.
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
	result, err := c.Core().RESTClient().Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService+":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		Do().
		Raw()

	if err != nil {
		return nil, err
	}

	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)

	if err != nil {
		return nil, err
	}
	return &response, nil
}
Author: eljefedelrodeodeljefe | Project: kubernetes | Lines: 28 | Source: monitoring.go

Example 8: NewDeployer

// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclientset.Interface, oclient client.Interface, out, errOut io.Writer, until string) *Deployer {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &Deployer{
		out:    out,
		errOut: errOut,
		until:  until,
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.Core().ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.Core().ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(configName)})
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Spec.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, oclient, &kcoreclient.EventSinkImpl{Interface: client.Core().Events("")}, kapi.Codecs.UniversalDecoder(), out, errOut, until), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, oclient, &kcoreclient.EventSinkImpl{Interface: client.Core().Events("")}, kapi.Codecs.UniversalDecoder(), out, errOut, until)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, oclient, &kcoreclient.EventSinkImpl{Interface: client.Core().Events("")}, kapi.Codecs.UniversalDecoder(), recreate, out, errOut, until), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Spec.Strategy.Type)
			}
		},
	}
}
Author: xgwang-zte | Project: origin | Lines: 27 | Source: deployer.go

Example 9: getServicesForDeletion

// getServicesForDeletion returns, for the given label selector, the services that are candidates for deletion.
// Services are matched through the replication controllers' label selector, and they are only deleted when the
// given label selector targets exactly one replication controller.
func getServicesForDeletion(client client.Interface, labelSelector labels.Selector,
	namespace string) ([]api.Service, error) {

	replicationControllers, err := client.Core().ReplicationControllers(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	// If the label selector targets exactly one replication controller,
	// we can delete the services matched by this label selector;
	// otherwise we cannot delete any services, so return an empty list.
	if len(replicationControllers.Items) != 1 {
		return []api.Service{}, nil
	}

	services, err := client.Core().Services(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	return services.Items, nil
}
Author: kubernetes | Project: dashboard | Lines: 31 | Source: replicationcontrollercommon.go

Example 10: deletePod

// Delete the passed-in pod and wait for it to terminate.
func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *api.Pod) error {

	framework.Logf("Deleting pod %v", pod.Name)
	err := c.Core().Pods(ns).Delete(pod.Name, nil)
	if err != nil {
		return fmt.Errorf("Pod %v encountered a delete error: %v", pod.Name, err)
	}

	// Wait for pod to terminate
	err = f.WaitForPodTerminated(pod.Name, "")
	if err != nil && !apierrs.IsNotFound(err) {
		return fmt.Errorf("Pod %v will not teminate: %v", pod.Name, err)
	}

	// Re-get the pod to double check that it has been deleted; expect err
	// Note: Get() writes a log error if the pod is not found
	_, err = c.Core().Pods(ns).Get(pod.Name)
	if err == nil {
		return fmt.Errorf("Pod %v has been deleted but able to re-Get the deleted pod", pod.Name)
	}
	if !apierrs.IsNotFound(err) {
		return fmt.Errorf("Pod %v has been deleted but still exists: %v", pod.Name, err)
	}

	framework.Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
	return nil
}
Author: jumpkick | Project: kubernetes | Lines: 28 | Source: persistent_volumes.go

Example 11: New

func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	gcc := &GCController{
		kubeClient: kubeClient,
		threshold:  threshold,
		deletePod: func(namespace, name string) error {
			return kubeClient.Core().Pods(namespace).Delete(name, api.NewDeleteOptions(0))
		},
	}

	terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))

	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
Author: CNDonny | Project: scope | Lines: 28 | Source: gc_controller.go

Example 12: waitAndValidatePVandPVC

// waitAndValidatePVandPVC waits for the pv and pvc to become bound to each other, then checks that the
// pv's claimRef matches the pvc, returning an error if any step fails. The pv and pvc are returned
// freshly re-retrieved (via Get) so that the It() caller's AfterEach func can delete these objects
// if they are not nil.
func waitAndValidatePVandPVC(c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {

	var err error

	// Wait for pv and pvc to bind to each other
	if err = waitOnPVandPVC(c, ns, pv, pvc); err != nil {
		return pv, pvc, err
	}

	// Check that the PersistentVolume.ClaimRef is valid and matches the PVC
	framework.Logf("Checking PersistentVolume ClaimRef is non-nil")
	pv, err = c.Core().PersistentVolumes().Get(pv.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name)
	}

	pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolumeClaim %v:", pvc.Name)
	}

	if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID != pvc.UID {
		pvJSON, _ := json.Marshal(pv.Spec.ClaimRef)
		return pv, pvc, fmt.Errorf("Expected Bound PersistentVolume %v to have valid ClaimRef: %+v", pv.Name, string(pvJSON))
	}

	return pv, pvc, nil
}
Author: jumpkick | Project: kubernetes | Lines: 33 | Source: persistent_volumes.go

Example 13: NewMetricsGrabber

func NewMetricsGrabber(c clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) {
	registeredMaster := false
	masterName := ""
	nodeList, err := c.Core().Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	if len(nodeList.Items) < 1 {
		glog.Warning("Can't find any Nodes in the API server to grab metrics from")
	}
	for _, node := range nodeList.Items {
		if system.IsMasterNode(&node) {
			registeredMaster = true
			masterName = node.Name
			break
		}
	}
	if !registeredMaster {
		scheduler = false
		controllers = false
		glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler and ControllerManager is disabled.")
	}

	return &MetricsGrabber{
		client:                    c,
		grabFromApiServer:         apiServer,
		grabFromControllerManager: controllers,
		grabFromKubelets:          kubelets,
		grabFromScheduler:         scheduler,
		masterName:                masterName,
		registeredMaster:          registeredMaster,
	}, nil
}
Author: eljefedelrodeodeljefe | Project: kubernetes | Lines: 33 | Source: metrics_grabber.go

Example 14: newQuotaAccessor

// newQuotaAccessor creates an object that conforms to the QuotaAccessor interface to be used to retrieve quota objects.
func newQuotaAccessor(client clientset.Interface) (*quotaAccessor, error) {
	liveLookupCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}
	updatedCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}
	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			internalOptions := api.ListOptions{}
			v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
			return client.Core().ResourceQuotas(api.NamespaceAll).List(internalOptions)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			internalOptions := api.ListOptions{}
			v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
			return client.Core().ResourceQuotas(api.NamespaceAll).Watch(internalOptions)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)

	return &quotaAccessor{
		client:          client,
		indexer:         indexer,
		reflector:       reflector,
		liveLookupCache: liveLookupCache,
		liveTTL:         time.Duration(30 * time.Second),
		updatedQuotas:   updatedCache,
	}, nil
}
Author: kubernetes | Project: kubernetes | Lines: 33 | Source: resource_access.go

Example 15: deleteDNSScalingConfigMap

func deleteDNSScalingConfigMap(c clientset.Interface) error {
	if err := c.Core().ConfigMaps(DNSNamespace).Delete(DNSAutoscalerLabelName, nil); err != nil {
		return err
	}
	framework.Logf("DNS autoscaling ConfigMap deleted.")
	return nil
}
Author: eljefedelrodeodeljefe | Project: kubernetes | Lines: 7 | Source: dns_autoscaling.go


Note: The k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset.Interface.Core method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.