This article collects typical usage examples of the Golang Interface.Legacy method from k8s/io/kubernetes/pkg/client/clientset_generated/release_1_2. If you have been wondering what exactly Interface.Legacy does, how to use it, or what real calling code looks like, the curated examples below should help. You can also read further about the containing type, k8s/io/kubernetes/pkg/client/clientset_generated/release_1_2.Interface.
Fifteen code examples of Interface.Legacy are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang examples.
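Before the examples: each one operates on a clientset.Interface, whose Legacy() method returns the typed client for the core ("legacy") API group. The following is a minimal, hypothetical sketch of how such a clientset might be constructed, assuming the generated package exposes NewForConfig as other generated clientsets of this era do; the apiserver address is a placeholder.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	// Assumption: NewForConfig is available on this generated clientset;
	// the host URL below is a placeholder.
	c, err := clientset.NewForConfig(&restclient.Config{Host: "http://localhost:8080"})
	if err != nil {
		panic(err)
	}
	// Legacy() yields the core-group client, from which typed resource
	// clients (Pods, Namespaces, ...) are obtained.
	pods, err := c.Legacy().Pods(api.NamespaceDefault).List(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("listed %d pods\n", len(pods.Items))
}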
Example 1: forcefullyDeletePod
func forcefullyDeletePod(c clientset.Interface, pod *api.Pod) {
	// A zero grace period deletes the pod immediately.
	var zero int64
	err := c.Legacy().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
	if err != nil {
		utilruntime.HandleError(err)
	}
}
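A hypothetical caller for Example 1, assuming the same package and imports as the example; the single-argument Get matches the typed clients of this release, and the namespace and pod name come from the caller.

func forceDeletePodByName(c clientset.Interface, namespace, name string) {
	pod, err := c.Legacy().Pods(namespace).Get(name)
	if err != nil {
		utilruntime.HandleError(err)
		return
	}
	forcefullyDeletePod(c, pod)
}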
Example 2: deletePods
func deletePods(kubeClient clientset.Interface, ns string, before unversioned.Time) (int64, error) {
	items, err := kubeClient.Legacy().Pods(ns).List(api.ListOptions{})
	if err != nil {
		return 0, err
	}
	// Once the deadline has passed, stop waiting for graceful termination.
	expired := unversioned.Now().After(before.Time)
	var deleteOptions *api.DeleteOptions
	if expired {
		deleteOptions = api.NewDeleteOptions(0)
	}
	// The estimate is the longest grace period among the pods being deleted.
	estimate := int64(0)
	for i := range items.Items {
		if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
			grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
			if grace > estimate {
				estimate = grace
			}
		}
		err := kubeClient.Legacy().Pods(ns).Delete(items.Items[i].Name, deleteOptions)
		if err != nil && !errors.IsNotFound(err) {
			return 0, err
		}
	}
	if expired {
		estimate = 0
	}
	return estimate, nil
}
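A hypothetical caller for Example 2, assuming the same package plus the standard time import; it treats the returned estimate as a number of seconds to wait, and the 60-second deadline is a placeholder.

func drainNamespacePods(kubeClient clientset.Interface, ns string) error {
	// Allow up to 60 seconds of graceful termination before deletes turn forceful.
	deadline := unversioned.NewTime(time.Now().Add(60 * time.Second))
	estimate, err := deletePods(kubeClient, ns, deadline)
	if err != nil {
		return err
	}
	// Wait out the longest grace period reported for the deleted pods.
	time.Sleep(time.Duration(estimate) * time.Second)
	return nil
}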
Example 3: GetNewRC
// GetNewRC returns an RC that matches the intent of the given deployment; get RCList from client interface.
// Returns nil if the new RC doesn't exist yet.
func GetNewRC(deployment extensions.Deployment, c clientset.Interface) (*api.ReplicationController, error) {
	return GetNewRCFromList(deployment, c,
		func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
			rcList, err := c.Legacy().ReplicationControllers(namespace).List(options)
			return rcList.Items, err
		})
}
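A hypothetical use of GetNewRC that distinguishes "not created yet" (nil RC, nil error) from a real error, assuming the same package as the example.

func logNewRC(deployment extensions.Deployment, c clientset.Interface) error {
	newRC, err := GetNewRC(deployment, c)
	if err != nil {
		return err
	}
	if newRC == nil {
		glog.V(4).Infof("new RC for deployment %q not created yet", deployment.Name)
		return nil
	}
	glog.V(4).Infof("deployment %q is backed by RC %q", deployment.Name, newRC.Name)
	return nil
}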
Example 4: New
func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	gcc := &GCController{
		kubeClient: kubeClient,
		threshold:  threshold,
		deletePod: func(namespace, name string) error {
			return kubeClient.Legacy().Pods(namespace).Delete(name, api.NewDeleteOptions(0))
		},
	}
	// Watch only pods that have already terminated (neither pending, running, nor unknown).
	terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))
	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
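A hypothetical construction of the controller, assuming the same package as the example; controller.ResyncPeriodFunc is satisfied with an inline closure, and the 30-second period and threshold of 100 are placeholder values.

func newTerminatedPodGC(kubeClient clientset.Interface) *GCController {
	// controller.ResyncPeriodFunc has underlying type func() time.Duration,
	// so an unnamed closure is assignable to it.
	resync := func() time.Duration { return 30 * time.Second }
	return New(kubeClient, resync, 100)
}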
Example 5: updateNamespaceStatusFunc
// updateNamespaceStatusFunc verifies that the status of the namespace is correct,
// moving it to the Terminating phase once a deletion timestamp has been set.
func updateNamespaceStatusFunc(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) {
	if namespace.DeletionTimestamp.IsZero() || namespace.Status.Phase == api.NamespaceTerminating {
		return namespace, nil
	}
	newNamespace := api.Namespace{}
	newNamespace.ObjectMeta = namespace.ObjectMeta
	newNamespace.Status = namespace.Status
	newNamespace.Status.Phase = api.NamespaceTerminating
	return kubeClient.Legacy().Namespaces().UpdateStatus(&newNamespace)
}
Example 6: deleteResourceQuotas
func deleteResourceQuotas(kubeClient clientset.Interface, ns string) error {
	resourceQuotas, err := kubeClient.Legacy().ResourceQuotas(ns).List(api.ListOptions{})
	if err != nil {
		return err
	}
	for i := range resourceQuotas.Items {
		err := kubeClient.Legacy().ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name, nil)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
Example 7: deletePersistentVolumeClaims
func deletePersistentVolumeClaims(kubeClient clientset.Interface, ns string) error {
	items, err := kubeClient.Legacy().PersistentVolumeClaims(ns).List(api.ListOptions{})
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.Legacy().PersistentVolumeClaims(ns).Delete(items.Items[i].Name, nil)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
Example 8: getPodsForRCs
func getPodsForRCs(c clientset.Interface, replicationControllers []*api.ReplicationController) ([]api.Pod, error) {
	allPods := []api.Pod{}
	for _, rc := range replicationControllers {
		selector := labels.SelectorFromSet(rc.Spec.Selector)
		options := api.ListOptions{LabelSelector: selector}
		podList, err := c.Legacy().Pods(rc.ObjectMeta.Namespace).List(options)
		if err != nil {
			return allPods, fmt.Errorf("error listing pods: %v", err)
		}
		allPods = append(allPods, podList.Items...)
	}
	return allPods, nil
}
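A hypothetical caller for Example 8 that simply counts the pods behind a set of RCs, assuming the same package as the example.

func countRCPods(c clientset.Interface, rcs []*api.ReplicationController) (int, error) {
	pods, err := getPodsForRCs(c, rcs)
	if err != nil {
		return 0, err
	}
	return len(pods), nil
}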
Example 9: NewResourceQuota
// NewResourceQuota creates a new resource quota admission control handler
func NewResourceQuota(client clientset.Interface) admission.Interface {
	lw := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Legacy().ResourceQuotas(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Legacy().ResourceQuotas(api.NamespaceAll).Watch(options)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)
	reflector.Run()
	return createResourceQuota(client, indexer)
}
Example 10: New
// New returns a new service controller to keep cloud provider service resources
// (like load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{Interface: kubeClient.Legacy().Events("")})
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})
	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister: cache.StoreToNodeLister{
			Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
		},
	}
}
Example 11: retryOnConflictError
// retryOnConflictError retries the specified fn if there was a conflict error
// TODO RetryOnConflict should be a generic concept in client code
func retryOnConflictError(kubeClient clientset.Interface, namespace *api.Namespace, fn updateNamespaceFunc) (result *api.Namespace, err error) {
	latestNamespace := namespace
	for {
		result, err = fn(kubeClient, latestNamespace)
		if err == nil {
			return result, nil
		}
		if !errors.IsConflict(err) {
			return nil, err
		}
		latestNamespace, err = kubeClient.Legacy().Namespaces().Get(latestNamespace.Name)
		if err != nil {
			return nil, err
		}
	}
}
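Example 5 and Example 11 compose naturally: updateNamespaceStatusFunc matches the updateNamespaceFunc signature used here, so a conflict-tolerant status update is a single call. A hypothetical wiring in the same package:

func markNamespaceTerminating(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) {
	// Retries the status update whenever the apiserver reports a conflict,
	// refetching the latest namespace object between attempts.
	return retryOnConflictError(kubeClient, namespace, updateNamespaceStatusFunc)
}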
Example 12: NewPersistentVolumeRecycler
// NewPersistentVolumeRecycler creates a new PersistentVolumeRecycler
func NewPersistentVolumeRecycler(kubeClient clientset.Interface, syncPeriod time.Duration, plugins []volume.VolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeRecycler, error) {
	recyclerClient := NewRecyclerClient(kubeClient)
	recycler := &PersistentVolumeRecycler{
		client:     recyclerClient,
		kubeClient: kubeClient,
		cloud:      cloud,
	}
	if err := recycler.pluginMgr.InitPlugins(plugins, recycler); err != nil {
		return nil, fmt.Errorf("Could not initialize volume plugins for PVClaimBinder: %+v", err)
	}
	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Legacy().PersistentVolumes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Legacy().PersistentVolumes().Watch(options)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				pv, ok := obj.(*api.PersistentVolume)
				if !ok {
					glog.Errorf("Error casting object to PersistentVolume: %v", obj)
					return
				}
				recycler.reclaimVolume(pv)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				pv, ok := newObj.(*api.PersistentVolume)
				if !ok {
					glog.Errorf("Error casting object to PersistentVolume: %v", newObj)
					return
				}
				recycler.reclaimVolume(pv)
			},
		},
	)
	recycler.volumeController = volumeController
	return recycler, nil
}
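A hypothetical construction with an empty plugin list and no cloud provider, just to show the shape of the call; the 10-second sync period is a placeholder.

func newRecyclerSketch(kubeClient clientset.Interface) (*PersistentVolumeRecycler, error) {
	// Empty plugin list and nil cloud provider; the 10s sync period is a placeholder.
	return NewPersistentVolumeRecycler(kubeClient, 10*time.Second, []volume.VolumePlugin{}, nil)
}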
Example 13: NewLimitRanger
// NewLimitRanger returns an object that enforces limits based on the supplied limit function
func NewLimitRanger(client clientset.Interface, limitFunc LimitFunc) admission.Interface {
	lw := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Legacy().LimitRanges(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Legacy().LimitRanges(api.NamespaceAll).Watch(options)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.LimitRange{}, 0)
	reflector.Run()
	return &limitRanger{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		client:    client,
		limitFunc: limitFunc,
		indexer:   indexer,
	}
}
Example 14: NewProvision
// NewProvision creates a new namespace provision admission control handler
func NewProvision(c clientset.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return c.Legacy().Namespaces().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return c.Legacy().Namespaces().Watch(options)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()
	return createProvision(c, store)
}
Example 15: SyncAllPodsWithStore
// SyncAllPodsWithStore lists all pods and inserts them into the given store.
// Though this function is written in a generic manner, it is only used by the
// controllers for a specific purpose, to synchronously populate the store
// with the first batch of pods that would otherwise be sent by the Informer.
// Doing this avoids more complicated forms of synchronization with the
// Informer, though it also means that the controller calling this function
// will receive "OnUpdate" events for all the pods in the store, instead of
// "OnAdd". This should be ok, since most controllers are level triggered
// and make decisions based on the contents of the store.
//
// TODO: Extend this logic to load arbitrary local state for the controllers
// instead of just pods.
func SyncAllPodsWithStore(kubeClient clientset.Interface, store cache.Store) {
	var allPods *api.PodList
	var err error
	listOptions := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}
	for {
		if allPods, err = kubeClient.Legacy().Pods(api.NamespaceAll).List(listOptions); err != nil {
			glog.Warningf("Retrying pod list: %v", err)
			continue
		}
		break
	}
	pods := []interface{}{}
	for i := range allPods.Items {
		p := allPods.Items[i]
		glog.V(4).Infof("Initializing store with pod %v/%v", p.Namespace, p.Name)
		pods = append(pods, &p)
	}
	store.Replace(pods, allPods.ResourceVersion)
}
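A hypothetical caller that builds a store, fills it synchronously, and would then hand it to an informer, as the comment above describes; it assumes the same package as the example.

func primePodStore(kubeClient clientset.Interface) cache.Store {
	// MetaNamespaceKeyFunc keys objects as "namespace/name", matching the
	// stores used elsewhere in these examples.
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	SyncAllPodsWithStore(kubeClient, store)
	return store
}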