This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/controller/framework.NewInformer. If you are unsure what NewInformer does, how to call it, or what idiomatic usage looks like, the curated examples below should help.
Fifteen code examples of the NewInformer function are shown below, sorted by popularity by default.
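All fifteen examples follow the same pattern: NewInformer takes a cache.ListWatch describing how to list and watch a resource, the expected object type, a resync period, and a set of event handlers, and it returns a cache.Store that mirrors server state plus a controller that keeps the store in sync. A minimal sketch of that pattern, assuming the pkg/controller/framework API used throughout these examples (the handler bodies are placeholders):

store, informer := framework.NewInformer(
    cache.NewListWatchFromClient(kubeClient, "pods", api.NamespaceAll, fields.Everything()),
    &api.Pod{},
    30*time.Second, // resync period; 0 disables periodic resyncs
    framework.ResourceEventHandlerFuncs{
        AddFunc:    func(obj interface{}) { /* react to creations */ },
        UpdateFunc: func(old, cur interface{}) { /* react to modifications */ },
        DeleteFunc: func(obj interface{}) { /* react to deletions */ },
    },
)
stopCh := make(chan struct{})
go informer.Run(stopCh) // list+watch loop; populates store until stopCh is closed

The controller's HasSynced method reports whether the initial list has been processed; several examples below wire it into fields such as podStoreSynced or servicesSynced.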
Example 1: New
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
    gcc := &GCController{
        kubeClient: kubeClient,
        podControl: controller.RealPodControl{
            Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "pod-garbage-collector"}),
            KubeClient: kubeClient,
        },
        threshold: threshold,
    }
    terminatedSelector := compileTerminatedPodSelector()
    gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return gcc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), terminatedSelector)
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                return gcc.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), terminatedSelector, rv)
            },
        },
        &api.Pod{},
        resyncPeriod(),
        framework.ResourceEventHandlerFuncs{},
    )
    return gcc
}
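Example 1 only constructs the informer; something must still run it. A hedged sketch of a Run method (illustrative, not the verbatim upstream method) that starts the syncer and blocks:

func (gcc *GCController) Run(stop <-chan struct{}) {
    go gcc.podStoreSyncer.Run(stop) // keeps gcc.podStore filled with terminated pods
    // ... periodically invoke garbage collection against the store (elided)
    <-stop
}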
Example 2: newIPVSController
// newIPVSController creates a new controller from the given config.
func newIPVSController(kubeClient *unversioned.Client, namespace string, useUnicast bool, password string) *ipvsControllerController {
    ipvsc := ipvsControllerController{
        client:            kubeClient,
        queue:             workqueue.New(),
        reloadRateLimiter: util.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
        reloadLock:        &sync.Mutex{},
    }
    clusterNodes := getClusterNodesIP(kubeClient)
    nodeInfo, err := getNodeInfo(clusterNodes)
    if err != nil {
        glog.Fatalf("Error getting local IP from nodes in the cluster: %v", err)
    }
    neighbors := getNodeNeighbors(nodeInfo, clusterNodes)
    ipvsc.keepalived = &keepalived{
        iface:      nodeInfo.iface,
        ip:         nodeInfo.ip,
        netmask:    nodeInfo.netmask,
        nodes:      clusterNodes,
        neighbors:  neighbors,
        priority:   getNodePriority(nodeInfo.ip, clusterNodes),
        useUnicast: useUnicast,
        password:   password,
    }
    enqueue := func(obj interface{}) {
        key, err := keyFunc(obj)
        if err != nil {
            glog.Infof("Couldn't get key for object %+v: %v", obj, err)
            return
        }
        ipvsc.queue.Add(key)
    }
    eventHandlers := framework.ResourceEventHandlerFuncs{
        AddFunc:    enqueue,
        DeleteFunc: enqueue,
        UpdateFunc: func(old, cur interface{}) {
            if !reflect.DeepEqual(old, cur) {
                enqueue(cur)
            }
        },
    }
    ipvsc.svcLister.Store, ipvsc.svcController = framework.NewInformer(
        cache.NewListWatchFromClient(
            ipvsc.client, "services", namespace, fields.Everything()),
        &api.Service{}, resyncPeriod, eventHandlers)
    ipvsc.epLister.Store, ipvsc.epController = framework.NewInformer(
        cache.NewListWatchFromClient(
            ipvsc.client, "endpoints", namespace, fields.Everything()),
        &api.Endpoints{}, resyncPeriod, eventHandlers)
    return &ipvsc
}
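The enqueue closure only records object keys; draining them is left to worker goroutines. A minimal sketch of such a worker, assuming the workqueue API shown above (the sync step is a hypothetical placeholder):

func (ipvsc *ipvsControllerController) worker() {
    for {
        key, quit := ipvsc.queue.Get()
        if quit {
            return
        }
        glog.V(4).Infof("syncing %v", key)
        // ... rate-limit and regenerate the keepalived config here (hypothetical)
        ipvsc.queue.Done(key)
    }
}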
Example 3: NewJobController
func NewJobController(kubeClient client.Interface) *JobController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
    jm := &JobController{
        kubeClient: kubeClient,
        podControl: controller.RealPodControl{
            KubeClient: kubeClient,
            Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job"}),
        },
        expectations: controller.NewControllerExpectations(),
        queue:        workqueue.New(),
    }
    jm.jobStore.Store, jm.jobController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
            },
        },
        &experimental.Job{},
        replicationcontroller.FullControllerResyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: jm.enqueueController,
            UpdateFunc: func(old, cur interface{}) {
                if job := cur.(*experimental.Job); !isJobFinished(job) {
                    jm.enqueueController(job)
                }
            },
            DeleteFunc: jm.enqueueController,
        },
    )
    jm.podStore.Store, jm.podController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return jm.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                return jm.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
            },
        },
        &api.Pod{},
        replicationcontroller.PodRelistPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    jm.addPod,
            UpdateFunc: jm.updatePod,
            DeleteFunc: jm.deletePod,
        },
    )
    jm.updateHandler = jm.updateJobStatus
    jm.syncHandler = jm.syncJob
    jm.podStoreSynced = jm.podController.HasSynced
    return jm
}
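Since syncJob reads pods from the local store, jobs should not be processed before podController has completed its initial list; that is why podStoreSynced captures HasSynced. A hedged sketch of the guard a sync loop could use (waitForPodStoreSync is an illustrative name, not the upstream one):

func (jm *JobController) waitForPodStoreSync(stopCh <-chan struct{}) bool {
    for !jm.podStoreSynced() {
        select {
        case <-stopCh:
            return false
        case <-time.After(100 * time.Millisecond):
            // poll again until the initial pod list has been processed
        }
    }
    return true
}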
Example 4: NewDockercfgController
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl client.Interface, options DockercfgControllerOptions) *DockercfgController {
    e := &DockercfgController{
        client:               cl,
        queue:                workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        dockerURLsIntialized: options.DockerURLsIntialized,
    }
    var serviceAccountCache cache.Store
    serviceAccountCache, e.serviceAccountController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return e.client.ServiceAccounts(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return e.client.ServiceAccounts(api.NamespaceAll).Watch(options)
            },
        },
        &api.ServiceAccount{},
        options.Resync,
        framework.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                serviceAccount := obj.(*api.ServiceAccount)
                glog.V(5).Infof("Adding service account %s", serviceAccount.Name)
                e.enqueueServiceAccount(serviceAccount)
            },
            UpdateFunc: func(old, cur interface{}) {
                serviceAccount := cur.(*api.ServiceAccount)
                glog.V(5).Infof("Updating service account %s", serviceAccount.Name)
                // Resync on service object relist.
                e.enqueueServiceAccount(serviceAccount)
            },
        },
    )
    e.serviceAccountCache = NewEtcdMutationCache(serviceAccountCache)
    tokenSecretSelector := fields.OneTermEqualSelector(api.SecretTypeField, string(api.SecretTypeServiceAccountToken))
    e.secretCache, e.secretController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                options.FieldSelector = tokenSecretSelector
                return e.client.Secrets(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                options.FieldSelector = tokenSecretSelector
                return e.client.Secrets(api.NamespaceAll).Watch(options)
            },
        },
        &api.Secret{},
        options.Resync,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    func(cur interface{}) { e.handleTokenSecretUpdate(nil, cur) },
            UpdateFunc: func(old, cur interface{}) { e.handleTokenSecretUpdate(old, cur) },
            DeleteFunc: e.handleTokenSecretDelete,
        },
    )
    e.syncHandler = e.syncServiceAccount
    return e
}
Example 5: NewDockerRegistryServiceController
// NewDockerRegistryServiceController returns a new *DockerRegistryServiceController.
func NewDockerRegistryServiceController(cl client.Interface, options DockerRegistryServiceControllerOptions) *DockerRegistryServiceController {
    e := &DockerRegistryServiceController{
        client:                cl,
        dockercfgController:   options.DockercfgController,
        registryLocationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        secretsToUpdate:       workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        serviceName:           options.RegistryServiceName,
        serviceNamespace:      options.RegistryNamespace,
        dockerURLsIntialized:  options.DockerURLsIntialized,
    }
    e.serviceCache, e.serviceController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
                opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
                return e.client.Services(options.RegistryNamespace).List(opts)
            },
            WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
                opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
                return e.client.Services(options.RegistryNamespace).Watch(opts)
            },
        },
        &kapi.Service{},
        options.Resync,
        framework.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                e.enqueueRegistryLocationQueue()
            },
            UpdateFunc: func(old, cur interface{}) {
                e.enqueueRegistryLocationQueue()
            },
            DeleteFunc: func(obj interface{}) {
                e.enqueueRegistryLocationQueue()
            },
        },
    )
    e.servicesSynced = e.serviceController.HasSynced
    e.syncRegistryLocationHandler = e.syncRegistryLocationChange
    dockercfgOptions := kapi.ListOptions{FieldSelector: fields.SelectorFromSet(map[string]string{kapi.SecretTypeField: string(kapi.SecretTypeDockercfg)})}
    e.secretCache, e.secretController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
                return e.client.Secrets(kapi.NamespaceAll).List(dockercfgOptions)
            },
            WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
                return e.client.Secrets(kapi.NamespaceAll).Watch(dockercfgOptions)
            },
        },
        &kapi.Secret{},
        options.Resync,
        framework.ResourceEventHandlerFuncs{},
    )
    e.secretsSynced = e.secretController.HasSynced
    e.syncSecretHandler = e.syncSecretUpdate
    return e
}
Example 6: newLoadBalancerController
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *unversioned.Client, namespace string) *loadBalancerController {
    lbc := loadBalancerController{
        cfg:    cfg,
        client: kubeClient,
        queue:  workqueue.New(),
        reloadRateLimiter: util.NewTokenBucketRateLimiter(
            reloadQPS, int(reloadQPS)),
        targetService:   *targetService,
        forwardServices: *forwardServices,
        httpPort:        *httpPort,
        tcpServices:     map[string]int{},
    }
    for _, service := range strings.Split(*tcpServices, ",") {
        portSplit := strings.Split(service, ":")
        if len(portSplit) != 2 {
            glog.Errorf("Ignoring misconfigured TCP service %v", service)
            continue
        }
        if port, err := strconv.Atoi(portSplit[1]); err != nil {
            glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
            continue
        } else {
            lbc.tcpServices[portSplit[0]] = port
        }
    }
    enqueue := func(obj interface{}) {
        key, err := keyFunc(obj)
        if err != nil {
            glog.Infof("Couldn't get key for object %+v: %v", obj, err)
            return
        }
        lbc.queue.Add(key)
    }
    eventHandlers := framework.ResourceEventHandlerFuncs{
        AddFunc:    enqueue,
        DeleteFunc: enqueue,
        UpdateFunc: func(old, cur interface{}) {
            if !reflect.DeepEqual(old, cur) {
                enqueue(cur)
            }
        },
    }
    lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
        cache.NewListWatchFromClient(
            lbc.client, "services", namespace, fields.Everything()),
        &api.Service{}, resyncPeriod, eventHandlers)
    lbc.epLister.Store, lbc.epController = framework.NewInformer(
        cache.NewListWatchFromClient(
            lbc.client, "endpoints", namespace, fields.Everything()),
        &api.Endpoints{}, resyncPeriod, eventHandlers)
    return &lbc
}
Example 7: NewPersistentVolumeProvisionerController
// NewPersistentVolumeProvisionerController creates a new PersistentVolumeProvisionerController
func NewPersistentVolumeProvisionerController(client controllerClient, syncPeriod time.Duration, clusterName string, plugins []volume.VolumePlugin, provisioner volume.ProvisionableVolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeProvisionerController, error) {
    controller := &PersistentVolumeProvisionerController{
        client:      client,
        cloud:       cloud,
        provisioner: provisioner,
        clusterName: clusterName,
    }
    if err := controller.pluginMgr.InitPlugins(plugins, controller); err != nil {
        return nil, fmt.Errorf("Could not initialize volume plugins for PersistentVolumeProvisionerController: %+v", err)
    }
    glog.V(5).Infof("Initializing provisioner: %s", controller.provisioner.Name())
    controller.provisioner.Init(controller)
    controller.volumeStore, controller.volumeController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return client.ListPersistentVolumes(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return client.WatchPersistentVolumes(options)
            },
        },
        &api.PersistentVolume{},
        syncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    controller.handleAddVolume,
            UpdateFunc: controller.handleUpdateVolume,
            // delete handler not needed in this controller.
            // volume deletion is handled by the recycler controller
        },
    )
    controller.claimStore, controller.claimController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return client.ListPersistentVolumeClaims(api.NamespaceAll, options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return client.WatchPersistentVolumeClaims(api.NamespaceAll, options)
            },
        },
        &api.PersistentVolumeClaim{},
        syncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    controller.handleAddClaim,
            UpdateFunc: controller.handleUpdateClaim,
            // delete handler not needed.
            // normal recycling applies when a claim is deleted.
            // recycling is handled by the binding controller.
        },
    )
    return controller, nil
}
Example 8: NewPersistentVolumeClaimBinder
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
    volumeIndex := NewPersistentVolumeOrderedIndex()
    binderClient := NewBinderClient(kubeClient)
    binder := &PersistentVolumeClaimBinder{
        volumeIndex: volumeIndex,
        client:      binderClient,
    }
    _, volumeController := framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return kubeClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(resourceVersion string) (watch.Interface, error) {
                options := api.ListOptions{ResourceVersion: resourceVersion}
                return kubeClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), options)
            },
        },
        &api.PersistentVolume{},
        // TODO: Can we have much longer period here?
        syncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    binder.addVolume,
            UpdateFunc: binder.updateVolume,
            DeleteFunc: binder.deleteVolume,
        },
    )
    _, claimController := framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return kubeClient.PersistentVolumeClaims(api.NamespaceAll).List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(resourceVersion string) (watch.Interface, error) {
                options := api.ListOptions{ResourceVersion: resourceVersion}
                return kubeClient.PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), options)
            },
        },
        &api.PersistentVolumeClaim{},
        // TODO: Can we have much longer period here?
        syncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    binder.addClaim,
            UpdateFunc: binder.updateClaim,
            // no DeleteFunc needed. a claim requires no clean-up.
            // syncVolume handles the missing claim
        },
    )
    binder.claimController = claimController
    binder.volumeController = volumeController
    return binder
}
Example 9: NewPersistentVolumeClaimBinder
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient clientset.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
    if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
        metrics.RegisterMetricAndTrackRateLimiterUsage("pv_claim_binder_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
    }
    volumeIndex := NewPersistentVolumeOrderedIndex()
    binderClient := NewBinderClient(kubeClient)
    binder := &PersistentVolumeClaimBinder{
        volumeIndex: volumeIndex,
        client:      binderClient,
    }
    _, volumeController := framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return kubeClient.Core().PersistentVolumes().List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return kubeClient.Core().PersistentVolumes().Watch(options)
            },
        },
        &api.PersistentVolume{},
        // TODO: Can we have much longer period here?
        syncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    binder.addVolume,
            UpdateFunc: binder.updateVolume,
            DeleteFunc: binder.deleteVolume,
        },
    )
    _, claimController := framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
            },
        },
        &api.PersistentVolumeClaim{},
        // TODO: Can we have much longer period here?
        syncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    binder.addClaim,
            UpdateFunc: binder.updateClaim,
            DeleteFunc: binder.deleteClaim,
        },
    )
    binder.claimController = claimController
    binder.volumeController = volumeController
    return binder
}
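Examples 8 and 9 are the same binder written against two generations of the client API: the older ListWatch takes a zero-argument ListFunc and a resourceVersion string in WatchFunc, while the newer one threads api.ListOptions through both. Porting between them is mechanical (bodies elided):

// old style (Example 8)
ListFunc:  func() (runtime.Object, error) { ... }
WatchFunc: func(rv string) (watch.Interface, error) { ... }

// options style (Example 9)
ListFunc:  func(options api.ListOptions) (runtime.Object, error) { ... }
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { ... }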
Example 10: newLoadBalancerController
// newLoadBalancerController creates a controller for nginx loadbalancer
func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc,
    namespace, nxgConfigMapName, tcpConfigMapName, udpConfigMapName string, lbRuntimeInfo *lbInfo) (*loadBalancerController, error) {
    lbc := loadBalancerController{
        client:       kubeClient,
        stopCh:       make(chan struct{}),
        lbInfo:       lbRuntimeInfo,
        nginx:        nginx.NewManager(kubeClient),
        nxgConfigMap: nxgConfigMapName,
        tcpConfigMap: tcpConfigMapName,
        udpConfigMap: udpConfigMapName,
        defaultSvc:   defaultSvc,
    }
    lbc.syncQueue = NewTaskQueue(lbc.sync)
    eventHandler := framework.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            lbc.syncQueue.enqueue(obj)
        },
        DeleteFunc: func(obj interface{}) {
            lbc.syncQueue.enqueue(obj)
        },
        UpdateFunc: func(old, cur interface{}) {
            if !reflect.DeepEqual(old, cur) {
                lbc.syncQueue.enqueue(cur)
            }
        },
    }
    lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc:  ingressListFunc(lbc.client, namespace),
            WatchFunc: ingressWatchFunc(lbc.client, namespace),
        },
        &extensions.Ingress{}, resyncPeriod, eventHandler)
    lbc.endpLister.Store, lbc.endpController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc:  endpointsListFunc(lbc.client, namespace),
            WatchFunc: endpointsWatchFunc(lbc.client, namespace),
        },
        &api.Endpoints{}, resyncPeriod, eventHandler)
    lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc:  serviceListFunc(lbc.client, namespace),
            WatchFunc: serviceWatchFunc(lbc.client, namespace),
        },
        &api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})
    return &lbc, nil
}
Example 11: newIPVSController
// newIPVSController creates a new controller from the given config.
func newIPVSController(kubeClient *unversioned.Client, namespace string, useUnicast bool, configMapName string) *ipvsControllerController {
    ipvsc := ipvsControllerController{
        client:            kubeClient,
        reloadRateLimiter: util.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
        ruCfg:             []vip{},
        configMapName:     configMapName,
    }
    clusterNodes := getClusterNodesIP(kubeClient)
    nodeInfo, err := getNodeInfo(clusterNodes)
    if err != nil {
        glog.Fatalf("Error getting local IP from nodes in the cluster: %v", err)
    }
    neighbors := getNodeNeighbors(nodeInfo, clusterNodes)
    execer := exec.New()
    dbus := utildbus.New()
    iptInterface := utiliptables.New(execer, dbus, utiliptables.ProtocolIpv4)
    ipvsc.keepalived = &keepalived{
        iface:      nodeInfo.iface,
        ip:         nodeInfo.ip,
        netmask:    nodeInfo.netmask,
        nodes:      clusterNodes,
        neighbors:  neighbors,
        priority:   getNodePriority(nodeInfo.ip, clusterNodes),
        useUnicast: useUnicast,
        ipt:        iptInterface,
    }
    err = ipvsc.keepalived.loadTemplate()
    if err != nil {
        glog.Fatalf("Error loading keepalived template: %v", err)
    }
    eventHandlers := framework.ResourceEventHandlerFuncs{}
    ipvsc.svcLister.Store, ipvsc.svcController = framework.NewInformer(
        cache.NewListWatchFromClient(
            ipvsc.client, "services", namespace, fields.Everything()),
        &api.Service{}, resyncPeriod, eventHandlers)
    ipvsc.epLister.Store, ipvsc.epController = framework.NewInformer(
        cache.NewListWatchFromClient(
            ipvsc.client, "endpoints", namespace, fields.Everything()),
        &api.Endpoints{}, resyncPeriod, eventHandlers)
    return &ipvsc
}
Example 12: NewConfigFactory
// Initializes the factory.
func NewConfigFactory(client *client.Client, schedulerName string) *ConfigFactory {
    stopEverything := make(chan struct{})
    schedulerCache := schedulercache.New(30*time.Second, stopEverything)
    c := &ConfigFactory{
        Client:             client,
        PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
        ScheduledPodLister: &cache.StoreToPodLister{},
        // Only nodes in the "Ready" condition with status == "True" are schedulable
        NodeLister:       &cache.StoreToNodeLister{},
        PVLister:         &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
        PVCLister:        &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
        ServiceLister:    &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
        ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
        ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
        schedulerCache:   schedulerCache,
        StopEverything:   stopEverything,
        SchedulerName:    schedulerName,
    }
    c.PodLister = schedulerCache
    // On add/delete to the scheduled pods, remove from the assumed pods.
    // We construct this here instead of in CreateFromKeys because
    // ScheduledPodLister is something we provide to plug in functions that
    // they may need to call.
    c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
        c.createAssignedNonTerminatedPodLW(),
        &api.Pod{},
        0,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    c.addPodToCache,
            UpdateFunc: c.updatePodInCache,
            DeleteFunc: c.deletePodFromCache,
        },
    )
    c.NodeLister.Store, c.nodePopulator = framework.NewInformer(
        c.createNodeLW(),
        &api.Node{},
        0,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    c.addNodeToCache,
            UpdateFunc: c.updateNodeInCache,
            DeleteFunc: c.deleteNodeFromCache,
        },
    )
    return c
}
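Note the resync period of 0 in both informers here: handlers fire only on real watch events, with no periodic resync; staleness of assumed pods is instead bounded by the scheduler cache constructed above with a 30-second expiry. The populators would then be started with the shared stop channel, roughly as follows (a sketch, not the verbatim upstream wiring):

go c.scheduledPodPopulator.Run(c.StopEverything) // keeps ScheduledPodLister and the scheduler cache current
go c.nodePopulator.Run(c.StopEverything)         // keeps NodeLister current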
Example 13: NewServiceServingCertUpdateController
// NewServiceServingCertUpdateController creates a new ServiceServingCertUpdateController.
// TODO this should accept a shared informer
func NewServiceServingCertUpdateController(serviceClient kcoreclient.ServicesGetter, secretClient kcoreclient.SecretsGetter, ca *crypto.CA, dnsSuffix string, resyncInterval time.Duration) *ServiceServingCertUpdateController {
    sc := &ServiceServingCertUpdateController{
        secretClient: secretClient,
        queue:        workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        ca:           ca,
        dnsSuffix:    dnsSuffix,
        // TODO base the expiry time on a percentage of the time for the lifespan of the cert
        minTimeLeftForCert: 1 * time.Hour,
    }
    sc.serviceCache, sc.serviceController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
                return serviceClient.Services(kapi.NamespaceAll).List(options)
            },
            WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
                return serviceClient.Services(kapi.NamespaceAll).Watch(options)
            },
        },
        &kapi.Service{},
        resyncInterval,
        framework.ResourceEventHandlerFuncs{},
    )
    sc.serviceHasSynced = sc.serviceController.HasSynced
    sc.secretCache, sc.secretController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
                return sc.secretClient.Secrets(kapi.NamespaceAll).List(options)
            },
            WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
                return sc.secretClient.Secrets(kapi.NamespaceAll).Watch(options)
            },
        },
        &kapi.Secret{},
        resyncInterval,
        framework.ResourceEventHandlerFuncs{
            AddFunc:    sc.addSecret,
            UpdateFunc: sc.updateSecret,
        },
    )
    sc.secretHasSynced = sc.secretController.HasSynced
    sc.syncHandler = sc.syncSecret
    return sc
}
Example 14: NewEndpointController
// NewEndpointController returns a new *EndpointController.
func NewEndpointController(client *client.Client, resyncPeriod controller.ResyncPeriodFunc) *EndpointController {
    e := &EndpointController{
        client: client,
        queue:  workqueue.New(),
    }
    e.serviceStore.Store, e.serviceController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return e.client.Services(api.NamespaceAll).List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                options := api.ListOptions{ResourceVersion: rv}
                return e.client.Services(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), options)
            },
        },
        &api.Service{},
        // TODO: Can we have much longer period here?
        FullServiceResyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: e.enqueueService,
            UpdateFunc: func(old, cur interface{}) {
                e.enqueueService(cur)
            },
            DeleteFunc: e.enqueueService,
        },
    )
    e.podStore.Store, e.podController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func() (runtime.Object, error) {
                return e.client.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
            },
            WatchFunc: func(rv string) (watch.Interface, error) {
                options := api.ListOptions{ResourceVersion: rv}
                return e.client.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), options)
            },
        },
        &api.Pod{},
        resyncPeriod(),
        framework.ResourceEventHandlerFuncs{
            AddFunc:    e.addPod,
            UpdateFunc: e.updatePod,
            DeleteFunc: e.deletePod,
        },
    )
    return e
}
Example 15: newLoadBalancerController
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(c *client.Client, namespace string,
    domain string, nodes []string) *loadBalancerController {
    mgr := &haproxy.HAProxyManager{
        Exec:       exec.New(),
        ConfigFile: "haproxy.cfg",
        DomainName: domain,
    }
    lbc := loadBalancerController{
        client:            c,
        queue:             workqueue.New(),
        reloadRateLimiter: util.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
        haproxy:           mgr,
        domain:            domain,
        clusterNodes:      nodes,
    }
    enqueue := func(obj interface{}) {
        key, err := keyFunc(obj)
        if err != nil {
            glog.Infof("Couldn't get key for object %+v: %v", obj, err)
            return
        }
        lbc.queue.Add(key)
    }
    eventHandlers := framework.ResourceEventHandlerFuncs{
        AddFunc:    enqueue,
        DeleteFunc: enqueue,
        UpdateFunc: func(old, cur interface{}) {
            if !reflect.DeepEqual(old, cur) {
                enqueue(cur)
            }
        },
    }
    lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
        cache.NewListWatchFromClient(
            lbc.client, "services", namespace, fields.Everything()),
        &api.Service{}, resyncPeriod, eventHandlers)
    lbc.epLister.Store, lbc.epController = framework.NewInformer(
        cache.NewListWatchFromClient(
            lbc.client, "endpoints", namespace, fields.Everything()),
        &api.Endpoints{}, resyncPeriod, eventHandlers)
    return &lbc
}