This page collects typical usage examples of the Golang SharedIndexInformer type from k8s.io/kubernetes/pkg/controller/framework. If you are unsure what the SharedIndexInformer type does or how to use it, the selected examples below should help.
13 code examples of the SharedIndexInformer type are shown, ordered by popularity.
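Before the individual examples, here is a minimal, self-contained sketch of how a SharedIndexInformer is typically constructed, given an event handler, and started. It is not taken from the examples below: the NewSharedIndexInformer constructor and the import paths (in particular the internal clientset path) are assumptions based on the informer API that later moved to client-go, and may differ between Kubernetes releases. The examples that follow all receive a podInformer built roughly this way and only attach handlers and read its indexer.

package example

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/controller/framework"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// newPodInformer builds a SharedIndexInformer that lists/watches all pods and
// indexes them by namespace, the same shape as the podInformer arguments that
// the constructors below expect to receive. NewSharedIndexInformer is assumed
// to mirror the signature later moved to client-go.
func newPodInformer(client clientset.Interface, resync time.Duration) framework.SharedIndexInformer {
	return framework.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resync,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
}

// startPodInformer registers an event handler, starts the informer, and blocks
// until its cache has synced; controllers normally gate their sync loops on
// HasSynced before trusting the local indexer.
func startPodInformer(informer framework.SharedIndexInformer, stopCh <-chan struct{}) {
	informer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Printf("pod added: %v\n", obj) },
		UpdateFunc: func(old, cur interface{}) { fmt.Printf("pod updated: %v\n", cur) },
		DeleteFunc: func(obj interface{}) { fmt.Printf("pod deleted: %v\n", obj) },
	})
	go informer.Run(stopCh)
	for !informer.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}
}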
Example 1: NewDeploymentConfigController
// NewDeploymentConfigController creates a new DeploymentConfigController.
func NewDeploymentConfigController(dcInformer, rcInformer, podInformer framework.SharedIndexInformer, oc osclient.Interface, kc kclient.Interface, codec runtime.Codec) *DeploymentConfigController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kc.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})

	c := &DeploymentConfigController{
		dn:       oc,
		rn:       kc,
		queue:    workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		recorder: recorder,
		codec:    codec,
	}

	c.dcStore.Indexer = dcInformer.GetIndexer()
	dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addDeploymentConfig,
		UpdateFunc: c.updateDeploymentConfig,
		DeleteFunc: c.deleteDeploymentConfig,
	})
	c.rcStore.Indexer = rcInformer.GetIndexer()
	rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addReplicationController,
		UpdateFunc: c.updateReplicationController,
		DeleteFunc: c.deleteReplicationController,
	})
	c.podStore.Indexer = podInformer.GetIndexer()

	c.dcStoreSynced = dcInformer.HasSynced
	c.rcStoreSynced = rcInformer.HasSynced
	c.podStoreSynced = podInformer.HasSynced

	return c
}
Example 2: NewJobController
func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	jm := &JobController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
		},
		expectations: controller.NewControllerExpectations(),
		queue:        workqueue.New(),
		recorder:     eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
	}

	jm.jobStore.Store, jm.jobController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return jm.kubeClient.Batch().Jobs(api.NamespaceAll).Watch(options)
			},
		},
		&batch.Job{},
		// TODO: Can we have much longer period here?
		replicationcontroller.FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: jm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				if job := cur.(*batch.Job); !IsJobFinished(job) {
					jm.enqueueController(job)
				}
			},
			DeleteFunc: jm.enqueueController,
		},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    jm.addPod,
		UpdateFunc: jm.updatePod,
		DeleteFunc: jm.deletePod,
	})
	jm.podStore.Indexer = podInformer.GetIndexer()
	jm.podStoreSynced = podInformer.HasSynced

	jm.updateHandler = jm.updateJobStatus
	jm.syncHandler = jm.syncJob
	return jm
}
Example 3: waitToObservePods
// wait for the podInformer to observe the pods. Call this function before
// running the RC manager to prevent the rc manager from creating new pods
// rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer framework.SharedIndexInformer, podNum int) {
	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		return len(objects) == podNum, nil
	}); err != nil {
		t.Fatal(err)
	}
}
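A hypothetical call site for this helper in an integration test could look like the following. The names podInformer, rm (the replication manager), and stopCh are placeholders assumed to come from the surrounding test setup, and the worker count passed to rm.Run is illustrative.

	// Start the shared pod informer, wait until it has observed the three
	// pre-existing pods, and only then start the replication manager so it
	// adopts those pods instead of creating new ones.
	go podInformer.Run(stopCh)
	waitToObservePods(t, podInformer, 3)
	go rm.Run(5, stopCh)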
Example 4: NewPetSetController
// NewPetSetController creates a new petset controller.
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"})
	pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}

	psc := &PetSetController{
		kubeClient:       kubeClient,
		blockingPetStore: newUnHealthyPetTracker(pc),
		newSyncer: func(blockingPet *pcb) *petSyncer {
			return &petSyncer{pc, blockingPet}
		},
		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"),
	}

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		// lookup the petset and enqueue
		AddFunc: psc.addPod,
		// lookup current and old petset if labels changed
		UpdateFunc: psc.updatePod,
		// lookup petset accounting for deletion tombstones
		DeleteFunc: psc.deletePod,
	})
	psc.podStore.Indexer = podInformer.GetIndexer()
	psc.podController = podInformer.GetController()

	psc.psStore.Store, psc.psController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).Watch(options)
			},
		},
		&apps.PetSet{},
		petSetResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: psc.enqueuePetSet,
			UpdateFunc: func(old, cur interface{}) {
				oldPS := old.(*apps.PetSet)
				curPS := cur.(*apps.PetSet)
				if oldPS.Status.Replicas != curPS.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for PetSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
				}
				psc.enqueuePetSet(cur)
			},
			DeleteFunc: psc.enqueuePetSet,
		},
	)
	// TODO: Watch volumes
	psc.podStoreSynced = psc.podController.HasSynced
	psc.syncHandler = psc.Sync
	return psc
}
Example 5: newReplicationManager
// newReplicationManager configures a replication manager with the specified event recorder
func newReplicationManager(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventRecorder,
		},
		burstReplicas:           burstReplicas,
		expectations:            controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:                   workqueue.New(),
		garbageCollectorEnabled: garbageCollectorEnabled,
	}

	rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
			},
		},
		&api.ReplicationController{},
		// TODO: Can we have much longer period here?
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    rm.enqueueController,
			UpdateFunc: rm.updateRC,
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: rm.addPod,
		// This invokes the rc for every pod change, e.g. host assignment. Though this might seem like overkill,
		// the most frequent pod update is status, and the associated rc will only list from local storage, so
		// it should be ok.
		UpdateFunc: rm.updatePod,
		DeleteFunc: rm.deletePod,
	})
	rm.podStore.Indexer = podInformer.GetIndexer()
	rm.podController = podInformer.GetController()

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rm
}
Example 6: NewImageChangeController
// NewImageChangeController returns a new ImageChangeController.
func NewImageChangeController(dcInformer, streamInformer framework.SharedIndexInformer, oc osclient.Interface) *ImageChangeController {
	c := &ImageChangeController{
		dn:    oc,
		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
	}

	c.streamLister.Indexer = streamInformer.GetIndexer()
	streamInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addImageStream,
		UpdateFunc: c.updateImageStream,
	})
	c.streamStoreSynced = streamInformer.HasSynced

	c.dcLister.Indexer = dcInformer.GetIndexer()
	dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addDeploymentConfig,
		UpdateFunc: c.updateDeploymentConfig,
	})
	c.dcStoreSynced = dcInformer.HasSynced

	return c
}
Example 7: NewDeploymentTriggerController
// NewDeploymentTriggerController returns a new DeploymentTriggerController.
func NewDeploymentTriggerController(dcInformer, rcInformer, streamInformer framework.SharedIndexInformer, oc osclient.Interface, codec runtime.Codec) *DeploymentTriggerController {
	c := &DeploymentTriggerController{
		dn:    oc,
		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		codec: codec,
	}

	dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addDeploymentConfig,
		UpdateFunc: c.updateDeploymentConfig,
	})
	streamInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addImageStream,
		UpdateFunc: c.updateImageStream,
	})

	c.dcLister.Indexer = dcInformer.GetIndexer()
	c.rcLister.Indexer = rcInformer.GetIndexer()
	c.dcListerSynced = dcInformer.HasSynced
	c.rcListerSynced = rcInformer.HasSynced

	return c
}
Example 8: NewEndpointController
// NewEndpointController returns a new *EndpointController.
func NewEndpointController(podInformer framework.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
	if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
	}
	e := &EndpointController{
		client: client,
		queue:  workqueue.New(),
	}

	e.serviceStore.Store, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().Services(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().Services(api.NamespaceAll).Watch(options)
			},
		},
		&api.Service{},
		// TODO: Can we have much longer period here?
		FullServiceResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: e.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueService(cur)
			},
			DeleteFunc: e.enqueueService,
		},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    e.addPod,
		UpdateFunc: e.updatePod,
		DeleteFunc: e.deletePod,
	})
	e.podStore.Indexer = podInformer.GetIndexer()
	e.podController = podInformer.GetController()
	e.podStoreSynced = podInformer.HasSynced

	return e
}
Example 9: NewDaemonSetsController
func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	dsc := &DaemonSetsController{
		kubeClient:    kubeClient,
		eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemonset-controller"}),
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}),
		},
		burstReplicas: BurstReplicas,
		expectations:  controller.NewControllerExpectations(),
		queue:         workqueue.New(),
	}

	// Manage addition/update of daemon sets.
	dsc.dsStore.Store, dsc.dsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.DaemonSet{},
		// TODO: Can we have much longer period here?
		FullDaemonSetResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				ds := obj.(*extensions.DaemonSet)
				glog.V(4).Infof("Adding daemon set %s", ds.Name)
				dsc.enqueueDaemonSet(ds)
			},
			UpdateFunc: func(old, cur interface{}) {
				oldDS := old.(*extensions.DaemonSet)
				curDS := cur.(*extensions.DaemonSet)
				// We should invalidate the whole lookup cache if a DS's selector has been updated.
				//
				// Imagine that you have two DSs:
				// * old DS1
				// * new DS2
				// You also have a pod that is attached to DS2 (because it doesn't match the DS1 selector).
				// Now imagine that you are changing the DS1 selector so that it now matches that pod;
				// in such a case we must invalidate the whole cache so that the pod can be adopted by DS1.
				//
				// This makes the lookup cache less helpful, but selector updates do not happen often,
				// so it's not a big problem.
				if !reflect.DeepEqual(oldDS.Spec.Selector, curDS.Spec.Selector) {
					dsc.lookupCache.InvalidateAll()
				}
				glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
				dsc.enqueueDaemonSet(curDS)
			},
			DeleteFunc: dsc.deleteDaemonset,
		},
	)

	// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
	// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    dsc.addPod,
		UpdateFunc: dsc.updatePod,
		DeleteFunc: dsc.deletePod,
	})
	dsc.podStore.Indexer = podInformer.GetIndexer()
	dsc.podController = podInformer.GetController()
	dsc.podStoreSynced = podInformer.HasSynced

	// Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and possibly when labels on nodes change.
	dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dsc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dsc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&api.Node{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc:    dsc.addNode,
			UpdateFunc: dsc.updateNode,
		},
	)
	dsc.syncHandler = dsc.syncDaemonSet
	dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return dsc
}
Example 10: NewNodeController
// NewNodeController returns a new node controller to sync instances from cloudprovider.
// This method returns an error if it is unable to initialize the CIDR bitmap with
// podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes
// currently, this should be handled as a fatal error.
func NewNodeController(
	podInformer framework.SharedIndexInformer,
	cloud cloudprovider.Interface,
	kubeClient clientset.Interface,
	podEvictionTimeout time.Duration,
	evictionLimiterQPS float32,
	secondaryEvictionLimiterQPS float32,
	largeClusterThreshold int32,
	unhealthyZoneThreshold float32,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	serviceCIDR *net.IPNet,
	nodeCIDRMaskSize int,
	allocateNodeCIDRs bool) (*NodeController, error) {

	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.V(0).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	} else {
		glog.V(0).Infof("No api server defined - no events will be sent to API server.")
	}

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	if allocateNodeCIDRs {
		if clusterCIDR == nil {
			glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
		}
		mask := clusterCIDR.Mask
		if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
			glog.Fatal("NodeController: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
		}
	}

	nc := &NodeController{
		cloud:                       cloud,
		knownNodeSet:                make(map[string]*api.Node),
		kubeClient:                  kubeClient,
		recorder:                    recorder,
		podEvictionTimeout:          podEvictionTimeout,
		maximumGracePeriod:          5 * time.Minute,
		zonePodEvictor:              make(map[string]*RateLimitedTimedQueue),
		zoneTerminationEvictor:      make(map[string]*RateLimitedTimedQueue),
		nodeStatusMap:               make(map[string]nodeStatusData),
		nodeMonitorGracePeriod:      nodeMonitorGracePeriod,
		nodeMonitorPeriod:           nodeMonitorPeriod,
		nodeStartupGracePeriod:      nodeStartupGracePeriod,
		lookupIP:                    net.LookupIP,
		now:                         unversioned.Now,
		clusterCIDR:                 clusterCIDR,
		serviceCIDR:                 serviceCIDR,
		allocateNodeCIDRs:           allocateNodeCIDRs,
		forcefullyDeletePod:         func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) },
		nodeExistsInCloudProvider:   func(nodeName string) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
		evictionLimiterQPS:          evictionLimiterQPS,
		secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS,
		largeClusterThreshold:       largeClusterThreshold,
		unhealthyZoneThreshold:      unhealthyZoneThreshold,
		zoneStates:                  make(map[string]zoneState),
		evictions10Minutes:          newEvictionData(10 * time.Minute),
		evictions1Hour:              newEvictionData(time.Hour),
	}

	nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc
	nc.enterFullDisruptionFunc = nc.HealthyQPSFunc
	nc.computeZoneStateFunc = nc.ComputeZoneState

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    nc.maybeDeleteTerminatingPod,
		UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
	})
	nc.podStore.Indexer = podInformer.GetIndexer()
	nc.podController = podInformer.GetController()

	nodeEventHandlerFuncs := framework.ResourceEventHandlerFuncs{}
	if nc.allocateNodeCIDRs {
		nodeEventHandlerFuncs = framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				node := obj.(*api.Node)
				err := nc.cidrAllocator.AllocateOrOccupyCIDR(node)
				if err != nil {
					glog.Errorf("Error allocating CIDR: %v", err)
				}
			},
			UpdateFunc: func(_, obj interface{}) {
				node := obj.(*api.Node)
				// If the PodCIDR is not empty we either:
				// - already processed a Node that already had a CIDR after NC restarted
				//   (cidr is marked as used),
				// - already processed a Node successfully and allocated a CIDR for it
				//   (cidr is marked as used),
//......... (the rest of this example is omitted) .........
Example 11: newReplicationManagerInternal
// newReplicationManagerInternal configures a replication manager with the specified event recorder
func newReplicationManagerInternal(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventRecorder,
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.New(),
	}

	rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
			},
		},
		&api.ReplicationController{},
		// TODO: Can we have much longer period here?
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				oldRC := old.(*api.ReplicationController)
				curRC := cur.(*api.ReplicationController)
				// We should invalidate the whole lookup cache if an RC's selector has been updated.
				//
				// Imagine that you have two RCs:
				// * old RC1
				// * new RC2
				// You also have a pod that is attached to RC2 (because it doesn't match the RC1 selector).
				// Now imagine that you are changing the RC1 selector so that it now matches that pod;
				// in such a case, we must invalidate the whole cache so that the pod can be adopted by RC1.
				//
				// This makes the lookup cache less helpful, but selector updates do not happen often,
				// so it's not a big problem.
				if !reflect.DeepEqual(oldRC.Spec.Selector, curRC.Spec.Selector) {
					rm.lookupCache.InvalidateAll()
				}
				// You might imagine that we only really need to enqueue the
				// controller when Spec changes, but it is safer to sync any
				// time this function is triggered. That way a full informer
				// resync can requeue any controllers that don't yet have pods
				// but whose last attempts at creating a pod have failed (since
				// we don't block on creation of pods) instead of those
				// controllers stalling indefinitely. Enqueueing every time
				// does result in some spurious syncs (like when Status.Replicas
				// is updated and the watch notification from it retriggers
				// this function), but in general extra resyncs shouldn't be
				// that bad as rcs that haven't met expectations yet won't
				// sync, and all the listing is done using local stores.
				if oldRC.Status.Replicas != curRC.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
				}
				rm.enqueueController(cur)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: rm.addPod,
		// This invokes the rc for every pod change, e.g. host assignment. Though this might seem like overkill,
		// the most frequent pod update is status, and the associated rc will only list from local storage, so
		// it should be ok.
		UpdateFunc: rm.updatePod,
		DeleteFunc: rm.deletePod,
	})
	rm.podStore.Indexer = podInformer.GetIndexer()
	rm.podController = podInformer.GetController()

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rm
}
Example 12: NewPersistentVolumeController
// NewPersistentVolumeController creates a new PersistentVolumeController
func NewPersistentVolumeController(
	kubeClient clientset.Interface,
	syncPeriod time.Duration,
	alphaProvisioner vol.ProvisionableVolumePlugin,
	volumePlugins []vol.VolumePlugin,
	cloud cloudprovider.Interface,
	clusterName string,
	pvInformer framework.SharedIndexInformer,
	claimSource, classSource cache.ListerWatcher,
	eventRecorder record.EventRecorder,
	enableDynamicProvisioning bool,
) *PersistentVolumeController {

	if eventRecorder == nil {
		broadcaster := record.NewBroadcaster()
		broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
		eventRecorder = broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"})
	}

	controller := &PersistentVolumeController{
		volumes:                       newPersistentVolumeOrderedIndex(),
		claims:                        cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc),
		kubeClient:                    kubeClient,
		eventRecorder:                 eventRecorder,
		runningOperations:             goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
		cloud:                         cloud,
		enableDynamicProvisioning:     enableDynamicProvisioning,
		clusterName:                   clusterName,
		createProvisionedPVRetryCount: createProvisionedPVRetryCount,
		createProvisionedPVInterval:   createProvisionedPVInterval,
		alphaProvisioner:              alphaProvisioner,
	}

	controller.volumePluginMgr.InitPlugins(volumePlugins, controller)
	if controller.alphaProvisioner != nil {
		if err := controller.alphaProvisioner.Init(controller); err != nil {
			glog.Errorf("PersistentVolumeController: error initializing alpha provisioner plugin: %v", err)
		}
	}

	controller.pvInformer = pvInformer
	controller.isInformerInternal = false

	if claimSource == nil {
		claimSource = &cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
			},
		}
	}
	controller.claimSource = claimSource

	if classSource == nil {
		classSource = &cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Storage().StorageClasses().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Storage().StorageClasses().Watch(options)
			},
		}
	}
	controller.classSource = classSource

	controller.volumeController = pvInformer.GetController()
	_, controller.claimController = framework.NewInformer(
		claimSource,
		&api.PersistentVolumeClaim{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    controller.addClaim,
			UpdateFunc: controller.updateClaim,
			DeleteFunc: controller.deleteClaim,
		},
	)

	// This is just a cache of StorageClass instances, no special actions are
	// needed when a class is created/deleted/updated.
	controller.classes = cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
	controller.classReflector = cache.NewReflector(
		classSource,
		&storage.StorageClass{},
		controller.classes,
		syncPeriod,
	)

	return controller
}
Example 13: NewDisruptionController
func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
	dc := &DisruptionController{
		kubeClient:    kubeClient,
		podController: podInformer.GetController(),
		queue:         workqueue.New(),
		broadcaster:   record.NewBroadcaster(),
	}
	dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})

	dc.getUpdater = func() updater { return dc.writePdbStatus }

	dc.podLister.Indexer = podInformer.GetIndexer()
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    dc.addPod,
		UpdateFunc: dc.updatePod,
		DeleteFunc: dc.deletePod,
	})

	dc.pdbStore, dc.pdbController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).Watch(options)
			},
		},
		&policy.PodDisruptionBudget{},
		30*time.Second,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    dc.addDb,
			UpdateFunc: dc.updateDb,
			DeleteFunc: dc.removeDb,
		},
	)
	dc.pdbLister.Store = dc.pdbStore

	dc.rcIndexer, dc.rcController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dc.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dc.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(options)
			},
		},
		&api.ReplicationController{},
		30*time.Second,
		framework.ResourceEventHandlerFuncs{},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
	dc.rcLister.Indexer = dc.rcIndexer

	dc.rsStore, dc.rsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.ReplicaSet{},
		30*time.Second,
		framework.ResourceEventHandlerFuncs{},
	)
	dc.rsLister.Store = dc.rsStore

	dc.dIndexer, dc.dController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.Deployment{},
		30*time.Second,
		framework.ResourceEventHandlerFuncs{},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
	dc.dLister.Indexer = dc.dIndexer

	return dc
}