

Golang SharedInformer.AddEventHandler Method Code Examples

This article collects typical usage examples of the Golang SharedInformer.AddEventHandler method from k8s.io/kubernetes/pkg/controller/framework. If you have been wondering what SharedInformer.AddEventHandler does, how to call it, or where to find examples of its use, the hand-picked code samples below may help. You can also explore further usage examples of the containing type, k8s.io/kubernetes/pkg/controller/framework.SharedInformer.


Six code examples of the SharedInformer.AddEventHandler method are shown below, ordered by popularity by default.
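
Before looking at the full controllers, here is a minimal sketch of the method itself. The registerPodHandlers helper, the handler bodies, and the package/import block are illustrative assumptions; only framework.SharedInformer, framework.ResourceEventHandlerFuncs, and the Add/Update/Delete callback shape come from the examples below.

package example

import (
	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/controller/framework"
)

// registerPodHandlers is a hypothetical helper: given any shared pod informer,
// it registers add/update/delete callbacks via AddEventHandler, exactly as the
// controller constructors below do.
func registerPodHandlers(podInformer framework.SharedInformer) {
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			pod := obj.(*api.Pod)
			glog.V(4).Infof("pod added: %s/%s", pod.Namespace, pod.Name)
		},
		UpdateFunc: func(old, cur interface{}) {
			pod := cur.(*api.Pod)
			glog.V(4).Infof("pod updated: %s/%s", pod.Namespace, pod.Name)
		},
		DeleteFunc: func(obj interface{}) {
			// Deletes may deliver a cache.DeletedFinalStateUnknown tombstone
			// rather than the object itself, so real controllers type-check here.
			glog.V(4).Infof("pod deleted: %v", obj)
		},
	})
}

Because the informer is shared, every handler registered this way sees the same event stream; the six constructors below all follow this pattern.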

Example 1: NewAttachDetachController

// NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController(
	kubeClient internalclientset.Interface,
	podInformer framework.SharedInformer,
	nodeInformer framework.SharedInformer,
	pvcInformer framework.SharedInformer,
	pvInformer framework.SharedInformer,
	cloud cloudprovider.Interface,
	plugins []volume.VolumePlugin) (AttachDetachController, error) {
	// TODO: The default resyncPeriod for shared informers is 12 hours, this is
	// unacceptable for the attach/detach controller. For example, if a pod is
	// skipped because the node it is scheduled to didn't set its annotation in
	// time, we don't want to have to wait 12hrs before processing the pod
	// again.
	// Luckily https://github.com/kubernetes/kubernetes/issues/23394 is being
	// worked on and will split resync in to resync and relist. Once that
	// happens the resync period can be set to something much faster (30
	// seconds).
	// If that issue is not resolved in time, then this controller will have to
	// consider some unappealing alternate options: use a non-shared informer
	// and set a faster resync period even if it causes relist, or requeue
	// dropped pods so they are continuously processed until it is accepted or
	// deleted (probably can't do this with sharedInformer), etc.
	adc := &attachDetachController{
		kubeClient:  kubeClient,
		pvcInformer: pvcInformer,
		pvInformer:  pvInformer,
		cloud:       cloud,
	}

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    adc.podAdd,
		UpdateFunc: adc.podUpdate,
		DeleteFunc: adc.podDelete,
	})

	nodeInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    adc.nodeAdd,
		UpdateFunc: adc.nodeUpdate,
		DeleteFunc: adc.nodeDelete,
	})

	if err := adc.volumePluginMgr.InitPlugins(plugins, adc); err != nil {
		return nil, fmt.Errorf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err)
	}

	adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
	adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
	adc.attacherDetacher =
		operationexecutor.NewOperationExecutor(&adc.volumePluginMgr)
	adc.reconciler = reconciler.NewReconciler(
		reconcilerLoopPeriod,
		reconcilerMaxWaitForUnmountDuration,
		adc.desiredStateOfWorld,
		adc.actualStateOfWorld,
		adc.attacherDetacher)

	return adc, nil
}
Author: CsatariGergely, Project: kubernetes, Lines: 59, Source file: attach_detach_controller.go
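
Unlike the variant in example 4 below, this constructor returns an error because it initializes the volume plugins. A hedged usage sketch (all arguments are assumed to be built by the caller; the helper name and the fatal-exit handling are illustrative, not the project's actual wiring):

// newADC is a hypothetical wrapper showing the constructor call and its error check.
func newADC(
	kubeClient internalclientset.Interface,
	podInformer, nodeInformer, pvcInformer, pvInformer framework.SharedInformer,
	cloud cloudprovider.Interface,
	plugins []volume.VolumePlugin) AttachDetachController {
	adc, err := NewAttachDetachController(
		kubeClient, podInformer, nodeInformer, pvcInformer, pvInformer, cloud, plugins)
	if err != nil {
		glog.Fatalf("failed to construct attach/detach controller: %v", err)
	}
	return adc
}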

Example 2: NewJobController

func NewJobController(podInformer framework.SharedInformer, kubeClient clientset.Interface) *JobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when every clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	jm := &JobController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
		},
		expectations: controller.NewControllerExpectations(),
		queue:        workqueue.New(),
		recorder:     eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
	}

	jm.jobStore.Store, jm.jobController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return jm.kubeClient.Batch().Jobs(api.NamespaceAll).Watch(options)
			},
		},
		&batch.Job{},
		// TODO: Can we have much longer period here?
		replicationcontroller.FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: jm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				if job := cur.(*batch.Job); !isJobFinished(job) {
					jm.enqueueController(job)
				}
			},
			DeleteFunc: jm.enqueueController,
		},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    jm.addPod,
		UpdateFunc: jm.updatePod,
		DeleteFunc: jm.deletePod,
	})
	jm.podStore.Store = podInformer.GetStore()
	jm.podStoreSynced = podInformer.HasSynced

	jm.updateHandler = jm.updateJobStatus
	jm.syncHandler = jm.syncJob
	return jm
}
Author: freedev, Project: kubernetes, Lines: 56, Source file: controller.go
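
Note the last lines of the constructor: jm.podStoreSynced is bound to podInformer.HasSynced, which reports whether the shared informer has completed its initial List. A hedged sketch of how a caller might wait on it before doing work (the polling loop is illustrative; the real controller wires HasSynced into its own sync logic instead):

// Wait until the shared pod informer has populated its local store.
for !podInformer.HasSynced() {
	time.Sleep(100 * time.Millisecond)
}
// From here on, handlers registered via AddEventHandler reflect a fully
// listed pod cache rather than a partial one.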

Example 3: NewEndpointController

// NewEndpointController returns a new *EndpointController.
func NewEndpointController(podInformer framework.SharedInformer, client *clientset.Clientset) *EndpointController {
	if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
	}
	e := &EndpointController{
		client: client,
		queue:  workqueue.New(),
	}

	e.serviceStore.Store, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().Services(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().Services(api.NamespaceAll).Watch(options)
			},
		},
		&api.Service{},
		// TODO: Can we have much longer period here?
		FullServiceResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: e.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueService(cur)
			},
			DeleteFunc: e.enqueueService,
		},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    e.addPod,
		UpdateFunc: e.updatePod,
		DeleteFunc: e.deletePod,
	})
	e.podStore.Store = podInformer.GetStore()
	e.podController = podInformer.GetController()
	e.podStoreSynced = podInformer.HasSynced

	return e
}
Author: freedev, Project: kubernetes, Lines: 42, Source file: endpoints_controller.go

Example 4: NewAttachDetachController

// NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController(
	kubeClient internalclientset.Interface,
	podInformer framework.SharedInformer,
	nodeInformer framework.SharedInformer,
	resyncPeriod time.Duration) AttachDetachController {
	selfCreatedPodInformer := false
	selfCreatedNodeInformer := false
	if podInformer == nil {
		podInformer = informers.CreateSharedPodInformer(kubeClient, resyncPeriod)
		selfCreatedPodInformer = true
	}
	if nodeInformer == nil {
		nodeInformer = informers.CreateSharedNodeIndexInformer(kubeClient, resyncPeriod)
		selfCreatedNodeInformer = true
	}

	adc := &attachDetachController{
		internalPodInformer:     podInformer,
		selfCreatedPodInformer:  selfCreatedPodInformer,
		internalNodeInformer:    nodeInformer,
		selfCreatedNodeInformer: selfCreatedNodeInformer,
	}

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    adc.podAdd,
		UpdateFunc: adc.podUpdate,
		DeleteFunc: adc.podDelete,
	})

	nodeInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    adc.nodeAdd,
		UpdateFunc: adc.nodeUpdate,
		DeleteFunc: adc.nodeDelete,
	})

	return adc
}
Author: odacremolbap, Project: kubernetes, Lines: 38, Source file: attach_detach_controller.go
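
Because this variant builds its own informers when nil is passed, the minimal hedged usage is simply the call below (kubeClient is assumed to exist already; the 12-hour resync mirrors the default mentioned in example 1):

// Passing nil informers makes the controller create its own shared informers
// via informers.CreateSharedPodInformer / CreateSharedNodeIndexInformer.
adc := NewAttachDetachController(kubeClient, nil, nil, 12*time.Hour)
_ = adc // the caller would subsequently run the controller (not shown on this page)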

Example 5: NewReplicationManager

func NewReplicationManager(podInformer framework.SharedInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.New(),
	}

	rm.rcStore.Store, rm.rcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
			},
		},
		&api.ReplicationController{},
		// TODO: Can we have much longer period here?
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				oldRC := old.(*api.ReplicationController)
				curRC := cur.(*api.ReplicationController)

				// We should invalidate the whole lookup cache if a RC's selector has been updated.
				//
				// Imagine that you have two RCs:
				// * old RC1
				// * new RC2
				// You also have a pod that is attached to RC2 (because it doesn't match RC1 selector).
				// Now imagine that you are changing RC1 selector so that it is now matching that pod,
				// in such case, we must invalidate the whole cache so that pod could be adopted by RC1
				//
				// This makes the lookup cache less helpful, but selector update does not happen often,
				// so it's not a big problem
				if !reflect.DeepEqual(oldRC.Spec.Selector, curRC.Spec.Selector) {
					rm.lookupCache.InvalidateAll()
				}

				// You might imagine that we only really need to enqueue the
				// controller when Spec changes, but it is safer to sync any
				// time this function is triggered. That way a full informer
				// resync can requeue any controllers that don't yet have pods
				// but whose last attempts at creating a pod have failed (since
				// we don't block on creation of pods) instead of those
				// controllers stalling indefinitely. Enqueueing every time
				// does result in some spurious syncs (like when Status.Replica
				// is updated and the watch notification from it retriggers
				// this function), but in general extra resyncs shouldn't be
				// that bad as rcs that haven't met expectations yet won't
				// sync, and all the listing is done using local stores.
				if oldRC.Status.Replicas != curRC.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
				}
				rm.enqueueController(cur)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: rm.addPod,
		// This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
		// the most frequent pod update is status, and the associated rc will only list from local storage, so
		// it should be ok.
		UpdateFunc: rm.updatePod,
		DeleteFunc: rm.deletePod,
	})
	rm.podStore.Store = podInformer.GetStore()
	rm.podController = podInformer.GetController()

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rm
}
Author: Clarifai, Project: kubernetes, Lines: 94, Source file: replication_controller.go

Example 6: NewDaemonSetsController

func NewDaemonSetsController(podInformer framework.SharedInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when every clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	dsc := &DaemonSetsController{
		kubeClient:    kubeClient,
		eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemonset-controller"}),
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}),
		},
		burstReplicas: BurstReplicas,
		expectations:  controller.NewControllerExpectations(),
		queue:         workqueue.New(),
	}
	// Manage addition/update of daemon sets.
	dsc.dsStore.Store, dsc.dsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.DaemonSet{},
		// TODO: Can we have much longer period here?
		FullDaemonSetResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				ds := obj.(*extensions.DaemonSet)
				glog.V(4).Infof("Adding daemon set %s", ds.Name)
				dsc.enqueueDaemonSet(ds)
			},
			UpdateFunc: func(old, cur interface{}) {
				oldDS := old.(*extensions.DaemonSet)
				curDS := cur.(*extensions.DaemonSet)
				// We should invalidate the whole lookup cache if a DS's selector has been updated.
				//
				// Imagine that you have two RSs:
				// * old DS1
				// * new DS2
				// You also have a pod that is attached to DS2 (because it doesn't match DS1 selector).
				// Now imagine that you are changing DS1 selector so that it is now matching that pod,
				// in such case we must invalidate the whole cache so that pod could be adopted by DS1
				//
				// This makes the lookup cache less helpful, but selector update does not happen often,
				// so it's not a big problem
				if !reflect.DeepEqual(oldDS.Spec.Selector, curDS.Spec.Selector) {
					dsc.lookupCache.InvalidateAll()
				}

				glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
				dsc.enqueueDaemonSet(curDS)
			},
			DeleteFunc: func(obj interface{}) {
				ds := obj.(*extensions.DaemonSet)
				glog.V(4).Infof("Deleting daemon set %s", ds.Name)
				dsc.enqueueDaemonSet(ds)
			},
		},
	)

	// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
	// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    dsc.addPod,
		UpdateFunc: dsc.updatePod,
		DeleteFunc: dsc.deletePod,
	})
	dsc.podStore.Store = podInformer.GetStore()
	dsc.podController = podInformer.GetController()
	dsc.podStoreSynced = podInformer.HasSynced

	// Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and possibly when labels on nodes change,
	dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dsc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dsc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&api.Node{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc:    dsc.addNode,
			UpdateFunc: dsc.updateNode,
		},
	)
	dsc.syncHandler = dsc.syncDaemonSet
	dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return dsc
}
Author: freedev, Project: kubernetes, Lines: 100, Source file: controller.go
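
The common thread across all six examples is that one shared pod informer is handed to several controllers, each of which calls AddEventHandler on it, and the informer is then started once. A hedged end-to-end sketch (package qualifiers are omitted as on this page, the informers helper comes from example 4, and the Run(workers, stopCh) signatures and stop-channel wiring are assumptions about this vintage of the code, not shown above):

// One shared pod informer feeding two of the controllers above.
podInformer := informers.CreateSharedPodInformer(kubeClient, 12*time.Hour)

jm := NewJobController(podInformer, kubeClient) // example 2
dsc := NewDaemonSetsController(podInformer, kubeClient,
	func() time.Duration { return 12 * time.Hour }, // resyncPeriod func
	4096)                                           // lookupCacheSize

// Both controllers have registered their handlers; running the informer once
// delivers every pod event to all of them.
stopCh := wait.NeverStop
go podInformer.Run(stopCh)
go jm.Run(5, stopCh) // assumed worker/stop-channel signatures
go dsc.Run(5, stopCh)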


Note: The k8s.io/kubernetes/pkg/controller/framework.SharedInformer.AddEventHandler method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and the corresponding project licenses govern redistribution and use. Do not reproduce without permission.