This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/controller.NoResyncPeriodFunc: what NoResyncPeriodFunc does, and how to call it. The curated code samples below should answer those questions.
Fifteen code examples of NoResyncPeriodFunc are shown below, sorted by popularity by default.
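Before the examples, it helps to know what the helper does. In the Kubernetes source tree these snippets come from, NoResyncPeriodFunc is defined in the pkg/controller package and simply returns a zero time.Duration, which tells informer factories and reflectors not to run periodic resyncs. The sketch below paraphrases that definition; it is not copied from any particular release, so check the vendored controller package in your own tree for the authoritative version.

package controller

import "time"

// NoResyncPeriodFunc returns 0 as the resync period, meaning "never resync".
// Tests and controllers pass it wherever an informer or informer factory
// expects a resync period, as every example below does.
func NoResyncPeriodFunc() time.Duration {
	return time.Duration(0)
}

A zero resync period is especially convenient in unit tests (Examples 1 and 7 through 11): the fake clientset then records only the actions triggered by the code under test, with no timer-driven re-lists mixed in.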
Example 1: TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector
// issue: https://github.com/kubernetes/kubernetes/issues/23218
func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
controller.eventRecorder = &record.FakeRecorder{}
controller.dListerSynced = alwaysReady
controller.rsListerSynced = alwaysReady
controller.podListerSynced = alwaysReady
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
empty := metav1.LabelSelector{}
d.Spec.Selector = &empty
controller.dLister.Indexer.Add(d)
// We expect the deployment controller to not take action here since its configuration
// is invalid, even though no replicasets exist that match its selector.
controller.syncDeployment(fmt.Sprintf("%s/%s", d.ObjectMeta.Namespace, d.ObjectMeta.Name))
filteredActions := filterInformerActions(fake.Actions())
if len(filteredActions) == 0 {
return
}
for _, action := range filteredActions {
t.Logf("unexpected action: %#v", action)
}
t.Errorf("expected deployment controller to not take action")
}
Example 2: NewclusterController
// NewclusterController returns a new cluster controller
func NewclusterController(federationClient federationclientset.Interface, clusterMonitorPeriod time.Duration) *ClusterController {
cc := &ClusterController{
knownClusterSet: make(sets.String),
federationClient: federationClient,
clusterMonitorPeriod: clusterMonitorPeriod,
clusterClusterStatusMap: make(map[string]federationv1beta1.ClusterStatus),
clusterKubeClientMap: make(map[string]ClusterClient),
}
cc.clusterStore.Store, cc.clusterController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return cc.federationClient.Federation().Clusters().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return cc.federationClient.Federation().Clusters().Watch(options)
},
},
&federationv1beta1.Cluster{},
controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{
DeleteFunc: cc.delFromClusterSet,
AddFunc: cc.addToClusterSet,
},
)
return cc
}
Example 3: NewPodGC
func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInformer, terminatedPodThreshold int) *PodGCController {
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().RESTClient().GetRateLimiter())
}
gcc := &PodGCController{
kubeClient: kubeClient,
terminatedPodThreshold: terminatedPodThreshold,
deletePod: func(namespace, name string) error {
glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name)
return kubeClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0))
},
}
gcc.podStore.Indexer = podInformer.GetIndexer()
gcc.podController = podInformer.GetController()
gcc.nodeStore.Store, gcc.nodeController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return gcc.kubeClient.Core().Nodes().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return gcc.kubeClient.Core().Nodes().Watch(options)
},
},
&v1.Node{},
controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{},
)
return gcc
}
Example 4: NewNodeControllerFromClient
func NewNodeControllerFromClient(
cloud cloudprovider.Interface,
kubeClient clientset.Interface,
podEvictionTimeout time.Duration,
evictionLimiterQPS float32,
secondaryEvictionLimiterQPS float32,
largeClusterThreshold int32,
unhealthyZoneThreshold float32,
nodeMonitorGracePeriod time.Duration,
nodeStartupGracePeriod time.Duration,
nodeMonitorPeriod time.Duration,
clusterCIDR *net.IPNet,
serviceCIDR *net.IPNet,
nodeCIDRMaskSize int,
allocateNodeCIDRs bool) (*NodeController, error) {
podInformer := informers.NewPodInformer(kubeClient, controller.NoResyncPeriodFunc())
nc, err := NewNodeController(podInformer, cloud, kubeClient, podEvictionTimeout, evictionLimiterQPS, secondaryEvictionLimiterQPS,
largeClusterThreshold, unhealthyZoneThreshold, nodeMonitorGracePeriod, nodeStartupGracePeriod, nodeMonitorPeriod, clusterCIDR,
serviceCIDR, nodeCIDRMaskSize, allocateNodeCIDRs)
if err != nil {
return nil, err
}
nc.internalPodInformer = podInformer
return nc, nil
}
Example 5: New
func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.Core().RESTClient().GetRateLimiter())
}
rc := &RouteController{
routes: routes,
kubeClient: kubeClient,
clusterName: clusterName,
clusterCIDR: clusterCIDR,
}
rc.nodeStore.Store, rc.nodeController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return rc.kubeClient.Core().Nodes().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return rc.kubeClient.Core().Nodes().Watch(options)
},
},
&v1.Node{},
controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{},
)
return rc
}
Example 6: NewFromClient
func NewFromClient(
kubeClient clientset.Interface,
terminatedPodThreshold int,
) *PodGCController {
podInformer := informers.NewPodInformer(kubeClient, controller.NoResyncPeriodFunc())
controller := NewPodGC(kubeClient, podInformer, terminatedPodThreshold)
controller.internalPodInformer = podInformer
return controller
}
Example 7: TestScaleDownOldReplicaSets
func TestScaleDownOldReplicaSets(t *testing.T) {
tests := []struct {
oldRSSizes []int
d *extensions.Deployment
}{
{
oldRSSizes: []int{3},
d: newDeployment("foo", 3, nil, nil, nil, map[string]string{"foo": "bar"}),
},
}
for i := range tests {
t.Logf("running scenario %d", i)
test := tests[i]
var oldRSs []*extensions.ReplicaSet
var expected []runtime.Object
for n, size := range test.oldRSSizes {
rs := newReplicaSet(test.d, fmt.Sprintf("%s-%d", test.d.Name, n), size)
oldRSs = append(oldRSs, rs)
objCopy, err := api.Scheme.Copy(rs)
if err != nil {
t.Errorf("unexpected error while deep-copying: %v", err)
continue
}
rsCopy := objCopy.(*extensions.ReplicaSet)
zero := int32(0)
rsCopy.Spec.Replicas = &zero
expected = append(expected, rsCopy)
if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*extensions.ReplicaSet).Spec.Replicas) {
t.Errorf("broken test - original and expected RS have the same size")
}
}
kc := fake.NewSimpleClientset(expected...)
informers := informers.NewSharedInformerFactory(kc, nil, controller.NoResyncPeriodFunc())
c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), kc)
c.scaleDownOldReplicaSetsForRecreate(oldRSs, test.d)
for j := range oldRSs {
rs := oldRSs[j]
if *rs.Spec.Replicas != 0 {
t.Errorf("rs %q has non-zero replicas", rs.Name)
}
}
}
}
Example 8: newTestController
func newTestController(initialObjects ...runtime.Object) (*DaemonSetsController, *controller.FakePodControl, *fake.Clientset) {
clientset := fake.NewSimpleClientset(initialObjects...)
informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0)
manager.podStoreSynced = alwaysReady
manager.nodeStoreSynced = alwaysReady
manager.dsStoreSynced = alwaysReady
podControl := &controller.FakePodControl{}
manager.podControl = podControl
return manager, podControl, clientset
}
Example 9: newTestController
func newTestController() (*DaemonSetsController, *controller.FakePodControl) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0)
informerFactory.Start(wait.NeverStop)
manager.podStoreSynced = alwaysReady
manager.nodeStoreSynced = alwaysReady
podControl := &controller.FakePodControl{}
manager.podControl = podControl
return manager, podControl
}
Example 10: run
func (f *fixture) run(deploymentName string) {
f.client = fake.NewSimpleClientset(f.objects...)
informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
c.eventRecorder = &record.FakeRecorder{}
c.dListerSynced = alwaysReady
c.rsListerSynced = alwaysReady
c.podListerSynced = alwaysReady
for _, d := range f.dLister {
c.dLister.Indexer.Add(d)
}
for _, rs := range f.rsLister {
c.rsLister.Indexer.Add(rs)
}
for _, pod := range f.podLister {
c.podLister.Indexer.Add(pod)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
err := c.syncDeployment(deploymentName)
if err != nil {
f.t.Errorf("error syncing deployment: %v", err)
}
actions := filterInformerActions(f.client.Actions())
for i, action := range actions {
if len(f.actions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
break
}
expectedAction := f.actions[i]
if !expectedAction.Matches(action.GetVerb(), action.GetResource().Resource) {
f.t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expectedAction, action)
continue
}
}
if len(f.actions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
}
}
Example 11: newController
func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory) {
f.client = fake.NewSimpleClientset(f.objects...)
informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
c.eventRecorder = &record.FakeRecorder{}
c.dListerSynced = alwaysReady
c.rsListerSynced = alwaysReady
c.podListerSynced = alwaysReady
for _, d := range f.dLister {
c.dLister.Indexer.Add(d)
}
for _, rs := range f.rsLister {
c.rsLister.Indexer.Add(rs)
}
for _, pod := range f.podLister {
c.podLister.Indexer.Add(pod)
}
return c, informers
}
Example 12: startComponents
func startComponents(firstManifestURL, secondManifestURL string) (string, string) {
// Setup
handler := delegateHandler{}
apiServer := httptest.NewServer(&handler)
cfg := etcd.Config{
Endpoints: []string{"http://127.0.0.1:4001"},
}
etcdClient, err := etcd.New(cfg)
if err != nil {
glog.Fatalf("Error creating etcd client: %v", err)
}
glog.Infof("Creating etcd client pointing to %v", cfg.Endpoints)
keysAPI := etcd.NewKeysAPI(etcdClient)
sleep := 4 * time.Second
ok := false
for i := 0; i < 3; i++ {
keys, err := keysAPI.Get(context.TODO(), "/", nil)
if err != nil {
glog.Warningf("Unable to list root etcd keys: %v", err)
if i < 2 {
time.Sleep(sleep)
sleep = sleep * sleep
}
continue
}
for _, node := range keys.Node.Nodes {
if _, err := keysAPI.Delete(context.TODO(), node.Key, &etcd.DeleteOptions{Recursive: true}); err != nil {
glog.Fatalf("Unable delete key: %v", err)
}
}
ok = true
break
}
if !ok {
glog.Fatalf("Failed to connect to etcd")
}
cl := client.NewOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
// TODO: caesarxuchao: hacky way to specify version of Experimental client.
// We will fix this by supporting multiple group versions in Config
cl.ExtensionsClient = client.NewExtensionsOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()}})
// Master
host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
if err != nil {
glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
}
portNumber, err := strconv.Atoi(port)
if err != nil {
glog.Fatalf("Nonnumeric port? %v", err)
}
publicAddress := net.ParseIP(host)
if publicAddress == nil {
glog.Fatalf("No public address for %s", host)
}
// The caller of master.New should guarantee publicAddress is properly set
hostIP, err := utilnet.ChooseBindAddress(publicAddress)
if err != nil {
glog.Fatalf("Unable to find suitable network address.error='%v' . "+
"Fail to get a valid public address for master.", err)
}
masterConfig := framework.NewMasterConfig()
masterConfig.EnableCoreControllers = true
masterConfig.EnableProfiling = true
masterConfig.ReadWritePort = portNumber
masterConfig.PublicAddress = hostIP
masterConfig.CacheTimeout = 2 * time.Second
masterConfig.EnableWatchCache = watchCache
// Create a master and install handlers into mux.
m, err := master.New(masterConfig)
if err != nil {
glog.Fatalf("Error in bringing up the master: %v", err)
}
handler.delegate = m.Handler
// Scheduler
schedulerConfigFactory := factory.NewConfigFactory(cl, api.DefaultSchedulerName)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
glog.Fatalf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(cl.Events(""))
scheduler.New(schedulerConfig).Run()
podInformer := informers.CreateSharedPodInformer(clientset, controller.NoResyncPeriodFunc())
// ensure the service endpoints are synced several times within the window during which the integration tests wait
go endpointcontroller.NewEndpointController(podInformer, clientset).
Run(3, wait.NeverStop)
//......... part of the code omitted here .........
Example 13: NewReplicaSetController
// NewReplicaSetController returns a new replicaset controller
func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSetController {
frsc := &ReplicaSetController{
fedClient: federationClient,
replicasetDeliverer: fedutil.NewDelayingDeliverer(),
clusterDeliverer: fedutil.NewDelayingDeliverer(),
replicasetWorkQueue: workqueue.New(),
replicaSetBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
defaultPlanner: planner.NewPlanner(&fed.FederatedReplicaSetPreferences{
Clusters: map[string]fed.ClusterReplicaSetPreferences{
"*": {Weight: 1},
},
}),
}
replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(options)
},
},
&extensionsv1.ReplicaSet{},
controller.NoResyncPeriodFunc(),
fedutil.NewTriggerOnAllChanges(
func(obj runtime.Object) { frsc.deliverLocalReplicaSet(obj, allReplicaSetReviewDealy) },
),
)
}
clusterLifecycle := fedutil.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *fedv1.Cluster) {
frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
},
ClusterUnavailable: func(cluster *fedv1.Cluster, _ []interface{}) {
frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
},
}
frsc.fedReplicaSetInformer = fedutil.NewFederatedInformer(federationClient, replicaSetFedInformerFactory, &clusterLifecycle)
podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return clientset.Core().Pods(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return clientset.Core().Pods(apiv1.NamespaceAll).Watch(options)
},
},
&apiv1.Pod{},
controller.NoResyncPeriodFunc(),
fedutil.NewTriggerOnAllChanges(
func(obj runtime.Object) {
frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
},
),
)
}
frsc.fedPodInformer = fedutil.NewFederatedInformer(federationClient, podFedInformerFactory, &fedutil.ClusterLifecycleHandlerFuncs{})
frsc.replicaSetStore.Store, frsc.replicaSetController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(options)
},
},
&extensionsv1.ReplicaSet{},
controller.NoResyncPeriodFunc(),
fedutil.NewTriggerOnMetaAndSpecChanges(
func(obj runtime.Object) { frsc.deliverFedReplicaSetObj(obj, replicaSetReviewDelay) },
),
)
return frsc
}
Example 14: NewNodeController
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
cloud cloudprovider.Interface,
kubeClient clientset.Interface,
podEvictionTimeout time.Duration,
deletionEvictionLimiter flowcontrol.RateLimiter,
terminationEvictionLimiter flowcontrol.RateLimiter,
nodeMonitorGracePeriod time.Duration,
nodeStartupGracePeriod time.Duration,
nodeMonitorPeriod time.Duration,
clusterCIDR *net.IPNet,
serviceCIDR *net.IPNet,
nodeCIDRMaskSize int,
allocateNodeCIDRs bool) *NodeController {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
eventBroadcaster.StartLogging(glog.Infof)
if kubeClient != nil {
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
} else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.")
}
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
}
if allocateNodeCIDRs {
if clusterCIDR == nil {
glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
}
mask := clusterCIDR.Mask
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
glog.Fatal("NodeController: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
}
}
evictorLock := sync.Mutex{}
nc := &NodeController{
cloud: cloud,
knownNodeSet: make(sets.String),
kubeClient: kubeClient,
recorder: recorder,
podEvictionTimeout: podEvictionTimeout,
maximumGracePeriod: 5 * time.Minute,
evictorLock: &evictorLock,
podEvictor: NewRateLimitedTimedQueue(deletionEvictionLimiter),
terminationEvictor: NewRateLimitedTimedQueue(terminationEvictionLimiter),
nodeStatusMap: make(map[string]nodeStatusData),
nodeMonitorGracePeriod: nodeMonitorGracePeriod,
nodeMonitorPeriod: nodeMonitorPeriod,
nodeStartupGracePeriod: nodeStartupGracePeriod,
lookupIP: net.LookupIP,
now: unversioned.Now,
clusterCIDR: clusterCIDR,
serviceCIDR: serviceCIDR,
allocateNodeCIDRs: allocateNodeCIDRs,
forcefullyDeletePod: func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) },
nodeExistsInCloudProvider: func(nodeName string) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
}
nc.podStore.Indexer, nc.podController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return nc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return nc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
},
},
&api.Pod{},
controller.NoResyncPeriodFunc(),
framework.ResourceEventHandlerFuncs{
AddFunc: nc.maybeDeleteTerminatingPod,
UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
},
// We don't actually need to build an index for podStore here, but build one for consistency.
// It will ensure that if people start making use of the podStore in more specific ways,
// they'll get the benefits they expect. It will also reserve the name for future refactorings.
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
nodeEventHandlerFuncs := framework.ResourceEventHandlerFuncs{}
if nc.allocateNodeCIDRs {
nodeEventHandlerFuncs = framework.ResourceEventHandlerFuncs{
AddFunc: nc.allocateOrOccupyCIDR,
DeleteFunc: nc.recycleCIDR,
}
}
nc.nodeStore.Store, nc.nodeController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return nc.kubeClient.Core().Nodes().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return nc.kubeClient.Core().Nodes().Watch(options)
},
//......... part of the code omitted here .........
Example 15: New
//......... part of the code omitted here .........
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return s.federationClient.Federation().Clusters().Watch(options)
},
},
&v1beta1.Cluster{},
clusterSyncPeriod,
cache.ResourceEventHandlerFuncs{
DeleteFunc: s.clusterCache.delFromClusterSet,
AddFunc: s.clusterCache.addToClientMap,
UpdateFunc: func(old, cur interface{}) {
oldCluster, ok := old.(*v1beta1.Cluster)
if !ok {
return
}
curCluster, ok := cur.(*v1beta1.Cluster)
if !ok {
return
}
if !reflect.DeepEqual(oldCluster.Spec, curCluster.Spec) {
// update when spec is changed
s.clusterCache.addToClientMap(cur)
}
pred := getClusterConditionPredicate()
// only update when condition changed to ready from not-ready
if !pred(*oldCluster) && pred(*curCluster) {
s.clusterCache.addToClientMap(cur)
}
// did not handle ready -> not-ready
// how could we stop a controller?
},
},
)
clusterLifecycle := fedutil.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *v1beta1.Cluster) {
s.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterAvailableDelay)
},
}
fedInformerFactory := func(cluster *v1beta1.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return targetClient.Core().Services(v1.NamespaceAll).Watch(options)
},
},
&v1.Service{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some service operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkgruntime.Object) {
// TODO: Use this to enqueue services.
},
))
}
s.federatedInformer = fedutil.NewFederatedInformer(federationClient, fedInformerFactory, &clusterLifecycle)
federatedUpdater := fedutil.NewFederatedUpdater(s.federatedInformer,
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
svc := obj.(*v1.Service)
_, err := client.Core().Services(svc.Namespace).Create(svc)
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
svc := obj.(*v1.Service)
_, err := client.Core().Services(svc.Namespace).Update(svc)
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
svc := obj.(*v1.Service)
err := client.Core().Services(svc.Namespace).Delete(svc.Name, &v1.DeleteOptions{})
return err
})
s.deletionHelper = deletionhelper.NewDeletionHelper(
s.hasFinalizerFunc,
s.removeFinalizerFunc,
s.addFinalizerFunc,
// objNameFunc
func(obj pkgruntime.Object) string {
service := obj.(*v1.Service)
return service.Name
},
updateTimeout,
s.eventRecorder,
s.federatedInformer,
federatedUpdater,
)
s.endpointWorkerMap = make(map[string]bool)
s.serviceWorkerMap = make(map[string]bool)
s.endpointWorkerDoneChan = make(chan string, maxNoOfClusters)
s.serviceWorkerDoneChan = make(chan string, maxNoOfClusters)
return s
}