This article collects typical usage examples of the Golang function NewRateLimitingQueue from the k8s.io/kubernetes/pkg/util/workqueue package. If you are wondering what NewRateLimitingQueue does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.
The following shows 15 code examples of the NewRateLimitingQueue function, sorted by popularity by default.
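Every example below constructs its queue the same way: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) returns a work queue whose AddRateLimited method delays requeued items according to the supplied rate limiter. For orientation, here is a minimal sketch (not taken from any of the examples below) of the worker loop that typically drains such a queue; the names worker, maxRetries and syncHandler are illustrative:

// Minimal sketch of the consumer side of a rate-limiting work queue.
// The queue methods used here (Get, Done, Forget, NumRequeues, AddRateLimited)
// belong to workqueue.RateLimitingInterface; everything else is illustrative.
func worker(queue workqueue.RateLimitingInterface, maxRetries int, syncHandler func(key string) error) {
    for {
        key, quit := queue.Get()
        if quit {
            return
        }
        if err := syncHandler(key.(string)); err == nil {
            // Success: reset the per-item backoff tracked by the rate limiter.
            queue.Forget(key)
        } else if queue.NumRequeues(key) < maxRetries {
            // Failure: requeue with a delay chosen by the rate limiter.
            queue.AddRateLimited(key)
        } else {
            // Give up after too many retries.
            queue.Forget(key)
        }
        // Always mark the item as done so it can be picked up again later.
        queue.Done(key)
    }
}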
Example 1: NewDeploymentConfigController
// NewDeploymentConfigController creates a new DeploymentConfigController.
func NewDeploymentConfigController(dcInformer, rcInformer, podInformer framework.SharedIndexInformer, oc osclient.Interface, kc kclient.Interface, codec runtime.Codec) *DeploymentConfigController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(kc.Events(""))
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})
    c := &DeploymentConfigController{
        dn: oc,
        rn: kc,
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        recorder: recorder,
        codec: codec,
    }
    c.dcStore.Indexer = dcInformer.GetIndexer()
    dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc: c.addDeploymentConfig,
        UpdateFunc: c.updateDeploymentConfig,
        DeleteFunc: c.deleteDeploymentConfig,
    })
    c.rcStore.Indexer = rcInformer.GetIndexer()
    rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc: c.addReplicationController,
        UpdateFunc: c.updateReplicationController,
        DeleteFunc: c.deleteReplicationController,
    })
    c.podStore.Indexer = podInformer.GetIndexer()
    c.dcStoreSynced = dcInformer.HasSynced
    c.rcStoreSynced = rcInformer.HasSynced
    c.podStoreSynced = podInformer.HasSynced
    return c
}
Example 2: NewClusterQuotaMappingController
// NewClusterQuotaMappingController builds a mapping between namespaces and clusterresourcequotas
func NewClusterQuotaMappingController(namespaceInformer shared.NamespaceInformer, quotaInformer shared.ClusterResourceQuotaInformer) *ClusterQuotaMappingController {
    c := &ClusterQuotaMappingController{
        namespaceQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        quotaQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        clusterQuotaMapper: NewClusterQuotaMapper(),
    }
    namespaceInformer.Informer().AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc: c.addNamespace,
        UpdateFunc: c.updateNamespace,
        DeleteFunc: c.deleteNamespace,
    })
    c.namespaceLister = namespaceInformer.Lister()
    c.namespacesSynced = namespaceInformer.Informer().HasSynced
    quotaInformer.Informer().AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc: c.addQuota,
        UpdateFunc: c.updateQuota,
        DeleteFunc: c.deleteQuota,
    })
    c.quotaLister = quotaInformer.Lister()
    c.quotasSynced = quotaInformer.Informer().HasSynced
    return c
}
Example 3: NewTokensController
// NewTokensController returns a new *TokensController.
func NewTokensController(cl clientset.Interface, options TokensControllerOptions) *TokensController {
    maxRetries := options.MaxRetries
    if maxRetries == 0 {
        maxRetries = 10
    }
    e := &TokensController{
        client: cl,
        token: options.TokenGenerator,
        rootCA: options.RootCA,
        syncServiceAccountQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        syncSecretQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        maxRetries: maxRetries,
    }
    e.serviceAccounts, e.serviceAccountController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
            },
        },
        &api.ServiceAccount{},
        options.ServiceAccountResync,
        framework.ResourceEventHandlerFuncs{
            AddFunc: e.queueServiceAccountSync,
            UpdateFunc: e.queueServiceAccountUpdateSync,
            DeleteFunc: e.queueServiceAccountSync,
        },
    )
    tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
    e.secrets, e.secretController = framework.NewIndexerInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                options.FieldSelector = tokenSelector
                return e.client.Core().Secrets(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                options.FieldSelector = tokenSelector
                return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
            },
        },
        &api.Secret{},
        options.SecretResync,
        framework.ResourceEventHandlerFuncs{
            AddFunc: e.queueSecretSync,
            UpdateFunc: e.queueSecretUpdateSync,
            DeleteFunc: e.queueSecretSync,
        },
        cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
    )
    return e
}
Example 4: NewDockerRegistryServiceController
// NewDockerRegistryServiceController returns a new *DockerRegistryServiceController.
func NewDockerRegistryServiceController(cl client.Interface, options DockerRegistryServiceControllerOptions) *DockerRegistryServiceController {
    e := &DockerRegistryServiceController{
        client: cl,
        dockercfgController: options.DockercfgController,
        registryLocationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        secretsToUpdate: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        serviceName: options.RegistryServiceName,
        serviceNamespace: options.RegistryNamespace,
        dockerURLsIntialized: options.DockerURLsIntialized,
    }
    e.serviceCache, e.serviceController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
                opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
                return e.client.Services(options.RegistryNamespace).List(opts)
            },
            WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
                opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
                return e.client.Services(options.RegistryNamespace).Watch(opts)
            },
        },
        &kapi.Service{},
        options.Resync,
        framework.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                e.enqueueRegistryLocationQueue()
            },
            UpdateFunc: func(old, cur interface{}) {
                e.enqueueRegistryLocationQueue()
            },
            DeleteFunc: func(obj interface{}) {
                e.enqueueRegistryLocationQueue()
            },
        },
    )
    e.servicesSynced = e.serviceController.HasSynced
    e.syncRegistryLocationHandler = e.syncRegistryLocationChange
    dockercfgOptions := kapi.ListOptions{FieldSelector: fields.SelectorFromSet(map[string]string{kapi.SecretTypeField: string(kapi.SecretTypeDockercfg)})}
    e.secretCache, e.secretController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
                return e.client.Secrets(kapi.NamespaceAll).List(dockercfgOptions)
            },
            WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
                return e.client.Secrets(kapi.NamespaceAll).Watch(dockercfgOptions)
            },
        },
        &kapi.Secret{},
        options.Resync,
        framework.ResourceEventHandlerFuncs{},
    )
    e.secretsSynced = e.secretController.HasSynced
    e.syncSecretHandler = e.syncSecretUpdate
    return e
}
Example 5: NewTaskQueue
// NewTaskQueue creates a new task queue with the given sync function.
// The sync function is called for every element inserted into the queue.
func NewTaskQueue(syncFn func(string) error) *taskQueue {
    return &taskQueue{
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        sync: syncFn,
        workerDone: make(chan struct{}),
    }
}
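The taskQueue type itself is not part of this example. A rough sketch of how such a type might drain its queue and call sync for every element, assuming only the fields visible in the struct literal above (queue, sync, workerDone); the run/worker methods and the use of wait.Until are hypothetical, not the actual implementation:

// Hypothetical drain loop for the taskQueue above; only queue, sync and
// workerDone come from the example, the rest is illustrative.
func (t *taskQueue) run(period time.Duration, stopCh <-chan struct{}) {
    wait.Until(t.worker, period, stopCh)
}

func (t *taskQueue) worker() {
    for {
        key, quit := t.queue.Get()
        if quit {
            close(t.workerDone)
            return
        }
        if err := t.sync(key.(string)); err != nil {
            // Retry the element later with rate-limited backoff.
            t.queue.AddRateLimited(key)
        } else {
            t.queue.Forget(key)
        }
        t.queue.Done(key)
    }
}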
Example 6: NewDockercfgController
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl kclientset.Interface, options DockercfgControllerOptions) *DockercfgController {
    e := &DockercfgController{
        client: cl,
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        dockerURLsIntialized: options.DockerURLsIntialized,
    }
    var serviceAccountCache cache.Store
    serviceAccountCache, e.serviceAccountController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
            },
        },
        &api.ServiceAccount{},
        options.Resync,
        cache.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                serviceAccount := obj.(*api.ServiceAccount)
                glog.V(5).Infof("Adding service account %s", serviceAccount.Name)
                e.enqueueServiceAccount(serviceAccount)
            },
            UpdateFunc: func(old, cur interface{}) {
                serviceAccount := cur.(*api.ServiceAccount)
                glog.V(5).Infof("Updating service account %s", serviceAccount.Name)
                // Resync on service object relist.
                e.enqueueServiceAccount(serviceAccount)
            },
        },
    )
    e.serviceAccountCache = NewEtcdMutationCache(serviceAccountCache)
    tokenSecretSelector := fields.OneTermEqualSelector(api.SecretTypeField, string(api.SecretTypeServiceAccountToken))
    e.secretCache, e.secretController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                options.FieldSelector = tokenSecretSelector
                return e.client.Core().Secrets(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                options.FieldSelector = tokenSecretSelector
                return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
            },
        },
        &api.Secret{},
        options.Resync,
        cache.ResourceEventHandlerFuncs{
            AddFunc: func(cur interface{}) { e.handleTokenSecretUpdate(nil, cur) },
            UpdateFunc: func(old, cur interface{}) { e.handleTokenSecretUpdate(old, cur) },
            DeleteFunc: e.handleTokenSecretDelete,
        },
    )
    e.syncHandler = e.syncServiceAccount
    return e
}
Example 7: NewBucketingWorkQueue
func NewBucketingWorkQueue() BucketingWorkQueue {
    return &workQueueBucket{
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        work: map[interface{}][]interface{}{},
        dirtyWork: map[interface{}][]interface{}{},
        inProgress: map[interface{}]bool{},
    }
}
Example 8: NewPetSetController
// NewPetSetController creates a new petset controller.
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
    recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"})
    pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}
    psc := &PetSetController{
        kubeClient: kubeClient,
        blockingPetStore: newUnHealthyPetTracker(pc),
        newSyncer: func(blockingPet *pcb) *petSyncer {
            return &petSyncer{pc, blockingPet}
        },
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
    }
    podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        // lookup the petset and enqueue
        AddFunc: psc.addPod,
        // lookup current and old petset if labels changed
        UpdateFunc: psc.updatePod,
        // lookup petset accounting for deletion tombstones
        DeleteFunc: psc.deletePod,
    })
    psc.podStore.Indexer = podInformer.GetIndexer()
    psc.podController = podInformer.GetController()
    psc.psStore.Store, psc.psController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return psc.kubeClient.Apps().PetSets(api.NamespaceAll).Watch(options)
            },
        },
        &apps.PetSet{},
        petSetResyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: psc.enqueuePetSet,
            UpdateFunc: func(old, cur interface{}) {
                oldPS := old.(*apps.PetSet)
                curPS := cur.(*apps.PetSet)
                if oldPS.Status.Replicas != curPS.Status.Replicas {
                    glog.V(4).Infof("Observed updated replica count for PetSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
                }
                psc.enqueuePetSet(cur)
            },
            DeleteFunc: psc.enqueuePetSet,
        },
    )
    // TODO: Watch volumes
    psc.podStoreSynced = psc.podController.HasSynced
    psc.syncHandler = psc.Sync
    return psc
}
Example 9: newReplicationManager
// newReplicationManager configures a replication manager with the specified event recorder
func newReplicationManager(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
    if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
        metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
    }
    rm := &ReplicationManager{
        kubeClient: kubeClient,
        podControl: controller.RealPodControl{
            KubeClient: kubeClient,
            Recorder: eventRecorder,
        },
        burstReplicas: burstReplicas,
        expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        garbageCollectorEnabled: garbageCollectorEnabled,
    }
    rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
            },
        },
        &api.ReplicationController{},
        // TODO: Can we have much longer period here?
        FullControllerResyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: rm.enqueueController,
            UpdateFunc: rm.updateRC,
            // This will enter the sync loop and no-op, because the controller has been deleted from the store.
            // Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
            // way of achieving this is by performing a `stop` operation on the controller.
            DeleteFunc: rm.enqueueController,
        },
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
    )
    podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc: rm.addPod,
        // This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
        // the most frequent pod update is status, and the associated rc will only list from local storage, so
        // it should be ok.
        UpdateFunc: rm.updatePod,
        DeleteFunc: rm.deletePod,
    })
    rm.podStore.Indexer = podInformer.GetIndexer()
    rm.podController = podInformer.GetController()
    rm.syncHandler = rm.syncReplicationController
    rm.podStoreSynced = rm.podController.HasSynced
    rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
    return rm
}
Example 10: NewIngressIPController
// NewIngressIPController creates a new IngressIPController.
// TODO this should accept a shared informer
func NewIngressIPController(kc kclientset.Interface, ipNet *net.IPNet, resyncInterval time.Duration) *IngressIPController {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(&kcoreclient.EventSinkImpl{Interface: kc.Core().Events("")})
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "ingressip-controller"})
    ic := &IngressIPController{
        client: kc.Core(),
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        maxRetries: 10,
        recorder: recorder,
    }
    ic.cache, ic.controller = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
                return ic.client.Services(kapi.NamespaceAll).List(options)
            },
            WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
                return ic.client.Services(kapi.NamespaceAll).Watch(options)
            },
        },
        &kapi.Service{},
        resyncInterval,
        cache.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                service := obj.(*kapi.Service)
                glog.V(5).Infof("Adding service %s/%s", service.Namespace, service.Name)
                ic.enqueueChange(obj, nil)
            },
            UpdateFunc: func(old, cur interface{}) {
                service := cur.(*kapi.Service)
                glog.V(5).Infof("Updating service %s/%s", service.Namespace, service.Name)
                ic.enqueueChange(cur, old)
            },
            DeleteFunc: func(obj interface{}) {
                service := obj.(*kapi.Service)
                glog.V(5).Infof("Deleting service %s/%s", service.Namespace, service.Name)
                ic.enqueueChange(nil, obj)
            },
        },
    )
    ic.changeHandler = ic.processChange
    ic.persistenceHandler = persistService
    ic.ipAllocator = ipallocator.NewAllocatorCIDRRange(ipNet, func(max int, rangeSpec string) allocator.Interface {
        return allocator.NewAllocationMap(max, rangeSpec)
    })
    ic.allocationMap = make(map[string]string)
    ic.requeuedAllocations = sets.NewString()
    return ic
}
Example 11: NewUnidlingController
func NewUnidlingController(scaleNS kextclient.ScalesGetter, endptsNS kclient.EndpointsGetter, evtNS kclient.EventsGetter, dcNamespacer deployclient.DeploymentConfigsGetter, rcNamespacer kclient.ReplicationControllersGetter, resyncPeriod time.Duration) *UnidlingController {
    fieldSet := fields.Set{}
    fieldSet["reason"] = unidlingapi.NeedPodsReason
    fieldSelector := fieldSet.AsSelector()
    unidlingController := &UnidlingController{
        scaleNamespacer: scaleNS,
        endpointsNamespacer: endptsNS,
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        lastFiredCache: &lastFiredCache{
            items: make(map[types.NamespacedName]time.Time),
        },
        dcNamespacer: dcNamespacer,
        rcNamespacer: rcNamespacer,
    }
    _, controller := framework.NewInformer(
        &cache.ListWatch{
            // No need to list -- we only care about new events
            ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
                return &kapi.EventList{}, nil
            },
            WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
                options.FieldSelector = fieldSelector
                return evtNS.Events(kapi.NamespaceAll).Watch(options)
            },
        },
        &kapi.Event{},
        resyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                event := obj.(*kapi.Event)
                unidlingController.enqueueEvent(event)
            },
            UpdateFunc: func(oldObj interface{}, newObj interface{}) {
                // retrigger on new last-seen times
                event := newObj.(*kapi.Event)
                unidlingController.enqueueEvent(event)
            },
            DeleteFunc: func(obj interface{}) {
                // this is just to clean up our cache of the last seen times
                event := obj.(*kapi.Event)
                unidlingController.clearEventFromCache(event)
            },
        },
    )
    unidlingController.controller = controller
    return unidlingController
}
Example 12: NewServiceServingCertUpdateController
// NewServiceServingCertUpdateController creates a new ServiceServingCertUpdateController.
// TODO this should accept a shared informer
func NewServiceServingCertUpdateController(serviceClient kcoreclient.ServicesGetter, secretClient kcoreclient.SecretsGetter, ca *crypto.CA, dnsSuffix string, resyncInterval time.Duration) *ServiceServingCertUpdateController {
    sc := &ServiceServingCertUpdateController{
        secretClient: secretClient,
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        ca: ca,
        dnsSuffix: dnsSuffix,
        // TODO base the expiry time on a percentage of the time for the lifespan of the cert
        minTimeLeftForCert: 1 * time.Hour,
    }
    sc.serviceCache, sc.serviceController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
                return serviceClient.Services(kapi.NamespaceAll).List(options)
            },
            WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
                return serviceClient.Services(kapi.NamespaceAll).Watch(options)
            },
        },
        &kapi.Service{},
        resyncInterval,
        framework.ResourceEventHandlerFuncs{},
    )
    sc.serviceHasSynced = sc.serviceController.HasSynced
    sc.secretCache, sc.secretController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
                return sc.secretClient.Secrets(kapi.NamespaceAll).List(options)
            },
            WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
                return sc.secretClient.Secrets(kapi.NamespaceAll).Watch(options)
            },
        },
        &kapi.Secret{},
        resyncInterval,
        framework.ResourceEventHandlerFuncs{
            AddFunc: sc.addSecret,
            UpdateFunc: sc.updateSecret,
        },
    )
    sc.secretHasSynced = sc.secretController.HasSynced
    sc.syncHandler = sc.syncSecret
    return sc
}
Example 13: NewNamespaceController
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(
    kubeClient clientset.Interface,
    clientPool dynamic.ClientPool,
    groupVersionResources []unversioned.GroupVersionResource,
    resyncPeriod time.Duration,
    finalizerToken api.FinalizerName) *NamespaceController {
    // create the controller so we can inject the enqueue function
    namespaceController := &NamespaceController{
        kubeClient: kubeClient,
        clientPool: clientPool,
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        groupVersionResources: groupVersionResources,
        opCache: operationNotSupportedCache{},
        finalizerToken: finalizerToken,
    }
    if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
        metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
    }
    // configure the backing store/controller
    store, controller := framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return kubeClient.Core().Namespaces().List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return kubeClient.Core().Namespaces().Watch(options)
            },
        },
        &api.Namespace{},
        resyncPeriod,
        framework.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                namespace := obj.(*api.Namespace)
                namespaceController.enqueueNamespace(namespace)
            },
            UpdateFunc: func(oldObj, newObj interface{}) {
                namespace := newObj.(*api.Namespace)
                namespaceController.enqueueNamespace(namespace)
            },
        },
    )
    namespaceController.store = store
    namespaceController.controller = controller
    return namespaceController
}
Example 14: TestWorkRequeuesWhenFull
func TestWorkRequeuesWhenFull(t *testing.T) {
    tests := []struct {
        testName string
        requeuedChange bool
        requeuedService bool
        requeued bool
    }{
        {
            testName: "Previously requeued change should be requeued",
            requeued: true,
        },
        {
            testName: "The only pending allocation should be requeued",
            requeuedChange: true,
            requeuedService: true,
            requeued: true,
        },
        {
            testName: "Already requeued allocation should not be requeued",
            requeuedService: true,
            requeued: false,
        },
    }
    for _, test := range tests {
        c := newController(t, nil)
        c.changeHandler = func(change *serviceChange) error {
            return ipallocator.ErrFull
        }
        // Use a queue with no delay to avoid timing issues
        c.queue = workqueue.NewRateLimitingQueue(workqueue.NewMaxOfRateLimiter())
        change := &serviceChange{
            key: "foo",
            requeuedAllocation: test.requeuedChange,
        }
        if test.requeuedService {
            c.requeuedAllocations.Insert(change.key)
        }
        c.queue.Add(change)
        c.work()
        requeued := (c.queue.Len() == 1)
        if test.requeued != requeued {
            t.Errorf("Expected requeued == %v, got %v", test.requeued, requeued)
        }
    }
}
Example 15: NewServiceServingCertController
// NewServiceServingCertController creates a new ServiceServingCertController.
// TODO this should accept a shared informer
func NewServiceServingCertController(serviceClient kclient.ServicesNamespacer, secretClient kclient.SecretsNamespacer, ca *crypto.CA, dnsSuffix string, resyncInterval time.Duration) *ServiceServingCertController {
    sc := &ServiceServingCertController{
        serviceClient: serviceClient,
        secretClient: secretClient,
        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
        maxRetries: 10,
        ca: ca,
        dnsSuffix: dnsSuffix,
    }
    sc.serviceCache, sc.serviceController = framework.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
                return sc.serviceClient.Services(kapi.NamespaceAll).List(options)
            },
            WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
                return sc.serviceClient.Services(kapi.NamespaceAll).Watch(options)
            },
        },
        &kapi.Service{},
        resyncInterval,
        framework.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                service := obj.(*kapi.Service)
                glog.V(4).Infof("Adding service %s", service.Name)
                sc.enqueueService(obj)
            },
            UpdateFunc: func(old, cur interface{}) {
                service := cur.(*kapi.Service)
                glog.V(4).Infof("Updating service %s", service.Name)
                // Resync on service object relist.
                sc.enqueueService(cur)
            },
        },
    )
    sc.syncHandler = sc.syncService
    return sc
}
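All fifteen examples pass workqueue.DefaultControllerRateLimiter() to NewRateLimitingQueue, which combines a per-item exponential backoff with an overall rate limit. Example 14 does the opposite: workqueue.NewMaxOfRateLimiter() with no arguments yields a limiter that never delays, which keeps the test free of timing issues. When a different retry policy is needed, a custom limiter can be supplied instead; a small sketch using constructors exported by the same workqueue package (the concrete delays here are illustrative):

// Sketch of a queue with an explicit retry policy instead of the default:
// per-item exponential backoff starting at 50ms and capped at 30s.
limiter := workqueue.NewItemExponentialFailureRateLimiter(50*time.Millisecond, 30*time.Second)
queue := workqueue.NewRateLimitingQueue(limiter)
defer queue.ShutDown()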