This article collects typical usage examples of the Golang function cache.NewFIFO from k8s.io/kubernetes/pkg/client/cache. If you are wondering what NewFIFO does, how to call it, or what it looks like in real code, the curated examples below should help.
The sections that follow show 15 code examples of the NewFIFO function, drawn from open-source projects and ordered roughly by popularity.
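Before the examples, a minimal sketch of the basic pattern may be useful. This is an illustration only, assuming the pkg/client/cache API of this era (where FIFO.Pop() blocks and returns interface{}), not code taken from any of the projects below:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	// NewFIFO builds a queue keyed by MetaNamespaceKeyFunc, i.e. "namespace/name".
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)

	// Producers (typically a Reflector) Add objects; repeated Adds of the same
	// key coalesce, so the queue holds at most one pending entry per object.
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "example"}}
	if err := queue.Add(pod); err != nil {
		fmt.Println(err)
	}

	// A consumer Pops items in insertion order; Pop blocks until an item is available.
	next := queue.Pop().(*api.Pod)
	fmt.Println(next.Namespace, next.Name)
}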
Example 1: NewConfigFactory
// Initializes the factory.
func NewConfigFactory(client *client.Client, rateLimiter util.RateLimiter) *ConfigFactory {
	c := &ConfigFactory{
		Client: client,
		PodQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		NodeQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister: &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister: &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		StopEverything: make(chan struct{}),
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{Store: c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	c.BindPodsRateLimiter = rateLimiter

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug in functions that
	// they may need to call.
	c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
		c.createAssignedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if pod, ok := obj.(*api.Pod); ok {
					c.modeler.LockedAction(func() {
						c.modeler.ForgetPod(pod)
					})
				}
			},
			DeleteFunc: func(obj interface{}) {
				c.modeler.LockedAction(func() {
					switch t := obj.(type) {
					case *api.Pod:
						c.modeler.ForgetPod(t)
					case cache.DeletedFinalStateUnknown:
						c.modeler.ForgetPodByKey(t.Key)
					}
				})
			},
		},
	)
	return c
}
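The PodQueue and NodeQueue built above are only populated here; the scheduler's run loop drains the pod queue elsewhere in this factory. A minimal sketch of that consumer side, assuming the blocking Pop() of this cache package (the helper name getNextPod is illustrative):

// Illustrative consumer for the FIFO built above (assumption, not the verbatim
// upstream code): block until a pod is queued, then hand it to the scheduler.
func getNextPod(podQueue *cache.FIFO) *api.Pod {
	pod := podQueue.Pop().(*api.Pod)
	glog.V(2).Infof("About to try and schedule pod %v", pod.Name)
	return pod
}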
Example 2: Create
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).RunUntil(factory.Stop)

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore: store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			retryFunc("ImageStream update", func(err error) bool {
				_, isFatal := err.(buildcontroller.ImageChangeControllerFatalError)
				return isFatal
			}),
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
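The RetryController returned here is what actually drains the FIFO: it pops items, hands each one to Handle, and asks the RetryManager whether a failed item should be re-queued. A simplified sketch of that loop, offered as an assumption rather than the exact openshift/origin implementation:

// Simplified retry loop (assumption): pop, handle, and re-queue on retryable errors.
func runRetryLoop(queue *cache.FIFO, handle func(interface{}) error, shouldRetry func(obj interface{}, err error) bool) {
	for {
		obj := queue.Pop()
		if err := handle(obj); err != nil && shouldRetry(obj, err) {
			// AddIfNotPresent avoids overwriting a newer copy that a reflector
			// may already have re-queued under the same key.
			if addErr := queue.AddIfNotPresent(obj); addErr != nil {
				glog.Errorf("failed to requeue object: %v", addErr)
			}
		}
	}
}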
Example 3: Create
// Create constructs a BuildController
func (factory *BuildControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildController := &buildcontroller.BuildController{
		BuildUpdater: factory.BuildUpdater,
		ImageStreamClient: client,
		PodManager: client,
		BuildStrategy: &typeBasedFactoryStrategy{
			DockerBuildStrategy: factory.DockerBuildStrategy,
			SourceBuildStrategy: factory.SourceBuildStrategy,
			CustomBuildStrategy: factory.CustomBuildStrategy,
		},
		Recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-controller"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			limitedLogAndRetry(factory.BuildUpdater, 30*time.Minute),
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			build := obj.(*buildapi.Build)
			return buildController.HandleBuild(build)
		},
	}
}
Example 4: Create
// Create creates a new ConfigChangeController which is used to trigger builds on creation
func (factory *BuildConfigControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	bcController := &buildcontroller.BuildConfigController{
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-config-controller"}),
	}

	return &controller.RetryController{
		Queue: controller.NewQueueWrapper(queue),
		RetryManager: controller.NewQueueRetryManager(
			controller.NewQueueWrapper(queue),
			cache.MetaNamespaceKeyFunc,
			retryFunc("BuildConfig", buildcontroller.IsFatal),
			flowcontrol.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			bc := obj.(*buildapi.BuildConfig)
			return bcController.HandleBuildConfig(bc)
		},
	}
}
Example 5: Create
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()

	c := &ImportController{
		streams: f.Client,
		mappings: f.Client,
	}

	return &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				util.HandleError(err)
				return retries.Count < 5
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			r := obj.(*api.ImageStream)
			return c.Next(r)
		},
	}
}
Example 6: NewTurboScheduler
func NewTurboScheduler(kubeClient *client.Client, vmturboMeta *vmtmeta.VMTMeta) *TurboScheduler {
	scheduledPodLister := &cache.StoreToPodLister{}
	podQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{Store: podQueue}, scheduledPodLister)

	bindPodsQPS := float32(15.0)
	bindPodsBurst := 20
	rateLimiter := util.NewTokenBucketRateLimiter(bindPodsQPS, bindPodsBurst)

	config := &Config{
		Modeler: modeler,
		Binder: &binder{kubeClient},
		BindPodsRateLimiter: rateLimiter,
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "turboscheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	vmtSched := vmtscheduler.NewVMTScheduler(kubeClient, vmturboMeta)
	glog.V(3).Infof("VMTScheduler is set: %++v", vmtSched)

	defaultSched := defaultscheduler.NewDefaultScheduler(kubeClient)
	glog.V(3).Infof("DefaultScheduler is set: %++v", defaultSched)

	return &TurboScheduler{
		config: config,
		vmtScheduler: vmtSched,
		defaultScheduler: defaultSched,
	}
}
Example 7: TestSchedulerNoPhantomPodAfterDelete
func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := schedulercache.New(10*time.Minute, stop)
	firstPod := podWithPort("pod.Name", "", 8080)
	node := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}}
	scache.AddNode(&node)
	nodeLister := algorithm.FakeNodeLister([]*api.Node{&node})
	predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
	scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, firstPod, &node)

	// We use conflicted pod ports to incur fit predicate failure.
	secondPod := podWithPort("bar", "", 8080)
	queuedPodStore.Add(secondPod)

	// queuedPodStore: [bar:8080]
	// cache: [(assumed)foo:8080]
	scheduler.scheduleOne()
	select {
	case err := <-errChan:
		expectErr := &FitError{
			Pod: secondPod,
			FailedPredicates: FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}},
		}
		if !reflect.DeepEqual(expectErr, err) {
			t.Errorf("err want=%v, get=%v", expectErr, err)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}

	// We mimic the workflow of cache behavior when a pod is removed by user.
	// Note: if the schedulercache timeout would be super short, the first pod would expire
	// and would be removed itself (without any explicit actions on schedulercache). Even in that case,
	// explicitly AddPod will as well correct the behavior.
	firstPod.Spec.NodeName = node.Name
	if err := scache.AddPod(firstPod); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := scache.RemovePod(firstPod); err != nil {
		t.Fatalf("err: %v", err)
	}

	queuedPodStore.Add(secondPod)
	scheduler.scheduleOne()
	select {
	case b := <-bindingChan:
		expectBinding := &api.Binding{
			ObjectMeta: api.ObjectMeta{Name: "bar"},
			Target: api.ObjectReference{Kind: "Node", Name: node.Name},
		}
		if !reflect.DeepEqual(expectBinding, b) {
			t.Errorf("binding want=%v, get=%v", expectBinding, b)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
}
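The scheduler tests in this listing lean on a small podWithPort helper defined elsewhere in the test file; a plausible minimal version (an assumption, shown only so the examples read on their own) would be:

// Plausible sketch of the podWithPort test helper (assumption): a pod with the
// given name, optionally pre-bound to host, exposing a single host port.
func podWithPort(name, host string, port int) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.PodSpec{
			NodeName: host,
			Containers: []api.Container{
				{Name: "ctr", Ports: []api.ContainerPort{{HostPort: int32(port)}}},
			},
		},
	}
}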
Example 8: Create
// Create creates an ImageChangeController.
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	imageStreamLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return factory.Client.ImageStreams(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return factory.Client.ImageStreams(kapi.NamespaceAll).Watch(options)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(imageStreamLW, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	deploymentConfigLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(options)
		},
	}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, store, 2*time.Minute).Run()

	changeController := &ImageChangeController{
		listDeploymentConfigs: func() ([]*deployapi.DeploymentConfig, error) {
			configs := []*deployapi.DeploymentConfig{}
			objs := store.List()
			for _, obj := range objs {
				configs = append(configs, obj.(*deployapi.DeploymentConfig))
			}
			return configs, nil
		},
		client: factory.Client,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				utilruntime.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			flowcontrol.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			repo := obj.(*imageapi.ImageStream)
			return changeController.Handle(repo)
		},
	}
}
Example 9: TestSchedulerNoPhantomPodAfterExpire
func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := schedulercache.New(100*time.Millisecond, stop)
	pod := podWithPort("pod.Name", "", 8080)
	node := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}}
	scache.AddNode(&node)
	nodeLister := algorithm.FakeNodeLister([]*api.Node{&node})
	predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
	scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, pod, &node)

	waitPodExpireChan := make(chan struct{})
	timeout := make(chan struct{})
	go func() {
		for {
			select {
			case <-timeout:
				return
			default:
			}
			pods, err := scache.List(labels.Everything())
			if err != nil {
				t.Fatalf("cache.List failed: %v", err)
			}
			if len(pods) == 0 {
				close(waitPodExpireChan)
				return
			}
			time.Sleep(100 * time.Millisecond)
		}
	}()

	// waiting for the assumed pod to expire
	select {
	case <-waitPodExpireChan:
	case <-time.After(wait.ForeverTestTimeout):
		close(timeout)
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}

	// We use conflicted pod ports to incur fit predicate failure if first pod not removed.
	secondPod := podWithPort("bar", "", 8080)
	queuedPodStore.Add(secondPod)
	scheduler.scheduleOne()
	select {
	case b := <-bindingChan:
		expectBinding := &api.Binding{
			ObjectMeta: api.ObjectMeta{Name: "bar"},
			Target: api.ObjectReference{Kind: "Node", Name: node.Name},
		}
		if !reflect.DeepEqual(expectBinding, b) {
			t.Errorf("binding want=%v, get=%v", expectBinding, b)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
}
Example 10: TestSchedulerErrorWithLongBinding
// Scheduler should preserve predicate constraint even if binding was longer
// than cache ttl
func TestSchedulerErrorWithLongBinding(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)

	firstPod := podWithPort("foo", "", 8080)
	conflictPod := podWithPort("bar", "", 8080)
	pods := map[string]*v1.Pod{firstPod.Name: firstPod, conflictPod.Name: conflictPod}
	for _, test := range []struct {
		Expected map[string]bool
		CacheTTL time.Duration
		BindingDuration time.Duration
	}{
		{
			Expected: map[string]bool{firstPod.Name: true},
			CacheTTL: 100 * time.Millisecond,
			BindingDuration: 300 * time.Millisecond,
		},
		{
			Expected: map[string]bool{firstPod.Name: true},
			CacheTTL: 10 * time.Second,
			BindingDuration: 300 * time.Millisecond,
		},
	} {
		queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
		scache := schedulercache.New(test.CacheTTL, stop)

		node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1"}}
		scache.AddNode(&node)

		nodeLister := algorithm.FakeNodeLister([]*v1.Node{&node})
		predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
		scheduler, bindingChan := setupTestSchedulerLongBindingWithRetry(
			queuedPodStore, scache, nodeLister, predicateMap, stop, test.BindingDuration)
		scheduler.Run()
		queuedPodStore.Add(firstPod)
		queuedPodStore.Add(conflictPod)

		resultBindings := map[string]bool{}
		waitChan := time.After(5 * time.Second)
		for finished := false; !finished; {
			select {
			case b := <-bindingChan:
				resultBindings[b.Name] = true
				p := pods[b.Name]
				p.Spec.NodeName = b.Target.Name
				scache.AddPod(p)
			case <-waitChan:
				finished = true
			}
		}
		if !reflect.DeepEqual(resultBindings, test.Expected) {
			t.Errorf("Result binding are not equal to expected. %v != %v", resultBindings, test.Expected)
		}
	}
}
Example 11: Create
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		changeStrategy: &changeStrategyImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Generate(name)
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Update(config)
			},
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
Example 12: TestSchedulerFailedSchedulingReasons
func TestSchedulerFailedSchedulingReasons(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := schedulercache.New(10*time.Minute, stop)
	node := api.Node{
		ObjectMeta: api.ObjectMeta{Name: "machine1"},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceCPU: *(resource.NewQuantity(2, resource.DecimalSI)),
				api.ResourceMemory: *(resource.NewQuantity(100, resource.DecimalSI)),
				api.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)),
			},
			Allocatable: api.ResourceList{
				api.ResourceCPU: *(resource.NewQuantity(2, resource.DecimalSI)),
				api.ResourceMemory: *(resource.NewQuantity(100, resource.DecimalSI)),
				api.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)),
			},
		},
	}
	scache.AddNode(&node)
	nodeLister := algorithm.FakeNodeLister([]*api.Node{&node})
	predicateMap := map[string]algorithm.FitPredicate{
		"PodFitsResources": predicates.PodFitsResources,
	}
	scheduler, _, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap)

	podWithTooBigResourceRequests := podWithResources("bar", "", api.ResourceList{
		api.ResourceCPU: *(resource.NewQuantity(4, resource.DecimalSI)),
		api.ResourceMemory: *(resource.NewQuantity(500, resource.DecimalSI)),
	}, api.ResourceList{
		api.ResourceCPU: *(resource.NewQuantity(4, resource.DecimalSI)),
		api.ResourceMemory: *(resource.NewQuantity(500, resource.DecimalSI)),
	})
	queuedPodStore.Add(podWithTooBigResourceRequests)
	scheduler.scheduleOne()

	select {
	case err := <-errChan:
		expectErr := &FitError{
			Pod: podWithTooBigResourceRequests,
			FailedPredicates: FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{
				predicates.NewInsufficientResourceError(api.ResourceCPU, 4000, 0, 2000),
				predicates.NewInsufficientResourceError(api.ResourceMemory, 500, 0, 100),
			}},
		}
		if !reflect.DeepEqual(expectErr, err) {
			t.Errorf("err want=%+v, get=%+v", expectErr, err)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
}
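Reading the expected error: the arguments to NewInsufficientResourceError appear to follow (resource, requested, used, capacity). The pod requests 4 CPU, which the predicate accounts as 4000 millicores against the node's allocatable 2000 millicores with nothing in use, and 500 units of memory against the node's 100, which is why exactly these two predicate failures are expected.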
Example 13: NewConfigFactory
// Initializes the factory.
func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffinitySymmetricWeight int, failureDomains string) *ConfigFactory {
	stopEverything := make(chan struct{})
	schedulerCache := schedulercache.New(30*time.Second, stopEverything)

	c := &ConfigFactory{
		Client: client,
		PodQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister: &cache.StoreToNodeLister{},
		PVLister: &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		PVCLister: &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister: &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister: &cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
		ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		schedulerCache: schedulerCache,
		StopEverything: stopEverything,
		SchedulerName: schedulerName,
		HardPodAffinitySymmetricWeight: hardPodAffinitySymmetricWeight,
		FailureDomains: failureDomains,
	}
	c.PodLister = schedulerCache

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug in functions that
	// they may need to call.
	c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = framework.NewIndexerInformer(
		c.createAssignedNonTerminatedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: c.addPodToCache,
			UpdateFunc: c.updatePodInCache,
			DeleteFunc: c.deletePodFromCache,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	c.NodeLister.Store, c.nodePopulator = framework.NewInformer(
		c.createNodeLW(),
		&api.Node{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: c.addNodeToCache,
			UpdateFunc: c.updateNodeInCache,
			DeleteFunc: c.deleteNodeFromCache,
		},
	)

	return c
}
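The Add/Update/Delete handlers wired into the informer above are factory methods that keep the scheduler cache in sync with pods that have already been scheduled. A minimal sketch of what such an add handler typically does (an assumption, not the verbatim upstream method):

// Illustrative add handler (assumption): forward newly observed scheduled pods
// into the scheduler cache so previously assumed pods get reconciled.
func (c *ConfigFactory) addPodToCache(obj interface{}) {
	pod, ok := obj.(*api.Pod)
	if !ok {
		glog.Errorf("cannot convert to *api.Pod: %v", obj)
		return
	}
	if err := c.schedulerCache.AddPod(pod); err != nil {
		glog.Errorf("scheduler cache AddPod failed: %v", err)
	}
}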
Example 14: NewRateLimitedFunction
// NewRateLimitedFunction creates a new rate limited function.
func NewRateLimitedFunction(keyFunc kcache.KeyFunc, interval int, handlerFunc HandlerFunc) *RateLimitedFunction {
	fifo := kcache.NewFIFO(keyFunc)

	qps := float32(1000.0) // Call rate per second (SLA).
	if interval > 0 {
		qps = float32(1.0 / float32(interval))
	}
	limiter := kutil.NewTokenBucketRateLimiter(qps, 1)

	return &RateLimitedFunction{handlerFunc, fifo, limiter}
}
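As a concrete reading of the rate computation: an interval of 10 seconds gives qps = 1.0/10 = 0.1, so the wrapped handler runs at most once every ten seconds, while a zero or negative interval keeps the 1000-calls-per-second ceiling and effectively leaves the function unthrottled.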
Example 15: Create
// Create creates a DeploymentConfigController.
func (factory *DeploymentConfigControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(options)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})

	configController := NewDeploymentConfigController(factory.KubeClient, factory.Client, factory.Codec, recorder)

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				config := obj.(*deployapi.DeploymentConfig)
				// no retries for a fatal error
				if _, isFatal := err.(fatalError); isFatal {
					glog.V(4).Infof("Will not retry fatal error for deploymentConfig %s/%s: %v", config.Namespace, config.Name, err)
					kutil.HandleError(err)
					return false
				}
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					glog.V(4).Infof("Retrying deploymentConfig %s/%s with error: %v", config.Namespace, config.Name, err)
					return true
				}
				kutil.HandleError(err)
				// no retries for anything else
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return configController.Handle(config)
		},
	}
}