This page collects typical usage examples of the Golang NewFIFO function from github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache. If you are wondering what NewFIFO does, how to call it, or what real-world usage looks like, the curated examples below should help.
15 code examples of NewFIFO are shown below, ordered by popularity by default.
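Before the examples, a minimal usage sketch may help orient readers. It assumes the pre-1.0 GoogleCloudPlatform/kubernetes client/cache API that these examples target, in which NewFIFO takes a cache.KeyFunc and Pop returns a bare interface{} (later revisions of the package changed both signatures):
package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
)

func main() {
	// MetaNamespaceKeyFunc derives "namespace/name" keys from API objects.
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queue.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}})
	// Pop blocks until an item is available, then removes and returns it.
	pod := queue.Pop().(*api.Pod)
	fmt.Println(pod.Namespace, pod.Name) // bar foo
}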
Example 1: Create
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).Run()

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Stop:                    factory.Stop,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(buildcontroller.ImageChangeControllerFatalError); isFatal {
					return false
				}
				return retries.Count < maxRetries
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
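The retry predicate above stops retrying permanently when the error is an ImageChangeControllerFatalError. As a hedged sketch (the field layout is illustrative, not the actual definition, which lives in the buildcontroller package), such a sentinel type only needs to satisfy the error interface so the type assertion in the predicate can detect it:
// ImageChangeControllerFatalError denotes an error that should never be
// retried. (Sketch only; the real struct may carry different fields.)
type ImageChangeControllerFatalError struct {
	Reason string
}

func (e ImageChangeControllerFatalError) Error() string {
	return "fatal image change controller error: " + e.Reason
}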
Example 2: TestDefaultErrorFunc
func TestDefaultErrorFunc(t *testing.T) {
	testPod := &api.Pod{JSONBase: api.JSONBase{ID: "foo"}}
	handler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: api.EncodeOrDie(testPod),
		T:            t,
	}
	mux := http.NewServeMux()
	// FakeHandler mustn't be sent requests other than the one you want to test.
	mux.Handle("/api/v1beta1/pods/foo", &handler)
	server := httptest.NewServer(mux)
	factory := ConfigFactory{client.NewOrDie(server.URL, nil)}
	queue := cache.NewFIFO()
	errFunc := factory.makeDefaultErrorFunc(queue)
	errFunc(testPod, nil)
	for {
		// This is a terrible way to do this but I plan on replacing this
		// whole error handling system in the future. The test will time
		// out if something doesn't work.
		time.Sleep(10 * time.Millisecond)
		got, exists := queue.Get("foo")
		if !exists {
			continue
		}
		handler.ValidateRequest(t, "/api/v1beta1/pods/foo", "GET", nil)
		if e, a := testPod, got; !reflect.DeepEqual(e, a) {
			t.Errorf("Expected %v, got %v", e, a)
		}
		break
	}
}
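Note that this test targets an older revision of the cache package than most of the other examples: NewFIFO takes no key function, items are looked up by a plain string ID, and Get returns only (value, exists). Example 11 is the same test against the later API, where NewFIFO requires a KeyFunc and Get returns (value, exists, err).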
Example 3: Create
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()

	c := &ImportController{
		client:   dockerregistry.NewClient(),
		streams:  f.Client,
		mappings: f.Client,
	}

	return &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				util.HandleError(err)
				return retries.Count < 5
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			r := obj.(*api.ImageStream)
			return c.Next(r)
		},
	}
}
Example 4: NewConfigFactory
// NewConfigFactory initializes the factory.
func NewConfigFactory(client *client.Client) *ConfigFactory {
	return &ConfigFactory{
		Client:       client,
		PodQueue:     cache.NewFIFO(),
		PodLister:    PodLister,
		MinionLister: MinionLister,
	}
}
Example 5: Create
// Create creates a scheduler and all support functions.
func (factory *ConfigFactory) Create() *scheduler.Config {
	// Watch and queue pods that need scheduling.
	podQueue := cache.NewFIFO()
	cache.NewReflector(factory.createUnassignedPodLW(), &api.Pod{}, podQueue).Run()

	// Watch and cache all running pods. Scheduler needs to find all pods
	// so it knows where it's safe to place a pod. Cache this locally.
	podCache := cache.NewStore()
	cache.NewReflector(factory.createAssignedPodLW(), &api.Pod{}, podCache).Run()

	// Watch minions.
	// Minions may be listed frequently, so provide a local up-to-date cache.
	minionCache := cache.NewStore()
	if false {
		// Disable this code until minions support watches.
		cache.NewReflector(factory.createMinionLW(), &api.Minion{}, minionCache).Run()
	} else {
		cache.NewPoller(factory.pollMinions, 10*time.Second, minionCache).Run()
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	minionLister := &storeToMinionLister{minionCache}

	algo := algorithm.NewGenericScheduler(
		[]algorithm.FitPredicate{
			// Fit is defined based on the absence of port conflicts.
			algorithm.PodFitsPorts,
			// Fit is determined by resource availability
			algorithm.NewResourceFitPredicate(minionLister),
			// Fit is determined by non-conflicting disk volumes
			algorithm.NoDiskConflict,
			// Fit is determined by node selector query
			algorithm.NewSelectorMatchPredicate(minionLister),
		},
		// Prioritize nodes by least requested utilization.
		algorithm.LeastRequestedPriority,
		&storeToPodLister{podCache}, r)

	podBackoff := podBackoff{
		perPodBackoff: map[string]*backoffEntry{},
		clock:         realClock{},
	}

	return &scheduler.Config{
		MinionLister: minionLister,
		Algorithm:    algo,
		Binder:       &binder{factory.Client},
		NextPod: func() *api.Pod {
			pod := podQueue.Pop().(*api.Pod)
			glog.V(2).Infof("About to try and schedule pod %v\n"+
				"\tknown minions: %v\n"+
				"\tknown scheduled pods: %v\n",
				pod.Name, minionCache.ContainedIDs(), podCache.ContainedIDs())
			return pod
		},
		Error: factory.makeDefaultErrorFunc(&podBackoff, podQueue),
	}
}
Example 6: Create
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		changeStrategy: &changeStrategyImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Generate(name)
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Update(config)
			},
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
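Unlike Example 1, the retry predicate here returns false whenever retries.Count > 0, so a failed non-fatal event is retried exactly once before being dropped from the queue.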
Example 7: NewConfigFactory
// NewConfigFactory initializes the factory.
func NewConfigFactory(client *client.Client) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		NodeLister:         &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:      &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	return c
}
Example 8: CreateRegistry
func CreateRegistry(c RegistryConfig) Registry {
	metrics.Register()

	return &offerStorage{
		RegistryConfig: c,
		offers: cache.NewFIFO(cache.KeyFunc(func(v interface{}) (string, error) {
			if perishable, ok := v.(Perishable); !ok {
				return "", fmt.Errorf("expected perishable offer, not '%+v'", v)
			} else {
				return perishable.Id(), nil
			}
		})),
		listeners: queue.NewDelayFIFO(),
		delayed:   queue.NewDelayQueue(),
		slaves:    newSlaveStorage(),
	}
}
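This example shows that NewFIFO accepts any cache.KeyFunc, not just MetaNamespaceKeyFunc, so non-Kubernetes domain objects can be queued too. The inline if/else is valid Go (perishable is scoped to both branches), but the early-return form is more conventional. An equivalent key function, assuming the same Perishable interface from the example's package:
func offerKey(v interface{}) (string, error) {
	perishable, ok := v.(Perishable)
	if !ok {
		return "", fmt.Errorf("expected perishable offer, not '%+v'", v)
	}
	return perishable.Id(), nil
}

// Then: offers := cache.NewFIFO(cache.KeyFunc(offerKey))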
Example 9: TestSchedulerRateLimitsBinding
func TestSchedulerRateLimitsBinding(t *testing.T) {
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{scheduledPodStore}
	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{queuedPodStore}
	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	// Rate limit to 1 pod
	fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
	c := &Config{
		Modeler: modeler,
		MinionLister: algorithm.FakeMinionLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder:            &record.FakeRecorder{},
		BindPodsRateLimiter: &fr,
	}

	s := New(c)
	firstPod := podWithID("foo", "")
	secondPod := podWithID("boo", "")
	queuedPodStore.Add(firstPod)
	queuedPodStore.Add(secondPod)

	for i, hitRateLimit := range []bool{true, false} {
		s.scheduleOne()
		if fr.acceptValues[i] != hitRateLimit {
			t.Errorf("Unexpected rate limiting, expect rate limit to be: %v but found it was %v", hitRateLimit, fr.acceptValues[i])
		}
	}
}
Example 10: NewConfigFactory
// NewConfigFactory initializes the factory.
func NewConfigFactory(client *client.Client) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister:       &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:    &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister: &cache.StoreToReplicationControllerLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		StopEverything:   make(chan struct{}),
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	c.BindPodsRateLimiter = util.NewTokenBucketRateLimiter(BindPodsQps, BindPodsBurst)

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug in functions that
	// they may need to call.
	c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
		c.createAssignedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if pod, ok := obj.(*api.Pod); ok {
					c.modeler.LockedAction(func() {
						c.modeler.ForgetPod(pod)
					})
				}
			},
			DeleteFunc: func(obj interface{}) {
				c.modeler.LockedAction(func() {
					switch t := obj.(type) {
					case *api.Pod:
						c.modeler.ForgetPod(t)
					case cache.DeletedFinalStateUnknown:
						c.modeler.ForgetPodByKey(t.Key)
					}
				})
			},
		},
	)
	return c
}
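The type switch in DeleteFunc matters because a deletion can be observed indirectly: if the informer's watch is interrupted and the object is already gone by the time it re-lists, the handler receives a cache.DeletedFinalStateUnknown tombstone carrying the object's key (and possibly a stale copy) instead of the *api.Pod itself, hence the ForgetPodByKey fallback.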
Example 11: TestDefaultErrorFunc
func TestDefaultErrorFunc(t *testing.T) {
	testPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyAlways,
			DNSPolicy:     api.DNSClusterFirst,
		},
	}
	handler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: runtime.EncodeOrDie(latest.Codec, testPod),
		T:            t,
	}
	mux := http.NewServeMux()
	// FakeHandler mustn't be sent requests other than the one you want to test.
	mux.Handle(testapi.ResourcePath("pods", "bar", "foo"), &handler)
	server := httptest.NewServer(mux)
	defer server.Close()
	factory := NewConfigFactory(client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}))
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	podBackoff := podBackoff{
		perPodBackoff:   map[string]*backoffEntry{},
		clock:           &fakeClock{},
		defaultDuration: 1 * time.Millisecond,
		maxDuration:     1 * time.Second,
	}
	errFunc := factory.makeDefaultErrorFunc(&podBackoff, queue)
	errFunc(testPod, nil)
	for {
		// This is a terrible way to do this but I plan on replacing this
		// whole error handling system in the future. The test will time
		// out if something doesn't work.
		time.Sleep(10 * time.Millisecond)
		got, exists, _ := queue.Get(testPod)
		if !exists {
			continue
		}
		handler.ValidateRequest(t, testapi.ResourcePath("pods", "bar", "foo"), "GET", nil)
		if e, a := testPod, got; !reflect.DeepEqual(e, a) {
			t.Errorf("Expected %v, got %v", e, a)
		}
		break
	}
}
Example 12: NewPodWatch
// NewPodWatch creates a pod watching function which is backed by a
// FIFO/reflector pair. This avoids managing watches directly.
// The caller supplies a stop channel that shuts down the watch's reflector;
// it is the caller's responsibility to close it (typically via defer) to
// prevent leaking resources.
func NewPodWatch(client kclient.Interface, namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
	fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)
	podLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return client.Pods(namespace).List(labels.Everything(), fieldSelector)
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.Pods(namespace).Watch(labels.Everything(), fieldSelector, resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &kapi.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)

	return func() *kapi.Pod {
		obj := queue.Pop()
		return obj.(*kapi.Pod)
	}
}
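A hedged usage sketch (kubeClient construction is elided; the namespace, pod name, and resource version are placeholders):
stop := make(chan struct{})
defer close(stop) // stop the underlying reflector when done

// next blocks inside the FIFO's Pop until the named pod is listed or updated.
next := NewPodWatch(kubeClient, "default", "my-pod", "0", stop)
pod := next()
fmt.Printf("observed pod %s/%s\n", pod.Namespace, pod.Name)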
Example 13: Create
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).Run()

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Stop:                    factory.Stop,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				imageStream := obj.(*imageapi.ImageStream)
				if _, isFatal := err.(buildcontroller.ImageChangeControllerFatalError); isFatal {
					glog.V(3).Infof("Will not retry fatal error for ImageStream update event %s/%s: %v", imageStream.Namespace, imageStream.Name, err)
					kutil.HandleError(err)
					return false
				}
				// Give up once the retry budget is exhausted.
				if retries.Count >= maxRetries {
					glog.V(3).Infof("Giving up retrying ImageStream update event %s/%s: %v", imageStream.Namespace, imageStream.Name, err)
					kutil.HandleError(err)
					return false
				}
				glog.V(4).Infof("Retrying ImageStream update event %s/%s: %v", imageStream.Namespace, imageStream.Name, err)
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
Example 14: Create
// Create creates a scheduler and all support functions.
func (factory *ConfigFactory) Create() *scheduler.Config {
	// Watch and queue pods that need scheduling.
	podQueue := cache.NewFIFO()
	cache.NewReflector(factory.createUnassignedPodLW(), &api.Pod{}, podQueue).Run()

	// Watch and cache all running pods. Scheduler needs to find all pods
	// so it knows where it's safe to place a pod. Cache this locally.
	podCache := cache.NewStore()
	cache.NewReflector(factory.createAssignedPodLW(), &api.Pod{}, podCache).Run()

	// Watch minions.
	// Minions may be listed frequently, so provide a local up-to-date cache.
	minionCache := cache.NewStore()
	if false {
		// Disable this code until minions support watches.
		cache.NewReflector(factory.createMinionLW(), &api.Minion{}, minionCache).Run()
	} else {
		cache.NewPoller(factory.pollMinions, 10*time.Second, minionCache).Run()
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	algo := algorithm.NewRandomFitScheduler(
		&storeToPodLister{podCache}, r)

	return &scheduler.Config{
		MinionLister: &storeToMinionLister{minionCache},
		Algorithm:    algo,
		Binder:       &binder{factory.Client},
		NextPod: func() *api.Pod {
			pod := podQueue.Pop().(*api.Pod)
			// TODO: Remove or reduce verbosity by sep 6th, 2014. Leave until then to
			// make it easy to find scheduling problems.
			glog.Infof("About to try and schedule pod %v\n"+
				"\tknown minions: %v\n"+
				"\tknown scheduled pods: %v\n",
				pod.ID, minionCache.Contains(), podCache.Contains())
			return pod
		},
		Error: factory.makeDefaultErrorFunc(podQueue),
	}
}
Example 15: TestRetryController_ratelimit
// This test ensures that when events are retried, the
// requeue rate does not exceed the configured rate limit,
// including burst behavior.
func TestRetryController_ratelimit(t *testing.T) {
	keyFunc := func(obj interface{}) (string, error) {
		return "key", nil
	}
	fifo := kcache.NewFIFO(keyFunc)
	limiter := &mockLimiter{}
	retryManager := NewQueueRetryManager(fifo,
		keyFunc,
		func(_ interface{}, _ error, r Retry) bool {
			return r.Count < 15
		},
		limiter,
	)
	for i := 0; i < 10; i++ {
		retryManager.Retry("key", nil)
	}
	if limiter.count != 10 {
		t.Fatalf("Retries did not invoke rate limiter, expected %d got %d", 10, limiter.count)
	}
}
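For context, a sketch of what the mockLimiter above might look like. The rate-limiter interface changed across revisions of the project, so the exact method set (Accept/CanAccept/Stop) is an assumption rather than the actual definition:
// mockLimiter counts Accept calls so the test can assert how many times
// the retry manager consulted the limiter. (Method set is assumed.)
type mockLimiter struct {
	count int
}

func (m *mockLimiter) Accept()         { m.count++ }
func (m *mockLimiter) CanAccept() bool { return true }
func (m *mockLimiter) Stop()           {}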