This article collects typical usage examples of the Golang function k8s.io/kubernetes/contrib/mesos/pkg/queue.NewHistorical. If you are unsure what NewHistorical does, how to use it, or are simply looking for examples, the curated code samples below should help.
The following presents 4 code examples of the NewHistorical function, sorted by popularity by default. You can upvote the examples you find useful to help the site recommend better Golang code samples.
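Before diving into the examples, a quick note on the function itself, inferred purely from the calls below rather than from the package source: queue.NewHistorical takes a single argument, an optional chan queue.Entry, which may be nil (Examples 1 and 2) or a buffered channel that other components later drain (Examples 3 and 4), and it returns a history-keeping FIFO store that a cache.Reflector can populate once wrapped in a podStoreAdapter. A minimal sketch of that construction pattern follows; the backlog size and the variable names are illustrative only, not part of the package API:

// Hedged sketch based on Examples 3 and 4 below; the backlog size of 32 and
// the variable names are illustrative, not part of the package API.
bypass := make(chan queue.Entry, 32)                           // drained elsewhere, e.g. by a deleter as in Examples 3 and 4
podUpdates := &podStoreAdapter{queue.NewHistorical(bypass)}    // nil is also accepted, as in Examples 1 and 2
reflector := cache.NewReflector(lw, &api.Pod{}, podUpdates, 0) // feeds pod updates into the store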
Example 1: NewRegistrator
func NewRegistrator(client *clientset.Clientset, lookupNode LookupFunc) *clientRegistrator {
    return &clientRegistrator{
        lookupNode: lookupNode,
        client:     client,
        queue:      queue.NewHistorical(nil),
    }
}
Example 2: NewRegistrator
func NewRegistrator(client unversionedcore.NodesGetter, lookupNode LookupFunc) *clientRegistrator {
    return &clientRegistrator{
        lookupNode: lookupNode,
        client:     client,
        queue:      queue.NewHistorical(nil),
    }
}
Example 3: New
func New(c *config.Config, fw framework.Framework, ps podschedulers.PodScheduler,
    client *client.Client, recorder record.EventRecorder, terminate <-chan struct{}, mux *http.ServeMux, lw *cache.ListWatch) scheduler.Scheduler {
    core := &sched{
        framework:    fw,
        taskRegistry: podtask.NewInMemoryRegistry(),
    }

    // Watch and queue pods that need scheduling.
    podUpdatesBypass := make(chan queue.Entry, c.UpdatesBacklog)
    podUpdates := &podStoreAdapter{queue.NewHistorical(podUpdatesBypass)}
    reflector := cache.NewReflector(lw, &api.Pod{}, podUpdates, 0)

    q := queuer.New(queue.NewDelayFIFO(), podUpdates)
    algorithm := algorithm.New(core, podUpdates, ps)
    podDeleter := deleter.New(core, q)
    core.podReconciler = podreconciler.New(core, client, q, podDeleter)

    bo := backoff.New(c.InitialPodBackoff.Duration, c.MaxPodBackoff.Duration)
    newBC := func(podKey string) queue.BreakChan {
        return queue.BreakChan(core.Offers().Listen(podKey, func(offer *mesos.Offer) bool {
            core.Lock()
            defer core.Unlock()
            switch task, state := core.Tasks().ForPod(podKey); state {
            case podtask.StatePending:
                // Assess fitness of pod with the current offer. The scheduler normally
                // "backs off" when it can't find an offer that matches up with a pod.
                // The backoff period for a pod can terminate sooner if an offer becomes
                // available that matches up.
                return !task.Has(podtask.Launched) && ps.FitPredicate()(task, offer, nil)
            default:
                // no point in continuing to check for matching offers
                return true
            }
        }))
    }

    errorHandler := errorhandler.New(core, bo, q, newBC)
    binder := binder.New(core)
    startLatch := make(chan struct{})
    runtime.On(startLatch, func() {
        reflector.Run() // TODO(jdef) should listen for termination
        podDeleter.Run(podUpdatesBypass, terminate)
        q.Run(terminate)
        q.InstallDebugHandlers(mux)
        podtask.InstallDebugHandlers(core.Tasks(), mux)
    })
    core.controller = controller.New(client, algorithm, recorder, q.Yield, errorHandler.Error, binder, startLatch)
    return core
}
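A note on the wiring in Example 3 (an inference from the code shown here, not from package documentation): the channel handed to queue.NewHistorical, podUpdatesBypass, is the same channel later drained by podDeleter.Run, so entries emitted by the historical queue reach the deleter directly rather than travelling through the scheduling queue q; this appears to be what the "Bypass" suffix refers to. Example 4 below uses the same pattern with the channel simply named updates.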
Example 4: NewPluginConfig
func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *http.ServeMux,
    podsWatcher *cache.ListWatch) *PluginConfig {

    // Watch and queue pods that need scheduling.
    updates := make(chan queue.Entry, k.schedcfg.UpdatesBacklog)
    podUpdates := &podStoreAdapter{queue.NewHistorical(updates)}
    reflector := cache.NewReflector(podsWatcher, &api.Pod{}, podUpdates, 0)

    // lock that guards critical sections that involve transferring pods from
    // the store (cache) to the scheduling queue; its purpose is to maintain
    // an ordering (vs interleaving) of operations that's easier to reason about.
    kapi := &k8smScheduler{internal: k}
    q := newQueuer(podUpdates)
    podDeleter := &deleter{
        api: kapi,
        qr:  q,
    }
    eh := &errorHandler{
        api:     kapi,
        backoff: backoff.New(k.schedcfg.InitialPodBackoff.Duration, k.schedcfg.MaxPodBackoff.Duration),
        qr:      q,
    }
    startLatch := make(chan struct{})
    eventBroadcaster := record.NewBroadcaster()
    runtime.On(startLatch, func() {
        eventBroadcaster.StartRecordingToSink(k.client.Events(""))
        reflector.Run() // TODO(jdef) should listen for termination
        podDeleter.Run(updates, terminate)
        q.Run(terminate)
        q.installDebugHandlers(mux)
        podtask.InstallDebugHandlers(k.taskRegistry, mux)
    })
    return &PluginConfig{
        Config: &plugin.Config{
            MinionLister: nil,
            Algorithm: &kubeScheduler{
                api:                      kapi,
                podUpdates:               podUpdates,
                defaultContainerCPULimit: k.defaultContainerCPULimit,
                defaultContainerMemLimit: k.defaultContainerMemLimit,
            },
            Binder:   &binder{api: kapi},
            NextPod:  q.yield,
            Error:    eh.handleSchedulingError,
            Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
        },
        api:      kapi,
        client:   k.client,
        qr:       q,
        deleter:  podDeleter,
        starting: startLatch,
    }
}