This article collects typical usage examples of the Golang function k8s.io/kubernetes/plugin/pkg/scheduler/algorithm.FakeNodeLister. If you are wondering what FakeNodeLister does, how to call it, or want to see it used in context, the hand-picked code samples below should help.
The article shows 15 code examples of the FakeNodeLister function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang examples.
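Before the examples, here is a minimal, self-contained sketch of the pattern they all share: FakeNodeLister is a test double that satisfies the algorithm.NodeLister interface by returning a fixed set of nodes from List(). Its underlying type differs between Kubernetes releases (a []*api.Node slice in some of the examples below, an api.NodeList value in others), so treat this as an illustrative sketch for the slice-based variant rather than a drop-in for any particular release.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

func main() {
	node := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}}

	// Wrap a fixed slice of nodes so that anything expecting an
	// algorithm.NodeLister (for example the scheduler's Config.NodeLister
	// field, or PrioritizeNodes in the examples below) sees exactly these nodes.
	// This assumes the []*api.Node form of FakeNodeLister; older releases use
	// algorithm.FakeNodeLister(api.NodeList{Items: ...}) instead.
	var lister algorithm.NodeLister = algorithm.FakeNodeLister([]*api.Node{&node})

	nodes, err := lister.List()
	if err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Printf("fake lister returned %d node(s)\n", len(nodes))
}

Because the fake simply echoes back whatever nodes it was constructed with, tests can pin the scheduler's view of the cluster to a known node set without running an API server or informers, which is exactly how the test functions below use it.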
Example 1: Schedule
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
	nodes, err := nodeLister.List()
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", ErrNoNodesAvailable
	}
	filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.pods, g.predicates, nodes)
	if err != nil {
		return "", err
	}
	priorityList, err := PrioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeNodeLister(filteredNodes))
	if err != nil {
		return "", err
	}
	if len(priorityList) == 0 {
		return "", &FitError{
			Pod:              pod,
			FailedPredicates: failedPredicateMap,
		}
	}
	return g.selectHost(priorityList)
}
Example 2: Schedule
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
	nodes, err := nodeLister.List()
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", ErrNoNodesAvailable
	}
	// TODO: we should compute this once and dynamically update it using Watch, not constantly re-compute.
	// But at least we're now only doing it in one place
	machinesToPods, err := predicates.MapPodsToMachines(g.pods)
	if err != nil {
		return "", err
	}
	filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, machinesToPods, g.predicates, nodes, g.extenders)
	if err != nil {
		return "", err
	}
	priorityList, err := PrioritizeNodes(pod, machinesToPods, g.pods, g.prioritizers, algorithm.FakeNodeLister(filteredNodes), g.extenders)
	if err != nil {
		return "", err
	}
	if len(priorityList) == 0 {
		return "", &FitError{
			Pod:              pod,
			FailedPredicates: failedPredicateMap,
		}
	}
	return g.selectHost(priorityList)
}
Example 3: Schedule
// Schedule tries to schedule the given pod to one of the nodes in the node list.
// If it succeeds, it will return the name of the node.
// If it fails, it will return a FitError with the reasons.
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
	nodes, err := nodeLister.List()
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", ErrNoNodesAvailable
	}
	// Used for all fit and priority funcs.
	nodeNameToInfo, err := g.cache.GetNodeNameToInfoMap()
	if err != nil {
		return "", err
	}
	filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, nodeNameToInfo, g.predicates, nodes, g.extenders)
	if err != nil {
		return "", err
	}
	if len(filteredNodes.Items) == 0 {
		return "", &FitError{
			Pod:              pod,
			FailedPredicates: failedPredicateMap,
		}
	}
	priorityList, err := PrioritizeNodes(pod, nodeNameToInfo, g.prioritizers, algorithm.FakeNodeLister(filteredNodes), g.extenders)
	if err != nil {
		return "", err
	}
	return g.selectHost(priorityList)
}
Example 4: TestSchedulerNoPhantomPodAfterDelete
func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := schedulercache.New(10*time.Minute, stop)
	firstPod := podWithPort("pod.Name", "", 8080)
	node := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}}
	scache.AddNode(&node)
	nodeLister := algorithm.FakeNodeLister([]*api.Node{&node})
	predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
	scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, firstPod, &node)

	// We use conflicting pod ports to trigger a fit predicate failure.
	secondPod := podWithPort("bar", "", 8080)
	queuedPodStore.Add(secondPod)
	// queuedPodStore: [bar:8080]
	// cache: [(assumed)foo:8080]
	scheduler.scheduleOne()
	select {
	case err := <-errChan:
		expectErr := &FitError{
			Pod:              secondPod,
			FailedPredicates: FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}},
		}
		if !reflect.DeepEqual(expectErr, err) {
			t.Errorf("err want=%v, get=%v", expectErr, err)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}

	// We mimic the cache's workflow when a pod is removed by the user.
	// Note: if the schedulercache timeout were very short, the first pod would expire
	// and be removed on its own (without any explicit action on the schedulercache). Even in that case,
	// explicitly adding the pod corrects the behavior as well.
	firstPod.Spec.NodeName = node.Name
	if err := scache.AddPod(firstPod); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := scache.RemovePod(firstPod); err != nil {
		t.Fatalf("err: %v", err)
	}

	queuedPodStore.Add(secondPod)
	scheduler.scheduleOne()
	select {
	case b := <-bindingChan:
		expectBinding := &api.Binding{
			ObjectMeta: api.ObjectMeta{Name: "bar"},
			Target:     api.ObjectReference{Kind: "Node", Name: node.Name},
		}
		if !reflect.DeepEqual(expectBinding, b) {
			t.Errorf("binding want=%v, get=%v", expectBinding, b)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
}
Example 5: TestSchedulerNoPhantomPodAfterExpire
func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := schedulercache.New(100*time.Millisecond, stop)
	pod := podWithPort("pod.Name", "", 8080)
	node := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}}
	scache.AddNode(&node)
	nodeLister := algorithm.FakeNodeLister([]*api.Node{&node})
	predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
	scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, pod, &node)

	waitPodExpireChan := make(chan struct{})
	timeout := make(chan struct{})
	go func() {
		for {
			select {
			case <-timeout:
				return
			default:
			}
			pods, err := scache.List(labels.Everything())
			if err != nil {
				t.Fatalf("cache.List failed: %v", err)
			}
			if len(pods) == 0 {
				close(waitPodExpireChan)
				return
			}
			time.Sleep(100 * time.Millisecond)
		}
	}()
	// Wait for the assumed pod to expire.
	select {
	case <-waitPodExpireChan:
	case <-time.After(wait.ForeverTestTimeout):
		close(timeout)
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}

	// We use conflicting pod ports to trigger a fit predicate failure if the first pod is not removed.
	secondPod := podWithPort("bar", "", 8080)
	queuedPodStore.Add(secondPod)
	scheduler.scheduleOne()
	select {
	case b := <-bindingChan:
		expectBinding := &api.Binding{
			ObjectMeta: api.ObjectMeta{Name: "bar"},
			Target:     api.ObjectReference{Kind: "Node", Name: node.Name},
		}
		if !reflect.DeepEqual(expectBinding, b) {
			t.Errorf("binding want=%v, get=%v", expectBinding, b)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
}
Example 6: TestSchedulerErrorWithLongBinding
// The scheduler should preserve the predicate constraint even if binding takes longer
// than the cache TTL.
func TestSchedulerErrorWithLongBinding(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)

	firstPod := podWithPort("foo", "", 8080)
	conflictPod := podWithPort("bar", "", 8080)
	pods := map[string]*v1.Pod{firstPod.Name: firstPod, conflictPod.Name: conflictPod}
	for _, test := range []struct {
		Expected        map[string]bool
		CacheTTL        time.Duration
		BindingDuration time.Duration
	}{
		{
			Expected:        map[string]bool{firstPod.Name: true},
			CacheTTL:        100 * time.Millisecond,
			BindingDuration: 300 * time.Millisecond,
		},
		{
			Expected:        map[string]bool{firstPod.Name: true},
			CacheTTL:        10 * time.Second,
			BindingDuration: 300 * time.Millisecond,
		},
	} {
		queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
		scache := schedulercache.New(test.CacheTTL, stop)

		node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1"}}
		scache.AddNode(&node)

		nodeLister := algorithm.FakeNodeLister([]*v1.Node{&node})
		predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}

		scheduler, bindingChan := setupTestSchedulerLongBindingWithRetry(
			queuedPodStore, scache, nodeLister, predicateMap, stop, test.BindingDuration)
		scheduler.Run()
		queuedPodStore.Add(firstPod)
		queuedPodStore.Add(conflictPod)

		resultBindings := map[string]bool{}
		waitChan := time.After(5 * time.Second)
		for finished := false; !finished; {
			select {
			case b := <-bindingChan:
				resultBindings[b.Name] = true
				p := pods[b.Name]
				p.Spec.NodeName = b.Target.Name
				scache.AddPod(p)
			case <-waitChan:
				finished = true
			}
		}
		if !reflect.DeepEqual(resultBindings, test.Expected) {
			t.Errorf("Result bindings are not equal to expected. %v != %v", resultBindings, test.Expected)
		}
	}
}
Example 7: TestSchedulerFailedSchedulingReasons
func TestSchedulerFailedSchedulingReasons(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := schedulercache.New(10*time.Minute, stop)
	node := api.Node{
		ObjectMeta: api.ObjectMeta{Name: "machine1"},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceCPU:    *(resource.NewQuantity(2, resource.DecimalSI)),
				api.ResourceMemory: *(resource.NewQuantity(100, resource.DecimalSI)),
				api.ResourcePods:   *(resource.NewQuantity(10, resource.DecimalSI)),
			},
			Allocatable: api.ResourceList{
				api.ResourceCPU:    *(resource.NewQuantity(2, resource.DecimalSI)),
				api.ResourceMemory: *(resource.NewQuantity(100, resource.DecimalSI)),
				api.ResourcePods:   *(resource.NewQuantity(10, resource.DecimalSI)),
			},
		},
	}
	scache.AddNode(&node)
	nodeLister := algorithm.FakeNodeLister([]*api.Node{&node})
	predicateMap := map[string]algorithm.FitPredicate{
		"PodFitsResources": predicates.PodFitsResources,
	}
	scheduler, _, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap)

	podWithTooBigResourceRequests := podWithResources("bar", "", api.ResourceList{
		api.ResourceCPU:    *(resource.NewQuantity(4, resource.DecimalSI)),
		api.ResourceMemory: *(resource.NewQuantity(500, resource.DecimalSI)),
	}, api.ResourceList{
		api.ResourceCPU:    *(resource.NewQuantity(4, resource.DecimalSI)),
		api.ResourceMemory: *(resource.NewQuantity(500, resource.DecimalSI)),
	})
	queuedPodStore.Add(podWithTooBigResourceRequests)
	scheduler.scheduleOne()
	select {
	case err := <-errChan:
		expectErr := &FitError{
			Pod: podWithTooBigResourceRequests,
			FailedPredicates: FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{
				predicates.NewInsufficientResourceError(api.ResourceCPU, 4000, 0, 2000),
				predicates.NewInsufficientResourceError(api.ResourceMemory, 500, 0, 100),
			}},
		}
		if !reflect.DeepEqual(expectErr, err) {
			t.Errorf("err want=%+v, get=%+v", expectErr, err)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
}
Example 8: setupTestSchedulerWithOnePod
// queuedPodStore: pods queued before processing.
// cache: scheduler cache that might contain assumed pods.
func setupTestSchedulerWithOnePod(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, pod *api.Pod) (*Scheduler, chan *api.Binding, chan error) {
	// Create the scheduler config
	algo := NewGenericScheduler(
		scache,
		map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts},
		[]algorithm.PriorityConfig{},
		[]algorithm.SchedulerExtender{})
	bindingChan := make(chan *api.Binding, 1)
	errChan := make(chan error, 1)
	cfg := &Config{
		SchedulerCache: scache,
		NodeLister: algorithm.FakeNodeLister(
			[]*api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			bindingChan <- b
			return nil
		}},
		NextPod: func() *api.Pod {
			return clientcache.Pop(queuedPodStore).(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			errChan <- err
		},
		Recorder:            &record.FakeRecorder{},
		PodConditionUpdater: fakePodConditionUpdater{},
	}
	scheduler := New(cfg)

	queuedPodStore.Add(pod)
	// queuedPodStore: [foo:8080]
	// cache: []
	scheduler.scheduleOne()
	// queuedPodStore: []
	// cache: [(assumed)foo:8080]
	select {
	case b := <-bindingChan:
		expectBinding := &api.Binding{
			ObjectMeta: api.ObjectMeta{Name: "pod.Name"},
			Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
		}
		if !reflect.DeepEqual(expectBinding, b) {
			t.Errorf("binding want=%v, get=%v", expectBinding, b)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
	return scheduler, bindingChan, errChan
}
Example 9: TestSchedulerRateLimitsBinding
func TestSchedulerRateLimitsBinding(t *testing.T) {
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{Store: scheduledPodStore}
	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{Store: queuedPodStore}
	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{},
		[]algorithm.PriorityConfig{},
		[]algorithm.SchedulerExtender{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	// Rate limit to 1 pod
	fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
	c := &Config{
		Modeler: modeler,
		NodeLister: algorithm.FakeNodeLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder:            &record.FakeRecorder{},
		BindPodsRateLimiter: &fr,
	}

	s := New(c)
	firstPod := podWithID("foo", "")
	secondPod := podWithID("boo", "")
	queuedPodStore.Add(firstPod)
	queuedPodStore.Add(secondPod)

	for i, hitRateLimit := range []bool{true, false} {
		s.scheduleOne()
		if fr.acceptValues[i] != hitRateLimit {
			t.Errorf("Unexpected rate limiting, expect rate limit to be: %v but found it was %v", hitRateLimit, fr.acceptValues[i])
		}
	}
}
Example 10: Schedule
// Schedule tries to schedule the given pod to one of the nodes in the node list.
// If it succeeds, it will return the name of the node.
// If it fails, it will return a FitError with the reasons.
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
	var trace *util.Trace
	if pod != nil {
		trace = util.NewTrace(fmt.Sprintf("Scheduling %s/%s", pod.Namespace, pod.Name))
	} else {
		trace = util.NewTrace("Scheduling <nil> pod")
	}
	defer trace.LogIfLong(20 * time.Millisecond)

	nodes, err := nodeLister.List()
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", ErrNoNodesAvailable
	}

	// Used for all fit and priority funcs.
	nodeNameToInfo, err := g.cache.GetNodeNameToInfoMap()
	if err != nil {
		return "", err
	}

	trace.Step("Computing predicates")
	filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, nodeNameToInfo, g.predicates, nodes, g.extenders)
	if err != nil {
		return "", err
	}
	if len(filteredNodes.Items) == 0 {
		return "", &FitError{
			Pod:              pod,
			FailedPredicates: failedPredicateMap,
		}
	}

	trace.Step("Prioritizing")
	priorityList, err := PrioritizeNodes(pod, nodeNameToInfo, g.prioritizers, algorithm.FakeNodeLister(filteredNodes), g.extenders)
	if err != nil {
		return "", err
	}

	trace.Step("Selecting host")
	return g.selectHost(priorityList)
}
Example 11: TestScheduler
func TestScheduler(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()
	errS := errors.New("scheduler")
	errB := errors.New("binder")

	table := []struct {
		injectBindError  error
		sendPod          *api.Pod
		algo             algorithm.ScheduleAlgorithm
		expectErrorPod   *api.Pod
		expectAssumedPod *api.Pod
		expectError      error
		expectBind       *api.Binding
		eventReason      string
	}{
		{
			sendPod:          podWithID("foo", ""),
			algo:             mockScheduler{"machine1", nil},
			expectBind:       &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			expectAssumedPod: podWithID("foo", "machine1"),
			eventReason:      "Scheduled",
		}, {
			sendPod:        podWithID("foo", ""),
			algo:           mockScheduler{"machine1", errS},
			expectError:    errS,
			expectErrorPod: podWithID("foo", ""),
			eventReason:    "FailedScheduling",
		}, {
			sendPod:         podWithID("foo", ""),
			algo:            mockScheduler{"machine1", nil},
			expectBind:      &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			injectBindError: errB,
			expectError:     errB,
			expectErrorPod:  podWithID("foo", ""),
			eventReason:     "FailedScheduling",
		},
	}

	for i, item := range table {
		var gotError error
		var gotPod *api.Pod
		var gotAssumedPod *api.Pod
		var gotBinding *api.Binding
		c := &Config{
			Modeler: &FakeModeler{
				AssumePodFunc: func(pod *api.Pod) {
					gotAssumedPod = pod
				},
			},
			NodeLister: algorithm.FakeNodeLister(
				api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
			),
			Algorithm: item.algo,
			Binder: fakeBinder{func(b *api.Binding) error {
				gotBinding = b
				return item.injectBindError
			}},
			Error: func(p *api.Pod, err error) {
				gotPod = p
				gotError = err
			},
			NextPod: func() *api.Pod {
				return item.sendPod
			},
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		}
		s := New(c)
		called := make(chan struct{})
		events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
			if e, a := item.eventReason, e.Reason; e != a {
				t.Errorf("%v: expected %v, got %v", i, e, a)
			}
			close(called)
		})
		s.scheduleOne()
		if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: assumed pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectBind, gotBinding; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: %s", i, util.ObjectDiff(e, a))
		}
		<-called
		events.Stop()
	}
}
Example 12: TestSchedulerForgetAssumedPodAfterDelete
func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()

	// Set up the modeler so we control the contents of all 3 stores: assumed,
	// scheduled and queued.
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{Store: scheduledPodStore}
	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{Store: queuedPodStore}
	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	// Create a fake clock used to timestamp entries and calculate TTL. Nothing
	// will expire until we flip to something older than the TTL, at which point
	// all entries inserted with fakeTime will expire.
	ttl := 30 * time.Second
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	fakeClock := &util.FakeClock{Time: fakeTime}
	ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
	assumedPodsStore := cache.NewFakeExpirationStore(
		cache.MetaNamespaceKeyFunc, nil, ttlPolicy, fakeClock)
	modeler.assumedPods = &cache.StoreToPodLister{Store: assumedPodsStore}

	// Port is the easiest way to cause a fit predicate failure
	podPort := 8080
	firstPod := podWithPort("foo", "", podPort)

	// Create the scheduler config
	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts},
		[]algorithm.PriorityConfig{},
		[]algorithm.SchedulerExtender{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	var gotBinding *api.Binding
	c := &Config{
		Modeler: modeler,
		NodeLister: algorithm.FakeNodeLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			scheduledPodStore.Add(podWithPort(b.Name, b.Target.Name, podPort))
			gotBinding = b
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
	}

	// First scheduling pass should schedule the pod
	s := New(c)
	called := make(chan struct{})
	events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "Scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	queuedPodStore.Add(firstPod)
	// queuedPodStore: [foo:8080]
	// scheduledPodStore: []
	// assumedPods: []
	s.scheduleOne()
	// queuedPodStore: []
	// scheduledPodStore: [foo:8080]
	// assumedPods: [foo:8080]

	pod, exists, _ := scheduledPodStore.GetByKey("foo")
	if !exists {
		t.Errorf("Expected scheduled pod store to contain pod")
	}
	pod, exists, _ = queuedPodStore.GetByKey("foo")
	if exists {
		t.Errorf("Did not expect a queued pod, found %+v", pod)
	}
	pod, exists, _ = assumedPodsStore.GetByKey("foo")
	if !exists {
		t.Errorf("Assumed pod store should contain stale pod")
	}

	expectBind := &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}
	<-called
	//......... the rest of this example is omitted .........
Example 13: TestZoneSpreadPriority
	//......... the beginning of this example is omitted .........
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes:    labeledNodes,
		services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
		expectedList: []schedulerapi.HostPriority{{"machine11", 10}, {"machine12", 10},
			{"machine21", 0}, {"machine22", 0},
			{"machine01", 0}, {"machine02", 0}},
		test: "three pods, one service pod",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes:    labeledNodes,
		services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
		expectedList: []schedulerapi.HostPriority{{"machine11", 5}, {"machine12", 5},
			{"machine21", 5}, {"machine22", 5},
			{"machine01", 0}, {"machine02", 0}},
		test: "three pods, two service pods on different machines",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
		},
		nodes:    labeledNodes,
		services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
		expectedList: []schedulerapi.HostPriority{{"machine11", 0}, {"machine12", 0},
			{"machine21", 10}, {"machine22", 10},
			{"machine01", 0}, {"machine02", 0}},
		test: "three service label match pods in different namespaces",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes:    labeledNodes,
		services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
		expectedList: []schedulerapi.HostPriority{{"machine11", 6}, {"machine12", 6},
			{"machine21", 3}, {"machine22", 3},
			{"machine01", 0}, {"machine02", 0}},
		test: "four pods, three service pods",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes:    labeledNodes,
		services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
		expectedList: []schedulerapi.HostPriority{{"machine11", 3}, {"machine12", 3},
			{"machine21", 6}, {"machine22", 6},
			{"machine01", 0}, {"machine02", 0}},
		test: "service with partial pod label matches",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
		pods: []*api.Pod{
			{Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes:    labeledNodes,
		services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
		expectedList: []schedulerapi.HostPriority{{"machine11", 7}, {"machine12", 7},
			{"machine21", 5}, {"machine22", 5},
			{"machine01", 0}, {"machine02", 0}},
		test: "service pod on non-zoned node",
	},
	}
	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
		zoneSpread := ServiceAntiAffinity{podLister: algorithm.FakePodLister(test.pods), serviceLister: algorithm.FakeServiceLister(test.services), label: "zone"}
		list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(makeLabeledNodeList(test.nodes)))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		// Sort the two lists to avoid failures on account of different ordering.
		sort.Sort(test.expectedList)
		sort.Sort(list)
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}
Example 14: TestSelectorSpreadPriority
	//......... the beginning of this example is omitted .........
		nodes:    []string{"machine1", "machine2"},
		services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
		rss:      []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
		// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
		expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
		test:         "service with partial pod label matches with service and replication controller",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes:    []string{"machine1", "machine2"},
		services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
		rcs:      []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
		// Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above.
		expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
		test:         "disjoined service and replication controller should be treated equally",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes:    []string{"machine1", "machine2"},
		services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
		rss:      []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
		// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
		expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
		test:         "disjoined service and replication controller should be treated equally",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes: []string{"machine1", "machine2"},
		rcs:   []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
		// Both Nodes have one pod from the given RC, hence both get 0 score.
		expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
		test:         "Replication controller with partial pod label matches",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes: []string{"machine1", "machine2"},
		rss:   []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
		// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
		expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
		test:         "Replication controller with partial pod label matches",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes:        []string{"machine1", "machine2"},
		rcs:          []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
		expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
		test:         "Replication controller with partial pod label matches",
	},
	{
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
		pods: []*api.Pod{
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
		},
		nodes: []string{"machine1", "machine2"},
		rss:   []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
		// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
		expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
		test:         "Replication controller with partial pod label matches",
	},
	}
	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
		selectorSpread := SelectorSpread{podLister: algorithm.FakePodLister(test.pods), serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs), replicaSetLister: algorithm.FakeReplicaSetLister(test.rss)}
		list, err := selectorSpread.CalculateSpreadPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(makeNodeList(test.nodes)))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}
Example 15: TestZoneSelectorSpreadPriority
	//......... the beginning of this example is omitted .........
	{
		pod: buildPod("", labels1),
		pods: []*api.Pod{
			buildPod(nodeMachine1Zone1, labels2),
			buildPod(nodeMachine1Zone2, labels1),
			buildPod(nodeMachine2Zone2, labels1),
			buildPod(nodeMachine1Zone3, labels2),
			buildPod(nodeMachine2Zone3, labels1),
		},
		services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
		expectedList: []schedulerapi.HostPriority{
			{nodeMachine1Zone1, 10},
			{nodeMachine1Zone2, 0}, // Pod on node
			{nodeMachine2Zone2, 0}, // Pod on node
			{nodeMachine1Zone3, 6}, // Pod in zone
			{nodeMachine2Zone3, 3}, // Pod on node
			{nodeMachine3Zone3, 6}, // Pod in zone
		},
		test: "five pods, 3 matching (z2=2, z3=1)",
	},
	{
		pod: buildPod("", labels1),
		pods: []*api.Pod{
			buildPod(nodeMachine1Zone1, labels1),
			buildPod(nodeMachine1Zone2, labels1),
			buildPod(nodeMachine2Zone2, labels2),
			buildPod(nodeMachine1Zone3, labels1),
		},
		services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
		expectedList: []schedulerapi.HostPriority{
			{nodeMachine1Zone1, 0}, // Pod on node
			{nodeMachine1Zone2, 0}, // Pod on node
			{nodeMachine2Zone2, 3}, // Pod in zone
			{nodeMachine1Zone3, 0}, // Pod on node
			{nodeMachine2Zone3, 3}, // Pod in zone
			{nodeMachine3Zone3, 3}, // Pod in zone
		},
		test: "four pods, 3 matching (z1=1, z2=1, z3=1)",
	},
	{
		pod: buildPod("", labels1),
		pods: []*api.Pod{
			buildPod(nodeMachine1Zone1, labels1),
			buildPod(nodeMachine1Zone2, labels1),
			buildPod(nodeMachine1Zone3, labels1),
			buildPod(nodeMachine2Zone2, labels2),
		},
		services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
		expectedList: []schedulerapi.HostPriority{
			{nodeMachine1Zone1, 0}, // Pod on node
			{nodeMachine1Zone2, 0}, // Pod on node
			{nodeMachine2Zone2, 3}, // Pod in zone
			{nodeMachine1Zone3, 0}, // Pod on node
			{nodeMachine2Zone3, 3}, // Pod in zone
			{nodeMachine3Zone3, 3}, // Pod in zone
		},
		test: "four pods, 3 matching (z1=1, z2=1, z3=1)",
	},
	{
		pod: buildPod("", labels1),
		pods: []*api.Pod{
			buildPod(nodeMachine1Zone3, labels1),
			buildPod(nodeMachine1Zone2, labels1),
			buildPod(nodeMachine1Zone3, labels1),
		},
		rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: labels1}}},
		expectedList: []schedulerapi.HostPriority{
			// Note that because we put two pods on the same node (nodeMachine1Zone3),
			// the values here are questionable for zone2, in particular for nodeMachine1Zone2.
			// However they kind of make sense; zone1 is still most-highly favored.
			// zone3 is in general least favored, and m1.z3 particularly low priority.
			// We would probably prefer to see a bigger gap between putting a second
			// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct.
			// This is also consistent with what we have already.
			{nodeMachine1Zone1, 10}, // No pods in zone
			{nodeMachine1Zone2, 5},  // Pod on node
			{nodeMachine2Zone2, 6},  // Pod in zone
			{nodeMachine1Zone3, 0},  // Two pods on node
			{nodeMachine2Zone3, 3},  // Pod in zone
			{nodeMachine3Zone3, 3},  // Pod in zone
		},
		test: "Replication controller spreading (z1=0, z2=1, z3=2)",
	},
	}
	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
		selectorSpread := SelectorSpread{podLister: algorithm.FakePodLister(test.pods), serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs), replicaSetLister: algorithm.FakeReplicaSetLister(test.rss)}
		list, err := selectorSpread.CalculateSpreadPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(makeLabeledNodeList(labeledNodes)))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		// Sort the two lists to avoid failures on account of different ordering.
		sort.Sort(test.expectedList)
		sort.Sort(list)
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}