This page collects typical usage examples of the Go function FakeMinionLister from the package k8s.io/kubernetes/plugin/pkg/scheduler/algorithm. If you are wondering what FakeMinionLister is for, or how and when to call it, the examples below should help.
11 code examples of FakeMinionLister are shown below, sorted by popularity by default.
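Before diving into the examples, it helps to know what FakeMinionLister looks like. The sketch below is a plausible definition, inferred from how every example constructs it directly from an api.NodeList and passes it wherever an algorithm.MinionLister is expected; the exact upstream source may differ slightly.

// FakeMinionLister adapts a static api.NodeList to the algorithm.MinionLister
// interface, so tests can inject a fixed set of nodes into the scheduler.
type FakeMinionLister api.NodeList

// List returns the wrapped node list unchanged and never fails.
func (f FakeMinionLister) List() (api.NodeList, error) {
    return api.NodeList(f), nil
}

Because it is just a named NodeList type, turning a node list into a lister is a zero-cost conversion, which is what makes it so convenient in tests.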
Example 1: Schedule
func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionLister) (string, error) {
    minions, err := minionLister.List()
    if err != nil {
        return "", err
    }
    if len(minions.Items) == 0 {
        return "", ErrNoNodesAvailable
    }
    filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.pods, g.predicates, minions)
    if err != nil {
        return "", err
    }
    priorityList, err := PrioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeMinionLister(filteredNodes))
    if err != nil {
        return "", err
    }
    if len(priorityList) == 0 {
        return "", &FitError{
            Pod: pod,
            FailedPredicates: failedPredicateMap,
        }
    }
    return g.selectHost(priorityList)
}
Example 2: TestSchedulerRateLimitsBinding
func TestSchedulerRateLimitsBinding(t *testing.T) {
    scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
    scheduledPodLister := &cache.StoreToPodLister{Store: scheduledPodStore}
    queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
    queuedPodLister := &cache.StoreToPodLister{Store: queuedPodStore}
    modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)
    algo := NewGenericScheduler(
        map[string]algorithm.FitPredicate{},
        []algorithm.PriorityConfig{},
        modeler.PodLister(),
        rand.New(rand.NewSource(time.Now().UnixNano())))
    // Rate limit to 1 pod
    fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
    c := &Config{
        Modeler: modeler,
        MinionLister: algorithm.FakeMinionLister(
            api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
        ),
        Algorithm: algo,
        Binder: fakeBinder{func(b *api.Binding) error {
            return nil
        }},
        NextPod: func() *api.Pod {
            return queuedPodStore.Pop().(*api.Pod)
        },
        Error: func(p *api.Pod, err error) {
            t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
        },
        Recorder: &record.FakeRecorder{},
        BindPodsRateLimiter: &fr,
    }
    s := New(c)
    firstPod := podWithID("foo", "")
    secondPod := podWithID("boo", "")
    queuedPodStore.Add(firstPod)
    queuedPodStore.Add(secondPod)
    for i, hitRateLimit := range []bool{true, false} {
        s.scheduleOne()
        if fr.acceptValues[i] != hitRateLimit {
            t.Errorf("Unexpected rate limiting, expect rate limit to be: %v but found it was %v", hitRateLimit, fr.acceptValues[i])
        }
    }
}
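fakeBinder and FakeRateLimiter are test doubles defined elsewhere in the same test file. The sketch below is a minimal version consistent with how they are used above, assuming the same package and imports as the test; the field names b, r, and acceptValues are inferred from the call sites, not copied from upstream.

// fakeBinder implements the scheduler's Binder interface by delegating
// Bind to a caller-supplied function.
type fakeBinder struct {
    b func(binding *api.Binding) error
}

func (fb fakeBinder) Bind(binding *api.Binding) error { return fb.b(binding) }

// FakeRateLimiter wraps a real token-bucket limiter and records, on every
// Accept call, whether the wrapped limiter still had capacity at that moment.
type FakeRateLimiter struct {
    r util.RateLimiter
    acceptValues []bool
}

func (fr *FakeRateLimiter) CanAccept() bool { return true }

func (fr *FakeRateLimiter) Accept() {
    fr.acceptValues = append(fr.acceptValues, fr.r.CanAccept())
}

With a bucket of qps 0.02 and burst 1, the first scheduleOne finds a token available (true) and the second does not (false), which matches the []bool{true, false} expectation in the loop.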
Example 3: TestScheduler
func TestScheduler(t *testing.T) {
    eventBroadcaster := record.NewBroadcaster()
    defer eventBroadcaster.StartLogging(t.Logf).Stop()
    errS := errors.New("scheduler")
    errB := errors.New("binder")
    table := []struct {
        injectBindError error
        sendPod *api.Pod
        algo algorithm.ScheduleAlgorithm
        expectErrorPod *api.Pod
        expectAssumedPod *api.Pod
        expectError error
        expectBind *api.Binding
        eventReason string
    }{
        {
            sendPod: podWithID("foo", ""),
            algo: mockScheduler{"machine1", nil},
            expectBind: &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
            expectAssumedPod: podWithID("foo", "machine1"),
            eventReason: "Scheduled",
        }, {
            sendPod: podWithID("foo", ""),
            algo: mockScheduler{"machine1", errS},
            expectError: errS,
            expectErrorPod: podWithID("foo", ""),
            eventReason: "FailedScheduling",
        }, {
            sendPod: podWithID("foo", ""),
            algo: mockScheduler{"machine1", nil},
            expectBind: &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
            injectBindError: errB,
            expectError: errB,
            expectErrorPod: podWithID("foo", ""),
            eventReason: "FailedScheduling",
        },
    }
    for i, item := range table {
        var gotError error
        var gotPod *api.Pod
        var gotAssumedPod *api.Pod
        var gotBinding *api.Binding
        c := &Config{
            Modeler: &FakeModeler{
                AssumePodFunc: func(pod *api.Pod) {
                    gotAssumedPod = pod
                },
            },
            MinionLister: algorithm.FakeMinionLister(
                api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
            ),
            Algorithm: item.algo,
            Binder: fakeBinder{func(b *api.Binding) error {
                gotBinding = b
                return item.injectBindError
            }},
            Error: func(p *api.Pod, err error) {
                gotPod = p
                gotError = err
            },
            NextPod: func() *api.Pod {
                return item.sendPod
            },
            Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
        }
        s := New(c)
        called := make(chan struct{})
        events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
            if e, a := item.eventReason, e.Reason; e != a {
                t.Errorf("%v: expected %v, got %v", i, e, a)
            }
            close(called)
        })
        s.scheduleOne()
        if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
            t.Errorf("%v: assumed pod: wanted %v, got %v", i, e, a)
        }
        if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
            t.Errorf("%v: error pod: wanted %v, got %v", i, e, a)
        }
        if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
            t.Errorf("%v: error: wanted %v, got %v", i, e, a)
        }
        if e, a := item.expectBind, gotBinding; !reflect.DeepEqual(e, a) {
            t.Errorf("%v: error: %s", i, util.ObjectDiff(e, a))
        }
        <-called
        events.Stop()
    }
}
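mockScheduler and podWithID are small helpers from the same test file. Below are plausible sketches consistent with the call sites above: mockScheduler ignores its inputs and returns a canned host and error, while podWithID builds a minimal pod, optionally already bound to a node. The upstream versions may set extra fields (such as SelfLink) that the assertions here do not depend on.

// mockScheduler returns a fixed machine name and error regardless of input.
type mockScheduler struct {
    machine string
    err error
}

func (ms mockScheduler) Schedule(pod *api.Pod, ml algorithm.MinionLister) (string, error) {
    return ms.machine, ms.err
}

// podWithID builds a minimal pod named id; a non-empty desiredHost marks it
// as already assigned to that node.
func podWithID(id, desiredHost string) *api.Pod {
    return &api.Pod{
        ObjectMeta: api.ObjectMeta{Name: id},
        Spec: api.PodSpec{NodeName: desiredHost},
    }
}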
Example 4: TestSchedulerForgetAssumedPodAfterDelete
func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
    eventBroadcaster := record.NewBroadcaster()
    defer eventBroadcaster.StartLogging(t.Logf).Stop()
    // Setup modeler so we control the contents of all 3 stores: assumed,
    // scheduled and queued
    scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
    scheduledPodLister := &cache.StoreToPodLister{Store: scheduledPodStore}
    queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
    queuedPodLister := &cache.StoreToPodLister{Store: queuedPodStore}
    modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)
    // Create a fake clock used to timestamp entries and calculate ttl. Nothing
    // will expire till we flip to something older than the ttl, at which point
    // all entries inserted with fakeTime will expire.
    ttl := 30 * time.Second
    fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
    fakeClock := &util.FakeClock{Time: fakeTime}
    ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
    assumedPodsStore := cache.NewFakeExpirationStore(
        cache.MetaNamespaceKeyFunc, nil, ttlPolicy, fakeClock)
    modeler.assumedPods = &cache.StoreToPodLister{Store: assumedPodsStore}
    // Port is the easiest way to cause a fit predicate failure
    podPort := 8080
    firstPod := podWithPort("foo", "", podPort)
    // Create the scheduler config
    algo := NewGenericScheduler(
        map[string]algorithm.FitPredicate{"PodFitsPorts": predicates.PodFitsPorts},
        []algorithm.PriorityConfig{},
        modeler.PodLister(),
        rand.New(rand.NewSource(time.Now().UnixNano())))
    var gotBinding *api.Binding
    c := &Config{
        Modeler: modeler,
        MinionLister: algorithm.FakeMinionLister(
            api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
        ),
        Algorithm: algo,
        Binder: fakeBinder{func(b *api.Binding) error {
            scheduledPodStore.Add(podWithPort(b.Name, b.Target.Name, podPort))
            gotBinding = b
            return nil
        }},
        NextPod: func() *api.Pod {
            return queuedPodStore.Pop().(*api.Pod)
        },
        Error: func(p *api.Pod, err error) {
            t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
        },
        Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
    }
    // First scheduling pass should schedule the pod
    s := New(c)
    called := make(chan struct{})
    events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
        if e, a := "Scheduled", e.Reason; e != a {
            t.Errorf("expected %v, got %v", e, a)
        }
        close(called)
    })
    queuedPodStore.Add(firstPod)
    // queuedPodStore: [foo:8080]
    // scheduledPodStore: []
    // assumedPods: []
    s.scheduleOne()
    // queuedPodStore: []
    // scheduledPodStore: [foo:8080]
    // assumedPods: [foo:8080]
    pod, exists, _ := scheduledPodStore.GetByKey("foo")
    if !exists {
        t.Errorf("Expected scheduled pod store to contain pod")
    }
    pod, exists, _ = queuedPodStore.GetByKey("foo")
    if exists {
        t.Errorf("Did not expect a queued pod, found %+v", pod)
    }
    pod, exists, _ = assumedPodsStore.GetByKey("foo")
    if !exists {
        t.Errorf("Assumed pod store should contain stale pod")
    }
    expectBind := &api.Binding{
        ObjectMeta: api.ObjectMeta{Name: "foo"},
        Target: api.ObjectReference{Kind: "Node", Name: "machine1"},
    }
    if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
        t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
    }
    <-called
    events.Stop()
//......... part of the code omitted here .........
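podWithPort, which this test uses so that the PodFitsPorts predicate can be made to fail, is not shown in the excerpt. A minimal sketch, assuming it simply extends the podWithID helper sketched earlier with one container that claims a host port (the container name "ctr" is illustrative):

// podWithPort builds a pod whose single container claims hostPort, so two
// such pods can never fit on the same node under PodFitsPorts.
func podWithPort(id, desiredHost string, port int) *api.Pod {
    pod := podWithID(id, desiredHost)
    pod.Spec.Containers = []api.Container{
        {Name: "ctr", Ports: []api.ContainerPort{{HostPort: port}}},
    }
    return pod
}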
Example 5: TestSelectorSpreadPriority
//......... part of the code omitted here .........
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: []string{"machine1", "machine2"},
        services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
        expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
        test: "three pods, two service pods on different machines",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: []string{"machine1", "machine2"},
        services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
        expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 0}},
        test: "four pods, three service pods",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: []string{"machine1", "machine2"},
        services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
        expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
        test: "service with partial pod label matches",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: []string{"machine1", "machine2"},
        services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
        rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
        // "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels1. This means that we assume that we want to
        // do spreading between all pods. The result should be exactly as above.
        expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
        test: "service with partial pod label matches with service and replication controller",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: []string{"machine1", "machine2"},
        services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
        rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
        // Taken together, Service and Replication Controller should match all Pods, hence the result should be equal to the one above.
        expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
        test: "disjoined service and replication controller should be treated equally",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: []string{"machine1", "machine2"},
        rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
        // Both Nodes have one pod from the given RC, hence both get 0 score.
        expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
        test: "Replication controller with partial pod label matches",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: []string{"machine1", "machine2"},
        rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
        expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
        test: "Replication controller with partial pod label matches",
    },
}
for _, test := range tests {
    selectorSpread := SelectorSpread{serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs)}
    list, err := selectorSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeNodeList(test.nodes)))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    if !reflect.DeepEqual(test.expectedList, list) {
        t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
    }
}
}
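makeNodeList converts the plain node-name slices in the table into the api.NodeList that FakeMinionLister wraps. A minimal sketch, assuming each name becomes an otherwise empty Node:

// makeNodeList turns a list of node names into an api.NodeList with one
// otherwise-empty Node per name.
func makeNodeList(nodeNames []string) api.NodeList {
    result := api.NodeList{Items: make([]api.Node, len(nodeNames))}
    for ix := range nodeNames {
        result.Items[ix].Name = nodeNames[ix]
    }
    return result
}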
Example 6: TestZoneSpreadPriority
//......... part of the code omitted here .........
            {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: labeledNodes,
        services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
        expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
            {"machine21", 0}, {"machine22", 0},
            {"machine01", 0}, {"machine02", 0}},
        test: "three pods, one service pod",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: labeledNodes,
        services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
        expectedList: []algorithm.HostPriority{{"machine11", 5}, {"machine12", 5},
            {"machine21", 5}, {"machine22", 5},
            {"machine01", 0}, {"machine02", 0}},
        test: "three pods, two service pods on different machines",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
        },
        nodes: labeledNodes,
        services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
        expectedList: []algorithm.HostPriority{{"machine11", 0}, {"machine12", 0},
            {"machine21", 10}, {"machine22", 10},
            {"machine01", 0}, {"machine02", 0}},
        test: "three service label match pods in different namespaces",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: labeledNodes,
        services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
        expectedList: []algorithm.HostPriority{{"machine11", 6}, {"machine12", 6},
            {"machine21", 3}, {"machine22", 3},
            {"machine01", 0}, {"machine02", 0}},
        test: "four pods, three service pods",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
        pods: []*api.Pod{
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: labeledNodes,
        services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
        expectedList: []algorithm.HostPriority{{"machine11", 3}, {"machine12", 3},
            {"machine21", 6}, {"machine22", 6},
            {"machine01", 0}, {"machine02", 0}},
        test: "service with partial pod label matches",
    },
    {
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
        pods: []*api.Pod{
            {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
        nodes: labeledNodes,
        services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
        expectedList: []algorithm.HostPriority{{"machine11", 7}, {"machine12", 7},
            {"machine21", 5}, {"machine22", 5},
            {"machine01", 0}, {"machine02", 0}},
        test: "service pod on non-zoned minion",
    },
}
for _, test := range tests {
    zoneSpread := ServiceAntiAffinity{serviceLister: algorithm.FakeServiceLister(test.services), label: "zone"}
    list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeLabeledMinionList(test.nodes)))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    // sort the two lists to avoid failures on account of different ordering
    sort.Sort(test.expectedList)
    sort.Sort(list)
    if !reflect.DeepEqual(test.expectedList, list) {
        t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
    }
}
}
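makeLabeledMinionList plays the same role as makeNodeList but attaches labels, which ServiceAntiAffinity reads through its configured label key ("zone" here). A plausible sketch, assuming labeledNodes maps node name to label set; the map type is inferred from usage, not copied from upstream:

// makeLabeledMinionList builds an api.NodeList from a map of node name to
// labels, so the "zone" label can drive the anti-affinity priority.
func makeLabeledMinionList(nodeMap map[string]map[string]string) api.NodeList {
    nodes := make([]api.Node, 0, len(nodeMap))
    for nodeName, labels := range nodeMap {
        nodes = append(nodes, api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})
    }
    return api.NodeList{Items: nodes}
}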
Example 7: TestBalancedResourceAllocation
//......... part of the code omitted here .........
            CPU Fraction: 6000 / 10000 = 60%
            Memory Fraction: 5000 / 20000 = 25%
            Minion2 Score: 10 - (0.6-0.25)*10 = 6
        */
        pod: &api.Pod{Spec: noResources},
        nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
        expectedList: []algorithm.HostPriority{{"machine1", 4}, {"machine2", 6}},
        test: "no resources requested, pods scheduled with resources",
        pods: []*api.Pod{
            {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
    },
    {
        /*
            Minion1 scores on 0-10 scale
            CPU Fraction: 6000 / 10000 = 60%
            Memory Fraction: 5000 / 20000 = 25%
            Minion1 Score: 10 - (0.6-0.25)*10 = 6
            Minion2 scores on 0-10 scale
            CPU Fraction: 6000 / 10000 = 60%
            Memory Fraction: 10000 / 20000 = 50%
            Minion2 Score: 10 - (0.6-0.5)*10 = 9
        */
        pod: &api.Pod{Spec: cpuAndMemory},
        nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
        expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 9}},
        test: "resources requested, pods scheduled with resources",
        pods: []*api.Pod{
            {Spec: cpuOnly},
            {Spec: cpuAndMemory},
        },
    },
    {
        /*
            Minion1 scores on 0-10 scale
            CPU Fraction: 6000 / 10000 = 60%
            Memory Fraction: 5000 / 20000 = 25%
            Minion1 Score: 10 - (0.6-0.25)*10 = 6
            Minion2 scores on 0-10 scale
            CPU Fraction: 6000 / 10000 = 60%
            Memory Fraction: 10000 / 50000 = 20%
            Minion2 Score: 10 - (0.6-0.2)*10 = 6
        */
        pod: &api.Pod{Spec: cpuAndMemory},
        nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
        expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 6}},
        test: "resources requested, pods scheduled with resources, differently sized machines",
        pods: []*api.Pod{
            {Spec: cpuOnly},
            {Spec: cpuAndMemory},
        },
    },
    {
        /*
            Minion1 scores on 0-10 scale
            CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
            Memory Fraction: 0 / 10000 = 0
            Minion1 Score: 0
            Minion2 scores on 0-10 scale
            CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
            Memory Fraction: 5000 / 10000 = 50%
            Minion2 Score: 0
        */
        pod: &api.Pod{Spec: cpuOnly},
        nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
        expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
        test: "requested resources exceed minion capacity",
        pods: []*api.Pod{
            {Spec: cpuOnly},
            {Spec: cpuAndMemory},
        },
    },
    {
        pod: &api.Pod{Spec: noResources},
        nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
        expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
        test: "zero minion resources, pods scheduled with resources",
        pods: []*api.Pod{
            {Spec: cpuOnly},
            {Spec: cpuAndMemory},
        },
    },
}
for _, test := range tests {
    list, err := BalancedResourceAllocation(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    if !reflect.DeepEqual(test.expectedList, list) {
        t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
    }
}
}
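makeMinion supplies the node capacities that the score arithmetic in the comments above divides by. A sketch consistent with that usage, assuming milliCPU becomes the node's cpu capacity in millicores and memory its memory capacity in bytes:

// makeMinion builds a node whose capacity is milliCPU millicores of CPU and
// memory bytes of memory; the priority functions score against these totals.
func makeMinion(node string, milliCPU, memory int64) api.Node {
    return api.Node{
        ObjectMeta: api.ObjectMeta{Name: node},
        Status: api.NodeStatus{
            Capacity: api.ResourceList{
                "cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
                "memory": *resource.NewQuantity(memory, resource.BinarySI),
            },
        },
    }
}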
Example 8: TestZeroLimit
func TestZeroLimit(t *testing.T) {
    // A pod with no resources. We expect spreading to count it as having the default resources.
    noResources := api.PodSpec{
        Containers: []api.Container{
            {},
        },
    }
    noResources1 := noResources
    noResources1.NodeName = "machine1"
    // A pod that explicitly requests the same resources that a zero-limit pod gets by default (for spreading).
    small := api.PodSpec{
        Containers: []api.Container{
            {
                Resources: api.ResourceRequirements{
                    Limits: api.ResourceList{
                        "cpu": resource.MustParse(
                            strconv.FormatInt(defaultMilliCpuLimit, 10) + "m"),
                        "memory": resource.MustParse(
                            strconv.FormatInt(defaultMemoryLimit, 10)),
                    },
                },
            },
        },
    }
    small2 := small
    small2.NodeName = "machine2"
    // A larger pod.
    large := api.PodSpec{
        Containers: []api.Container{
            {
                Resources: api.ResourceRequirements{
                    Limits: api.ResourceList{
                        "cpu": resource.MustParse(
                            strconv.FormatInt(defaultMilliCpuLimit*3, 10) + "m"),
                        "memory": resource.MustParse(
                            strconv.FormatInt(defaultMemoryLimit*3, 10)),
                    },
                },
            },
        },
    }
    large1 := large
    large1.NodeName = "machine1"
    large2 := large
    large2.NodeName = "machine2"
    tests := []struct {
        pod *api.Pod
        pods []*api.Pod
        nodes []api.Node
        test string
    }{
        // The point of these next two tests is to show that you get the same priority for a zero-limit pod
        // as for a pod with the default limits, both when the zero-limit pod is already on the machine
        // and when the zero-limit pod is the one being scheduled.
        {
            pod: &api.Pod{Spec: noResources},
            nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
            test: "test priority of zero-limit pod with machine with zero-limit pod",
            pods: []*api.Pod{
                {Spec: large1}, {Spec: noResources1},
                {Spec: large2}, {Spec: small2},
            },
        },
        {
            pod: &api.Pod{Spec: small},
            nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
            test: "test priority of nonzero-limit pod with machine with zero-limit pod",
            pods: []*api.Pod{
                {Spec: large1}, {Spec: noResources1},
                {Spec: large2}, {Spec: small2},
            },
        },
        // The point of this test is to verify that we're not just getting the same score no matter what we schedule.
        {
            pod: &api.Pod{Spec: large},
            nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
            test: "test priority of larger pod with machine with zero-limit pod",
            pods: []*api.Pod{
                {Spec: large1}, {Spec: noResources1},
                {Spec: large2}, {Spec: small2},
            },
        },
    }
    const expectedPriority int = 25
    for _, test := range tests {
        list, err := scheduler.PrioritizeNodes(
            test.pod,
            algorithm.FakePodLister(test.pods),
            // This should match the configuration in defaultPriorities() in
            // plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
            // to test what's actually in production.
            []algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: NewSelectorSpreadPriority(algorithm.FakeServiceLister([]api.Service{}), algorithm.FakeControllerLister([]api.ReplicationController{})), Weight: 1}},
            algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
        if err != nil {
            t.Errorf("unexpected error: %v", err)
        }
        for _, hp := range list {
            if test.test == "test priority of larger pod with machine with zero-limit pod" {
                if hp.Score == expectedPriority {
//......... part of the code omitted here .........
Example 9: TestNewNodeLabelPriority
func TestNewNodeLabelPriority(t *testing.T) {
    label1 := map[string]string{"foo": "bar"}
    label2 := map[string]string{"bar": "foo"}
    label3 := map[string]string{"bar": "baz"}
    tests := []struct {
        nodes []api.Node
        label string
        presence bool
        expectedList algorithm.HostPriorityList
        test string
    }{
        {
            nodes: []api.Node{
                {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
                {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
                {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
            },
            expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
            label: "baz",
            presence: true,
            test: "no match found, presence true",
        },
        {
            nodes: []api.Node{
                {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
                {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
                {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
            },
            expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
            label: "baz",
            presence: false,
            test: "no match found, presence false",
        },
        {
            nodes: []api.Node{
                {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
                {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
                {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
            },
            expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
            label: "foo",
            presence: true,
            test: "one match found, presence true",
        },
        {
            nodes: []api.Node{
                {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
                {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
                {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
            },
            expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
            label: "foo",
            presence: false,
            test: "one match found, presence false",
        },
        {
            nodes: []api.Node{
                {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
                {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
                {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
            },
            expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
            label: "bar",
            presence: true,
            test: "two matches found, presence true",
        },
        {
            nodes: []api.Node{
                {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
                {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
                {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
            },
            expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
            label: "bar",
            presence: false,
            test: "two matches found, presence false",
        },
    }
    for _, test := range tests {
        prioritizer := NodeLabelPrioritizer{
            label: test.label,
            presence: test.presence,
        }
        list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
        if err != nil {
            t.Errorf("unexpected error: %v", err)
        }
        // sort the two lists to avoid failures on account of different ordering
        sort.Sort(test.expectedList)
        sort.Sort(list)
        if !reflect.DeepEqual(test.expectedList, list) {
            t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
        }
    }
}
Example 10: TestLeastRequested
//......... part of the code omitted here .........
            CPU Score: ((10000 - 6000) *10) / 10000 = 4
            Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
            Minion2 Score: (4 + 7.5) / 2 = 5
        */
        pod: &api.Pod{Spec: noResources},
        nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
        expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 5}},
        test: "no resources requested, pods scheduled with resources",
        pods: []*api.Pod{
            {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
            {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
            {Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
        },
    },
    {
        /*
            Minion1 scores on 0-10 scale
            CPU Score: ((10000 - 6000) *10) / 10000 = 4
            Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
            Minion1 Score: (4 + 7.5) / 2 = 5
            Minion2 scores on 0-10 scale
            CPU Score: ((10000 - 6000) *10) / 10000 = 4
            Memory Score: ((20000 - 10000) *10) / 20000 = 5
            Minion2 Score: (4 + 5) / 2 = 4
        */
        pod: &api.Pod{Spec: cpuAndMemory},
        nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
        expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 4}},
        test: "resources requested, pods scheduled with resources",
        pods: []*api.Pod{
            {Spec: cpuOnly},
            {Spec: cpuAndMemory},
        },
    },
    {
        /*
            Minion1 scores on 0-10 scale
            CPU Score: ((10000 - 6000) *10) / 10000 = 4
            Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
            Minion1 Score: (4 + 7.5) / 2 = 5
            Minion2 scores on 0-10 scale
            CPU Score: ((10000 - 6000) *10) / 10000 = 4
            Memory Score: ((50000 - 10000) *10) / 50000 = 8
            Minion2 Score: (4 + 8) / 2 = 6
        */
        pod: &api.Pod{Spec: cpuAndMemory},
        nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
        expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 6}},
        test: "resources requested, pods scheduled with resources, differently sized machines",
        pods: []*api.Pod{
            {Spec: cpuOnly},
            {Spec: cpuAndMemory},
        },
    },
    {
        /*
            Minion1 scores on 0-10 scale
            CPU Score: ((4000 - 6000) *10) / 4000 = 0
            Memory Score: ((10000 - 0) *10) / 10000 = 10
            Minion1 Score: (0 + 10) / 2 = 5
            Minion2 scores on 0-10 scale
            CPU Score: ((4000 - 6000) *10) / 4000 = 0
            Memory Score: ((10000 - 5000) *10) / 10000 = 5
            Minion2 Score: (0 + 5) / 2 = 2
        */
        pod: &api.Pod{Spec: cpuOnly},
        nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
        expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 2}},
        test: "requested resources exceed minion capacity",
        pods: []*api.Pod{
            {Spec: cpuOnly},
            {Spec: cpuAndMemory},
        },
    },
    {
        pod: &api.Pod{Spec: noResources},
        nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
        expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
        test: "zero minion resources, pods scheduled with resources",
        pods: []*api.Pod{
            {Spec: cpuOnly},
            {Spec: cpuAndMemory},
        },
    },
}
for _, test := range tests {
    list, err := LeastRequestedPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    if !reflect.DeepEqual(test.expectedList, list) {
        t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
    }
}
}
Example 11: TestGenericScheduler
//......... part of the code omitted here .........
        name: "test 4",
    },
    {
        predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
        prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
        nodes: []string{"3", "2", "1"},
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
        expectedHost: "2",
        name: "test 5",
    },
    {
        predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
        prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
        nodes: []string{"3", "2", "1"},
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
        expectedHost: "1",
        name: "test 6",
    },
    {
        predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate},
        prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
        nodes: []string{"3", "2", "1"},
        expectsErr: true,
        name: "test 7",
    },
    {
        predicates: map[string]algorithm.FitPredicate{
            "nopods": hasNoPodsPredicate,
            "matches": matchesPredicate,
        },
        pods: []*api.Pod{
            {
                ObjectMeta: api.ObjectMeta{Name: "2"},
                Spec: api.PodSpec{
                    NodeName: "2",
                },
                Status: api.PodStatus{
                    Phase: api.PodRunning,
                },
            },
        },
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
        prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
        nodes: []string{"1", "2"},
        expectsErr: true,
        name: "test 8",
    },
    {
        predicates: map[string]algorithm.FitPredicate{
            "nopods": hasNoPodsPredicate,
            "matches": matchesPredicate,
        },
        pods: []*api.Pod{
            {
                ObjectMeta: api.ObjectMeta{Name: "2"},
                Spec: api.PodSpec{
                    NodeName: "2",
                },
                Status: api.PodStatus{
                    Phase: api.PodFailed,
                },
            },
            {
                ObjectMeta: api.ObjectMeta{Name: "3"},
                Spec: api.PodSpec{
                    NodeName: "2",
                },
                Status: api.PodStatus{
                    Phase: api.PodSucceeded,
                },
            },
        },
        pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
        prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
        nodes: []string{"1", "2"},
        expectedHost: "2",
        name: "test 9",
    },
}
for _, test := range tests {
    random := rand.New(rand.NewSource(0))
    scheduler := NewGenericScheduler(test.predicates, test.prioritizers, algorithm.FakePodLister(test.pods), random)
    machine, err := scheduler.Schedule(test.pod, algorithm.FakeMinionLister(makeNodeList(test.nodes)))
    if test.expectsErr {
        if err == nil {
            t.Error("Unexpected non-error")
        }
    } else {
        if err != nil {
            t.Errorf("Unexpected error: %v", err)
        }
        if test.expectedHost != machine {
            t.Errorf("Failed: %s, expected: %s, saw: %s", test.name, test.expectedHost, machine)
        }
    }
}
}
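The table above mixes tiny stub predicates and priority functions (truePredicate, falsePredicate, matchesPredicate, hasNoPodsPredicate, numericPriority, reverseNumericPriority) that live elsewhere in the test file. Two representative sketches, written to match the expectations in the table rather than copied from upstream: matchesPredicate admits a node only when its name equals the pod's name, and numericPriority scores each node by parsing its name as an integer.

// matchesPredicate fits a pod only on the node whose name equals the pod's name.
func matchesPredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
    return pod.Name == node, nil
}

// numericPriority scores nodes by their numeric names, so "3" outranks "1";
// reverseNumericPriority would invert this ordering.
func numericPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
    nodes, err := minionLister.List()
    if err != nil {
        return nil, err
    }
    result := algorithm.HostPriorityList{}
    for _, minion := range nodes.Items {
        score, err := strconv.Atoi(minion.Name)
        if err != nil {
            return nil, err
        }
        result = append(result, algorithm.HostPriority{Host: minion.Name, Score: score})
    }
    return result, nil
}

This explains, for example, why "test 5" expects host "2": matchesPredicate filters the candidates down to the single node named "2", so the priority score no longer influences the outcome.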