This article collects typical usage examples of the Golang function FakeReplicaSetLister from the k8s.io/kubernetes/plugin/pkg/scheduler/algorithm package. If you are unsure what FakeReplicaSetLister does or how to call it, the curated examples below should help.
Four code examples of FakeReplicaSetLister are shown below, sorted by popularity by default.
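All four examples share the same pattern: FakeReplicaSetLister wraps an in-memory slice of ReplicaSets so that scheduler priority functions can be exercised in unit tests without an API server or informers. The sketch below is not part of the original examples; the helper name newSpreadPriorityForTest is made up, and whether the fake listers take slices of pointers or of values varies between the Kubernetes releases these examples were taken from, so follow the signatures of the scheduler package you have vendored. It mirrors the wiring used in Example 1.

// Minimal sketch (assumed scaffolding, not from the original page): build fake
// listers from in-memory slices and hand them to NewSelectorSpreadPriority.
package priorities_test

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	algorithmpriorities "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities"
)

// newSpreadPriorityForTest is a hypothetical helper: it returns a PriorityConfig
// whose selector-spreading function sees only the ReplicaSets passed in.
func newSpreadPriorityForTest(rss []*extensions.ReplicaSet) algorithm.PriorityConfig {
	return algorithm.PriorityConfig{
		Function: algorithmpriorities.NewSelectorSpreadPriority(
			algorithm.FakeServiceLister([]*api.Service{}),                  // no Services in play
			algorithm.FakeControllerLister([]*api.ReplicationController{}), // no ReplicationControllers
			algorithm.FakeReplicaSetLister(rss)),                           // the ReplicaSets under test
		Weight: 1,
	}
}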
Example 1: TestZeroRequest
//......... (some code omitted here) .........
},
},
},
}
small2 := small
small2.NodeName = "machine2"
// A larger pod.
large := api.PodSpec{
Containers: []api.Container{
{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
},
},
},
},
}
large1 := large
large1.NodeName = "machine1"
large2 := large
large2.NodeName = "machine2"
tests := []struct {
pod *api.Pod
pods []*api.Pod
nodes []*api.Node
test string
}{
// The point of these next two tests is to show you get the same priority for a zero-request pod
// as for a pod with the default requests, both when the zero-request pod is already on the machine
// and when the zero-request pod is the one being scheduled.
{
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
test: "test priority of zero-request pod with machine with zero-request pod",
pods: []*api.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
},
{
pod: &api.Pod{Spec: small},
nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
test: "test priority of nonzero-request pod with machine with zero-request pod",
pods: []*api.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
},
// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
{
pod: &api.Pod{Spec: large},
nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
test: "test priority of larger pod with machine with zero-request pod",
pods: []*api.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
},
}
const expectedPriority int = 25
for _, test := range tests {
// This should match the configuration in defaultPriorities() in
// plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
// to test what's actually in production.
priorityConfigs := []algorithm.PriorityConfig{
{Map: algorithmpriorities.LeastRequestedPriorityMap, Weight: 1},
{Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1},
{
Function: algorithmpriorities.NewSelectorSpreadPriority(
algorithm.FakeServiceLister([]*api.Service{}),
algorithm.FakeControllerLister([]*api.ReplicationController{}),
algorithm.FakeReplicaSetLister([]*extensions.ReplicaSet{})),
Weight: 1,
},
}
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
list, err := PrioritizeNodes(
test.pod, nodeNameToInfo, algorithm.EmptyMetadataProducer, priorityConfigs,
algorithm.FakeNodeLister(test.nodes), []algorithm.SchedulerExtender{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
for _, hp := range list {
if test.test == "test priority of larger pod with machine with zero-request pod" {
if hp.Score == expectedPriority {
t.Errorf("%s: expected non-%d for all priorities, got list %#v", test.test, expectedPriority, list)
}
} else {
if hp.Score != expectedPriority {
t.Errorf("%s: expected %d for all priorities, got list %#v", test.test, expectedPriority, list)
}
}
}
}
}
Example 2: TestSelectorSpreadPriority
//......... (some code omitted here) .........
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
test: "service with partial pod label matches with service and replication controller",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}}},
pods: []*api.Pod{
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
// Taken together, the Service and the Replication Controller should match all Pods, hence the result should be equal to the one above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
test: "disjoined service and replication controller should be treated equally",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}}},
pods: []*api.Pod{
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
},
nodes: []string{"machine1", "machine2"},
services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
test: "disjoined service and replication controller should be treated equally",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*api.Pod{
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
},
nodes: []string{"machine1", "machine2"},
rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
// Both Nodes have one pod from the given RC, hence both get 0 score.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
test: "Replication controller with partial pod label matches",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*api.Pod{
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
},
nodes: []string{"machine1", "machine2"},
rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
test: "Replication controller with partial pod label matches",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*api.Pod{
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
},
nodes: []string{"machine1", "machine2"},
rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
test: "Replication controller with partial pod label matches",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*api.Pod{
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
},
nodes: []string{"machine1", "machine2"},
rss: []extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 5}},
test: "Replication controller with partial pod label matches",
},
}
for _, test := range tests {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
selectorSpread := SelectorSpread{podLister: algorithm.FakePodLister(test.pods), serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs), replicaSetLister: algorithm.FakeReplicaSetLister(test.rss)}
list, err := selectorSpread.CalculateSpreadPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(makeNodeList(test.nodes)))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
}
}
}
Example 3: TestNodePreferAvoidPriority
//......... (some code omitted here) .........
"uid": "qwert12345",
"controller": true
}
},
"reason": "some reason",
"message": "some message"
}
]
}`,
}
testNodes := []*api.Node{
{
ObjectMeta: api.ObjectMeta{Name: "machine1", Annotations: annotations1},
},
{
ObjectMeta: api.ObjectMeta{Name: "machine2", Annotations: annotations2},
},
{
ObjectMeta: api.ObjectMeta{Name: "machine3"},
},
}
tests := []struct {
pod *api.Pod
rcs []api.ReplicationController
rss []extensions.ReplicaSet
nodes []*api.Node
expectedList schedulerapi.HostPriorityList
test string
}{
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default", Labels: label1}},
rcs: []api.ReplicationController{
{
ObjectMeta: api.ObjectMeta{
Namespace: "default",
Name: "foo",
UID: "abcdef123456",
},
Spec: api.ReplicationControllerSpec{Selector: label1},
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
test: "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default", Labels: label2}},
rss: []extensions.ReplicaSet{
{
TypeMeta: unversioned.TypeMeta{
APIVersion: "v1",
Kind: "ReplicaSet",
},
ObjectMeta: api.ObjectMeta{
Namespace: "default",
Name: "bar",
UID: "qwert12345",
},
Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: label2}},
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 10}},
test: "pod managed by ReplicaSet should avoid a node, this node get lowest priority score",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default"}},
rcs: []api.ReplicationController{
{
ObjectMeta: api.ObjectMeta{
Namespace: "default",
Name: "foo",
UID: "abcdef123456",
},
Spec: api.ReplicationControllerSpec{Selector: label1},
},
},
nodes: testNodes,
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
test: "pod should not avoid these nodes, all nodes get highest priority score",
},
}
for _, test := range tests {
prioritizer := NodePreferAvoidPod{
controllerLister: algorithm.FakeControllerLister(test.rcs),
replicaSetLister: algorithm.FakeReplicaSetLister(test.rss),
}
list, err := prioritizer.CalculateNodePreferAvoidPodsPriority(test.pod, map[string]*schedulercache.NodeInfo{}, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// sort the two lists to avoid failures on account of different ordering
sort.Sort(test.expectedList)
sort.Sort(list)
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
}
}
}
Example 4: TestZoneSelectorSpreadPriority
//......... (some code omitted here) .........
{
pod: buildPod("", labels1),
pods: []*api.Pod{
buildPod(nodeMachine1Zone1, labels2),
buildPod(nodeMachine1Zone2, labels1),
buildPod(nodeMachine2Zone2, labels1),
buildPod(nodeMachine1Zone3, labels2),
buildPod(nodeMachine2Zone3, labels1),
},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 10},
{nodeMachine1Zone2, 0}, // Pod on node
{nodeMachine2Zone2, 0}, // Pod on node
{nodeMachine1Zone3, 6}, // Pod in zone
{nodeMachine2Zone3, 3}, // Pod on node
{nodeMachine3Zone3, 6}, // Pod in zone
},
test: "five pods, 3 matching (z2=2, z3=1)",
},
{
pod: buildPod("", labels1),
pods: []*api.Pod{
buildPod(nodeMachine1Zone1, labels1),
buildPod(nodeMachine1Zone2, labels1),
buildPod(nodeMachine2Zone2, labels2),
buildPod(nodeMachine1Zone3, labels1),
},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 0}, // Pod on node
{nodeMachine1Zone2, 0}, // Pod on node
{nodeMachine2Zone2, 3}, // Pod in zone
{nodeMachine1Zone3, 0}, // Pod on node
{nodeMachine2Zone3, 3}, // Pod in zone
{nodeMachine3Zone3, 3}, // Pod in zone
},
test: "four pods, 3 matching (z1=1, z2=1, z3=1)",
},
{
pod: buildPod("", labels1),
pods: []*api.Pod{
buildPod(nodeMachine1Zone1, labels1),
buildPod(nodeMachine1Zone2, labels1),
buildPod(nodeMachine1Zone3, labels1),
buildPod(nodeMachine2Zone2, labels2),
},
services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
{nodeMachine1Zone1, 0}, // Pod on node
{nodeMachine1Zone2, 0}, // Pod on node
{nodeMachine2Zone2, 3}, // Pod in zone
{nodeMachine1Zone3, 0}, // Pod on node
{nodeMachine2Zone3, 3}, // Pod in zone
{nodeMachine3Zone3, 3}, // Pod in zone
},
test: "four pods, 3 matching (z1=1, z2=1, z3=1)",
},
{
pod: buildPod("", labels1),
pods: []*api.Pod{
buildPod(nodeMachine1Zone3, labels1),
buildPod(nodeMachine1Zone2, labels1),
buildPod(nodeMachine1Zone3, labels1),
},
rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: labels1}}},
expectedList: []schedulerapi.HostPriority{
// Note that because we put two pods on the same node (nodeMachine1Zone3),
// the values here are questionable for zone2, in particular for nodeMachine1Zone2.
// However they kind of make sense; zone1 is still most-highly favored.
// zone3 is in general least favored, and m1.z3 particularly low priority.
// We would probably prefer to see a bigger gap between putting a second
// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct.
// This is also consistent with what we have already.
{nodeMachine1Zone1, 10}, // No pods in zone
{nodeMachine1Zone2, 5}, // Pod on node
{nodeMachine2Zone2, 6}, // Pod in zone
{nodeMachine1Zone3, 0}, // Two pods on node
{nodeMachine2Zone3, 3}, // Pod in zone
{nodeMachine3Zone3, 3}, // Pod in zone
},
test: "Replication controller spreading (z1=0, z2=1, z3=2)",
},
}
for _, test := range tests {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
selectorSpread := SelectorSpread{podLister: algorithm.FakePodLister(test.pods), serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs), replicaSetLister: algorithm.FakeReplicaSetLister(test.rss)}
list, err := selectorSpread.CalculateSpreadPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(makeLabeledNodeList(labeledNodes)))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// sort the two lists to avoid failures on account of different ordering
sort.Sort(test.expectedList)
sort.Sort(list)
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
}
}
}