

Golang schedulercache.CreateNodeNameToInfoMap Function Code Examples

This article collects typical usage examples of the Golang function k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache.CreateNodeNameToInfoMap. If you have been wondering what exactly CreateNodeNameToInfoMap does, how to call it, or what real-world usage looks like, the curated code examples below should help.


The sections below present 15 code examples of CreateNodeNameToInfoMap, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Golang samples.
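
Before the examples, here is a minimal sketch of the two call signatures that appear below; the variable names pods and nodes are placeholders assumed for illustration. In older Kubernetes releases the function takes only the pod list, so each NodeInfo must be linked to its Node manually via SetNode; newer releases also accept the node list and do the linking internally.

// Older signature (examples 1-3, 5-10 and 13-15): build from pods, then attach nodes.
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(pods)
for _, node := range nodes {
	if info, found := nodeNameToInfo[node.Name]; found {
		info.SetNode(node)
	}
}

// Newer signature (examples 4, 11 and 12): the node list is passed in directly.
nodeNameToInfo = schedulercache.CreateNodeNameToInfoMap(pods, nodes)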

Example 1: FindNodeToRemove

// FindNodeToRemove finds a node that can be removed.
func FindNodeToRemove(nodes []*kube_api.Node, pods []*kube_api.Pod, client *kube_client.Client) (*kube_api.Node, error) {
	nodeNameToNodeInfo := schedulercache.CreateNodeNameToInfoMap(pods)

	//TODO: Iterate over underutilized nodes first.
	for _, node := range nodes {
		nodeInfo, found := nodeNameToNodeInfo[node.Name]
		if !found {
			glog.Errorf("Node info for %s not found", node.Name)
			continue
		}

		reservation, err := calculateReservation(node, nodeInfo)

		if err != nil {
			glog.Warningf("Failed to calculate reservation for %s: %v", node.Name, err)
		}
		glog.V(4).Infof("Node %s - reservation %f", node.Name, reservation)

		if reservation > unusedThreshold {
			glog.Infof("Node %s is not suitable for removal - reservation to big (%f)", node.Name, reservation)
			continue
		}

		// Let's try to remove this one.
		glog.V(2).Infof("Considering %s for removal", node.Name)

		podsToRemoveList, _, _, err := cmd.GetPodsForDeletionOnNodeDrain(client, node.Name,
			kube_api.Codecs.UniversalDecoder(), false, true)

		if err != nil {
			glog.V(1).Infof("Node %s cannot be removed: %v", node.Name, err)
			continue
		}

		tempNodeNameToNodeInfo := schedulercache.CreateNodeNameToInfoMap(pods)
		delete(tempNodeNameToNodeInfo, node.Name)
		for _, n := range nodes {
			if nodeInfo, found := tempNodeNameToNodeInfo[n.Name]; found {
				nodeInfo.SetNode(n)
			}
		}
		ptrPodsToRemove := make([]*kube_api.Pod, 0, len(podsToRemoveList))
		for i := range podsToRemoveList {
			ptrPodsToRemove = append(ptrPodsToRemove, &podsToRemoveList[i])
		}

		findProblems := findPlaceFor(ptrPodsToRemove, nodes, tempNodeNameToNodeInfo)
		if findProblems == nil {
			return node, nil
		}
		glog.Infof("Node %s is not suitable for removal %v", node.Name, err)
	}
	return nil, nil
}
Developer: danehans, Project: contrib, Lines: 55, Source: cluster.go

Example 2: Schedule

// Schedule tries to schedule the given pod to one of the nodes in the node list.
// If it succeeds, it will return the name of the node.
// If it fails, it will return a FitError with reasons.
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
	nodes, err := nodeLister.List()
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", ErrNoNodesAvailable
	}

	// TODO: we should compute this once and dynamically update it using Watch, not constantly re-compute.
	// But at least we're now only doing it in one place
	pods, err := g.pods.List(labels.Everything())
	if err != nil {
		return "", err
	}
	nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(pods)
	filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, nodeNameToInfo, g.predicates, nodes, g.extenders)
	if err != nil {
		return "", err
	}

	if len(filteredNodes.Items) == 0 {
		return "", &FitError{
			Pod:              pod,
			FailedPredicates: failedPredicateMap,
		}
	}

	priorityList, err := PrioritizeNodes(pod, nodeNameToInfo, g.pods, g.prioritizers, algorithm.FakeNodeLister(filteredNodes), g.extenders)
	if err != nil {
		return "", err
	}

	return g.selectHost(priorityList)
}
Developer: ethernetdan, Project: kubernetes, Lines: 38, Source: generic_scheduler.go

Example 3: FindNodesToRemove

// FindNodesToRemove finds nodes that can be removed. It also returns information about a good
// rescheduling location for each of the pods.
func FindNodesToRemove(candidates []*kube_api.Node, allNodes []*kube_api.Node, pods []*kube_api.Pod,
	client *kube_client.Client, predicateChecker *PredicateChecker, maxCount int,
	fastCheck bool, oldHints map[string]string, usageTracker *UsageTracker,
	timestamp time.Time) (nodesToRemove []NodeToBeRemoved, podReschedulingHints map[string]string, finalError error) {

	nodeNameToNodeInfo := schedulercache.CreateNodeNameToInfoMap(pods)
	for _, node := range allNodes {
		if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
			nodeInfo.SetNode(node)
		}
	}
	result := make([]NodeToBeRemoved, 0)

	evaluationType := "Detailed evaluation"
	if fastCheck {
		evaluationType = "Fast evaluation"
	}
	newHints := make(map[string]string, len(oldHints))

candidateloop:
	for _, node := range candidates {
		glog.V(2).Infof("%s: %s for removal", evaluationType, node.Name)

		var podsToRemove []*kube_api.Pod
		var err error

		if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
			if fastCheck {
				podsToRemove, err = FastGetPodsToMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage)
			} else {
				podsToRemove, err = DetailedGetPodsForMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage, client, int32(*minReplicaCount))
			}
			if err != nil {
				glog.V(2).Infof("%s: node %s cannot be removed: %v", evaluationType, node.Name, err)
				continue candidateloop
			}
		} else {
			glog.V(2).Infof("%s: nodeInfo for %s not found", evaluationType, node.Name)
			continue candidateloop
		}
		findProblems := findPlaceFor(node.Name, podsToRemove, allNodes, nodeNameToNodeInfo, predicateChecker, oldHints, newHints,
			usageTracker, timestamp)

		if findProblems == nil {
			result = append(result, NodeToBeRemoved{
				Node:             node,
				PodsToReschedule: podsToRemove,
			})
			glog.V(2).Infof("%s: node %s may be removed", evaluationType, node.Name)
			if len(result) >= maxCount {
				break candidateloop
			}
		} else {
			glog.V(2).Infof("%s: node %s is not suitable for removal %v", evaluationType, node.Name, err)
		}
	}
	return result, newHints, nil
}
Developer: Q-Lee, Project: contrib, Lines: 60, Source: cluster.go

Example 4: FindUnneededNodes

// FindUnneededNodes calculates which nodes are not needed, i.e. nodes whose pods can all be
// scheduled somewhere else, and updates the unneededNodes map accordingly. It also returns
// information about where pods can be rescheduled, plus each node's utilization level.
func FindUnneededNodes(
	context AutoscalingContext,
	nodes []*apiv1.Node,
	unneededNodes map[string]time.Time,
	pods []*apiv1.Pod,
	oldHints map[string]string,
	tracker *simulator.UsageTracker,
	timestamp time.Time) (unneededTimeMap map[string]time.Time, podReschedulingHints map[string]string, utilizationMap map[string]float64) {

	currentlyUnneededNodes := make([]*apiv1.Node, 0)
	nodeNameToNodeInfo := schedulercache.CreateNodeNameToInfoMap(pods, nodes)
	utilizationMap = make(map[string]float64)

	// Phase1 - look at the nodes utilization.
	for _, node := range nodes {
		nodeInfo, found := nodeNameToNodeInfo[node.Name]
		if !found {
			glog.Errorf("Node info for %s not found", node.Name)
			continue
		}
		utilization, err := simulator.CalculateUtilization(node, nodeInfo)

		if err != nil {
			glog.Warningf("Failed to calculate utilization for %s: %v", node.Name, err)
		}
		glog.V(4).Infof("Node %s - utilization %f", node.Name, utilization)
		utilizationMap[node.Name] = utilization

		if utilization >= context.ScaleDownUtilizationThreshold {
			glog.V(4).Infof("Node %s is not suitable for removal - utilization too big (%f)", node.Name, utilization)
			continue
		}
		currentlyUnneededNodes = append(currentlyUnneededNodes, node)
	}

	// Phase2 - check which nodes can be probably removed using fast drain.
	nodesToRemove, newHints, err := simulator.FindNodesToRemove(currentlyUnneededNodes, nodes, pods,
		nil, context.PredicateChecker,
		len(currentlyUnneededNodes), true, oldHints, tracker, timestamp)
	if err != nil {
		glog.Errorf("Error while simulating node drains: %v", err)
		return map[string]time.Time{}, oldHints, map[string]float64{}
	}

	// Update the timestamp map.
	now := time.Now()
	result := make(map[string]time.Time)
	for _, node := range nodesToRemove {
		name := node.Node.Name
		if val, found := unneededNodes[name]; !found {
			result[name] = now
		} else {
			result[name] = val
		}
	}
	return result, newHints, utilizationMap
}
Developer: kubernetes, Project: contrib, Lines: 60, Source: scale_down.go

Example 5: createNodeNameToInfoMap

// TODO: move this function to scheduler utils.
func createNodeNameToInfoMap(pods []*kube_api.Pod, nodes []*kube_api.Node) map[string]*schedulercache.NodeInfo {
	nodeNameToNodeInfo := schedulercache.CreateNodeNameToInfoMap(pods)
	for _, node := range nodes {
		if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
			nodeInfo.SetNode(node)
		}
	}
	return nodeNameToNodeInfo
}
Developer: krousey, Project: contrib, Lines: 10, Source: utils.go
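
Example 8 below extends this same helper: after attaching the nodes, it also prunes map entries whose Node pointer was never set, so pods that reference nodes absent from the node list do not leave incomplete NodeInfo entries behind.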

Example 6: CalculateUnderutilizedNodes

// CalculateUnderutilizedNodes calculates which nodes are underutilized.
func CalculateUnderutilizedNodes(nodes []*kube_api.Node,
	underutilizedNodes map[string]time.Time,
	utilizationThreshold float64,
	pods []*kube_api.Pod,
	client *kube_client.Client,
	predicateChecker *simulator.PredicateChecker) map[string]time.Time {

	currentlyUnderutilizedNodes := make([]*kube_api.Node, 0)
	nodeNameToNodeInfo := schedulercache.CreateNodeNameToInfoMap(pods)

	// Phase1 - look at the nodes reservation.
	for _, node := range nodes {
		nodeInfo, found := nodeNameToNodeInfo[node.Name]
		if !found {
			glog.Errorf("Node info for %s not found", node.Name)
			continue
		}
		reservation, err := simulator.CalculateReservation(node, nodeInfo)

		if err != nil {
			glog.Warningf("Failed to calculate reservation for %s: %v", node.Name, err)
		}
		glog.V(4).Infof("Node %s - reservation %f", node.Name, reservation)

		if reservation >= utilizationThreshold {
			glog.V(4).Infof("Node %s is not suitable for removal - reservation to big (%f)", node.Name, reservation)
			continue
		}
		currentlyUnderutilizedNodes = append(currentlyUnderutilizedNodes, node)
	}

	// Phase2 - check which nodes can be probably removed using fast drain.
	nodesToRemove, err := simulator.FindNodesToRemove(currentlyUnderutilizedNodes, nodes, pods,
		client, predicateChecker,
		len(currentlyUnderutilizedNodes), true)
	if err != nil {
		glog.Errorf("Error while evaluating node utilization: %v", err)
		return map[string]time.Time{}
	}

	// Update the timestamp map.
	now := time.Now()
	result := make(map[string]time.Time)
	for _, node := range nodesToRemove {
		name := node.Name
		if val, found := underutilizedNodes[name]; !found {
			result[name] = now
		} else {
			result[name] = val
		}
	}
	return result
}
Developer: krousey, Project: contrib, Lines: 54, Source: scale_down.go

Example 7: FindNodesToRemove

// FindNodesToRemove finds nodes that can be removed.
func FindNodesToRemove(candidates []*kube_api.Node, allNodes []*kube_api.Node, pods []*kube_api.Pod,
	client *kube_client.Client, predicateChecker *PredicateChecker, maxCount int,
	fastCheck bool) ([]*kube_api.Node, error) {

	nodeNameToNodeInfo := schedulercache.CreateNodeNameToInfoMap(pods)
	for _, node := range allNodes {
		if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
			nodeInfo.SetNode(node)
		}
	}
	result := make([]*kube_api.Node, 0)

candidateloop:
	for _, node := range candidates {
		glog.V(2).Infof("Considering %s for removal", node.Name)

		var podsToRemove []*kube_api.Pod
		var err error

		if fastCheck {
			if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
				podsToRemove, err = FastGetPodsToMove(nodeInfo, false, true, kube_api.Codecs.UniversalDecoder())
				if err != nil {
					glog.V(2).Infof("Node %s cannot be removed: %v", node.Name, err)
					continue
				}
			}
		} else {
			drainResult, _, _, err := cmd.GetPodsForDeletionOnNodeDrain(client, node.Name,
				kube_api.Codecs.UniversalDecoder(), false, true)

			if err != nil {
				glog.V(2).Infof("Node %s cannot be removed: %v", node.Name, err)
				continue
			}
			podsToRemove = make([]*kube_api.Pod, 0, len(drainResult))
			for i := range drainResult {
				podsToRemove = append(podsToRemove, &drainResult[i])
			}
		}
		findProblems := findPlaceFor(node.Name, podsToRemove, allNodes, nodeNameToNodeInfo, predicateChecker)
		if findProblems == nil {
			result = append(result, node)
			if len(result) >= maxCount {
				break candidateloop
			}
		} else {
			glog.V(2).Infof("Node %s is not suitable for removal %v", node.Name, err)
		}
	}
	return result, nil
}
Developer: krousey, Project: contrib, Lines: 49, Source: cluster.go

Example 8: createNodeNameToInfoMap

// TODO: move this function to scheduler utils.
func createNodeNameToInfoMap(pods []*kube_api.Pod, nodes []*kube_api.Node) map[string]*schedulercache.NodeInfo {
	nodeNameToNodeInfo := schedulercache.CreateNodeNameToInfoMap(pods)
	for _, node := range nodes {
		if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
			nodeInfo.SetNode(node)
		}
	}

	// Some pods may be out of sync with node lists. Removing incomplete node infos.
	keysToRemove := make([]string, 0)
	for key, nodeInfo := range nodeNameToNodeInfo {
		if nodeInfo.Node() == nil {
			keysToRemove = append(keysToRemove, key)
		}
	}
	for _, key := range keysToRemove {
		delete(nodeNameToNodeInfo, key)
	}

	return nodeNameToNodeInfo
}
Developer: spxtr, Project: contrib, Lines: 22, Source: utils.go

Example 9: FindEmptyNodesToRemove

// FindEmptyNodesToRemove finds empty nodes that can be removed.
func FindEmptyNodesToRemove(candidates []*kube_api.Node, pods []*kube_api.Pod) []*kube_api.Node {
	nodeNameToNodeInfo := schedulercache.CreateNodeNameToInfoMap(pods)
	for _, node := range candidates {
		if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
			nodeInfo.SetNode(node)
		}
	}
	result := make([]*kube_api.Node, 0)
	for _, node := range candidates {
		if nodeInfo, found := nodeNameToNodeInfo[node.Name]; found {
			// Should block on all pods.
			podsToRemove, err := FastGetPodsToMove(nodeInfo, true, true)
			if err == nil && len(podsToRemove) == 0 {
				result = append(result, node)
			}
		} else {
			// Node without pods.
			result = append(result, node)
		}
	}
	return result
}
Developer: Q-Lee, Project: contrib, Lines: 23, Source: cluster.go

Example 10: UpdateNodeNameToInfoMap

func (p PodsToCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
	// Reassigning the infoMap parameter would be invisible to the caller, so copy
	// the freshly built entries into the provided map instead.
	for name, info := range schedulercache.CreateNodeNameToInfoMap(p) {
		infoMap[name] = info
	}
	return nil
}
Developer: apcera, Project: kubernetes, Lines: 4, Source: pods_to_cache.go
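
The version of this method as originally published simply reassigned the infoMap parameter, which is a silent no-op for the caller: a Go map variable is a small reference header passed by value, so rebinding the parameter only changes the local copy, whereas writing through it mutates the shared underlying table (hence the in-place copy above). A self-contained sketch of the difference, with hypothetical names:

package main

import "fmt"

// rebind points its local parameter at a new map; the caller's map is untouched.
func rebind(m map[string]int) { m = map[string]int{"a": 1} }

// mutate writes through the shared map header; the caller observes the new entry.
func mutate(m map[string]int) { m["a"] = 1 }

func main() {
	m1, m2 := map[string]int{}, map[string]int{}
	rebind(m1)
	mutate(m2)
	fmt.Println(len(m1), len(m2)) // prints: 0 1
}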

Example 11: TestNodeAffinityPriority


//......... (part of the code omitted here) .........
					Preference: v1.NodeSelectorTerm{
						MatchExpressions: []v1.NodeSelectorRequirement{
							{
								Key:      "foo",
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{"bar"},
							},
							{
								Key:      "key",
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{"value"},
							},
							{
								Key:      "az",
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{"az1"},
							},
						},
					},
				},
			},
		},
	}

	tests := []struct {
		pod          *v1.Pod
		nodes        []*v1.Node
		expectedList schedulerapi.HostPriorityList
		test         string
	}{
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{},
				},
			},
			nodes: []*v1.Node{
				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
			test:         "all machines are same priority as NodeAffinity is nil",
		},
		{
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					Affinity: affinity1,
				},
			},
			nodes: []*v1.Node{
				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label4}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
			test:         "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
		},
		{
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					Affinity: affinity1,
				},
			},
			nodes: []*v1.Node{
				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
			test:         "only machine1 matches the preferred scheduling requirements of pod",
		},
		{
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					Affinity: affinity2,
				},
			},
			nodes: []*v1.Node{
				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
			},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}},
			test:         "all machines matches the preferred scheduling requirements of pod but with different priorities ",
		},
	}

	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
		nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce)
		list, err := nap(test.pod, nodeNameToInfo, test.nodes)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: \nexpected %#v, \ngot      %#v", test.test, test.expectedList, list)
		}
	}
}
Developer: kubernetes, Project: kubernetes, Lines: 101, Source: node_affinity_test.go

Example 12: TestMostRequested


//......... (part of the code omitted here) .........
		},
	}
	tests := []struct {
		pod          *api.Pod
		pods         []*api.Pod
		nodes        []*api.Node
		expectedList schedulerapi.HostPriorityList
		test         string
	}{
		{
			/*
				Node1 scores (used resources) on 0-10 scale
				CPU Score: (0 * 10) / 4000 = 0
				Memory Score: (0 * 10) / 10000 = 0
				Node1 Score: (0 + 0) / 2 = 0

				Node2 scores (used resources) on 0-10 scale
				CPU Score: (0 * 10) / 4000 = 0
				Memory Score: (0 * 10) / 10000 = 0
				Node2 Score: (0 + 0) / 2 = 0
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
			test:         "nothing scheduled, nothing requested",
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: (3000 * 10) / 4000 = 7.5
				Memory Score: (5000 * 10) / 10000 = 5
				Node1 Score: (7.5 + 5) / 2 = 6

				Node2 scores on 0-10 scale
				CPU Score: (3000 * 10) / 6000 = 5
				Memory Score: (5000 * 10) / 10000 = 5
				Node2 Score: (5 + 5) / 2 = 5
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 5}},
			test:         "nothing scheduled, resources requested, differently sized machines",
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: (6000 * 10) / 10000 = 6
				Memory Score: (0 * 10) / 20000 = 0
				Node1 Score: (6 + 0) / 2 = 3

				Node2 scores on 0-10 scale
				CPU Score: (6000 * 10) / 10000 = 6
				Memory Score: (5000 * 10) / 20000 = 2.5
				Node2 Score: (6 + 2.5) / 2 = 4
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 3}, {"machine2", 4}},
			test:         "no resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: (6000 * 10) / 10000 = 6
				Memory Score: (5000 * 10) / 20000 = 2.5
				Node1 Score: (6 + 2.5) / 2 = 4

				Node2 scores on 0-10 scale
				CPU Score: (6000 * 10) / 10000 = 6
				Memory Score: (10000 * 10) / 20000 = 5
				Node2 Score: (6 + 5) / 2 = 5
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 4}, {"machine2", 5}},
			test:         "resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
	}

	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
		list, err := priorityFunction(MostRequestedPriorityMap, nil)(test.pod, nodeNameToInfo, test.nodes)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}
Developer: huang195, Project: kubernetes, Lines: 101, Source: priorities_test.go

Example 13: TestImageLocalityPriority


//......... (part of the code omitted here) .........
					"gcr.io/140:v1",
				},
				SizeBytes: int64(140 * mb),
			},
			{
				Names: []string{
					"gcr.io/2000",
				},
				SizeBytes: int64(2000 * mb),
			},
		},
	}

	node_250_10 := api.NodeStatus{
		Images: []api.ContainerImage{
			{
				Names: []string{
					"gcr.io/250",
				},
				SizeBytes: int64(250 * mb),
			},
			{
				Names: []string{
					"gcr.io/10",
					"gcr.io/10:v1",
				},
				SizeBytes: int64(10 * mb),
			},
		},
	}

	tests := []struct {
		pod          *api.Pod
		pods         []*api.Pod
		nodes        []api.Node
		expectedList schedulerapi.HostPriorityList
		test         string
	}{
		{
			// Pod: gcr.io/40 gcr.io/250

			// Node1
			// Image: gcr.io/40 40MB
			// Score: (40M-23M)/97.7M + 1 = 1

			// Node2
			// Image: gcr.io/250 250MB
			// Score: (250M-23M)/97.7M + 1 = 3
			pod:          &api.Pod{Spec: test_40_250},
			nodes:        []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 1}, {"machine2", 3}},
			test:         "two images spread on two nodes, prefer the larger image one",
		},
		{
			// Pod: gcr.io/40 gcr.io/140

			// Node1
			// Image: gcr.io/40 40MB, gcr.io/140 140MB
			// Score: (40M+140M-23M)/97.7M + 1 = 2

			// Node2
			// Image: not present
			// Score: 0
			pod:          &api.Pod{Spec: test_40_140},
			nodes:        []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 2}, {"machine2", 0}},
			test:         "two images on one node, prefer this node",
		},
		{
			// Pod: gcr.io/2000 gcr.io/10

			// Node1
			// Image: gcr.io/2000 2000MB
			// Score: 2000 > max score = 10

			// Node2
			// Image: gcr.io/10 10MB
			// Score: 10 < min score = 0
			pod:          &api.Pod{Spec: test_min_max},
			nodes:        []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
			test:         "if exceed limit, use limit",
		},
	}

	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
		list, err := ImageLocalityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}

		sort.Sort(test.expectedList)
		sort.Sort(list)

		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}
Developer: XbinZh, Project: kubernetes, Lines: 101, Source: priorities_test.go

Example 14: TestLeastRequested


//......... (part of the code omitted here) .........
				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
				Node2 Score: (4 + 7.5) / 2 = 5
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 5}},
			test:         "no resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
				Node1 Score: (4 + 7.5) / 2 = 5

				Node2 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 10000) *10) / 20000 = 5
				Node2 Score: (4 + 5) / 2 = 4
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 4}},
			test:         "resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
				Node1 Score: (4 + 7.5) / 2 = 5

				Node2 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((50000 - 10000) *10) / 50000 = 8
				Node2 Score: (4 + 8) / 2 = 6
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 6}},
			test:         "resources requested, pods scheduled with resources, differently sized machines",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Node1 scores on 0-10 scale
				CPU Score: ((4000 - 6000) *10) / 4000 = 0
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Node1 Score: (0 + 10) / 2 = 5

				Node2 scores on 0-10 scale
				CPU Score: ((4000 - 6000) *10) / 4000 = 0
				Memory Score: ((10000 - 5000) *10) / 10000 = 5
				Node2 Score: (0 + 5) / 2 = 2
			*/
			pod:          &api.Pod{Spec: cpuOnly},
			nodes:        []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 2}},
			test:         "requested resources exceed node capacity",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
			test:         "zero node resources, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
	}

	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
		list, err := LeastRequestedPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}
Developer: XbinZh, Project: kubernetes, Lines: 101, Source: priorities_test.go

Example 15: GetNodeNameToInfoMap

func (p PodsToCache) GetNodeNameToInfoMap() (map[string]*schedulercache.NodeInfo, error) {
	return schedulercache.CreateNodeNameToInfoMap(p), nil
}
Developer: RomainVabre, Project: origin, Lines: 3, Source: pods_to_cache.go


Note: the k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache.CreateNodeNameToInfoMap examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from projects contributed by various open-source authors, and copyright remains with the original authors; use and redistribution are governed by each project's License. Do not republish without permission.