This article collects typical usage examples of the Golang NodeLister type from k8s.io/kubernetes/plugin/pkg/scheduler/algorithm. If you are wondering what the NodeLister type does, how to use it, or want concrete examples, the curated samples below should help.
Shown below are 15 code examples of the NodeLister type, sorted by popularity by default.
Example 1: CalculateNodeLabelPriority
// CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
// If presence is false, prioritizes nodes that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    var score int
    nodes, err := nodeLister.List()
    if err != nil {
        return nil, err
    }
    labeledNodes := map[string]bool{}
    for _, node := range nodes.Items {
        exists := labels.Set(node.Labels).Has(n.label)
        labeledNodes[node.Name] = (exists && n.presence) || (!exists && !n.presence)
    }
    result := []schedulerapi.HostPriority{}
    // score int - scale of 0-10
    // 0 being the lowest priority and 10 being the highest
    for nodeName, success := range labeledNodes {
        if success {
            score = 10
        } else {
            score = 0
        }
        result = append(result, schedulerapi.HostPriority{Host: nodeName, Score: score})
    }
    return result, nil
}
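Note that the boolean assigned in the first loop reduces to exists == n.presence. Below is a minimal, hypothetical usage sketch: since label and presence are unexported fields, the literal only compiles from within the same package, and pod and nodeNameToInfo are assumed to come from surrounding scheduler code; algorithm.FakeNodeLister wraps a fixed api.NodeList, just as the Schedule examples below do.

    // Hypothetical same-package sketch: prefer nodes that carry a "zone" label.
    prioritizer := &NodeLabelPrioritizer{label: "zone", presence: true}
    nodes := api.NodeList{Items: []api.Node{
        {ObjectMeta: api.ObjectMeta{Name: "node-a", Labels: map[string]string{"zone": "us-east-1a"}}},
        {ObjectMeta: api.ObjectMeta{Name: "node-b"}},
    }}
    priorities, err := prioritizer.CalculateNodeLabelPriority(pod, nodeNameToInfo, algorithm.FakeNodeLister(nodes))
    if err != nil {
        // handle the lister error
    }
    // Expected: node-a scores 10 (label present, presence == true), node-b scores 0.
    _ = priorities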
Example 2: ImageLocalityPriority
// ImageLocalityPriority is a priority function that favors nodes that already have the requested pod's container images.
// It will detect whether the requested images are present on a node, and then calculate a score ranging from 0 to 10
// based on the total size of those images.
// - If none of the images are present, this node will be given the lowest priority.
// - If some of the images are present on a node, the larger the total size of those images, the higher the node's priority.
func ImageLocalityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    sumSizeMap := make(map[string]int64)
    nodes, err := nodeLister.List()
    if err != nil {
        return nil, err
    }
    for _, container := range pod.Spec.Containers {
        for _, node := range nodes.Items {
            // Check if this container's image is present and get its size.
            imageSize := checkContainerImageOnNode(node, container)
            // Add this size to the total result of this node.
            sumSizeMap[node.Name] += imageSize
        }
    }
    result := []schedulerapi.HostPriority{}
    // score int - scale of 0-10
    // 0 being the lowest priority and 10 being the highest.
    for nodeName, sumSize := range sumSizeMap {
        result = append(result, schedulerapi.HostPriority{Host: nodeName,
            Score: calculateScoreFromSize(sumSize)})
    }
    return result, nil
}
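The two helpers used above are not shown in this example. The sketch below is a plausible reconstruction, not the confirmed implementation: checkContainerImageOnNode looks the container image up in node.Status.Images (assuming the ContainerImage fields Names and SizeBytes), and calculateScoreFromSize maps the byte total linearly onto the 0-10 scale between two assumed size thresholds.

    // Assumed thresholds; the actual cutoffs in the scheduler of this era may differ.
    const (
        minImgSize int64 = 23 * 1024 * 1024   // totals below this score 0
        maxImgSize int64 = 1000 * 1024 * 1024 // totals at or above this score 10
    )

    // checkContainerImageOnNode returns the size of the container's image if the
    // node already has it cached, and 0 otherwise.
    func checkContainerImageOnNode(node api.Node, container api.Container) int64 {
        for _, image := range node.Status.Images {
            for _, name := range image.Names {
                if container.Image == name {
                    return image.SizeBytes
                }
            }
        }
        return 0
    }

    // calculateScoreFromSize maps a summed image size onto the 0-10 priority scale.
    func calculateScoreFromSize(sumSize int64) int {
        switch {
        case sumSize == 0 || sumSize < minImgSize:
            return 0 // nothing useful is cached on this node
        case sumSize >= maxImgSize:
            return 10 // the large images are already present
        default:
            return int((10*(sumSize-minImgSize))/(maxImgSize-minImgSize) + 1)
        }
    }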
Example 3: Schedule
// Schedule tries to schedule the given pod to one of the nodes in the node list.
// If it succeeds, it will return the name of the node.
// If it fails, it will return a FitError with reasons.
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return "", err
    }
    if len(nodes.Items) == 0 {
        return "", ErrNoNodesAvailable
    }
    // Used for all fit and priority funcs.
    nodeNameToInfo, err := g.cache.GetNodeNameToInfoMap()
    if err != nil {
        return "", err
    }
    filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, nodeNameToInfo, g.predicates, nodes, g.extenders)
    if err != nil {
        return "", err
    }
    if len(filteredNodes.Items) == 0 {
        return "", &FitError{
            Pod:              pod,
            FailedPredicates: failedPredicateMap,
        }
    }
    priorityList, err := PrioritizeNodes(pod, nodeNameToInfo, g.prioritizers, algorithm.FakeNodeLister(filteredNodes), g.extenders)
    if err != nil {
        return "", err
    }
    return g.selectHost(priorityList)
}
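selectHost is not shown above. A minimal sketch follows, assuming HostPriorityList implements sort.Interface and that genericScheduler keeps lastNodeIndex/lastNodeIndexLock fields for tie-breaking state: sort descending by score, then rotate among the hosts that share the top score so repeated calls spread pods across equally good nodes.

    // Sketch: pick among the highest-scoring hosts, round-robin across calls.
    func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList) (string, error) {
        if len(priorityList) == 0 {
            return "", fmt.Errorf("empty priorityList")
        }
        sort.Sort(sort.Reverse(priorityList))
        maxScore := priorityList[0].Score
        // Index of the first host whose score drops below the maximum.
        firstAfterMaxScore := sort.Search(len(priorityList), func(i int) bool {
            return priorityList[i].Score < maxScore
        })
        g.lastNodeIndexLock.Lock()
        ix := int(g.lastNodeIndex % uint64(firstAfterMaxScore))
        g.lastNodeIndex++
        g.lastNodeIndexLock.Unlock()
        return priorityList[ix].Host, nil
    }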
Example 4: Schedule
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return "", err
    }
    if len(nodes.Items) == 0 {
        return "", ErrNoNodesAvailable
    }
    filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.pods, g.predicates, nodes)
    if err != nil {
        return "", err
    }
    priorityList, err := PrioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeNodeLister(filteredNodes))
    if err != nil {
        return "", err
    }
    if len(priorityList) == 0 {
        return "", &FitError{
            Pod:              pod,
            FailedPredicates: failedPredicateMap,
        }
    }
    return g.selectHost(priorityList)
}
Example 5: Schedule
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return "", err
    }
    if len(nodes.Items) == 0 {
        return "", ErrNoNodesAvailable
    }
    // TODO: we should compute this once and dynamically update it using Watch, not constantly re-compute.
    // But at least we're now only doing it in one place.
    machinesToPods, err := predicates.MapPodsToMachines(g.pods)
    if err != nil {
        return "", err
    }
    filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, machinesToPods, g.predicates, nodes, g.extenders)
    if err != nil {
        return "", err
    }
    priorityList, err := PrioritizeNodes(pod, machinesToPods, g.pods, g.prioritizers, algorithm.FakeNodeLister(filteredNodes), g.extenders)
    if err != nil {
        return "", err
    }
    if len(priorityList) == 0 {
        return "", &FitError{
            Pod:              pod,
            FailedPredicates: failedPredicateMap,
        }
    }
    return g.selectHost(priorityList)
}
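predicates.MapPodsToMachines, used above, builds the machine-to-pods index that the fit and priority functions consume. A minimal sketch, assuming algorithm.PodLister exposes List(labels.Selector) ([]*api.Pod, error): group every scheduled pod under its Spec.NodeName.

    // Sketch: index already-scheduled pods by the node they are bound to.
    func MapPodsToMachines(lister algorithm.PodLister) (map[string][]*api.Pod, error) {
        machineToPods := map[string][]*api.Pod{}
        pods, err := lister.List(labels.Everything())
        if err != nil {
            return nil, err
        }
        for _, scheduledPod := range pods {
            host := scheduledPod.Spec.NodeName
            machineToPods[host] = append(machineToPods[host], scheduledPod)
        }
        return machineToPods, nil
    }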
Example 6: CalculateNodeAffinityPriority
// CalculateNodeAffinityPriority prioritizes nodes according to the node affinity scheduling preferences
// indicated in PreferredDuringSchedulingIgnoredDuringExecution. Each time a node matches a preferredSchedulingTerm,
// its score is incremented by that term's Weight. Thus, the more preferredSchedulingTerms
// a node satisfies, and the greater the weights of the satisfied terms, the higher
// the node's score.
func CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return nil, err
    }
    var maxCount float64
    counts := make(map[string]float64, len(nodes.Items))
    affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
    if err != nil {
        return nil, err
    }
    // A nil element of PreferredDuringSchedulingIgnoredDuringExecution matches no objects.
    // An element of PreferredDuringSchedulingIgnoredDuringExecution that refers to an
    // empty PreferredSchedulingTerm matches all objects.
    if affinity.NodeAffinity != nil && affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
        // Match PreferredDuringSchedulingIgnoredDuringExecution term by term.
        for _, preferredSchedulingTerm := range affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
            if preferredSchedulingTerm.Weight == 0 {
                continue
            }
            nodeSelector, err := api.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
            if err != nil {
                return nil, err
            }
            for _, node := range nodes.Items {
                if nodeSelector.Matches(labels.Set(node.Labels)) {
                    counts[node.Name] += float64(preferredSchedulingTerm.Weight)
                }
                if counts[node.Name] > maxCount {
                    maxCount = counts[node.Name]
                }
            }
        }
    }
    result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
    for i := range nodes.Items {
        node := &nodes.Items[i]
        if maxCount > 0 {
            fScore := 10 * (counts[node.Name] / maxCount)
            result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
            if glog.V(10) {
                // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
                // not logged. There is visible performance gain from it.
                glog.Infof("%v -> %v: NodeAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
            }
        } else {
            result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: 0})
        }
    }
    return result, nil
}
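The function can be exercised directly with algorithm.FakeNodeLister, the same wrapper the Schedule examples pass to PrioritizeNodes. A hedged usage sketch: the pod is assumed to carry the era-specific affinity annotation that api.GetAffinityFromPodAnnotations reads, and nodeNameToInfo comes from the scheduler cache as in the other examples.

    // Usage sketch: score three nodes directly, without a full scheduler.
    nodes := api.NodeList{Items: []api.Node{
        {ObjectMeta: api.ObjectMeta{Name: "node-1", Labels: map[string]string{"zone": "a"}}},
        {ObjectMeta: api.ObjectMeta{Name: "node-2", Labels: map[string]string{"zone": "b"}}},
        {ObjectMeta: api.ObjectMeta{Name: "node-3"}},
    }}
    priorities, err := CalculateNodeAffinityPriority(pod, nodeNameToInfo, algorithm.FakeNodeLister(nodes))
    if err != nil {
        // handle error
    }
    for _, hp := range priorities {
        fmt.Printf("%s: %d\n", hp.Host, hp.Score) // nodes matching heavier terms print higher scores
    }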
Example 7: ComputeTaintTolerationPriority
// ComputeTaintTolerationPriority prepares the priority list for all the nodes based on the number of intolerable taints on each node.
func ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return nil, err
    }
    // the max value of counts
    var maxCount float64
    // counts holds the number of taints on each node that the pod cannot tolerate
    counts := make(map[string]float64, len(nodes))
    tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations)
    if err != nil {
        return nil, err
    }
    // Fetch a list of all tolerations with effect PreferNoSchedule
    tolerationList := getAllTolerationPreferNoSchedule(tolerations)
    // calculate the intolerable taints for all the nodes
    for _, node := range nodes {
        taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
        if err != nil {
            return nil, err
        }
        count := countIntolerableTaintsPreferNoSchedule(taints, tolerationList)
        if count > 0 {
            // 0 is the default value, so avoid unnecessary map operations.
            counts[node.Name] = count
            if count > maxCount {
                maxCount = count
            }
        }
    }
    // The maximum priority value to give to a node
    // Priority values range from 0 - maxPriority
    const maxPriority = float64(10)
    result := make(schedulerapi.HostPriorityList, 0, len(nodes))
    for _, node := range nodes {
        fScore := maxPriority
        if maxCount > 0 {
            fScore = (1.0 - counts[node.Name]/maxCount) * 10
        }
        if glog.V(10) {
            // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
            // not logged. There is visible performance gain from it.
            glog.Infof("%v -> %v: Taint Toleration Priority, Score: (%d)", pod.Name, node.Name, int(fScore))
        }
        result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
    }
    return result, nil
}
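Neither taint helper is shown in the example. The sketch below is a plausible reconstruction under stated assumptions: an empty toleration effect is treated as matching every effect, and the local toleratesTaint matcher is hypothetical (the real code may delegate to an api helper instead). The float64 return matches this example's counts map; the variant in Example 15 uses int.

    // Sketch: keep only tolerations relevant to PreferNoSchedule taints.
    func getAllTolerationPreferNoSchedule(tolerations []api.Toleration) []api.Toleration {
        var kept []api.Toleration
        for _, t := range tolerations {
            if len(t.Effect) == 0 || t.Effect == api.TaintEffectPreferNoSchedule {
                kept = append(kept, t)
            }
        }
        return kept
    }

    // toleratesTaint is a hypothetical matcher: the keys must match, and unless
    // the operator is Exists the values must match as well.
    func toleratesTaint(t api.Toleration, taint api.Taint) bool {
        if t.Key != taint.Key {
            return false
        }
        return t.Operator == api.TolerationOpExists || t.Value == taint.Value
    }

    // Sketch: count the PreferNoSchedule taints that no toleration covers.
    func countIntolerableTaintsPreferNoSchedule(taints []api.Taint, tolerations []api.Toleration) float64 {
        var intolerable float64
        for _, taint := range taints {
            if taint.Effect != api.TaintEffectPreferNoSchedule {
                continue
            }
            tolerated := false
            for _, t := range tolerations {
                if toleratesTaint(t, taint) {
                    tolerated = true
                    break
                }
            }
            if !tolerated {
                intolerable++
            }
        }
        return intolerable
    }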
Example 8: CalculateNodeAffinityPriority
// CalculateNodeAffinityPriority prioritizes nodes according to the node affinity scheduling preferences
// indicated in PreferredDuringSchedulingIgnoredDuringExecution. Each time a node matches a preferredSchedulingTerm,
// its score is incremented by that term's Weight. Thus, the more preferredSchedulingTerms
// a node satisfies, and the greater the weights of the satisfied terms, the higher
// the node's score.
func (s *NodeAffinity) CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    var maxCount int
    counts := map[string]int{}
    nodes, err := nodeLister.List()
    if err != nil {
        return nil, err
    }
    affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
    if err != nil {
        return nil, err
    }
    // A nil element of PreferredDuringSchedulingIgnoredDuringExecution matches no objects.
    // An element of PreferredDuringSchedulingIgnoredDuringExecution that refers to an
    // empty PreferredSchedulingTerm matches all objects.
    if affinity.NodeAffinity != nil && affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
        // Match PreferredDuringSchedulingIgnoredDuringExecution term by term.
        for _, preferredSchedulingTerm := range affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
            if preferredSchedulingTerm.Weight == 0 {
                continue
            }
            nodeSelector, err := api.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
            if err != nil {
                return nil, err
            }
            for _, node := range nodes.Items {
                if nodeSelector.Matches(labels.Set(node.Labels)) {
                    counts[node.Name] += preferredSchedulingTerm.Weight
                }
                if counts[node.Name] > maxCount {
                    maxCount = counts[node.Name]
                }
            }
        }
    }
    result := []schedulerapi.HostPriority{}
    for _, node := range nodes.Items {
        fScore := float64(0)
        if maxCount > 0 {
            fScore = 10 * (float64(counts[node.Name]) / float64(maxCount))
        }
        result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
        glog.V(10).Infof("%v -> %v: NodeAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
    }
    return result, nil
}
Example 9: BalancedResourceAllocation
// BalancedResourceAllocation favors nodes with a balanced resource usage rate.
// BalancedResourceAllocation should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority.
// It calculates the difference between the cpu and memory fraction of capacity, and prioritizes the host based on how
// close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction - memoryFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
func BalancedResourceAllocation(pod *api.Pod, machinesToPods map[string][]*api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return schedulerapi.HostPriorityList{}, err
    }
    list := schedulerapi.HostPriorityList{}
    for _, node := range nodes.Items {
        list = append(list, calculateBalancedResourceAllocation(pod, node, machinesToPods[node.Name]))
    }
    return list, nil
}
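calculateBalancedResourceAllocation is not shown. The sketch below implements the documented formula, score = 10 - abs(cpuFraction - memoryFraction)*10, under assumptions: requests are summed per container over the node's pods plus the incoming pod, fractions are taken against Status.Allocatable, and a node already past capacity in either dimension scores 0. The real helper handles more cases (e.g. limits vs. requests).

    // Sketch of the balanced-allocation score; request summing is simplified.
    func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) schedulerapi.HostPriority {
        var totalMilliCPU, totalMemory int64
        sumRequests := func(p *api.Pod) {
            for _, c := range p.Spec.Containers {
                req := c.Resources.Requests
                totalMilliCPU += req.Cpu().MilliValue()
                totalMemory += req.Memory().Value()
            }
        }
        for _, p := range pods {
            sumRequests(p)
        }
        sumRequests(pod) // include the pod being scheduled

        capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
        capacityMemory := node.Status.Allocatable.Memory().Value()
        cpuFraction := fractionOfCapacity(totalMilliCPU, capacityMilliCPU)
        memoryFraction := fractionOfCapacity(totalMemory, capacityMemory)

        score := 0
        if cpuFraction < 1 && memoryFraction < 1 {
            // The closer the two fractions, the more balanced the node.
            score = int(10 - math.Abs(cpuFraction-memoryFraction)*10)
        }
        return schedulerapi.HostPriority{Host: node.Name, Score: score}
    }

    func fractionOfCapacity(requested, capacity int64) float64 {
        if capacity == 0 {
            return 1 // treat a zero-capacity dimension as fully used
        }
        return float64(requested) / float64(capacity)
    }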
Example 10: BalancedResourceAllocation
// BalancedResourceAllocation favors nodes with a balanced resource usage rate.
// BalancedResourceAllocation should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority.
// It calculates the difference between the cpu and memory fraction of capacity, and prioritizes the host based on how
// close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction - memoryFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
func BalancedResourceAllocation(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return schedulerapi.HostPriorityList{}, err
    }
    list := schedulerapi.HostPriorityList{}
    for _, node := range nodes.Items {
        list = append(list, calculateBalancedResourceAllocation(pod, node, nodeNameToInfo[node.Name].Pods()))
    }
    return list, nil
}
Example 11: LeastRequestedPriority
// LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
// prioritizes based on the average of the fraction of capacity that remains unrequested for each resource.
// Details: (cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity)) / 2
func LeastRequestedPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return schedulerapi.HostPriorityList{}, err
    }
    list := schedulerapi.HostPriorityList{}
    for _, node := range nodes.Items {
        list = append(list, calculateResourceOccupancy(pod, node, nodeNameToInfo[node.Name]))
    }
    return list, nil
}
Example 12: LeastRequestedPriority
// LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
// prioritizes based on the average of the fraction of capacity that remains unrequested for each resource.
// Details: (cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity)) / 2
func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return schedulerapi.HostPriorityList{}, err
    }
    podsToMachines, err := predicates.MapPodsToMachines(podLister)
    if err != nil {
        return schedulerapi.HostPriorityList{}, err
    }
    list := schedulerapi.HostPriorityList{}
    for _, node := range nodes.Items {
        list = append(list, calculateResourceOccupancy(pod, node, podsToMachines[node.Name]))
    }
    return list, nil
}
Example 13: BalancedResourceAllocation
// BalancedResourceAllocation favors nodes with a balanced resource usage rate.
// BalancedResourceAllocation should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority.
// It calculates the difference between the cpu and memory fraction of capacity, and prioritizes the host based on how
// close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction - memoryFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return algorithm.HostPriorityList{}, err
    }
    podsToMachines, err := predicates.MapPodsToMachines(podLister)
    if err != nil {
        return algorithm.HostPriorityList{}, err
    }
    list := algorithm.HostPriorityList{}
    for _, node := range nodes.Items {
        list = append(list, calculateBalancedResourceAllocation(pod, node, podsToMachines[node.Name]))
    }
    return list, nil
}
Example 14: LeastRequestedPriority
// LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
// prioritizes based on the average of the fraction of capacity that remains unrequested for each resource.
// Details: (cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity)) / 2
func LeastRequestedPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    nodes, err := nodeLister.List()
    if err != nil {
        return schedulerapi.HostPriorityList{}, err
    }
    podResources := getNonZeroRequests(pod)
    list := make(schedulerapi.HostPriorityList, 0, len(nodes))
    for _, node := range nodes {
        list = append(list, calculateResourceOccupancy(pod, podResources, node, nodeNameToInfo[node.Name]))
    }
    return list, nil
}
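The per-resource half of the documented formula is simple enough to show on its own. A minimal sketch, assuming integer math over milli-CPU and bytes; calculateResourceOccupancy would compute this once for CPU and once for memory and average the two.

    // Sketch: score one resource on the 0-10 scale as (capacity - requested) * 10 / capacity.
    // Zero-capacity or over-requested nodes score 0 on that resource.
    func calculateScore(requested, capacity int64) int64 {
        if capacity == 0 || requested > capacity {
            return 0
        }
        return ((capacity - requested) * 10) / capacity
    }

    // Example: 4000m CPU capacity with 1000m requested scores (4000-1000)*10/4000 = 7.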
Example 15: ComputeTaintTolerationPriority
// ComputeTaintTolerationPriority prepares the priority list for all the nodes based on the number of intolerable taints on each node.
func (s *TaintToleration) ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
    // counts holds the number of taints on each node that the pod cannot tolerate
    counts := make(map[string]int)
    // the max value of counts
    var maxCount int
    nodes, err := nodeLister.List()
    if err != nil {
        return nil, err
    }
    tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations)
    if err != nil {
        return nil, err
    }
    // Fetch a list of all tolerations with effect PreferNoSchedule
    tolerationList := getAllTolerationPreferNoSchedule(tolerations)
    // calculate the intolerable taints for all the nodes
    for i := range nodes.Items {
        node := &nodes.Items[i]
        taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
        if err != nil {
            return nil, err
        }
        count := countIntolerableTaintsPreferNoSchedule(taints, tolerationList)
        counts[node.Name] = count
        if count > maxCount {
            maxCount = count
        }
    }
    // The maximum priority value to give to a node
    // Priority values range from 0 - maxPriority
    const maxPriority = 10
    result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
    for _, node := range nodes.Items {
        fScore := float64(maxPriority)
        if maxCount > 0 {
            fScore = (1.0 - float64(counts[node.Name])/float64(maxCount)) * 10
        }
        glog.V(10).Infof("%v -> %v: Taint Toleration Priority, Score: (%d)", pod.Name, node.Name, int(fScore))
        result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
    }
    return result, nil
}
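To make the final scoring loop concrete, a standalone worked example with illustrative values: given intolerable-taint counts of 0, 1, and 2 across three nodes, maxCount is 2, so the scores come out to 10, 5, and 0.

    // Worked example of the scoring loop above (illustrative values only).
    counts := map[string]int{"node-a": 0, "node-b": 1, "node-c": 2}
    maxCount := 2
    for name, c := range counts {
        fScore := (1.0 - float64(c)/float64(maxCount)) * 10
        fmt.Printf("%s -> %d\n", name, int(fScore)) // node-a -> 10, node-b -> 5, node-c -> 0
    }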