

Golang CloudProvider.NodeGroupForNode Method Code Examples

This article collects typical usage examples of the Golang method k8s.io/contrib/cluster-autoscaler/cloudprovider.CloudProvider.NodeGroupForNode. If you are wondering what CloudProvider.NodeGroupForNode does, or how and where to use it, the curated examples below should help. You can also explore further usage of its parent type, k8s.io/contrib/cluster-autoscaler/cloudprovider.CloudProvider.


Seven code examples of the CloudProvider.NodeGroupForNode method are shown below, sorted by popularity by default.
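
A note before the examples: most of them guard the returned group with reflect.ValueOf(group).IsNil() in addition to group == nil. The reason is that a provider can return a typed nil pointer wrapped in a non-nil interface value, which a plain == nil comparison does not catch. The following minimal sketch demonstrates the pitfall; fakeGroup and nodeGroupForNode are hypothetical stand-ins, not part of the real cloudprovider API.

package main

import (
    "fmt"
    "reflect"
)

// fakeGroup is a hypothetical stand-in for a concrete NodeGroup implementation.
type fakeGroup struct{ id string }

func (g *fakeGroup) Id() string { return g.id }

// nodeGroupForNode mimics a provider that returns a typed nil: a nil *fakeGroup
// wrapped in a non-nil interface value.
func nodeGroupForNode() interface{ Id() string } {
    var g *fakeGroup
    return g
}

func main() {
    group := nodeGroupForNode()
    fmt.Println(group == nil)                   // false: the interface itself is non-nil
    fmt.Println(reflect.ValueOf(group).IsNil()) // true: the wrapped pointer is nil
}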

Example 1: CheckGroupsAndNodes

// CheckGroupsAndNodes checks if all node groups have all required nodes.
func CheckGroupsAndNodes(nodes []*kube_api.Node, cloudProvider cloudprovider.CloudProvider) error {
    groupCount := make(map[string]int)
    for _, node := range nodes {

        group, err := cloudProvider.NodeGroupForNode(node)
        if err != nil {
            return err
        }
        if group == nil || reflect.ValueOf(group).IsNil() {
            continue
        }
        id := group.Id()
        groupCount[id]++
    }
    for _, nodeGroup := range cloudProvider.NodeGroups() {
        size, err := nodeGroup.TargetSize()
        if err != nil {
            return err
        }
        count := groupCount[nodeGroup.Id()]
        if size != count {
            return fmt.Errorf("wrong number of nodes for node group: %s expected: %d actual: %d", nodeGroup.Id(), size, count)
        }
    }
    return nil
}
Author: spxtr, Project: contrib, Lines: 28, Source: utils.go
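
CheckGroupsAndNodes enforces a simple invariant: the number of nodes observed in each group must equal that group's target size. The toy sketch below isolates the check, with plain maps standing in for cloud provider state; the group names and counts are made up for illustration.

package main

import "fmt"

func main() {
    // Observed node counts per group versus each group's target size.
    observed := map[string]int{"group-a": 3, "group-b": 2}
    target := map[string]int{"group-a": 3, "group-b": 3}

    for id, want := range target {
        if got := observed[id]; got != want {
            fmt.Printf("wrong number of nodes for node group: %s expected: %d actual: %d\n", id, want, got)
        }
    }
}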

Example 2: deleteNodeFromCloudProvider

// deleteNodeFromCloudProvider removes the given node from the cloud provider. No extra
// pre-deletion actions are executed on the Kubernetes side.
func deleteNodeFromCloudProvider(node *apiv1.Node, cloudProvider cloudprovider.CloudProvider, recorder kube_record.EventRecorder) error {
    nodeGroup, err := cloudProvider.NodeGroupForNode(node)
    if err != nil {
        return fmt.Errorf("failed to node group for %s: %v", node.Name, err)
    }
    if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
        return fmt.Errorf("picked node that doesn't belong to a node group: %s", node.Name)
    }
    if err = nodeGroup.DeleteNodes([]*apiv1.Node{node}); err != nil {
        return fmt.Errorf("failed to delete %s: %v", node.Name, err)
    }
    recorder.Eventf(node, apiv1.EventTypeNormal, "ScaleDown", "node removed by cluster autoscaler")
    return nil
}
Author: kubernetes, Project: contrib, Lines: 16, Source: scale_down.go
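
The ScaleDown event emitted by this function is straightforward to observe in a test with a fake recorder. A minimal sketch, assuming client-go's record.NewFakeRecorder; the kube_record alias in this era of the code pointed at the equivalent in-tree package, so the exact import may differ.

package main

import (
    "fmt"

    "k8s.io/client-go/tools/record"
)

func main() {
    // The fake recorder buffers formatted events on a channel, which makes the
    // "ScaleDown" event observable. It ignores the object argument, so nil is
    // acceptable here.
    recorder := record.NewFakeRecorder(1)
    recorder.Eventf(nil, "Normal", "ScaleDown", "node removed by cluster autoscaler")
    fmt.Println(<-recorder.Events)
}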

Example 3: GetNodeInfosForGroups

// GetNodeInfosForGroups finds NodeInfos for all node groups used to manage the given nodes. It also returns a node group to sample node mapping.
// TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key.
func GetNodeInfosForGroups(nodes []*kube_api.Node, cloudProvider cloudprovider.CloudProvider, kubeClient *kube_client.Client) (map[string]*schedulercache.NodeInfo, error) {
    result := make(map[string]*schedulercache.NodeInfo)
    for _, node := range nodes {

        nodeGroup, err := cloudProvider.NodeGroupForNode(node)
        if err != nil {
            return map[string]*schedulercache.NodeInfo{}, err
        }
        if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
            continue
        }
        id := nodeGroup.Id()
        if _, found := result[id]; !found {
            nodeInfo, err := simulator.BuildNodeInfoForNode(node, kubeClient)
            if err != nil {
                return map[string]*schedulercache.NodeInfo{}, err
            }
            result[id] = nodeInfo
        }
    }
    return result, nil
}
Author: spxtr, Project: contrib, Lines: 24, Source: utils.go
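
The loop deduplicates by node group id: the first node seen for a group becomes that group's sample, so simulator.BuildNodeInfoForNode runs once per group rather than once per node. The pattern in isolation, with made-up node and group names:

package main

import "fmt"

func main() {
    // First-seen-wins deduplication, as in GetNodeInfosForGroups: one sample
    // node per group id, however many nodes the group manages.
    nodes := []struct{ name, group string }{
        {"n1", "g1"}, {"n2", "g1"}, {"n3", "g2"},
    }
    sample := make(map[string]string)
    for _, n := range nodes {
        if _, found := sample[n.group]; !found {
            sample[n.group] = n.name
        }
    }
    fmt.Println(sample) // map[g1:n1 g2:n3]
}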

Example 4: getEmptyNodes

// getEmptyNodes finds empty nodes among the passed candidates and returns a list of
// empty nodes that can be deleted at the same time.
func getEmptyNodes(candidates []*apiv1.Node, pods []*apiv1.Pod, maxEmptyBulkDelete int, cloudProvider cloudprovider.CloudProvider) []*apiv1.Node {
    emptyNodes := simulator.FindEmptyNodesToRemove(candidates, pods)
    availabilityMap := make(map[string]int)
    result := make([]*apiv1.Node, 0)
    for _, node := range emptyNodes {
        nodeGroup, err := cloudProvider.NodeGroupForNode(node)
        if err != nil {
            glog.Errorf("Failed to get group for %s", node.Name)
            continue
        }
        if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
            continue
        }
        var available int
        var found bool
        if available, found = availabilityMap[nodeGroup.Id()]; !found {
            size, err := nodeGroup.TargetSize()
            if err != nil {
                glog.Errorf("Failed to get size for %s: %v ", nodeGroup.Id(), err)
                continue
            }
            available = size - nodeGroup.MinSize()
            if available < 0 {
                available = 0
            }
            availabilityMap[nodeGroup.Id()] = available
        }
        if available > 0 {
            available--
            availabilityMap[nodeGroup.Id()] = available
            result = append(result, node)
        }
    }
    limit := maxEmptyBulkDelete
    if len(result) < limit {
        limit = len(result)
    }
    return result[:limit]
}
Author: kubernetes, Project: contrib, Lines: 41, Source: scale_down.go
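
The availabilityMap bookkeeping gives each group a deletion budget of TargetSize minus MinSize, decremented as empty nodes are selected, so a bulk delete can never push a group below its minimum. A worked example with a single group; the sizes are made up:

package main

import "fmt"

func main() {
    // One group with target size 5 and minimum size 3: at most 2 of its
    // empty nodes may be removed in one pass.
    targetSize, minSize := 5, 3
    available := targetSize - minSize
    if available < 0 {
        available = 0
    }

    emptyNodes := []string{"n1", "n2", "n3"} // all in the same group
    result := make([]string, 0)
    for _, node := range emptyNodes {
        if available > 0 {
            available--
            result = append(result, node)
        }
    }
    fmt.Println(result) // [n1 n2]: n3 is kept to respect the group's minimum
}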

Example 5: ScaleDown

// ScaleDown tries to scale down the cluster. It returns a ScaleDownResult indicating
// whether any node was removed and an error if one occurred.
func ScaleDown(
    nodes []*kube_api.Node,
    unneededNodes map[string]time.Time,
    unneededTime time.Duration,
    pods []*kube_api.Pod,
    cloudProvider cloudprovider.CloudProvider,
    client *kube_client.Client,
    predicateChecker *simulator.PredicateChecker,
    oldHints map[string]string,
    usageTracker *simulator.UsageTracker,
    recorder kube_record.EventRecorder) (ScaleDownResult, error) {

    now := time.Now()
    candidates := make([]*kube_api.Node, 0)
    for _, node := range nodes {
        if val, found := unneededNodes[node.Name]; found {

            glog.V(2).Infof("%s was unneeded for %s", node.Name, now.Sub(val).String())

            // Check how long the node was underutilized.
            if !val.Add(unneededTime).Before(now) {
                continue
            }

            nodeGroup, err := cloudProvider.NodeGroupForNode(node)
            if err != nil {
                glog.Errorf("Error while checking node group for %s: %v", node.Name, err)
                continue
            }
            if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
                glog.V(4).Infof("Skipping %s - no node group config", node.Name)
                continue
            }

            size, err := nodeGroup.TargetSize()
            if err != nil {
                glog.Errorf("Error while checking node group size %s: %v", nodeGroup.Id(), err)
                continue
            }

            if size <= nodeGroup.MinSize() {
                glog.V(1).Infof("Skipping %s - node group min size reached", node.Name)
                continue
            }

            candidates = append(candidates, node)
        }
    }
    if len(candidates) == 0 {
        glog.Infof("No candidates for scale down")
        return ScaleDownNoUnneeded, nil
    }

    // We look for only 1 node so new hints may be incomplete.
    nodesToRemove, _, err := simulator.FindNodesToRemove(candidates, nodes, pods, client, predicateChecker, 1, false,
        oldHints, usageTracker, time.Now())

    if err != nil {
        return ScaleDownError, fmt.Errorf("Find node to remove failed: %v", err)
    }
    if len(nodesToRemove) == 0 {
        glog.V(1).Infof("No node to remove")
        return ScaleDownNoNodeDeleted, nil
    }
    nodeToRemove := nodesToRemove[0]
    glog.Infof("Removing %s", nodeToRemove.Name)

    nodeGroup, err := cloudProvider.NodeGroupForNode(nodeToRemove)
    if err != nil {
        return ScaleDownError, fmt.Errorf("failed to node group for %s: %v", nodeToRemove.Name, err)
    }
    if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
        return ScaleDownError, fmt.Errorf("picked node that doesn't belong to a node group: %s", nodeToRemove.Name)
    }

    err = nodeGroup.DeleteNodes([]*kube_api.Node{nodeToRemove})
    simulator.RemoveNodeFromTracker(usageTracker, nodeToRemove.Name, unneededNodes)

    if err != nil {
        return ScaleDownError, fmt.Errorf("Failed to delete %s: %v", nodeToRemove.Name, err)
    }

    recorder.Eventf(nodeToRemove, kube_api.EventTypeNormal, "ScaleDown",
        "node removed by cluster autoscaler")

    return ScaleDownNodeDeleted, nil
}
Author: spxtr, Project: contrib, Lines: 89, Source: scale_down.go
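
The gate !val.Add(unneededTime).Before(now) used in this and the following examples is a double negative that is easy to misread: it skips the node unless it has already been unneeded for at least unneededTime. Unrolled with concrete durations; the timestamps are made up:

package main

import (
    "fmt"
    "time"
)

func main() {
    // A node recorded as unneeded 15 minutes ago, against a 10-minute
    // threshold: unneededSince + unneededTime lies in the past, so the
    // node is not skipped.
    now := time.Now()
    unneededTime := 10 * time.Minute
    unneededSince := now.Add(-15 * time.Minute)

    skip := !unneededSince.Add(unneededTime).Before(now)
    fmt.Println(skip) // false: the node remains a scale-down candidate
}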

Example 6: ScaleDown

// ScaleDown tries to scale down the cluster. It returns a ScaleDownResult indicating
// whether any node was removed and an error if one occurred.
func ScaleDown(
    nodes []*kube_api.Node,
    unneededNodes map[string]time.Time,
    unneededTime time.Duration,
    pods []*kube_api.Pod,
    cloudProvider cloudprovider.CloudProvider,
    client *kube_client.Client,
    predicateChecker *simulator.PredicateChecker) (ScaleDownResult, error) {

    now := time.Now()
    candidates := make([]*kube_api.Node, 0)
    for _, node := range nodes {
        if val, found := unneededNodes[node.Name]; found {

            glog.V(2).Infof("%s was unneeded for %s", node.Name, now.Sub(val).String())

            // Check how long the node was underutilized.
            if !val.Add(unneededTime).Before(now) {
                continue
            }

            nodeGroup, err := cloudProvider.NodeGroupForNode(node)
            if err != nil {
                glog.Errorf("Error while checking node group for %s: %v", node.Name, err)
                continue
            }
            if nodeGroup == nil {
                glog.V(4).Infof("Skipping %s - no node group config", node.Name)
                continue
            }

            size, err := nodeGroup.TargetSize()
            if err != nil {
                glog.Errorf("Error while checking node group size %s: %v", nodeGroup.Id(), err)
                continue
            }

            if size <= nodeGroup.MinSize() {
                glog.V(1).Infof("Skipping %s - node group min size reached", node.Name)
                continue
            }

            candidates = append(candidates, node)
        }
    }
    if len(candidates) == 0 {
        glog.Infof("No candidates for scale down")
        return ScaleDownNoUnneeded, nil
    }

    nodesToRemove, err := simulator.FindNodesToRemove(candidates, nodes, pods, client, predicateChecker, 1, false)
    if err != nil {
        return ScaleDownError, fmt.Errorf("Find node to remove failed: %v", err)
    }
    if len(nodesToRemove) == 0 {
        glog.V(1).Infof("No node to remove")
        return ScaleDownNoNodeDeleted, nil
    }
    nodeToRemove := nodesToRemove[0]
    glog.Infof("Removing %s", nodeToRemove.Name)

    nodeGroup, err := cloudProvider.NodeGroupForNode(nodeToRemove)
    if err != nil {
        return ScaleDownError, fmt.Errorf("failed to node group for %s: %v", nodeToRemove.Name, err)
    }
    if nodeGroup == nil {
        return ScaleDownError, fmt.Errorf("picked node that doesn't belong to a node group: %s", nodeToRemove.Name)
    }

    err = nodeGroup.DeleteNodes([]*kube_api.Node{nodeToRemove})
    if err != nil {
        return ScaleDownError, fmt.Errorf("Failed to delete %s: %v", nodeToRemove.Name, err)
    }

    return ScaleDownNodeDeleted, nil
}
Author: gmarek, Project: contrib, Lines: 78, Source: scale_down.go

Example 7: ScaleDown

// ScaleDown tries to scale down the cluster. It returns a ScaleDownResult indicating
// whether any node was removed and an error if one occurred.
func ScaleDown(
    nodes []*kube_api.Node,
    lastUtilizationMap map[string]float64,
    unneededNodes map[string]time.Time,
    unneededTime time.Duration,
    pods []*kube_api.Pod,
    cloudProvider cloudprovider.CloudProvider,
    client *kube_client.Client,
    predicateChecker *simulator.PredicateChecker,
    oldHints map[string]string,
    usageTracker *simulator.UsageTracker,
    recorder kube_record.EventRecorder,
    maxEmptyBulkDelete int) (ScaleDownResult, error) {

    now := time.Now()
    candidates := make([]*kube_api.Node, 0)
    for _, node := range nodes {
        if val, found := unneededNodes[node.Name]; found {

            glog.V(2).Infof("%s was unneeded for %s", node.Name, now.Sub(val).String())

            // Check how long the node was underutilized.
            if !val.Add(unneededTime).Before(now) {
                continue
            }

            nodeGroup, err := cloudProvider.NodeGroupForNode(node)
            if err != nil {
                glog.Errorf("Error while checking node group for %s: %v", node.Name, err)
                continue
            }
            if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
                glog.V(4).Infof("Skipping %s - no node group config", node.Name)
                continue
            }

            size, err := nodeGroup.TargetSize()
            if err != nil {
                glog.Errorf("Error while checking node group size %s: %v", nodeGroup.Id(), err)
                continue
            }

            if size <= nodeGroup.MinSize() {
                glog.V(1).Infof("Skipping %s - node group min size reached", node.Name)
                continue
            }

            candidates = append(candidates, node)
        }
    }
    if len(candidates) == 0 {
        glog.Infof("No candidates for scale down")
        return ScaleDownNoUnneeded, nil
    }

    // Trying to delete empty nodes in bulk. If there are no empty nodes then CA will
    // try to delete not-so-empty nodes, possibly killing some pods and allowing them
    // to recreate on other nodes.
    emptyNodes := getEmptyNodes(candidates, pods, maxEmptyBulkDelete, cloudProvider)
    if len(emptyNodes) > 0 {
        confirmation := make(chan error, len(emptyNodes))
        for _, node := range emptyNodes {
            glog.V(0).Infof("Scale-down: removing empty node %s", node.Name)
            simulator.RemoveNodeFromTracker(usageTracker, node.Name, unneededNodes)
            go func(nodeToDelete *kube_api.Node) {
                confirmation <- deleteNodeFromCloudProvider(nodeToDelete, cloudProvider, recorder)
            }(node)
        }
        var finalError error
        for range emptyNodes {
            if err := <-confirmation; err != nil {
                glog.Errorf("Problem with empty node deletion: %v", err)
                finalError = err
            }
        }
        if finalError == nil {
            return ScaleDownNodeDeleted, nil
        }
        return ScaleDownError, fmt.Errorf("failed to delete at least one empty node: %v", finalError)
    }

    // We look for only 1 node so new hints may be incomplete.
    nodesToRemove, _, err := simulator.FindNodesToRemove(candidates, nodes, pods, client, predicateChecker, 1, false,
        oldHints, usageTracker, time.Now())

    if err != nil {
        return ScaleDownError, fmt.Errorf("Find node to remove failed: %v", err)
    }
    if len(nodesToRemove) == 0 {
        glog.V(1).Infof("No node to remove")
        return ScaleDownNoNodeDeleted, nil
    }
    toRemove := nodesToRemove[0]
    utilization := lastUtilizationMap[toRemove.Node.Name]
    podNames := make([]string, 0, len(toRemove.PodsToReschedule))
    for _, pod := range toRemove.PodsToReschedule {
        podNames = append(podNames, pod.Namespace+"/"+pod.Name)
    }
//......... remainder of this example omitted .........
Author: Q-Lee, Project: contrib, Lines: 101, Source: scale_down.go
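
The empty-node branch in this example fans out one deletion goroutine per node and collects the results over a buffered channel sized to the batch, so every send has a matching receive and no goroutine can block or leak. The pattern in isolation; the node names and the simulated failure are made up:

package main

import (
    "errors"
    "fmt"
)

func main() {
    nodes := []string{"n1", "n2", "n3"}
    confirmation := make(chan error, len(nodes))

    // Fan out: one goroutine per node, each sending exactly one result.
    for _, node := range nodes {
        go func(name string) {
            if name == "n2" {
                confirmation <- errors.New("delete failed for " + name) // simulated failure
                return
            }
            confirmation <- nil
        }(node)
    }

    // Fan in: one receive per node; the last error seen wins, as in ScaleDown.
    var finalError error
    for range nodes {
        if err := <-confirmation; err != nil {
            finalError = err
        }
    }
    fmt.Println(finalError) // delete failed for n2
}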


Note: the k8s.io/contrib/cluster-autoscaler/cloudprovider.CloudProvider.NodeGroupForNode examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use must follow each project's license. Do not republish without permission.