

Golang api.IsNodeReady Function Code Examples

This article collects typical usage examples of the IsNodeReady function from the Golang package k8s.io/kubernetes/pkg/api. If you are wondering what IsNodeReady does, how to call it, or what real-world usage looks like, the curated examples below should help.


Six IsNodeReady code examples are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Golang code samples.
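Before diving into the examples, it helps to know what the helper actually does: IsNodeReady scans the node's status conditions for the NodeReady condition and reports whether it is True. A minimal sketch follows (paraphrased; consult the k8s.io/kubernetes/pkg/api source for the authoritative version):

func IsNodeReady(node *api.Node) bool {
    // A node is considered ready when its NodeReady condition is True.
    for _, c := range node.Status.Conditions {
        if c.Type == api.NodeReady {
            return c.Status == api.ConditionTrue
        }
    }
    // No NodeReady condition reported yet: treat the node as not ready.
    return false
}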

Example 1: waitForNodeReady

func waitForNodeReady() {
    const (
        // nodeReadyTimeout is the time to wait for node to become ready.
        nodeReadyTimeout = 2 * time.Minute
        // nodeReadyPollInterval is the interval to check node ready.
        nodeReadyPollInterval = 1 * time.Second
    )
    config, err := framework.LoadConfig()
    Expect(err).NotTo(HaveOccurred())
    client, err := clientset.NewForConfig(config)
    Expect(err).NotTo(HaveOccurred())
    Eventually(func() error {
        nodes, err := client.Nodes().List(api.ListOptions{})
        Expect(err).NotTo(HaveOccurred())
        if nodes == nil {
            return fmt.Errorf("node list is nil")
        }
        // The node e2e suite runs against a single node, so more than one
        // node in the list indicates a misconfigured test environment.
        Expect(len(nodes.Items)).NotTo(BeNumerically(">", 1))
        if len(nodes.Items) == 0 {
            return fmt.Errorf("empty node list: %+v", nodes)
        }
        node := nodes.Items[0]
        if !api.IsNodeReady(&node) {
            return fmt.Errorf("node is not ready: %+v", node)
        }
        return nil
    }, nodeReadyTimeout, nodeReadyPollInterval).Should(Succeed())
}
Author: oszi, Project: kubernetes, Lines: 28, Source: e2e_node_suite_test.go
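In the upstream node e2e suite, a helper like this is typically run once before any specs execute. A hypothetical Ginkgo hookup (this wiring is an assumption, not part of the excerpt above):

var _ = BeforeSuite(func() {
    // Hypothetical wiring (assumption): block until the node is ready
    // before any specs run.
    waitForNodeReady()
})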

Example 2: nodeShouldRunDaemonPod

func nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
    // Check if the node satisfies the daemon set's node selector.
    nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
    shouldRun := nodeSelector.Matches(labels.Set(node.Labels))
    // If the daemon set specifies a node name, check that it matches node.Name.
    shouldRun = shouldRun && (ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name)
    // If the node is not ready, don't run on it.
    // TODO(mikedanese): remove this once daemonpods forgive nodes
    shouldRun = shouldRun && api.IsNodeReady(node)

    // If the node is unschedulable, don't run on it.
    // TODO(mikedanese): remove this once we have the right node admittance levels.
    // See https://github.com/kubernetes/kubernetes/issues/17297#issuecomment-156857375.
    shouldRun = shouldRun && !node.Spec.Unschedulable
    return shouldRun
}
Author: yghannam, Project: kubernetes, Lines: 16, Source: controller.go
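This version chains every check into a single boolean; Example 3 below restructures the same logic as early returns and layers on additional scheduling checks. As a quick illustration of the unschedulable check, here is a hypothetical call (the node fixture and expected result are invented for illustration):

// Hypothetical fixture (assumption): a ready but cordoned node.
node := &api.Node{
    ObjectMeta: api.ObjectMeta{Name: "node-1"},
    Spec:       api.NodeSpec{Unschedulable: true},
    Status: api.NodeStatus{
        Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}},
    },
}
ds := &extensions.DaemonSet{} // an empty node selector matches every node
fmt.Println(nodeShouldRunDaemonPod(node, ds)) // false: the node is cordoned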

Example 3: nodeShouldRunDaemonPod

func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
    // Check if the node satisfies the daemon set's node selector.
    nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
    if !nodeSelector.Matches(labels.Set(node.Labels)) {
        return false
    }
    // If the daemon set specifies a node name, check that it matches node.Name.
    if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
        return false
    }
    // If the node is not ready, don't run on it.
    // TODO(mikedanese): remove this once daemonpods forgive nodes
    if !api.IsNodeReady(node) {
        return false
    }

    // Don't schedule on nodes reporting an out-of-disk condition.
    for _, c := range node.Status.Conditions {
        if c.Type == api.NodeOutOfDisk && c.Status == api.ConditionTrue {
            return false
        }
    }

    // Simulate placing the daemon pod on this node alongside the pods
    // already assigned to it.
    newPod := &api.Pod{Spec: ds.Spec.Template.Spec}
    newPod.Spec.NodeName = node.Name
    pods := []*api.Pod{newPod}

    for _, m := range dsc.podStore.Store.List() {
        pod := m.(*api.Pod)
        if pod.Spec.NodeName != node.Name {
            continue
        }
        pods = append(pods, pod)
    }

    // Reject the node if the combined pods would exceed its allocatable
    // CPU or memory.
    _, notFittingCPU, notFittingMemory := predicates.CheckPodsExceedingFreeResources(pods, node.Status.Allocatable)
    if len(notFittingCPU)+len(notFittingMemory) != 0 {
        return false
    }

    // Reject the node if the daemon pod's host ports collide with ports
    // already in use by pods on the node.
    ports := sets.String{}
    for _, pod := range pods {
        if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
            return false
        }
    }
    return true
    return true
}
Author: initlove, Project: kubernetes, Lines: 45, Source: controller.go

Example 4: waitForNodeReady

func waitForNodeReady() {
    const (
        // nodeReadyTimeout is the time to wait for node to become ready.
        nodeReadyTimeout = 2 * time.Minute
        // nodeReadyPollInterval is the interval to check node ready.
        nodeReadyPollInterval = 1 * time.Second
    )
    client, err := getAPIServerClient()
    Expect(err).NotTo(HaveOccurred(), "should be able to get apiserver client.")
    Eventually(func() error {
        node, err := getNode(client)
        if err != nil {
            return fmt.Errorf("failed to get node: %v", err)
        }
        if !api.IsNodeReady(node) {
            return fmt.Errorf("node is not ready: %+v", node)
        }
        return nil
    }, nodeReadyTimeout, nodeReadyPollInterval).Should(Succeed())
}
Author: eljefedelrodeodeljefe, Project: kubernetes, Lines: 20, Source: e2e_node_suite_test.go

Example 5: CreateClientAndWaitForAPI

func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Clientset, error) {
    adminClientConfig, err := clientcmd.NewDefaultClientConfig(
        *adminConfig,
        &clientcmd.ConfigOverrides{},
    ).ClientConfig()
    if err != nil {
        return nil, fmt.Errorf("<master/apiclient> failed to create API client configuration [%v]", err)
    }

    fmt.Println("<master/apiclient> created API client configuration")

    client, err := clientset.NewForConfig(adminClientConfig)
    if err != nil {
        return nil, fmt.Errorf("<master/apiclient> failed to create API client [%v]", err)
    }

    fmt.Println("<master/apiclient> created API client, waiting for the control plane to become ready")

    start := time.Now()
    wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
        cs, err := client.ComponentStatuses().List(api.ListOptions{})
        if err != nil {
            return false, nil
        }
        // TODO(phase2) must revisit this when we implement HA
        if len(cs.Items) < 3 {
            fmt.Println("<master/apiclient> not all control plane components are ready yet")
            return false, nil
        }
        for _, item := range cs.Items {
            for _, condition := range item.Conditions {
                if condition.Type != api.ComponentHealthy {
                    fmt.Printf("<master/apiclient> control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
                    return false, nil
                }
            }
        }

        fmt.Printf("<master/apiclient> all control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
        return true, nil
    })

    fmt.Println("<master/apiclient> waiting for at least one node to register and become ready")
    start = time.Now()
    wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
        nodeList, err := client.Nodes().List(api.ListOptions{})
        if err != nil {
            fmt.Println("<master/apiclient> temporarily unable to list nodes (will retry)")
            return false, nil
        }
        if len(nodeList.Items) < 1 {
            return false, nil
        }
        n := &nodeList.Items[0]
        if !api.IsNodeReady(n) {
            fmt.Println("<master/apiclient> first node has registered, but is not ready yet")
            return false, nil
        }

        fmt.Printf("<master/apiclient> first node is ready after %f seconds\n", time.Since(start).Seconds())
        return true, nil
    })

    return client, nil
}
Author: ncdc, Project: kubernetes, Lines: 65, Source: apiclient.go
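A hypothetical call site (the kubeconfig path and error handling are assumptions for illustration):

// Hypothetical call site (assumption): load an admin kubeconfig from disk
// and block until the control plane and the first node are ready.
adminConfig, err := clientcmd.LoadFromFile("/etc/kubernetes/admin.conf")
if err != nil {
    log.Fatalf("failed to load kubeconfig: %v", err)
}
client, err := CreateClientAndWaitForAPI(adminConfig)
if err != nil {
    log.Fatalf("failed to create API client: %v", err)
}
_ = client // ready for use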

Example 6: manage

func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) {
    // Find out which nodes are running the daemon pods selected by ds.
    nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
    if err != nil {
        glog.Errorf("Error getting node to daemon pod mapping for daemon set %+v: %v", ds, err)
    }

    // For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
    // pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
    nodeList, err := dsc.nodeStore.List()
    if err != nil {
        glog.Errorf("Couldn't get list of nodes when syncing daemon set %+v: %v", ds, err)
    }
    var nodesNeedingDaemonPods, podsToDelete []string
    for _, node := range nodeList.Items {
        // Check if the node satisfies the daemon set's node selector.
        nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
        shouldRun := nodeSelector.Matches(labels.Set(node.Labels))
        // If the daemon set specifies a node name, check that it matches nodeName.
        nodeName := node.Name
        shouldRun = shouldRun && (ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == nodeName)

        // If the node is not ready, don't run on it.
        // TODO(mikedanese): remove this once daemonpods forgive nodes
        shouldRun = shouldRun && api.IsNodeReady(&node)

        daemonPods, isRunning := nodeToDaemonPods[nodeName]
        if shouldRun && !isRunning {
            // If daemon pod is supposed to be running on node, but isn't, create daemon pod.
            nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, nodeName)
        } else if shouldRun && len(daemonPods) > 1 {
            // If daemon pod is supposed to be running on node, but more than 1 daemon pod is running, delete the excess daemon pods.
            // Sort the daemon pods by creation time, so that the oldest is preserved.
            sort.Sort(podByCreationTimestamp(daemonPods))
            for i := 1; i < len(daemonPods); i++ {
                podsToDelete = append(podsToDelete, daemonPods[i].Name)
            }
        } else if !shouldRun && isRunning {
            // If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
            for i := range daemonPods {
                podsToDelete = append(podsToDelete, daemonPods[i].Name)
            }
        }
    }

    // We need to set expectations before creating/deleting pods to avoid race conditions.
    dsKey, err := controller.KeyFunc(ds)
    if err != nil {
        glog.Errorf("Couldn't get key for object %+v: %v", ds, err)
        return
    }
    dsc.expectations.SetExpectations(dsKey, len(nodesNeedingDaemonPods), len(podsToDelete))

    glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v", ds.Name, nodesNeedingDaemonPods)
    for i := range nodesNeedingDaemonPods {
        if err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[i], ds.Namespace, ds.Spec.Template, ds); err != nil {
            glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
            dsc.expectations.CreationObserved(dsKey)
            util.HandleError(err)
        }
    }

    glog.V(4).Infof("Pods to delete for daemon set %s: %+v", ds.Name, podsToDelete)
    for i := range podsToDelete {
        if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[i]); err != nil {
            glog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
            dsc.expectations.DeletionObserved(dsKey)
            util.HandleError(err)
        }
    }
}
Author: johnmccawley, Project: origin, Lines: 71, Source: controller.go
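The sort helper podByCreationTimestamp is referenced above but not included in the excerpt. A sketch of what it plausibly looks like (assumed to mirror the upstream daemon set controller; ties broken by pod name):

// Sketch (assumption): a sort.Interface that orders pods oldest-first by
// creation timestamp, falling back to name on ties.
type podByCreationTimestamp []*api.Pod

func (o podByCreationTimestamp) Len() int      { return len(o) }
func (o podByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o podByCreationTimestamp) Less(i, j int) bool {
    if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
        return o[i].Name < o[j].Name
    }
    return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}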


Note: The k8s.io/kubernetes/pkg/api.IsNodeReady function examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open source code and documentation platforms. The snippets were selected from open source projects contributed by many developers, and copyright in the code remains with the original authors. Follow each project's license when redistributing or using the code, and do not repost this article without permission.