當前位置: 首頁>>代碼示例>>Golang>>正文


Golang framework.GetReadySchedulableNodesOrDie函數代碼示例

本文整理匯總了Golang中k8s/io/kubernetes/test/e2e/framework.GetReadySchedulableNodesOrDie函數的典型用法代碼示例。如果您正苦於以下問題:Golang GetReadySchedulableNodesOrDie函數的具體用法?Golang GetReadySchedulableNodesOrDie怎麽用?Golang GetReadySchedulableNodesOrDie使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。


在下文中一共展示了GetReadySchedulableNodesOrDie函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。

示例1: createNetProxyPods

// createNetProxyPods launches one net-shell pod per ready schedulable node,
// labels each pod with the given selector, waits until every pod reports
// Ready, and returns the pods as re-fetched from the API server.
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodes := framework.GetReadySchedulableNodesOrDie(config.f.Client)

    // Schedule one pod per node, tagging each with the selector labels.
    created := make([]*api.Pod, 0, len(nodes.Items))
    for idx, node := range nodes.Items {
        spec := config.createNetShellPodSpec(fmt.Sprintf("%s-%d", podName, idx), node.Name)
        spec.ObjectMeta.Labels = selector
        created = append(created, config.createPod(spec))
    }

    // Block until each pod is Ready, then fetch its up-to-date object.
    running := make([]*api.Pod, 0, len(nodes.Items))
    for _, pod := range created {
        framework.ExpectNoError(config.f.WaitForPodReady(pod.Name))
        refreshed, err := config.getPodClient().Get(pod.Name)
        framework.ExpectNoError(err)
        running = append(running, refreshed)
    }

    return running
}
開發者ID:FlyWings,項目名稱:kubernetes,代碼行數:25,代碼來源:kubeproxy.go

示例2: testReboot

// testReboot reboots every ready schedulable node concurrently using
// rebootCmd and fails the test if any node does not come back in time.
//
// Fix: the original set a shared `failed` bool from multiple goroutines
// without synchronization — a data race under `go test -race`. Each
// goroutine already records its outcome in its own result[ix] slot, so the
// overall verdict is now computed single-threaded after wg.Wait().
func testReboot(c *client.Client, rebootCmd string) {
    // Get all nodes, and kick off the test on each.
    nodelist := framework.GetReadySchedulableNodesOrDie(c)
    result := make([]bool, len(nodelist.Items))
    wg := sync.WaitGroup{}
    wg.Add(len(nodelist.Items))

    for ix := range nodelist.Items {
        go func(ix int) {
            defer wg.Done()
            n := nodelist.Items[ix]
            // Each goroutine writes only its own slot; no locking needed.
            result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd)
        }(ix)
    }

    // Wait for all to finish and check the final result.
    wg.Wait()

    failed := false
    for ix := range nodelist.Items {
        if !result[ix] {
            framework.Logf("Node %s failed reboot test.", nodelist.Items[ix].ObjectMeta.Name)
            failed = true
        }
    }
    if failed {
        framework.Failf("Test failed; at least one node failed to reboot in the time given.")
    }
}
開發者ID:ncdc,項目名稱:kubernetes,代碼行數:32,代碼來源:reboot.go

示例3: isTestEnabled

// isTestEnabled reports whether this test should run in the current
// environment: on node e2e, only when the configured node name identifies a
// GCI dev image of version >= 54; on cluster e2e (empty node name), only
// when every ready schedulable node runs a "Google Container-VM" OS image.
func isTestEnabled(c clientset.Interface) bool {
    // Node e2e: decide purely from the configured node name.
    if nodeName := framework.TestContext.NodeName; nodeName != "" {
        if !strings.Contains(nodeName, "-gci-dev-") {
            return false
        }
        gciVersionRe := regexp.MustCompile("-gci-dev-([0-9]+)-")
        matches := gciVersionRe.FindStringSubmatch(nodeName)
        if len(matches) != 2 {
            return false
        }
        version, err := strconv.Atoi(matches[1])
        if err != nil {
            glog.Errorf("Error parsing GCI version from NodeName %q: %v", nodeName, err)
            return false
        }
        return version >= 54
    }

    // For cluster e2e test, because nodeName is empty, retrieve the node objects from api server
    // and check their images. Only run NFSv4 and GlusterFS if nodes are using GCI image for now.
    for _, node := range framework.GetReadySchedulableNodesOrDie(c).Items {
        if !strings.Contains(node.Status.NodeInfo.OSImage, "Google Container-VM") {
            return false
        }
    }
    return true
}
開發者ID:kubernetes,項目名稱:kubernetes,代碼行數:29,代碼來源:volumes.go

示例4: setup

// setup prepares the kube-proxy test fixture in order: it picks a unique
// label selector, collects node addresses (requiring at least two reachable
// nodes), optionally creates a LoadBalancer service, then starts the backend
// "netserver" pods, the NodePort service, and finally the client test pods.
func (config *KubeProxyTestConfig) setup() {
    By("creating a selector")
    // A fresh UUID-based label key keeps this run's pods/services isolated
    // from any other objects in the namespace.
    selectorName := "selector-" + string(util.NewUUID())
    serviceSelector := map[string]string{
        selectorName: "true",
    }

    By("Getting node addresses")
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
    config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
    if len(config.externalAddrs) < 2 {
        // fall back to legacy IPs
        config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
    }
    // The cross-node connectivity checks need at least two distinct nodes.
    Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
    config.nodes = nodeList.Items

    if enableLoadBalancerTest {
        By("Creating the LoadBalancer Service on top of the pods in kubernetes")
        config.createLoadBalancerService(serviceSelector)
    }

    By("Creating the service pods in kubernetes")
    podName := "netserver"
    config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

    By("Creating the service on top of the pods in kubernetes")
    config.createNodePortService(serviceSelector)

    By("Creating test pods")
    config.createTestPods()
}
開發者ID:FlyWings,項目名稱:kubernetes,代碼行數:33,代碼來源:kubeproxy.go

示例5: setup

// setup includes setupCore and also sets up services
func (config *NetworkingTestConfig) setup(selector map[string]string) {
    config.setupCore(selector)

    By("Getting node addresses")
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
    config.ExternalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
    if len(config.ExternalAddrs) < 2 {
        // fall back to legacy IPs
        config.ExternalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
    }
    Expect(len(config.ExternalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
    config.Nodes = nodeList.Items

    By("Creating the service on top of the pods in kubernetes")
    config.createNodePortService(selector)

    for _, p := range config.NodePortService.Spec.Ports {
        switch p.Protocol {
        case api.ProtocolUDP:
            config.NodeUdpPort = int(p.NodePort)
        case api.ProtocolTCP:
            config.NodeHttpPort = int(p.NodePort)
        default:
            continue
        }
    }
    config.ClusterIP = config.NodePortService.Spec.ClusterIP
    config.NodeIP = config.ExternalAddrs[0]
}
開發者ID:olegshaldybin,項目名稱:kubernetes,代碼行數:31,代碼來源:networking_utils.go

示例6: createNetProxyPods

// createNetProxyPods starts one net-shell pod per selected node (at most
// maxNetProxyPodsCount nodes, chosen at random), labels each pod with the
// given selector, waits for Readiness, and returns the refreshed pods.
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)

    // To make this test work reasonably fast in large clusters,
    // we limit the number of NetProxyPods to no more than 100 ones
    // on random nodes.
    nodes := shuffleNodes(nodeList.Items)
    if len(nodes) > maxNetProxyPodsCount {
        nodes = nodes[:maxNetProxyPodsCount]
    }

    // Schedule one pod per selected node, tagging each with the selector.
    created := make([]*api.Pod, 0, len(nodes))
    for idx, node := range nodes {
        spec := config.createNetShellPodSpec(fmt.Sprintf("%s-%d", podName, idx), node.Name)
        spec.ObjectMeta.Labels = selector
        created = append(created, config.createPod(spec))
    }

    // Block until each pod is Ready, then fetch its up-to-date object.
    running := make([]*api.Pod, 0, len(nodes))
    for _, pod := range created {
        framework.ExpectNoError(config.f.WaitForPodReady(pod.Name))
        refreshed, err := config.getPodClient().Get(pod.Name)
        framework.ExpectNoError(err)
        running = append(running, refreshed)
    }

    return running
}
開發者ID:olegshaldybin,項目名稱:kubernetes,代碼行數:33,代碼來源:networking_utils.go

示例7: pickNode

// pickNode returns the name of an arbitrary ready schedulable node, or an
// error when the cluster reports none.
func pickNode(c *client.Client) (string, error) {
    // TODO: investigate why it doesn't work on master Node.
    nodeList := framework.GetReadySchedulableNodesOrDie(c)
    if len(nodeList.Items) > 0 {
        return nodeList.Items[0].Name, nil
    }
    return "", fmt.Errorf("no nodes exist, can't test node proxy")
}
開發者ID:invenfantasy,項目名稱:kubernetes,代碼行數:8,代碼來源:proxy.go

示例8: nodeHasDiskPressure

// nodeHasDiskPressure reports whether the DiskPressure condition on the
// first ready schedulable node is True.
// NOTE(review): only Items[0] is inspected — presumably a single-node
// (node e2e) environment; confirm before reusing in cluster tests.
func nodeHasDiskPressure(cs clientset.Interface) bool {
    conditions := framework.GetReadySchedulableNodesOrDie(cs).Items[0].Status.Conditions
    for _, cond := range conditions {
        if cond.Type != api.NodeDiskPressure {
            continue
        }
        return cond.Status == api.ConditionTrue
    }
    return false
}
開發者ID:Q-Lee,項目名稱:kubernetes,代碼行數:9,代碼來源:disk_eviction_test.go

示例9: CleanupNodes

// CleanupNodes deletes every ready schedulable node. Individual delete
// failures are only logged so the remaining nodes are still attempted;
// the method always returns nil.
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
    nodeList := e2eframework.GetReadySchedulableNodesOrDie(p.client)
    for i := range nodeList.Items {
        name := nodeList.Items[i].Name
        if err := p.client.Core().Nodes().Delete(name, &v1.DeleteOptions{}); err != nil {
            glog.Errorf("Error while deleting Node: %v", err)
        }
    }
    return nil
}
開發者ID:alex-mohr,項目名稱:kubernetes,代碼行數:9,代碼來源:perf_utils.go

示例10: nodesWithPoolLabel

// nodesWithPoolLabel returns the number of nodes that have the "gke-nodepool"
// label with the given node pool name.
func nodesWithPoolLabel(f *framework.Framework, poolName string) int {
    nodeCount := 0
    nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
    for _, node := range nodeList.Items {
        if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {
            nodeCount++
        }
    }
    return nodeCount
}
開發者ID:CodeJuan,項目名稱:kubernetes,代碼行數:12,代碼來源:gke_node_pools.go

示例11: RunLogPodsWithSleepOf

// RunLogPodsWithSleepOf creates a pod on every node, logs continuously (with "sleep" pauses), and verifies that the log string
// was produced in each and every pod at least once.  The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {

    nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
    totalPods := len(nodes.Items)
    Expect(totalPods).NotTo(Equal(0))

    kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text.

    appName := "logging-soak" + podname
    podlables := f.CreatePodsPerNodeForSimpleApp(
        appName,
        func(n v1.Node) v1.PodSpec {
            return v1.PodSpec{
                Containers: []v1.Container{{
                    Name:  "logging-soak",
                    Image: "gcr.io/google_containers/busybox:1.24",
                    Args: []string{
                        "/bin/sh",
                        "-c",
                        fmt.Sprintf("while true ; do echo %v ; sleep %v; done", kilobyte, sleep.Seconds()),
                    },
                }},
                NodeName:      n.Name,
                RestartPolicy: v1.RestartPolicyAlways,
            }
        },
        totalPods,
    )

    logSoakVerification := f.NewClusterVerification(
        f.Namespace,
        framework.PodStateVerification{
            Selectors:   podlables,
            ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
            // we don't validate total log data, since there is no gaurantee all logs will be stored forever.
            // instead, we just validate that some logs are being created in std out.
            Verify: func(p v1.Pod) (bool, error) {
                s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
                return s != "", err
            },
        },
    )

    largeClusterForgiveness := time.Duration(len(nodes.Items)/5) * time.Second // i.e. a 100 node cluster gets an extra 20 seconds to complete.
    pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)

    if err != nil {
        framework.Failf("Error in wait... %v", err)
    } else if len(pods) < totalPods {
        framework.Failf("Only got %v out of %v", len(pods), totalPods)
    }
}
開發者ID:alex-mohr,項目名稱:kubernetes,代碼行數:54,代碼來源:logging_soak.go

示例12: getExpectReplicasFuncLinear

// getExpectReplicasFuncLinear returns a function that computes the expected
// DNS autoscaler replica count under linear scaling: the larger of
// ceil(nodes / nodesPerReplica) and ceil(cores / coresPerReplica), with a
// floor of one replica. A zero or negative ratio disables that term.
func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc {
    return func(c clientset.Interface) int {
        nodes := framework.GetReadySchedulableNodesOrDie(c).Items
        byNodes, byCores := 0.0, 0.0
        if params.nodesPerReplica > 0 {
            byNodes = math.Ceil(float64(len(nodes)) / params.nodesPerReplica)
        }
        if params.coresPerReplica > 0 {
            byCores = math.Ceil(float64(getScheduableCores(nodes)) / params.coresPerReplica)
        }
        return int(math.Max(1.0, math.Max(byNodes, byCores)))
    }
}
開發者ID:kubernetes,項目名稱:kubernetes,代碼行數:14,代碼來源:dns_autoscaling.go

示例13: PrepareNodes

// PrepareNodes creates the configured number of fake Ready nodes in the API
// server, then applies each strategy in countToStrategy to its contiguous
// slice of the created nodes.
// NOTE(review): the node preparation loop assumes the node list returned by
// the API server has at least sum(Count) entries — confirm callers never
// configure more nodes than get created.
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
    numNodes := 0
    for _, v := range p.countToStrategy {
        numNodes += v.Count
    }

    glog.Infof("Making %d nodes", numNodes)
    // Template for every fake node; names are generated from nodeNamePrefix.
    baseNode := &v1.Node{
        ObjectMeta: v1.ObjectMeta{
            GenerateName: p.nodeNamePrefix,
        },
        Spec: v1.NodeSpec{
            // TODO: investigate why this is needed.
            ExternalID: "foo",
        },
        Status: v1.NodeStatus{
            Capacity: v1.ResourceList{
                v1.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
                v1.ResourceCPU:    resource.MustParse("4"),
                v1.ResourceMemory: resource.MustParse("32Gi"),
            },
            Phase: v1.NodeRunning,
            Conditions: []v1.NodeCondition{
                {Type: v1.NodeReady, Status: v1.ConditionTrue},
            },
        },
    }
    for i := 0; i < numNodes; i++ {
        // Fatalf aborts the whole process on a create failure.
        if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
            glog.Fatalf("Error creating node: %v", err)
        }
    }

    // Walk the created nodes, assigning each strategy to its Count-sized
    // contiguous block.
    nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
    index := 0
    sum := 0
    for _, v := range p.countToStrategy {
        sum += v.Count
        for ; index < sum; index++ {
            if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
                glog.Errorf("Aborting node preparation: %v", err)
                return err
            }
        }
    }
    return nil
}
開發者ID:alex-mohr,項目名稱:kubernetes,代碼行數:47,代碼來源:perf_utils.go

示例14: setup

// setup prepares the networking test fixture in order: it picks a unique
// label selector, collects node addresses (requiring at least two reachable
// nodes), starts the backend "netserver" pods, the NodePort service, and
// the client test pods, then caches the service's node ports, cluster IP,
// a node IP, and the retry budget derived from the endpoint count.
func (config *NetworkingTestConfig) setup() {
    By("creating a selector")
    // A fresh UUID-based label key keeps this run's pods/services isolated
    // from any other objects in the namespace.
    selectorName := "selector-" + string(uuid.NewUUID())
    serviceSelector := map[string]string{
        selectorName: "true",
    }

    By("Getting node addresses")
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
    config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
    if len(config.externalAddrs) < 2 {
        // fall back to legacy IPs
        config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
    }
    // Cross-node tests need two distinct node addresses to be meaningful.
    Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
    config.nodes = nodeList.Items

    By("Creating the service pods in kubernetes")
    podName := "netserver"
    config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

    By("Creating the service on top of the pods in kubernetes")
    config.createNodePortService(serviceSelector)

    By("Creating test pods")
    config.createTestPods()
    // Record the allocated NodePort for each protocol exposed by the service.
    for _, p := range config.nodePortService.Spec.Ports {
        switch p.Protocol {
        case api.ProtocolUDP:
            config.nodeUdpPort = int(p.NodePort)
        case api.ProtocolTCP:
            config.nodeHttpPort = int(p.NodePort)
        default:
            continue
        }
    }

    // Retry budget scales quadratically with endpoints plus a fixed margin.
    epCount := len(config.endpointPods)
    config.maxTries = epCount*epCount + testTries
    config.clusterIP = config.nodePortService.Spec.ClusterIP
    config.nodeIP = config.externalAddrs[0]
}
開發者ID:Bhaal22,項目名稱:kubernetes,代碼行數:43,代碼來源:networking_utils.go

示例15: checkNodesVersions

// checkNodesVersions verifies that every ready schedulable node reports
// kubelet and kube-proxy versions that start with want (after stripping the
// leading "v"); it returns an error naming the first mismatching node.
func checkNodesVersions(cs clientset.Interface, want string) error {
    for _, n := range framework.GetReadySchedulableNodesOrDie(cs).Items {
        // Strip the "v" prefix before prefix-matching because:
        // want    looks like:  0.19.3-815-g50e67d4
        // kv/kpv  look  like: v0.19.3-815-g50e67d4034e858-dirty
        kv := strings.TrimPrefix(n.Status.NodeInfo.KubeletVersion, "v")
        kpv := strings.TrimPrefix(n.Status.NodeInfo.KubeProxyVersion, "v")
        if !strings.HasPrefix(kv, want) {
            return fmt.Errorf("node %s had kubelet version %s which does not start with %s",
                n.ObjectMeta.Name, kv, want)
        }
        if !strings.HasPrefix(kpv, want) {
            return fmt.Errorf("node %s had kube-proxy version %s which does not start with %s",
                n.ObjectMeta.Name, kpv, want)
        }
    }
    return nil
}
開發者ID:alex-mohr,項目名稱:kubernetes,代碼行數:19,代碼來源:cluster_upgrade.go


注:本文中的k8s/io/kubernetes/test/e2e/framework.GetReadySchedulableNodesOrDie函數示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。