This article collects typical usage examples of the Golang function k8s.io/kubernetes/test/e2e/framework.ListSchedulableNodesOrDie. If you are wondering what ListSchedulableNodesOrDie does, how to call it, or what real-world usage looks like, the selected code examples below should help.
Ten code examples of ListSchedulableNodesOrDie are shown below, sorted by popularity by default.
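Before walking through the examples, here is a minimal sketch of the common calling pattern. It assumes the legacy (pre-1.5) Kubernetes e2e framework and unversioned client API used by the examples below; the helper nodeNames and the client import path are illustrative assumptions, not taken from the examples themselves.

package example

import (
    client "k8s.io/kubernetes/pkg/client/unversioned" // assumed legacy client package
    "k8s.io/kubernetes/test/e2e/framework"
)

// nodeNames returns the names of all schedulable nodes in the cluster.
// ListSchedulableNodesOrDie fails the running test immediately if the node
// list cannot be retrieved, so no error is returned to the caller.
func nodeNames(c *client.Client) []string {
    nodeList := framework.ListSchedulableNodesOrDie(c) // *api.NodeList
    names := make([]string, 0, len(nodeList.Items))
    for _, n := range nodeList.Items {
        names = append(names, n.ObjectMeta.Name)
    }
    return names
}

In the examples that follow, the same call appears at the start of setup or verification code and is then followed by per-node work such as labelling nodes, picking a node, or creating one pod per node.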
Example 1: setup
func (config *KubeProxyTestConfig) setup() {
    By("creating a selector")
    selectorName := "selector-" + string(util.NewUUID())
    serviceSelector := map[string]string{
        selectorName: "true",
    }
    By("Getting node addresses")
    nodeList := framework.ListSchedulableNodesOrDie(config.f.Client)
    config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
    if len(config.externalAddrs) < 2 {
        // fall back to legacy IPs
        config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
    }
    Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
    config.nodes = nodeList.Items

    if enableLoadBalancerTest {
        By("Creating the LoadBalancer Service on top of the pods in kubernetes")
        config.createLoadBalancerService(serviceSelector)
    }

    By("Creating the service pods in kubernetes")
    podName := "netserver"
    config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

    By("Creating the service on top of the pods in kubernetes")
    config.createNodePortService(serviceSelector)

    By("Creating test pods")
    config.createTestPods()
}
Example 2: testReboot
func testReboot(c *client.Client, rebootCmd string) {
    // Get all nodes, and kick off the test on each.
    nodelist := framework.ListSchedulableNodesOrDie(c)
    result := make([]bool, len(nodelist.Items))
    wg := sync.WaitGroup{}
    wg.Add(len(nodelist.Items))

    failed := false
    for ix := range nodelist.Items {
        go func(ix int) {
            defer wg.Done()
            n := nodelist.Items[ix]
            result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd)
            if !result[ix] {
                failed = true
            }
        }(ix)
    }

    // Wait for all to finish and check the final result.
    wg.Wait()

    if failed {
        for ix := range nodelist.Items {
            n := nodelist.Items[ix]
            if !result[ix] {
                framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
            }
        }
        framework.Failf("Test failed; at least one node failed to reboot in the time given.")
    }
}
Example 3: pickNode
func pickNode(c *client.Client) (string, error) {
    // TODO: investigate why it doesn't work on master Node.
    nodes := framework.ListSchedulableNodesOrDie(c)
    if len(nodes.Items) == 0 {
        return "", fmt.Errorf("no nodes exist, can't test node proxy")
    }
    return nodes.Items[0].Name, nil
}
Example 4: clearDaemonSetNodeLabels
func clearDaemonSetNodeLabels(c *client.Client) error {
    nodeList := framework.ListSchedulableNodesOrDie(c)
    for _, node := range nodeList.Items {
        _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
        if err != nil {
            return err
        }
    }
    return nil
}
Example 5: checkNodesVersions
func checkNodesVersions(c *client.Client, want string) error {
    l := framework.ListSchedulableNodesOrDie(c)
    for _, n := range l.Items {
        // We do prefix trimming and then matching because:
        // want looks like:   0.19.3-815-g50e67d4
        // kv/kpv look like:  v0.19.3-815-g50e67d4034e858-dirty
        kv, kpv := strings.TrimPrefix(n.Status.NodeInfo.KubeletVersion, "v"),
            strings.TrimPrefix(n.Status.NodeInfo.KubeProxyVersion, "v")
        if !strings.HasPrefix(kv, want) {
            return fmt.Errorf("node %s had kubelet version %s which does not start with %s",
                n.ObjectMeta.Name, kv, want)
        }
        if !strings.HasPrefix(kpv, want) {
            return fmt.Errorf("node %s had kube-proxy version %s which does not start with %s",
                n.ObjectMeta.Name, kpv, want)
        }
    }
    return nil
}
Example 6: createNetProxyPods
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
    nodes := framework.ListSchedulableNodesOrDie(config.f.Client)

    // create pods, one for each node
    createdPods := make([]*api.Pod, 0, len(nodes.Items))
    for i, n := range nodes.Items {
        podName := fmt.Sprintf("%s-%d", podName, i)
        pod := config.createNetShellPodSpec(podName, n.Name)
        pod.ObjectMeta.Labels = selector
        createdPod := config.createPod(pod)
        createdPods = append(createdPods, createdPod)
    }

    // wait until all of them are up
    runningPods := make([]*api.Pod, 0, len(nodes.Items))
    for _, p := range createdPods {
        framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
        rp, err := config.getPodClient().Get(p.Name)
        framework.ExpectNoError(err)
        runningPods = append(runningPods, rp)
    }
    return runningPods
}
Example 7:
                        Image: image,
                        Ports: []api.ContainerPort{{ContainerPort: 9376}},
                    },
                },
            },
        },
    },
})
Expect(err).NotTo(HaveOccurred())

By("Initially, daemon pods should not be running on any nodes.")
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

By("Change label of node, check that daemon pod is launched.")
nodeList := framework.ListSchedulableNodesOrDie(f.Client)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
Expect(len(daemonSetLabels)).To(Equal(1))
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

By("remove the node selector and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
    NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

By("We should now be able to delete the daemon set.")
Example 8:
    framework.ExpectNoError(err)
})

It("should grab all metrics from API server.", func() {
    By("Connecting to /metrics endpoint")
    unknownMetrics := sets.NewString()
    response, err := grabber.GrabFromApiServer(unknownMetrics)
    framework.ExpectNoError(err)
    Expect(unknownMetrics).To(BeEmpty())
    checkMetrics(metrics.Metrics(response), metrics.KnownApiServerMetrics)
})

It("should grab all metrics from a Kubelet.", func() {
    By("Proxying to Node through the API server")
    nodes := framework.ListSchedulableNodesOrDie(c)
    Expect(nodes.Items).NotTo(BeEmpty())
    response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
    framework.ExpectNoError(err)
    checkNecessaryMetrics(metrics.Metrics(response), metrics.NecessaryKubeletMetrics)
})

It("should grab all metrics from a Scheduler.", func() {
    By("Proxying to Pod through the API server")
    // Check if master Node is registered
    nodes, err := c.Nodes().List(api.ListOptions{})
    framework.ExpectNoError(err)
    var masterRegistered = false
    for _, node := range nodes.Items {
        if strings.HasSuffix(node.Name, "master") {
Example 9: runClientServerBandwidthMeasurement
func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int, maxBandwidthBits int64) {
    // TODO: Make this a function parameter, once we distribute iperf endpoints, possibly via session affinity.
    numServer := 1

    It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
        nodes := framework.ListSchedulableNodesOrDie(f.Client)
        totalPods := len(nodes.Items)
        // For a single service we expect the bandwidth to be shared across the network; this is a very crude estimate.
        expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
        Expect(totalPods).NotTo(Equal(0))
        appName := "iperf-e2e"
        err, _ := f.CreateServiceForSimpleAppWithPods(
            8001,
            8002,
            appName,
            func(n api.Node) api.PodSpec {
                return api.PodSpec{
                    Containers: []api.Container{{
                        Name:  "iperf-server",
                        Image: "gcr.io/google_containers/iperf:e2e",
                        Args: []string{
                            "/bin/sh",
                            "-c",
                            "/usr/local/bin/iperf -s -p 8001 ",
                        },
                        Ports: []api.ContainerPort{{ContainerPort: 8001}},
                    }},
                    NodeName:      n.Name,
                    RestartPolicy: api.RestartPolicyOnFailure,
                }
            },
            // this will be used to generate the -service name which all iperf clients point at.
            numServer, // Generally should be 1 server unless we do affinity or use a version of iperf that supports LB
            true,      // Make sure we wait, otherwise all the clients will die and need to restart.
        )
        if err != nil {
            framework.Failf("Fatal error waiting for iperf server endpoint : %v", err)
        }

        iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
            "iperf-e2e-cli",
            func(n api.Node) api.PodSpec {
                return api.PodSpec{
                    Containers: []api.Container{
                        {
                            Name:  "iperf-client",
                            Image: "gcr.io/google_containers/iperf:e2e",
                            Args: []string{
                                "/bin/sh",
                                "-c",
                                "/usr/local/bin/iperf -c service-for-" + appName + " -p 8002 --reportstyle C && sleep 5",
                            },
                        },
                    },
                    RestartPolicy: api.RestartPolicyOnFailure, // let them successfully die.
                }
            },
            numClient,
        )

        framework.Logf("Reading all perf results to stdout.")
        framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")

        // Calculate expected number of clients based on total nodes.
        expectedCli := func() int {
            nodes, err := framework.GetReadyNodes(f)
            framework.ExpectNoError(err)
            return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
        }()

        // Extra 1/10 second per client.
        iperfTimeout := smallClusterTimeout + (time.Duration(expectedCli/10) * time.Second)
        iperfResults := &IPerfResults{}

        iperfClusterVerification := f.NewClusterVerification(
            framework.PodStateVerification{
                Selectors:   iperfClientPodLabels,
                ValidPhases: []api.PodPhase{api.PodSucceeded},
            },
        )

        pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout)
        if err2 != nil {
            framework.Failf("Error in wait...")
        } else if len(pods) < expectedCli {
            framework.Failf("IPerf results: only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
        } else {
            // ForEach builds up a collection of IPerfRecords
            iperfClusterVerification.ForEach(
                func(p api.Pod) {
                    resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
                    if err == nil {
                        framework.Logf(resultS)
                        iperfResults.Add(NewIPerf(resultS))
                    } else {
                        framework.Failf("Unexpected error, %v when running forEach on the pods.", err)
                    }
                })
        }
//......... remainder of the function omitted .........
Example 10: ClusterLevelLoggingWithElasticsearch
//......... earlier part of the function omitted .........
    body, err = proxyRequest.Namespace(api.NamespaceSystem).
        Name("elasticsearch-logging").
        Suffix("_cluster/health").
        Param("level", "indices").
        DoRaw()
    if err != nil {
        continue
    }
    health, err := bodyToJSON(body)
    if err != nil {
        framework.Logf("Bad json response from elasticsearch: %v", err)
        continue
    }
    statusIntf, ok := health["status"]
    if !ok {
        framework.Logf("No status field found in cluster health response: %v", health)
        continue
    }
    status := statusIntf.(string)
    if status != "green" && status != "yellow" {
        framework.Logf("Cluster health has bad status: %v", health)
        continue
    }
    if err == nil && ok {
        healthy = true
        break
    }
}
if !healthy {
    framework.Failf("After %v elasticsearch cluster is not healthy", graceTime)
}

// Obtain a list of nodes so we can place one synthetic logger on each node.
nodes := framework.ListSchedulableNodesOrDie(f.Client)
nodeCount := len(nodes.Items)
if nodeCount == 0 {
    framework.Failf("Failed to find any nodes")
}
framework.Logf("Found %d nodes.", len(nodes.Items))

// Filter out unhealthy nodes.
// Previous tests may have caused failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
framework.FilterNodes(nodes, func(node api.Node) bool {
    return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
})
if len(nodes.Items) < 2 {
    framework.Failf("Less than two nodes were found Ready: %d", len(nodes.Items))
}
framework.Logf("Found %d healthy nodes.", len(nodes.Items))

// Wait for the Fluentd pods to enter the running state.
By("Checking to make sure the Fluentd pods are running on each healthy node")
label = labels.SelectorFromSet(labels.Set(map[string]string{k8sAppKey: fluentdValue}))
options = api.ListOptions{LabelSelector: label}
fluentdPods, err := f.Client.Pods(api.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range fluentdPods.Items {
    if nodeInNodeList(pod.Spec.NodeName, nodes) {
        err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
        Expect(err).NotTo(HaveOccurred())
    }
}

// Check if each healthy node has fluentd running on it
for _, node := range nodes.Items {