This article collects typical usage examples of the Golang function framework.GetReadySchedulableNodesOrDie from k8s.io/kubernetes/test/e2e/framework. If you are wondering what GetReadySchedulableNodesOrDie does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following 15 code examples of GetReadySchedulableNodesOrDie are shown, sorted by popularity by default.
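As the examples show, the function takes a client for the cluster (an older *client.Client or a newer clientset.Interface, depending on the vintage of the framework) and returns a node list whose .Items contains only nodes that are ready and schedulable; it fails the test outright if the list cannot be retrieved, hence the "OrDie" suffix. A minimal sketch of the common call pattern, with countReadyNodes as a hypothetical helper name:

func countReadyNodes(c clientset.Interface) int {
    // GetReadySchedulableNodesOrDie filters out unschedulable and
    // not-ready nodes, and fails the test if listing nodes fails.
    nodes := framework.GetReadySchedulableNodesOrDie(c)
    return len(nodes.Items)
}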
Example 1: createNetProxyPods
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodes := framework.GetReadySchedulableNodesOrDie(config.f.Client)

    // create pods, one for each node
    createdPods := make([]*api.Pod, 0, len(nodes.Items))
    for i, n := range nodes.Items {
        podName := fmt.Sprintf("%s-%d", podName, i)
        pod := config.createNetShellPodSpec(podName, n.Name)
        pod.ObjectMeta.Labels = selector
        createdPod := config.createPod(pod)
        createdPods = append(createdPods, createdPod)
    }

    // wait until all of them are up
    runningPods := make([]*api.Pod, 0, len(nodes.Items))
    for _, p := range createdPods {
        framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
        rp, err := config.getPodClient().Get(p.Name)
        framework.ExpectNoError(err)
        runningPods = append(runningPods, rp)
    }
    return runningPods
}
Example 2: testReboot
func testReboot(c *client.Client, rebootCmd string) {
    // Get all nodes, and kick off the test on each.
    nodelist := framework.GetReadySchedulableNodesOrDie(c)
    result := make([]bool, len(nodelist.Items))
    wg := sync.WaitGroup{}
    wg.Add(len(nodelist.Items))

    failed := false
    for ix := range nodelist.Items {
        go func(ix int) {
            defer wg.Done()
            n := nodelist.Items[ix]
            result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd)
            if !result[ix] {
                failed = true
            }
        }(ix)
    }

    // Wait for all to finish and check the final result.
    wg.Wait()

    if failed {
        for ix := range nodelist.Items {
            n := nodelist.Items[ix]
            if !result[ix] {
                framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
            }
        }
        framework.Failf("Test failed; at least one node failed to reboot in the time given.")
    }
}
Example 3: isTestEnabled
func isTestEnabled(c clientset.Interface) bool {
    // Enable the test on node e2e if the node image is GCI.
    nodeName := framework.TestContext.NodeName
    if nodeName != "" {
        if strings.Contains(nodeName, "-gci-dev-") {
            gciVersionRe := regexp.MustCompile("-gci-dev-([0-9]+)-")
            matches := gciVersionRe.FindStringSubmatch(framework.TestContext.NodeName)
            if len(matches) == 2 {
                version, err := strconv.Atoi(matches[1])
                if err != nil {
                    glog.Errorf("Error parsing GCI version from NodeName %q: %v", nodeName, err)
                    return false
                }
                return version >= 54
            }
        }
        return false
    }

    // For a cluster e2e test, nodeName is empty, so retrieve the node objects
    // from the API server and check their images. For now, only run NFSv4 and
    // GlusterFS tests if all nodes are using the GCI image.
    nodes := framework.GetReadySchedulableNodesOrDie(c)
    for _, node := range nodes.Items {
        if !strings.Contains(node.Status.NodeInfo.OSImage, "Google Container-VM") {
            return false
        }
    }
    return true
}
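To make the node-e2e version gate concrete: the regexp captures the digits between "-gci-dev-" and the next dash, and the test runs only when that number is at least 54. A tiny standalone check of the extraction, using a made-up node name for illustration:

package main

import (
    "fmt"
    "regexp"
    "strconv"
)

func main() {
    gciVersionRe := regexp.MustCompile("-gci-dev-([0-9]+)-")
    // "tmp-node-gci-dev-55-a1b2" is a hypothetical node name.
    matches := gciVersionRe.FindStringSubmatch("tmp-node-gci-dev-55-a1b2")
    if len(matches) == 2 {
        version, err := strconv.Atoi(matches[1])
        fmt.Println(err == nil && version >= 54) // prints: true
    }
}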
Example 4: setup
func (config *KubeProxyTestConfig) setup() {
    By("creating a selector")
    selectorName := "selector-" + string(util.NewUUID())
    serviceSelector := map[string]string{
        selectorName: "true",
    }

    By("Getting node addresses")
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
    config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
    if len(config.externalAddrs) < 2 {
        // fall back to legacy IPs
        config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
    }
    Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
    config.nodes = nodeList.Items

    if enableLoadBalancerTest {
        By("Creating the LoadBalancer Service on top of the pods in kubernetes")
        config.createLoadBalancerService(serviceSelector)
    }

    By("Creating the service pods in kubernetes")
    podName := "netserver"
    config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

    By("Creating the service on top of the pods in kubernetes")
    config.createNodePortService(serviceSelector)

    By("Creating test pods")
    config.createTestPods()
}
Example 5: setup
// setup includes setupCore and also sets up services
func (config *NetworkingTestConfig) setup(selector map[string]string) {
    config.setupCore(selector)

    By("Getting node addresses")
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
    config.ExternalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
    if len(config.ExternalAddrs) < 2 {
        // fall back to legacy IPs
        config.ExternalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
    }
    Expect(len(config.ExternalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
    config.Nodes = nodeList.Items

    By("Creating the service on top of the pods in kubernetes")
    config.createNodePortService(selector)

    for _, p := range config.NodePortService.Spec.Ports {
        switch p.Protocol {
        case api.ProtocolUDP:
            config.NodeUdpPort = int(p.NodePort)
        case api.ProtocolTCP:
            config.NodeHttpPort = int(p.NodePort)
        default:
            continue
        }
    }
    config.ClusterIP = config.NodePortService.Spec.ClusterIP
    config.NodeIP = config.ExternalAddrs[0]
}
Example 6: createNetProxyPods
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)

    // To make this test work reasonably fast in large clusters,
    // we limit the number of NetProxyPods to maxNetProxyPodsCount,
    // placed on randomly chosen nodes.
    nodes := shuffleNodes(nodeList.Items)
    if len(nodes) > maxNetProxyPodsCount {
        nodes = nodes[:maxNetProxyPodsCount]
    }

    // create pods, one for each node
    createdPods := make([]*api.Pod, 0, len(nodes))
    for i, n := range nodes {
        podName := fmt.Sprintf("%s-%d", podName, i)
        pod := config.createNetShellPodSpec(podName, n.Name)
        pod.ObjectMeta.Labels = selector
        createdPod := config.createPod(pod)
        createdPods = append(createdPods, createdPod)
    }

    // wait until all of them are up
    runningPods := make([]*api.Pod, 0, len(nodes))
    for _, p := range createdPods {
        framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
        rp, err := config.getPodClient().Get(p.Name)
        framework.ExpectNoError(err)
        runningPods = append(runningPods, rp)
    }
    return runningPods
}
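The shuffleNodes helper is referenced but not shown above. A minimal sketch of how such a helper could look, assuming the same api.Node type and the standard library's math/rand package (a hypothetical reconstruction, not necessarily the framework's exact code):

func shuffleNodes(nodes []api.Node) []api.Node {
    // rand.Perm yields a random permutation of indices; place each
    // input node at its permuted position in the output slice.
    shuffled := make([]api.Node, len(nodes))
    for i, j := range rand.Perm(len(nodes)) {
        shuffled[j] = nodes[i]
    }
    return shuffled
}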
Example 7: pickNode
func pickNode(c *client.Client) (string, error) {
    // TODO: investigate why it doesn't work on the master node.
    nodes := framework.GetReadySchedulableNodesOrDie(c)
    if len(nodes.Items) == 0 {
        return "", fmt.Errorf("no nodes exist, can't test node proxy")
    }
    return nodes.Items[0].Name, nil
}
Example 8: nodeHasDiskPressure
func nodeHasDiskPressure(cs clientset.Interface) bool {
    // Note: only the first node's conditions are checked.
    nodeList := framework.GetReadySchedulableNodesOrDie(cs)
    for _, condition := range nodeList.Items[0].Status.Conditions {
        if condition.Type == api.NodeDiskPressure {
            return condition.Status == api.ConditionTrue
        }
    }
    return false
}
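Because the example above inspects only nodeList.Items[0], it suits a single-node setup; a cluster-wide variant would need to scan every node. A minimal sketch under the same API assumptions (anyNodeHasDiskPressure is a hypothetical name, not part of the framework):

func anyNodeHasDiskPressure(cs clientset.Interface) bool {
    nodeList := framework.GetReadySchedulableNodesOrDie(cs)
    for _, node := range nodeList.Items {
        for _, condition := range node.Status.Conditions {
            // Report true as soon as any node has DiskPressure set.
            if condition.Type == api.NodeDiskPressure && condition.Status == api.ConditionTrue {
                return true
            }
        }
    }
    return false
}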
Example 9: CleanupNodes
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
    nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
    for i := range nodes.Items {
        if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &v1.DeleteOptions{}); err != nil {
            glog.Errorf("Error while deleting Node: %v", err)
        }
    }
    return nil
}
Example 10: nodesWithPoolLabel
// nodesWithPoolLabel returns the number of nodes that have the "gke-nodepool"
// label with the given node pool name.
func nodesWithPoolLabel(f *framework.Framework, poolName string) int {
    nodeCount := 0
    nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
    for _, node := range nodeList.Items {
        if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {
            nodeCount++
        }
    }
    return nodeCount
}
Example 11: RunLogPodsWithSleepOf
// RunLogPodsWithSleepOf creates a pod on every node, logs continuously (with "sleep" pauses), and verifies that the log string
// was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {
    nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
    totalPods := len(nodes.Items)
    Expect(totalPods).NotTo(Equal(0))

    kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text.

    appName := "logging-soak" + podname
    podLabels := f.CreatePodsPerNodeForSimpleApp(
        appName,
        func(n v1.Node) v1.PodSpec {
            return v1.PodSpec{
                Containers: []v1.Container{{
                    Name:  "logging-soak",
                    Image: "gcr.io/google_containers/busybox:1.24",
                    Args: []string{
                        "/bin/sh",
                        "-c",
                        fmt.Sprintf("while true ; do echo %v ; sleep %v; done", kilobyte, sleep.Seconds()),
                    },
                }},
                NodeName:      n.Name,
                RestartPolicy: v1.RestartPolicyAlways,
            }
        },
        totalPods,
    )

    logSoakVerification := f.NewClusterVerification(
        f.Namespace,
        framework.PodStateVerification{
            Selectors:   podLabels,
            ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
            // we don't validate total log data, since there is no guarantee all logs will be stored forever.
            // instead, we just validate that some logs are being created in std out.
            Verify: func(p v1.Pod) (bool, error) {
                s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
                return s != "", err
            },
        },
    )

    largeClusterForgiveness := time.Duration(len(nodes.Items)/5) * time.Second // i.e. a 100 node cluster gets an extra 20 seconds to complete.
    pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)
    if err != nil {
        framework.Failf("Error in wait... %v", err)
    } else if len(pods) < totalPods {
        framework.Failf("Only got %v out of %v", len(pods), totalPods)
    }
}
Example 12: getExpectReplicasFuncLinear
func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc {
    return func(c clientset.Interface) int {
        var replicasFromNodes float64
        var replicasFromCores float64
        nodes := framework.GetReadySchedulableNodesOrDie(c).Items
        if params.nodesPerReplica > 0 {
            replicasFromNodes = math.Ceil(float64(len(nodes)) / params.nodesPerReplica)
        }
        if params.coresPerReplica > 0 {
            replicasFromCores = math.Ceil(float64(getScheduableCores(nodes)) / params.coresPerReplica)
        }
        return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
    }
}
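The expected replica count follows the linear formula replicas = max(1, max(ceil(nodes/nodesPerReplica), ceil(cores/coresPerReplica))); for example, 10 nodes with 4 cores each under nodesPerReplica=16 and coresPerReplica=256 yield max(1, max(ceil(10/16), ceil(40/256))) = 1 replica. The getScheduableCores helper is not shown above; a minimal sketch that sums CPU capacity over the given nodes, assuming the v1.Node and resource types used elsewhere on this page (a hypothetical reconstruction):

func getScheduableCores(nodes []v1.Node) int64 {
    // Sum CPU capacity across the nodes; the list returned by
    // GetReadySchedulableNodesOrDie is already filtered for schedulability.
    var sc resource.Quantity
    for _, node := range nodes {
        sc.Add(node.Status.Capacity[v1.ResourceCPU])
    }
    cores, ok := sc.AsInt64()
    if !ok {
        return 0
    }
    return cores
}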
Example 13: PrepareNodes
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
    numNodes := 0
    for _, v := range p.countToStrategy {
        numNodes += v.Count
    }

    glog.Infof("Making %d nodes", numNodes)
    baseNode := &v1.Node{
        ObjectMeta: v1.ObjectMeta{
            GenerateName: p.nodeNamePrefix,
        },
        Spec: v1.NodeSpec{
            // TODO: investigate why this is needed.
            ExternalID: "foo",
        },
        Status: v1.NodeStatus{
            Capacity: v1.ResourceList{
                v1.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
                v1.ResourceCPU:    resource.MustParse("4"),
                v1.ResourceMemory: resource.MustParse("32Gi"),
            },
            Phase: v1.NodeRunning,
            Conditions: []v1.NodeCondition{
                {Type: v1.NodeReady, Status: v1.ConditionTrue},
            },
        },
    }
    for i := 0; i < numNodes; i++ {
        if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
            glog.Fatalf("Error creating node: %v", err)
        }
    }

    nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
    index := 0
    sum := 0
    for _, v := range p.countToStrategy {
        sum += v.Count
        for ; index < sum; index++ {
            if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
                glog.Errorf("Aborting node preparation: %v", err)
                return err
            }
        }
    }
    return nil
}
Example 14: setup
func (config *NetworkingTestConfig) setup() {
    By("creating a selector")
    selectorName := "selector-" + string(uuid.NewUUID())
    serviceSelector := map[string]string{
        selectorName: "true",
    }

    By("Getting node addresses")
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
    config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
    if len(config.externalAddrs) < 2 {
        // fall back to legacy IPs
        config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
    }
    Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
    config.nodes = nodeList.Items

    By("Creating the service pods in kubernetes")
    podName := "netserver"
    config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

    By("Creating the service on top of the pods in kubernetes")
    config.createNodePortService(serviceSelector)

    By("Creating test pods")
    config.createTestPods()

    for _, p := range config.nodePortService.Spec.Ports {
        switch p.Protocol {
        case api.ProtocolUDP:
            config.nodeUdpPort = int(p.NodePort)
        case api.ProtocolTCP:
            config.nodeHttpPort = int(p.NodePort)
        default:
            continue
        }
    }

    // scale the retry budget with the square of the endpoint count,
    // plus a fixed allowance (testTries).
    epCount := len(config.endpointPods)
    config.maxTries = epCount*epCount + testTries
    config.clusterIP = config.nodePortService.Spec.ClusterIP
    config.nodeIP = config.externalAddrs[0]
}
Example 15: checkNodesVersions
func checkNodesVersions(cs clientset.Interface, want string) error {
    l := framework.GetReadySchedulableNodesOrDie(cs)
    for _, n := range l.Items {
        // We do prefix trimming and then matching because:
        // want   looks like:  0.19.3-815-g50e67d4
        // kv/kpv look like:  v0.19.3-815-g50e67d4034e858-dirty
        kv, kpv := strings.TrimPrefix(n.Status.NodeInfo.KubeletVersion, "v"),
            strings.TrimPrefix(n.Status.NodeInfo.KubeProxyVersion, "v")
        if !strings.HasPrefix(kv, want) {
            return fmt.Errorf("node %s had kubelet version %s which does not start with %s",
                n.ObjectMeta.Name, kv, want)
        }
        if !strings.HasPrefix(kpv, want) {
            return fmt.Errorf("node %s had kube-proxy version %s which does not start with %s",
                n.ObjectMeta.Name, kpv, want)
        }
    }
    return nil
}