This article collects typical usage examples of the Golang Client.Nodes method from k8s.io/kubernetes/pkg/client. If you are wondering how Client.Nodes is used in practice, the hand-picked examples below may help. You can also explore further usage examples of the containing type, k8s.io/kubernetes/pkg/client.Client.
The following shows 10 code examples of the Client.Nodes method, sorted by popularity by default.
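Before the examples, here is a minimal sketch of the method's surface: Client.Nodes() returns a node interface whose List, Get, Update and UpdateStatus calls appear throughout the snippets below. The client construction shown here is an assumption for illustration; the examples on this page obtain their client via test helpers such as loadClient().

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/client"
    "k8s.io/kubernetes/pkg/fields"
    "k8s.io/kubernetes/pkg/labels"
)

func main() {
    // Assumed client construction; adjust Host to your apiserver.
    c, err := client.New(&client.Config{Host: "http://127.0.0.1:8080"})
    if err != nil {
        panic(err)
    }
    // List all nodes, then fetch each one by name via Get.
    nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
    if err != nil {
        panic(err)
    }
    for _, n := range nodes.Items {
        node, err := c.Nodes().Get(n.Name)
        if err != nil {
            continue
        }
        fmt.Println(node.Name)
    }
}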
Example 1: CheckCadvisorHealthOnAllNodes
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
    By("getting list of nodes")
    nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
    expectNoError(err)
    var errors []error
    retries := maxRetries
    for {
        errors = []error{}
        for _, node := range nodeList.Items {
            // cadvisor is not accessible directly unless its port (4194 by default) is exposed.
            // Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
            statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
            By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
            _, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
            if err != nil {
                errors = append(errors, err)
            }
        }
        if len(errors) == 0 {
            return
        }
        if retries--; retries <= 0 {
            break
        }
        Logf("failed to retrieve kubelet stats -\n %v", errors)
        time.Sleep(sleepDuration)
    }
    Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
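For context, a sketch of how this helper might be wired into a Ginkgo spec, mirroring the BeforeEach/loadClient pattern used in examples 6 and 8 below. The spec name and the timeout value are illustrative assumptions, not the e2e suite's exact code.

// Hypothetical spec that drives CheckCadvisorHealthOnAllNodes.
var _ = Describe("Cadvisor", func() {
    var c *client.Client
    BeforeEach(func() {
        var err error
        c, err = loadClient()
        expectNoError(err)
    })
    It("should be healthy on every node.", func() {
        // Timeout chosen for illustration only.
        CheckCadvisorHealthOnAllNodes(c, 5*time.Minute)
    })
})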
Example 2: getAllNodesInCluster
func getAllNodesInCluster(c *client.Client) ([]string, error) {
    nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
    if err != nil {
        return nil, err
    }
    result := []string{}
    for _, node := range nodeList.Items {
        result = append(result, node.Name)
    }
    return result, nil
}
Example 3: getMinionPublicIps
func getMinionPublicIps(c *client.Client) ([]string, error) {
    nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
    if err != nil {
        return nil, err
    }
    ips := collectAddresses(nodes, api.NodeExternalIP)
    if len(ips) == 0 {
        ips = collectAddresses(nodes, api.NodeLegacyHostIP)
    }
    return ips, nil
}
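The collectAddresses helper is referenced above but not shown on this page. A plausible sketch, assumed from the call site rather than taken from the suite: it walks each node's status addresses and keeps those of the requested type.

// Assumed shape of collectAddresses; not the e2e suite's exact code.
func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
    ips := []string{}
    for _, node := range nodes.Items {
        for _, addr := range node.Status.Addresses {
            if addr.Type == addressType {
                ips = append(ips, addr.Address)
            }
        }
    }
    return ips
}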
Example 4: waitForNode
func waitForNode(client *kclient.Client, t *testing.T) *kapi.NodeList {
    for i := 0; i < 25; i++ {
        time.Sleep(200 * time.Millisecond)
        nodeList, err := client.Nodes().List(labels.LabelSelector{}, fields.Everything())
        if err != nil {
            t.Fatalf("unexpected error fetching node list: %v", err)
        }
        if len(nodeList.Items) == 0 {
            continue
        }
        return nodeList
    }
    t.Fatal("Waited 5 seconds for the all-in-one node to register itself; giving up")
    return nil
}
Example 5: waitForClusterSize
func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
    for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
        nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
        if err != nil {
            Logf("Failed to list nodes: %v", err)
            continue
        }
        // Filter out not-ready nodes.
        filterNodes(nodes, func(node api.Node) bool {
            return isNodeReadySetAsExpected(&node, true)
        })
        if len(nodes.Items) == size {
            Logf("Cluster has reached the desired size %d", size)
            return nil
        }
        Logf("Waiting for cluster size %d, current size %d", size, len(nodes.Items))
    }
    return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
}
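filterNodes and isNodeReadySetAsExpected are framework helpers that are not reproduced here. A sketch of how an in-place filter of that shape might look; this is an assumption derived from the call above, not the suite's exact implementation.

// filterNodes keeps only the nodes for which fn returns true.
func filterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) {
    var kept []api.Node
    for _, node := range nodeList.Items {
        if fn(node) {
            kept = append(kept, node)
        }
    }
    nodeList.Items = kept
}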
Example 6:
// This test does nothing except check the current resource usage of the containers defined in the
// kubelet_stats systemContainers variable.
// The test fails if the average container resource consumption over datapointAmount samples exceeds
// the amount defined in allowedUsage.
var _ = Describe("ResourceUsage", func() {
    var c *client.Client
    BeforeEach(func() {
        var err error
        c, err = loadClient()
        expectNoError(err)
    })
    It("should not exceed expected amount.", func() {
        By("Getting ResourceConsumption on all nodes")
        nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
        expectNoError(err)
        resourceUsagePerNode := make(map[string][]resourceUsagePerContainer)
        for i := 0; i < datapointAmount; i++ {
            for _, node := range nodeList.Items {
                resourceUsage, err := getOneTimeResourceUsageOnNode(c, node.Name, 5*time.Second)
                expectNoError(err)
                resourceUsagePerNode[node.Name] = append(resourceUsagePerNode[node.Name], resourceUsage)
            }
            time.Sleep(3 * time.Second)
        }
        averageResourceUsagePerNode := make(map[string]resourceUsagePerContainer)
        for _, node := range nodeList.Items {
            // ......... (rest of the example omitted) .........
Example 7:
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
newSVCByName(c, ns, name)
replicas := testContext.CloudConfig.NumNodes
newRCByName(c, ns, name, replicas)
err := verifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")

By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := c.Pods(ns).List(label, fields.Everything()) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName
node, err := c.Nodes().Get(nodeName)
Expect(err).NotTo(HaveOccurred())

By(fmt.Sprintf("block network traffic from node %s", node.Name))
performTemporaryNetworkFailure(c, ns, name, replicas, pods.Items[0].Name, node)
Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !waitForNodeToBe(c, node.Name, true, resizeNodeReadyTimeout) {
    Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}

By("verify whether new pods can be created on the re-attached node")
// increasing the RC size is not a valid way to test this
// since we have no guarantees the pod will be scheduled on our node.
additionalPod := "additionalpod"
err = newPodOnNode(c, ns, additionalPod, node.Name)
Expect(err).NotTo(HaveOccurred())
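waitForNodeToBe above (like waitForNodeToBeReady/waitForNodeToBeNotReady in example 10) is not shown on this page; it presumably polls the node through c.Nodes().Get and inspects its NodeReady condition. A hedged sketch of that pattern follows, with the function name, poll interval, and structure being assumptions for illustration.

// waitForNodeCondition polls until the node's NodeReady condition matches wantReady
// or the timeout expires. Name and poll interval are assumptions, not the suite's code.
func waitForNodeCondition(c *client.Client, name string, wantReady bool, timeout time.Duration) bool {
    for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
        node, err := c.Nodes().Get(name)
        if err != nil {
            Logf("Couldn't get node %s: %v", name, err)
            continue
        }
        for _, cond := range node.Status.Conditions {
            if cond.Type == api.NodeReady && (cond.Status == api.ConditionTrue) == wantReady {
                return true
            }
        }
    }
    return false
}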
Example 8:
// the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
var _ = Describe("Density", func() {
    var c *client.Client
    var minionCount int
    var RCName string
    var additionalPodsPrefix string
    var ns string
    var uuid string

    BeforeEach(func() {
        var err error
        c, err = loadClient()
        expectNoError(err)
        minions, err := c.Nodes().List(labels.Everything(), fields.Everything())
        expectNoError(err)
        minionCount = len(minions.Items)
        Expect(minionCount).NotTo(BeZero())
        // Terminating a namespace (deleting the remaining objects from it - which
        // generally means events) can affect the current run. Thus we wait for all
        // terminating namespaces to be finally deleted before starting this test.
        err = deleteTestingNS(c)
        expectNoError(err)
        nsForTesting, err := createTestingNS("density", c)
        ns = nsForTesting.Name
        expectNoError(err)
        uuid = string(util.NewUUID())
        // ......... (rest of the example omitted) .........
Example 9: DoTestUnschedulableNodes
func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore cache.Store) {
    goodCondition := api.NodeCondition{
        Type:              api.NodeReady,
        Status:            api.ConditionTrue,
        Reason:            fmt.Sprintf("schedulable condition"),
        LastHeartbeatTime: util.Time{time.Now()},
    }
    badCondition := api.NodeCondition{
        Type:              api.NodeReady,
        Status:            api.ConditionUnknown,
        Reason:            fmt.Sprintf("unschedulable condition"),
        LastHeartbeatTime: util.Time{time.Now()},
    }
    // Create a new schedulable node, since we're first going to apply
    // the unschedulable condition and verify that pods aren't scheduled.
    node := &api.Node{
        ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-node"},
        Spec:       api.NodeSpec{Unschedulable: false},
        Status: api.NodeStatus{
            Capacity: api.ResourceList{
                api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
            },
            Conditions: []api.NodeCondition{goodCondition},
        },
    }
    nodeKey, err := cache.MetaNamespaceKeyFunc(node)
    if err != nil {
        t.Fatalf("Couldn't retrieve key for node %v", node.Name)
    }

    // The test does the following for each nodeStateManager in this list:
    // 1. Create a new node
    // 2. Apply the makeUnSchedulable function
    // 3. Create a new pod
    // 4. Check that the pod doesn't get assigned to the node
    // 5. Apply the schedulable function
    // 6. Check that the pod *does* get assigned to the node
    // 7. Delete the pod and node.
    nodeModifications := []nodeStateManager{
        // Test node.Spec.Unschedulable=true/false
        {
            makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
                n.Spec.Unschedulable = true
                if _, err := c.Nodes().Update(n); err != nil {
                    t.Fatalf("Failed to update node with unschedulable=true: %v", err)
                }
                err = waitForReflection(s, nodeKey, func(node interface{}) bool {
                    // An unschedulable node should get deleted from the store
                    return node == nil
                })
                if err != nil {
                    t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err)
                }
            },
            makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
                n.Spec.Unschedulable = false
                if _, err := c.Nodes().Update(n); err != nil {
                    t.Fatalf("Failed to update node with unschedulable=false: %v", err)
                }
                err = waitForReflection(s, nodeKey, func(node interface{}) bool {
                    return node != nil && node.(*api.Node).Spec.Unschedulable == false
                })
                if err != nil {
                    t.Fatalf("Failed to observe reflected update for setting unschedulable=false: %v", err)
                }
            },
        },
        // Test node.Status.Conditions=ConditionTrue/Unknown
        {
            makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
                n.Status = api.NodeStatus{
                    Capacity: api.ResourceList{
                        api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
                    },
                    Conditions: []api.NodeCondition{badCondition},
                }
                if _, err = c.Nodes().UpdateStatus(n); err != nil {
                    t.Fatalf("Failed to update node with bad status condition: %v", err)
                }
                err = waitForReflection(s, nodeKey, func(node interface{}) bool {
                    return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionUnknown
                })
                if err != nil {
                    t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
                }
            },
            makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
                n.Status = api.NodeStatus{
                    Capacity: api.ResourceList{
                        api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
                    },
                    Conditions: []api.NodeCondition{goodCondition},
                }
                if _, err = c.Nodes().UpdateStatus(n); err != nil {
                    t.Fatalf("Failed to update node with healthy status condition: %v", err)
                }
                waitForReflection(s, nodeKey, func(node interface{}) bool {
                    return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionTrue
                })
//......... (some code omitted here) .........
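The nodeStateManager type that drives the table above is not included in this excerpt. Given how makeUnSchedulable and makeSchedulable are populated, it is presumably a small struct of two callbacks; the sketch below is inferred from that usage, not copied from the test file.

// Assumed definition of nodeStateManager, reconstructed from its usage above.
type nodeStateManager struct {
    makeUnSchedulable func(t *testing.T, n *api.Node, s cache.Store, c *client.Client)
    makeSchedulable   func(t *testing.T, n *api.Node, s cache.Store, c *client.Client)
}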
Example 10: rebootNode
// rebootNode takes node name on provider through the following steps using c:
//  - ensures the node is ready
//  - ensures all pods on the node are running and ready
//  - reboots the node (by executing rebootCmd over ssh)
//  - ensures the node reaches some non-ready state
//  - ensures the node becomes ready again
//  - ensures all pods on the node become running and ready again
//
// It returns true through result only if all of the steps pass; at the first
// failed step, it will return false through result and not run the rest.
func rebootNode(c *client.Client, provider, name, rebootCmd string, result chan bool) {
    // Setup
    ns := api.NamespaceSystem
    ps := newPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(client.PodHost, name))
    defer ps.Stop()

    // Get the node initially.
    Logf("Getting %s", name)
    node, err := c.Nodes().Get(name)
    if err != nil {
        Logf("Couldn't get node %s", name)
        result <- false
        return
    }

    // Node sanity check: ensure it is "ready".
    if !waitForNodeToBeReady(c, name, nodeReadyInitialTimeout) {
        result <- false
        return
    }

    // Get all the pods on the node that don't have liveness probe set.
    // Liveness probe may cause restart of a pod during node reboot, and the pod may not be running.
    pods := ps.List()
    podNames := []string{}
    for _, p := range pods {
        probe := false
        for _, c := range p.Spec.Containers {
            if c.LivenessProbe != nil {
                probe = true
                break
            }
        }
        if !probe {
            podNames = append(podNames, p.ObjectMeta.Name)
        }
    }
    Logf("Node %s has %d pods: %v", name, len(podNames), podNames)

    // For each pod, we do a sanity check to ensure it's running / healthy
    // now, as that's what we'll be checking later.
    if !checkPodsRunningReady(c, ns, podNames, podReadyBeforeTimeout) {
        result <- false
        return
    }

    // Reboot the node.
    if err = issueSSHCommand(node, provider, rebootCmd); err != nil {
        Logf("Error while issuing ssh command: %v", err)
        result <- false
        return
    }

    // Wait for some kind of "not ready" status.
    if !waitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
        result <- false
        return
    }

    // Wait for some kind of "ready" status.
    if !waitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
        result <- false
        return
    }

    // Ensure all of the pods that we found on this node before the reboot are
    // running / healthy.
    if !checkPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
        result <- false
        return
    }

    Logf("Reboot successful on node %s", name)
    result <- true
}
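To round out the example, here is a sketch of how rebootNode might be driven from a test: list the nodes, fan out one goroutine per node, and collect the results from the channel. The surrounding harness (function name, use of testContext.Provider, the Failf message) is an assumption for illustration, not the suite's exact driver.

// testReboot reboots every node in the cluster with rebootCmd and fails
// if any node does not come back healthy. Structure is a hedged sketch.
func testReboot(c *client.Client, rebootCmd string) {
    nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything())
    expectNoError(err)
    result := make(chan bool, len(nodelist.Items))
    for _, n := range nodelist.Items {
        // Each rebootNode call reports exactly one value on result.
        go rebootNode(c, testContext.Provider, n.Name, rebootCmd, result)
    }
    failed := false
    for range nodelist.Items {
        if !<-result {
            failed = true
        }
    }
    if failed {
        Failf("at least one node failed to reboot and recover in the allotted time")
    }
}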