This article collects typical usage examples of the Golang k8s.io/kubernetes/pkg/api.Node class. If you are wondering what the Node class does in Golang and how to use it, the curated examples below may help.
The following shows 10 code examples of the Node class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
Example 1: generateNodeAndTaintedNode
func generateNodeAndTaintedNode(oldTaints []api.Taint, newTaints []api.Taint) (*api.Node, *api.Node) {
    var taintedNode *api.Node
    oldTaintsData, _ := json.Marshal(oldTaints)
    // Create a node.
    node := &api.Node{
        ObjectMeta: api.ObjectMeta{
            Name:              "node-name",
            CreationTimestamp: unversioned.Time{Time: time.Now()},
            Annotations: map[string]string{
                api.TaintsAnnotationKey: string(oldTaintsData),
            },
        },
        Spec: api.NodeSpec{
            ExternalID: "node-name",
        },
        Status: api.NodeStatus{},
    }
    clone, _ := conversion.NewCloner().DeepCopy(node)
    newTaintsData, _ := json.Marshal(newTaints)
    // A copy of the same node, but tainted.
    taintedNode = clone.(*api.Node)
    taintedNode.Annotations = map[string]string{
        api.TaintsAnnotationKey: string(newTaintsData),
    }
    return node, taintedNode
}
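A minimal usage sketch for this helper (hypothetical test code; the taint keys and values are invented for illustration, and the imports match the excerpt above):
oldTaints := []api.Taint{{Key: "dedicated", Value: "infra", Effect: api.TaintEffectNoSchedule}}
newTaints := []api.Taint{{Key: "dedicated", Value: "web", Effect: api.TaintEffectNoSchedule}}
node, taintedNode := generateNodeAndTaintedNode(oldTaints, newTaints)
// Only the taints annotation differs between the two returned objects.
fmt.Println(node.Annotations[api.TaintsAnnotationKey])
fmt.Println(taintedNode.Annotations[api.TaintsAnnotationKey])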
Example 2: Create
// Create creates a new node api object with the given hostname,
// slave attribute labels and annotations
func Create(
    client *client.Client,
    hostName string,
    slaveAttrLabels,
    annotations map[string]string,
) (*api.Node, error) {
    n := api.Node{
        ObjectMeta: api.ObjectMeta{
            Name: hostName,
        },
        Spec: api.NodeSpec{
            ExternalID: hostName,
        },
        Status: api.NodeStatus{
            Phase: api.NodePending,
        },
    }
    n.Labels = mergeMaps(
        map[string]string{"kubernetes.io/hostname": hostName},
        slaveAttrLabels,
    )
    n.Annotations = annotations
    // try to create
    return client.Nodes().Create(&n)
}
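A hedged usage sketch (the variable c stands in for a configured *client.Client; label and annotation values are illustrative only):
node, err := Create(
    c,
    "slave-host-1",
    map[string]string{"rack": "r1"},
    map[string]string{"example.com/note": "registered-by-framework"},
)
if err != nil {
    log.Fatalf("node creation failed: %v", err)
}
log.Printf("created node %s in phase %s", node.Name, node.Status.Phase)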
Example 3: newFakeNodeBuilder
func newFakeNodeBuilder(uid types.UID, name string) *FakeNodeBuilder {
    node := new(api.Node)
    node.UID = uid
    node.Name = name
    return &FakeNodeBuilder{
        node: node,
    }
}
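Usage within the same test package might look like the following sketch (only construction is shown, since the builder's other methods are not part of the excerpt):
fb := newFakeNodeBuilder(types.UID("uid-123"), "node-1")
// The builder wraps an *api.Node; the unexported field is only
// reachable inside the defining package.
fmt.Println(fb.node.Name) // node-1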
Example 4: reconcileCMADAnnotationWithExistingNode
// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed
// attach-detach annotation on a new node and the existing node, returning
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *api.Node) bool {
    var (
        existingCMAAnnotation    = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation]
        newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation]
    )
    if newCMAAnnotation == existingCMAAnnotation {
        return false
    }
    // If the just-constructed node and the existing node do
    // not have the same value, update the existing node with
    // the correct value of the annotation.
    if !newSet {
        glog.Info("Controller attach-detach setting changed to false; updating existing Node")
        delete(existingNode.Annotations, volumehelper.ControllerManagedAttachAnnotation)
    } else {
        glog.Info("Controller attach-detach setting changed to true; updating existing Node")
        if existingNode.Annotations == nil {
            existingNode.Annotations = make(map[string]string)
        }
        existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] = newCMAAnnotation
    }
    return true
}
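The boolean return tells the caller whether the reconciled node must be persisted; a hypothetical call site (not necessarily the kubelet's actual one) could look like:
if kl.reconcileCMADAnnotationWithExistingNode(node, existingNode) {
    // The annotation drifted; write the corrected node back to the API server.
    // nodeClient stands in for whatever node client the caller holds (an assumption).
    if _, err := nodeClient.Update(existingNode); err != nil {
        glog.Errorf("failed to update node %s: %v", existingNode.Name, err)
    }
}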
Example 5: addTaint
func addTaint(client *kube_client.Client, node *kube_api.Node, value string) error {
    taints, err := kube_api.GetTaintsFromNodeAnnotations(node.Annotations)
    if err != nil {
        return err
    }
    taint := kube_api.Taint{
        Key:    criticalAddonsOnlyTaintKey,
        Value:  value,
        Effect: kube_api.TaintEffectNoSchedule,
    }
    taints = append(taints, taint)
    taintsJson, err := json.Marshal(taints)
    if err != nil {
        return err
    }
    if node.Annotations == nil {
        node.Annotations = make(map[string]string)
    }
    node.Annotations[kube_api.TaintsAnnotationKey] = string(taintsJson)
    _, err = client.Nodes().Update(node)
    if err != nil {
        return err
    }
    return nil
}
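Note that this is a read-modify-write: the trailing Update can fail with a ResourceVersion conflict if the node changed in between, so callers typically retry. A minimal hypothetical call:
// Taint the node so that only pods tolerating the critical-addons-only
// taint schedule onto it (the value argument is illustrative).
if err := addTaint(client, node, "true"); err != nil {
    glog.Errorf("failed to taint node %s: %v", node.Name, err)
}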
Example 6: Create
// Create creates a new node api object with the given hostname,
// slave attribute labels and annotations
func Create(
    client unversionedcore.NodesGetter,
    hostName string,
    slaveAttrLabels,
    annotations map[string]string,
) (*api.Node, error) {
    n := api.Node{
        ObjectMeta: api.ObjectMeta{
            Name: hostName,
        },
        Spec: api.NodeSpec{
            ExternalID: hostName,
        },
        Status: api.NodeStatus{
            Phase: api.NodePending,
            // WORKAROUND(sttts): make sure that the Ready condition is the
            // first one. The kube-ui v3 depends on this assumption.
            // TODO(sttts): remove this workaround when kube-ui v4 is used or we
            // merge this with the statusupdate in the controller manager.
            Conditions: []api.NodeCondition{
                {
                    Type:              api.NodeReady,
                    Status:            api.ConditionTrue,
                    Reason:            slaveReadyReason,
                    Message:           slaveReadyMessage,
                    LastHeartbeatTime: unversioned.Now(),
                },
            },
        },
    }
    n.Labels = mergeMaps(
        map[string]string{"kubernetes.io/hostname": hostName},
        slaveAttrLabels,
    )
    n.Annotations = annotations
    // try to create
    return client.Nodes().Create(&n)
}
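Both Create variants call a package-local mergeMaps helper that the excerpts do not show; a plausible implementation (an assumption, with later maps overriding earlier keys) is:
func mergeMaps(maps ...map[string]string) map[string]string {
    merged := make(map[string]string)
    for _, m := range maps {
        for k, v := range m {
            merged[k] = v
        }
    }
    return merged
}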
Example 7: updateNode
func updateNode(c *gin.Context) {
    nodename := c.Param("no")
    nodejson := c.PostForm("json")
    var node api.Node
    err := json.Unmarshal([]byte(nodejson), &node)
    if err != nil {
        c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
        return
    }
    // Fetch the current object so the update carries a valid ResourceVersion.
    r, err := kubeclient.Get().Nodes().Get(node.Name)
    if err != nil {
        c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
        return
    }
    node.ResourceVersion = r.ResourceVersion
    _, err = kubeclient.Get().Nodes().Update(&node)
    if err != nil {
        c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
        return
    }
    c.Redirect(http.StatusMovedPermanently, fmt.Sprintf("/nodes/%s/edit", nodename))
}
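The route this handler is mounted on is not part of the excerpt; given c.Param("no") and the redirect target, registration presumably resembles the following sketch (the exact path is an assumption):
router := gin.Default()
// ":no" supplies the node name path parameter read by c.Param("no").
router.POST("/nodes/:no", updateNode)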
Example 8: TestBuildSummary
func TestBuildSummary(t *testing.T) {
    node := api.Node{}
    node.Name = "FooNode"
    nodeConfig := cm.NodeConfig{
        RuntimeCgroupsName: "/docker-daemon",
        SystemCgroupsName:  "/system",
        KubeletCgroupsName: "/kubelet",
    }
    const (
        namespace0 = "test0"
        namespace2 = "test2"
    )
    const (
        seedRoot           = 0
        seedRuntime        = 100
        seedKubelet        = 200
        seedMisc           = 300
        seedPod0Infra      = 1000
        seedPod0Container0 = 2000
        seedPod0Container1 = 2001
        seedPod1Infra      = 3000
        seedPod1Container  = 4000
        seedPod2Infra      = 5000
        seedPod2Container  = 6000
    )
    const (
        pName0 = "pod0"
        pName1 = "pod1"
        pName2 = "pod0" // ensure pName2 conflicts with pName0, but is in a different namespace
    )
    const (
        cName00 = "c0"
        cName01 = "c1"
        cName10 = "c0" // ensure cName10 conflicts with cName00, but is in a different pod
        cName20 = "c1" // ensure cName20 conflicts with cName01, but is in a different pod + namespace
    )
    prf0 := kubestats.PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0}
    prf1 := kubestats.PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1}
    prf2 := kubestats.PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2}
    infos := map[string]v2.ContainerInfo{
        "/":              summaryTestContainerInfo(seedRoot, "", "", ""),
        "/docker-daemon": summaryTestContainerInfo(seedRuntime, "", "", ""),
        "/kubelet":       summaryTestContainerInfo(seedKubelet, "", "", ""),
        "/system":        summaryTestContainerInfo(seedMisc, "", "", ""),
        // Pod0 - Namespace0
        "/pod0-i":  summaryTestContainerInfo(seedPod0Infra, pName0, namespace0, leaky.PodInfraContainerName),
        "/pod0-c0": summaryTestContainerInfo(seedPod0Container0, pName0, namespace0, cName00),
        "/pod0-c1": summaryTestContainerInfo(seedPod0Container1, pName0, namespace0, cName01),
        // Pod1 - Namespace0
        "/pod1-i":  summaryTestContainerInfo(seedPod1Infra, pName1, namespace0, leaky.PodInfraContainerName),
        "/pod1-c0": summaryTestContainerInfo(seedPod1Container, pName1, namespace0, cName10),
        // Pod2 - Namespace2
        "/pod2-i":  summaryTestContainerInfo(seedPod2Infra, pName2, namespace2, leaky.PodInfraContainerName),
        "/pod2-c0": summaryTestContainerInfo(seedPod2Container, pName2, namespace2, cName20),
    }
    rootfs := v2.FsInfo{}
    imagefs := v2.FsInfo{}
    // memory limit overrides for each container (used to test available bytes if a memory limit is known)
    memoryLimitOverrides := map[string]uint64{
        "/":        uint64(1 << 30),
        "/pod2-c0": uint64(1 << 15),
    }
    for name, memoryLimitOverride := range memoryLimitOverrides {
        info, found := infos[name]
        if !found {
            t.Errorf("No container defined with name %v", name)
        }
        info.Spec.Memory.Limit = memoryLimitOverride
        infos[name] = info
    }
    sb := &summaryBuilder{
        newFsResourceAnalyzer(&MockStatsProvider{}, time.Minute*5), &node, nodeConfig, rootfs, imagefs, container.ImageStats{}, infos}
    summary, err := sb.build()
    assert.NoError(t, err)
    nodeStats := summary.Node
    assert.Equal(t, "FooNode", nodeStats.NodeName)
    assert.EqualValues(t, testTime(creationTime, seedRoot).Unix(), nodeStats.StartTime.Time.Unix())
    checkCPUStats(t, "Node", seedRoot, nodeStats.CPU)
    checkMemoryStats(t, "Node", seedRoot, infos["/"], nodeStats.Memory)
    checkNetworkStats(t, "Node", seedRoot, nodeStats.Network)
    systemSeeds := map[string]int{
        kubestats.SystemContainerRuntime: seedRuntime,
        kubestats.SystemContainerKubelet: seedKubelet,
        kubestats.SystemContainerMisc:    seedMisc,
    }
    systemContainerToNodeCgroup := map[string]string{
        kubestats.SystemContainerRuntime: nodeConfig.RuntimeCgroupsName,
        kubestats.SystemContainerKubelet: nodeConfig.KubeletCgroupsName,
        kubestats.SystemContainerMisc:    nodeConfig.SystemCgroupsName,
    }
    for _, sys := range nodeStats.SystemContainers {
        name := sys.Name
        info := infos[systemContainerToNodeCgroup[name]]
        seed, found := systemSeeds[name]
        //......... the rest of the code is omitted .........
Example 9: TestBuildSummary
func TestBuildSummary(t *testing.T) {
    node := api.Node{}
    node.Name = "FooNode"
    nodeConfig := cm.NodeConfig{
        RuntimeCgroupsName: "/docker-daemon",
        SystemCgroupsName:  "/system",
        KubeletCgroupsName: "/kubelet",
    }
    const (
        namespace0 = "test0"
        namespace2 = "test2"
    )
    const (
        seedRoot           = 0
        seedRuntime        = 100
        seedKubelet        = 200
        seedMisc           = 300
        seedPod0Infra      = 1000
        seedPod0Container0 = 2000
        seedPod0Container1 = 2001
        seedPod1Infra      = 3000
        seedPod1Container  = 4000
        seedPod2Infra      = 5000
        seedPod2Container  = 6000
    )
    const (
        pName0 = "pod0"
        pName1 = "pod1"
        pName2 = "pod0" // ensure pName2 conflicts with pName0, but is in a different namespace
    )
    const (
        cName00 = "c0"
        cName01 = "c1"
        cName10 = "c0" // ensure cName10 conflicts with cName00, but is in a different pod
        cName20 = "c1" // ensure cName20 conflicts with cName01, but is in a different pod + namespace
    )
    prf0 := PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0}
    prf1 := PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1}
    prf2 := PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2}
    infos := map[string]v2.ContainerInfo{
        "/":              summaryTestContainerInfo(seedRoot, "", "", ""),
        "/docker-daemon": summaryTestContainerInfo(seedRuntime, "", "", ""),
        "/kubelet":       summaryTestContainerInfo(seedKubelet, "", "", ""),
        "/system":        summaryTestContainerInfo(seedMisc, "", "", ""),
        // Pod0 - Namespace0
        "/pod0-i":  summaryTestContainerInfo(seedPod0Infra, pName0, namespace0, leaky.PodInfraContainerName),
        "/pod0-c0": summaryTestContainerInfo(seedPod0Container0, pName0, namespace0, cName00),
        "/pod0-c2": summaryTestContainerInfo(seedPod0Container1, pName0, namespace0, cName01),
        // Pod1 - Namespace0
        "/pod1-i":  summaryTestContainerInfo(seedPod1Infra, pName1, namespace0, leaky.PodInfraContainerName),
        "/pod1-c0": summaryTestContainerInfo(seedPod1Container, pName1, namespace0, cName10),
        // Pod2 - Namespace2
        "/pod2-i":  summaryTestContainerInfo(seedPod2Infra, pName2, namespace2, leaky.PodInfraContainerName),
        "/pod2-c0": summaryTestContainerInfo(seedPod2Container, pName2, namespace2, cName20),
    }
    rootfs := v2.FsInfo{}
    imagefs := v2.FsInfo{}
    sb := &summaryBuilder{
        newFsResourceAnalyzer(&MockStatsProvider{}, time.Minute*5), &node, nodeConfig, rootfs, imagefs, infos}
    summary, err := sb.build()
    assert.NoError(t, err)
    nodeStats := summary.Node
    assert.Equal(t, "FooNode", nodeStats.NodeName)
    checkCPUStats(t, "Node", seedRoot, nodeStats.CPU)
    checkMemoryStats(t, "Node", seedRoot, nodeStats.Memory)
    checkNetworkStats(t, "Node", seedRoot, nodeStats.Network)
    systemSeeds := map[string]int{
        SystemContainerRuntime: seedRuntime,
        SystemContainerKubelet: seedKubelet,
        SystemContainerMisc:    seedMisc,
    }
    for _, sys := range nodeStats.SystemContainers {
        name := sys.Name
        seed, found := systemSeeds[name]
        if !found {
            t.Errorf("Unknown SystemContainer: %q", name)
        }
        checkCPUStats(t, name, seed, sys.CPU)
        checkMemoryStats(t, name, seed, sys.Memory)
    }
    assert.Equal(t, 3, len(summary.Pods))
    indexPods := make(map[PodReference]PodStats, len(summary.Pods))
    for _, pod := range summary.Pods {
        indexPods[pod.PodRef] = pod
    }
    // Validate Pod0 Results
    ps, found := indexPods[prf0]
    assert.True(t, found)
    assert.Len(t, ps.Containers, 2)
    indexCon := make(map[string]ContainerStats, len(ps.Containers))
    for _, con := range ps.Containers {
        indexCon[con.Name] = con
    }
    //......... the rest of the code is omitted .........
Example 10: testDaemonSets
func testDaemonSets(f *Framework) {
    ns := f.Namespace.Name
    c := f.Client
    simpleDSName := "simple-daemon-set"
    image := "gcr.io/google_containers/serve_hostname:1.1"
    label := map[string]string{"name": simpleDSName}
    retryTimeout := 1 * time.Minute
    retryInterval := 5 * time.Second
    Logf("Creating simple daemon set %s", simpleDSName)
    _, err := c.DaemonSets(ns).Create(&experimental.DaemonSet{
        ObjectMeta: api.ObjectMeta{
            Name: simpleDSName,
        },
        Spec: experimental.DaemonSetSpec{
            Template: &api.PodTemplateSpec{
                ObjectMeta: api.ObjectMeta{
                    Labels: label,
                },
                Spec: api.PodSpec{
                    Containers: []api.Container{
                        {
                            Name:  simpleDSName,
                            Image: image,
                            Ports: []api.ContainerPort{{ContainerPort: 9376}},
                        },
                    },
                },
            },
        },
    })
    Expect(err).NotTo(HaveOccurred())
    defer func() {
        Logf("Check that reaper kills all daemon pods for %s", simpleDSName)
        dsReaper, err := kubectl.ReaperFor("DaemonSet", c)
        Expect(err).NotTo(HaveOccurred())
        _, err = dsReaper.Stop(ns, simpleDSName, 0, nil)
        Expect(err).NotTo(HaveOccurred())
        err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, label))
        Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
    }()
    By("Check that daemon pods launch on every node of the cluster.")
    Expect(err).NotTo(HaveOccurred())
    err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
    Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
    By("Stop a daemon pod, check that the daemon pod is revived.")
    podClient := c.Pods(ns)
    podList, err := podClient.List(labels.Set(label).AsSelector(), fields.Everything())
    Expect(err).NotTo(HaveOccurred())
    Expect(len(podList.Items)).To(BeNumerically(">", 0))
    pod := podList.Items[0]
    err = podClient.Delete(pod.Name, nil)
    Expect(err).NotTo(HaveOccurred())
    err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
    Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
    complexDSName := "complex-daemon-set"
    complexLabel := map[string]string{"name": complexDSName}
    nodeSelector := map[string]string{"color": "blue"}
    Logf("Creating daemon with a node selector %s", complexDSName)
    _, err = c.DaemonSets(ns).Create(&experimental.DaemonSet{
        ObjectMeta: api.ObjectMeta{
            Name: complexDSName,
        },
        Spec: experimental.DaemonSetSpec{
            Selector: complexLabel,
            Template: &api.PodTemplateSpec{
                ObjectMeta: api.ObjectMeta{
                    Labels: complexLabel,
                },
                Spec: api.PodSpec{
                    NodeSelector: nodeSelector,
                    Containers: []api.Container{
                        {
                            Name:  complexDSName,
                            Image: image,
                            Ports: []api.ContainerPort{{ContainerPort: 9376}},
                        },
                    },
                },
            },
        },
    })
    Expect(err).NotTo(HaveOccurred())
    By("Initially, daemon pods should not be running on any nodes.")
    err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))
    Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
    By("Change label of node, check that daemon pod is launched.")
    nodeClient := c.Nodes()
    nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
    Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
    nodeList.Items[0].Labels = nodeSelector
    var newNode *api.Node
    err = wait.Poll(updateRetryPeriod, updateRetryTimeout, func() (bool, error) {
        newNode, err = nodeClient.Update(&nodeList.Items[0])
        //......... the rest of the code is omitted .........