This page collects typical usage examples of the Golang method k8s.io/kubernetes/pkg/client/unversioned.Client.DaemonSets. If you have been wondering what Client.DaemonSets does, how to call it, and what real-world usage looks like, the hand-picked code examples below should help. You can also explore the containing type, k8s.io/kubernetes/pkg/client/unversioned.Client, for more context.
The four code examples of Client.DaemonSets shown below are ordered by popularity.
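Before the examples, here is a minimal sketch of the method in isolation: Client.DaemonSets(namespace) returns a DaemonSetInterface scoped to that namespace, offering the usual List/Get/Create/Update/Delete verbs. The apiserver address and client construction below are assumptions for illustration (a 1.3-era tree where pkg/client/unversioned still exists), not part of the examples that follow:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/restclient"
    client "k8s.io/kubernetes/pkg/client/unversioned"
)

func main() {
    // Hypothetical apiserver address; adjust for your cluster.
    config := &restclient.Config{Host: "http://127.0.0.1:8080"}
    c, err := client.New(config)
    if err != nil {
        panic(err)
    }
    // DaemonSets(ns) returns a namespaced DaemonSetInterface.
    dsList, err := c.DaemonSets(api.NamespaceDefault).List(api.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, ds := range dsList.Items {
        fmt.Println(ds.Name)
    }
}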
Example 1:
It("should run and stop simple daemon", func() {
label := map[string]string{daemonsetNameLabel: dsName}
Logf("Creating simple daemon set %s", dsName)
_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: label,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: dsName,
Image: image,
Ports: []api.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("Check that reaper kills all daemon pods for %s", dsName)
dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), c)
Expect(err).NotTo(HaveOccurred())
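Example 1 is cut off before the deferred cleanup actually runs; the real e2e test goes on to use the reaper it just built. As an illustrative aside only (not the test's actual teardown path), deleting the DaemonSet directly through the same client would look roughly like this:

// Sketch: direct deletion via the unversioned client, reusing c, ns,
// and dsName from the example above.
if err := c.DaemonSets(ns).Delete(dsName); err != nil {
    Logf("Failed to delete daemon set %s: %v", dsName, err)
}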
Example 2: dumpClusterInfo
func dumpClusterInfo(f *cmdutil.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
    var c *unversioned.Client
    var err error
    if c, err = f.Client(); err != nil {
        return err
    }
    printer, _, err := kubectl.GetPrinter("json", "")
    if err != nil {
        return err
    }
    nodes, err := c.Nodes().List(api.ListOptions{})
    if err != nil {
        return err
    }
    if err := printer.PrintObj(nodes, setupOutputWriter(cmd, out, "nodes.json")); err != nil {
        return err
    }
    var namespaces []string
    if cmdutil.GetFlagBool(cmd, "all-namespaces") {
        namespaceList, err := c.Namespaces().List(api.ListOptions{})
        if err != nil {
            return err
        }
        for ix := range namespaceList.Items {
            namespaces = append(namespaces, namespaceList.Items[ix].Name)
        }
    } else {
        namespaces = cmdutil.GetFlagStringSlice(cmd, "namespaces")
        if len(namespaces) == 0 {
            cmdNamespace, _, err := f.DefaultNamespace()
            if err != nil {
                return err
            }
            namespaces = []string{
                api.NamespaceSystem,
                cmdNamespace,
            }
        }
    }
    for _, namespace := range namespaces {
        // TODO: this is repetitive in the extreme. Use reflection or
        // something to make this a for loop.
        events, err := c.Events(namespace).List(api.ListOptions{})
        if err != nil {
            return err
        }
        if err := printer.PrintObj(events, setupOutputWriter(cmd, out, path.Join(namespace, "events.json"))); err != nil {
            return err
        }
        rcs, err := c.ReplicationControllers(namespace).List(api.ListOptions{})
        if err != nil {
            return err
        }
        if err := printer.PrintObj(rcs, setupOutputWriter(cmd, out, path.Join(namespace, "replication-controllers.json"))); err != nil {
            return err
        }
        svcs, err := c.Services(namespace).List(api.ListOptions{})
        if err != nil {
            return err
        }
        if err := printer.PrintObj(svcs, setupOutputWriter(cmd, out, path.Join(namespace, "services.json"))); err != nil {
            return err
        }
        sets, err := c.DaemonSets(namespace).List(api.ListOptions{})
        if err != nil {
            return err
        }
        if err := printer.PrintObj(sets, setupOutputWriter(cmd, out, path.Join(namespace, "daemonsets.json"))); err != nil {
            return err
        }
        deps, err := c.Deployments(namespace).List(api.ListOptions{})
        if err != nil {
            return err
        }
        if err := printer.PrintObj(deps, setupOutputWriter(cmd, out, path.Join(namespace, "deployments.json"))); err != nil {
            return err
        }
        rps, err := c.ReplicaSets(namespace).List(api.ListOptions{})
        if err != nil {
            return err
        }
        if err := printer.PrintObj(rps, setupOutputWriter(cmd, out, path.Join(namespace, "replicasets.json"))); err != nil {
            return err
        }
        pods, err := c.Pods(namespace).List(api.ListOptions{})
        if err != nil {
            return err
        }
        if err := printer.PrintObj(pods, setupOutputWriter(cmd, out, path.Join(namespace, "pods.json"))); err != nil {
            return err
            //......... part of the code is omitted here .........
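The helper setupOutputWriter used throughout Example 2 is not part of the excerpt. In kubectl it roughly decides, per file name, whether to write to the shared writer (stdout) or to a file under the directory named by the --output-directory flag. The sketch below is an approximation under that assumption, not the verbatim kubectl source:

func setupOutputWriter(cmd *cobra.Command, defaultWriter io.Writer, filename string) io.Writer {
    dir := cmdutil.GetFlagString(cmd, "output-directory")
    if len(dir) == 0 || dir == "-" {
        // No output directory requested: everything goes to the shared writer.
        return defaultWriter
    }
    fullFile := path.Join(dir, filename)
    // Ensure per-namespace subdirectories (e.g. kube-system/) exist.
    cmdutil.CheckErr(os.MkdirAll(path.Dir(fullFile), 0755))
    file, err := os.Create(fullFile)
    cmdutil.CheckErr(err)
    return file
}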
Example 3: GetPodsForDeletionOnNodeDrain
// GetPodsForDeletionOnNodeDrain returns pods that should be deleted on node drain as well as some extra information
// about possibly problematic pods (unreplicated pods and daemon sets).
func GetPodsForDeletionOnNodeDrain(
    podList []*api.Pod,
    decoder runtime.Decoder,
    skipNodesWithSystemPods bool,
    skipNodesWithLocalStorage bool,
    checkReferences bool, // Setting this to true requires client to be non-nil.
    client *client.Client,
    minReplica int32) ([]*api.Pod, error) {
    pods := []*api.Pod{}
    for _, pod := range podList {
        if IsMirrorPod(pod) {
            continue
        }
        daemonsetPod := false
        replicated := false
        sr, err := CreatorRef(pod)
        if err != nil {
            return []*api.Pod{}, fmt.Errorf("failed to obtain refkind: %v", err)
        }
        refKind := ""
        if sr != nil {
            refKind = sr.Reference.Kind
        }
        if refKind == "ReplicationController" {
            if checkReferences {
                rc, err := client.ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name)
                // Assume the error means the RC is either gone/missing
                // or has too few replicas configured.
                // TODO: replace the minReplica check with pod disruption budget.
                if err == nil && rc != nil {
                    if rc.Spec.Replicas < minReplica {
                        return []*api.Pod{}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
                            pod.Namespace, pod.Name, rc.Spec.Replicas, minReplica)
                    }
                    replicated = true
                } else {
                    return []*api.Pod{}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
                }
            } else {
                replicated = true
            }
        } else if refKind == "DaemonSet" {
            if checkReferences {
                ds, err := client.DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name)
                // Assume the only reason for an error is because the DaemonSet is
                // gone/missing, not for any other cause. TODO(mml): something more
                // sophisticated than this
                if err == nil && ds != nil {
                    // Otherwise, treat daemonset-managed pods as unmanaged since
                    // DaemonSet Controller currently ignores the unschedulable bit.
                    // FIXME(mml): Add link to the issue concerning a proper way to drain
                    // daemonset pods, probably using taints.
                    daemonsetPod = true
                } else {
                    return []*api.Pod{}, fmt.Errorf("daemonset for %s/%s is not present, err: %v", pod.Namespace, pod.Name, err)
                }
            } else {
                daemonsetPod = true
            }
        } else if refKind == "Job" {
            if checkReferences {
                job, err := client.ExtensionsClient.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)
                // Assume the only reason for an error is because the Job is
                // gone/missing, not for any other cause. TODO(mml): something more
                // sophisticated than this
                if err == nil && job != nil {
                    replicated = true
                } else {
                    return []*api.Pod{}, fmt.Errorf("job for %s/%s is not available: err: %v", pod.Namespace, pod.Name, err)
                }
            } else {
                replicated = true
            }
        } else if refKind == "ReplicaSet" {
            if checkReferences {
                rs, err := client.ExtensionsClient.ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)
                // Assume the only reason for an error is because the RS is
                // gone/missing, not for any other cause. TODO(mml): something more
                // sophisticated than this
                if err == nil && rs != nil {
                    if rs.Spec.Replicas < minReplica {
                        return []*api.Pod{}, fmt.Errorf("replica set for %s/%s has too few replicas spec: %d min: %d",
                            pod.Namespace, pod.Name, rs.Spec.Replicas, minReplica)
                    }
                    replicated = true
                } else {
                    return []*api.Pod{}, fmt.Errorf("replica set for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
                }
            } else {
                //......... part of the code is omitted here .........
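A call site for this variant, with hypothetical podsOnNode, decoder, and kubeClient values assumed to be in scope, might look like:

podsToDelete, err := GetPodsForDeletionOnNodeDrain(
    podsOnNode, // []*api.Pod already filtered to the drained node (hypothetical)
    decoder,    // runtime.Decoder used to decode the CreatedBy annotation
    true,       // skipNodesWithSystemPods
    true,       // skipNodesWithLocalStorage
    true,       // checkReferences: requires kubeClient to be non-nil
    kubeClient, // *client.Client
    1,          // minReplica
)
if err != nil {
    return err
}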
Example 4: GetPodsForDeletionOnNodeDrain
// GetPodsForDeletionOnNodeDrain returns pods that should be deleted on node drain as well as some extra information
// about possibly problematic pods (unreplicated pods and daemon sets).
func GetPodsForDeletionOnNodeDrain(client *client.Client, nodename string, decoder runtime.Decoder, removeUnreplicated bool,
    ignoreDaemonSet bool) (pods []api.Pod, unreplicatedPodNames []string, daemonSetPodNames []string, finalError error) {
    pods = []api.Pod{}
    unreplicatedPodNames = []string{}
    daemonSetPodNames = []string{}
    podList, err := client.Pods(api.NamespaceAll).List(api.ListOptions{FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodename})})
    if err != nil {
        return []api.Pod{}, []string{}, []string{}, err
    }
    for _, pod := range podList.Items {
        _, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]
        if found {
            // Skip mirror pod
            continue
        }
        replicated := false
        daemonsetPod := false
        creatorRef, found := pod.ObjectMeta.Annotations[controller.CreatedByAnnotation]
        if found {
            // Now verify that the specified creator actually exists.
            var sr api.SerializedReference
            if err := runtime.DecodeInto(decoder, []byte(creatorRef), &sr); err != nil {
                return []api.Pod{}, []string{}, []string{}, err
            }
            if sr.Reference.Kind == "ReplicationController" {
                rc, err := client.ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name)
                // Assume the only reason for an error is because the RC is
                // gone/missing, not for any other cause. TODO(mml): something more
                // sophisticated than this
                if err == nil && rc != nil {
                    replicated = true
                }
            } else if sr.Reference.Kind == "DaemonSet" {
                ds, err := client.DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name)
                // Assume the only reason for an error is because the DaemonSet is
                // gone/missing, not for any other cause. TODO(mml): something more
                // sophisticated than this
                if err == nil && ds != nil {
                    // Otherwise, treat daemonset-managed pods as unmanaged since
                    // DaemonSet Controller currently ignores the unschedulable bit.
                    // FIXME(mml): Add link to the issue concerning a proper way to drain
                    // daemonset pods, probably using taints.
                    daemonsetPod = true
                }
            } else if sr.Reference.Kind == "Job" {
                job, err := client.ExtensionsClient.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)
                // Assume the only reason for an error is because the Job is
                // gone/missing, not for any other cause. TODO(mml): something more
                // sophisticated than this
                if err == nil && job != nil {
                    replicated = true
                }
            } else if sr.Reference.Kind == "ReplicaSet" {
                rs, err := client.ExtensionsClient.ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)
                // Assume the only reason for an error is because the RS is
                // gone/missing, not for any other cause. TODO(mml): something more
                // sophisticated than this
                if err == nil && rs != nil {
                    replicated = true
                }
            }
        }
        switch {
        case daemonsetPod:
            daemonSetPodNames = append(daemonSetPodNames, pod.Name)
        case !replicated:
            unreplicatedPodNames = append(unreplicatedPodNames, pod.Name)
            if removeUnreplicated {
                pods = append(pods, pod)
            }
        default:
            pods = append(pods, pod)
        }
    }
    return pods, unreplicatedPodNames, daemonSetPodNames, nil
}
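This older variant lists the node's pods itself and reports problem pods by name instead of failing fast. A hypothetical invocation, with kubeClient and decoder assumed to be in scope, might be:

// Sketch of a call site; "node-1" is a placeholder node name.
pods, unreplicated, daemonSets, err := GetPodsForDeletionOnNodeDrain(
    kubeClient, "node-1", decoder,
    true, // removeUnreplicated
    true) // ignoreDaemonSet
if err != nil {
    return err
}
fmt.Printf("deleting %d pods; %d unreplicated, %d daemonset-managed\n",
    len(pods), len(unreplicated), len(daemonSets))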