This article collects typical code examples of the Go method k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset.Interface.Apps. If you are wondering what Interface.Apps does, how to use it, or what calling it looks like in practice, the curated examples below should help; they also illustrate the enclosing type k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset.Interface.
Nine code examples of Interface.Apps are shown below, sorted by popularity.
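Before turning to the examples, here is a minimal orientation sketch, not taken from any of them: Apps() returns the typed client for the apps API group, and its StatefulSets(namespace) getter is what every example below ultimately calls. The namespace and object name are hypothetical, and imports are omitted to match the style of the examples.

// A hypothetical helper: fetch one StatefulSet through the Apps() group client.
// "client" is assumed to be a configured internalclientset.Interface.
func printStatefulSet(client internalclientset.Interface) error {
	// "default" and "web" are made-up names; Get in this clientset vintage
	// takes only the object name.
	set, err := client.Apps().StatefulSets("default").Get("web")
	if err != nil {
		return err
	}
	fmt.Printf("statefulset %s has %d replicas\n", set.Name, set.Status.Replicas)
	return nil
}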
Example 1: newStatefulSetInformer
// newStatefulSetInformer returns a SharedIndexInformer that lists and watches
// StatefulSets in all namespaces, indexed by namespace.
func newStatefulSetInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
var internalOptions api.ListOptions
if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil {
return nil, err
}
return client.Apps().StatefulSets(api.NamespaceAll).List(internalOptions)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
var internalOptions api.ListOptions
if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil {
return nil, err
}
return client.Apps().StatefulSets(api.NamespaceAll).Watch(internalOptions)
},
},
&apps.StatefulSet{},
resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
return sharedIndexInformer
}
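A possible way to wire the informer up, as a hedged sketch: client is assumed to be a configured internalclientset.Interface, and the 30-second resync period is an arbitrary choice.

// Hypothetical usage of newStatefulSetInformer.
stopCh := make(chan struct{})
defer close(stopCh)

informer := newStatefulSetInformer(client, 30*time.Second)
go informer.Run(stopCh)

// Wait until the initial List has populated the informer's cache.
for !informer.HasSynced() {
	time.Sleep(100 * time.Millisecond)
}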
Example 2: ReaperFor
// ReaperFor returns the Reaper that knows how to delete the given kind of
// resource, or a NoSuchReaperError when no reaper is implemented for it.
func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, error) {
switch kind {
case api.Kind("ReplicationController"):
return &ReplicationControllerReaper{c.Core(), Interval, Timeout}, nil
case extensions.Kind("ReplicaSet"):
return &ReplicaSetReaper{c.Extensions(), Interval, Timeout}, nil
case extensions.Kind("DaemonSet"):
return &DaemonSetReaper{c.Extensions(), Interval, Timeout}, nil
case api.Kind("Pod"):
return &PodReaper{c.Core()}, nil
case api.Kind("Service"):
return &ServiceReaper{c.Core()}, nil
case extensions.Kind("Job"), batch.Kind("Job"):
return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil
case apps.Kind("StatefulSet"):
return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil
case extensions.Kind("Deployment"):
return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout}, nil
}
return nil, &NoSuchReaperError{kind}
}
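A sketch of driving the returned Reaper. In this vintage of kubectl the Reaper interface exposes Stop(namespace, name, timeout, gracePeriod); the namespace and name below are hypothetical.

reaper, err := ReaperFor(apps.Kind("StatefulSet"), c)
if err != nil {
	return err
}
// Delete the StatefulSet "web" and wait up to five minutes for it to disappear;
// a nil gracePeriod means default deletion options.
if err := reaper.Stop("default", "web", 5*time.Minute, nil); err != nil {
	return err
}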
Example 3: ScalerFor
// ScalerFor returns the Scaler that knows how to resize the given kind of
// resource, or an error when no scaler is implemented for it.
func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, error) {
switch kind {
case api.Kind("ReplicationController"):
return &ReplicationControllerScaler{c.Core()}, nil
case extensions.Kind("ReplicaSet"):
return &ReplicaSetScaler{c.Extensions()}, nil
case extensions.Kind("Job"), batch.Kind("Job"):
return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface.
case apps.Kind("StatefulSet"):
return &StatefulSetScaler{c.Apps()}, nil
case extensions.Kind("Deployment"):
return &DeploymentScaler{c.Extensions()}, nil
}
return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
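And the matching sketch for a Scaler. Scale in this era accepts optional preconditions and retry/wait parameters, all of which may be nil; the namespace and name are again hypothetical.

scaler, err := ScalerFor(apps.Kind("StatefulSet"), c)
if err != nil {
	return err
}
// Resize "web" to three replicas, with no preconditions and without
// waiting for the new size to be reached.
if err := scaler.Scale("default", "web", 3, nil, nil, nil); err != nil {
	return err
}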
Example 4: toStatefulSetPodController
// toStatefulSetPodController fetches the StatefulSet named by reference and
// wraps it, together with its pods and events, in a Controller for display.
func toStatefulSetPodController(client k8sClient.Interface, reference api.ObjectReference, pods []api.Pod, events []api.Event, heapsterClient client.HeapsterClient) (*Controller, error) {
statefulset, err := client.Apps().StatefulSets(reference.Namespace).Get(reference.Name)
if err != nil {
return nil, err
}
statefulsets := []apps.StatefulSet{*statefulset}
statefulSetList := statefulsetlist.CreateStatefulSetList(statefulsets, pods, events, dataselect.StdMetricsDataSelect, &heapsterClient)
return &Controller{
Kind: "StatefulSet",
StatefulSetList: statefulSetList,
}, nil
}
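A hedged usage sketch for this helper; pods, events, and heapsterClient are assumed to be supplied by the caller, and the reference fields are made up.

ref := api.ObjectReference{
	Kind:      "StatefulSet",
	Namespace: "default", // hypothetical
	Name:      "web",     // hypothetical
}
controller, err := toStatefulSetPodController(client, ref, pods, events, heapsterClient)
if err != nil {
	return err
}
fmt.Printf("built controller of kind %s\n", controller.Kind)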
Example 5: GetStatefulSetListChannel
// GetStatefulSetListChannel returns a pair of channels, one delivering a
// StatefulSet list and one delivering errors; each channel must be read
// exactly numReads times.
func GetStatefulSetListChannel(client client.Interface,
nsQuery *NamespaceQuery, numReads int) StatefulSetListChannel {
channel := StatefulSetListChannel{
List: make(chan *apps.StatefulSetList, numReads),
Error: make(chan error, numReads),
}
go func() {
		statefulSets, err := client.Apps().StatefulSets(nsQuery.ToRequestParam()).List(listEverything)
		// Only filter when the List call succeeded; statefulSets is nil on error.
		if err == nil {
			var filteredItems []apps.StatefulSet
			for _, item := range statefulSets.Items {
				if nsQuery.Matches(item.ObjectMeta.Namespace) {
					filteredItems = append(filteredItems, item)
				}
			}
			statefulSets.Items = filteredItems
		}
for i := 0; i < numReads; i++ {
channel.List <- statefulSets
channel.Error <- err
}
}()
return channel
}
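The contract in the doc comment is easy to miss: a consumer must read both channels exactly numReads times; each buffer holds exactly that many values, so an extra read blocks forever. A minimal sketch with numReads == 1, assuming client and nsQuery already exist:

channels := GetStatefulSetListChannel(client, nsQuery, 1)
statefulSets := <-channels.List
if err := <-channels.Error; err != nil {
	return err
}
for _, s := range statefulSets.Items {
	fmt.Println(s.Name)
}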
Example 6: deleteAllStatefulSets
// deleteAllStatefulSets scales every StatefulSet in the namespace down to zero,
// deletes it, and then waits for the backing PVCs and PVs to be cleaned up.
func deleteAllStatefulSets(c clientset.Interface, ns string) {
pst := &statefulSetTester{c: c}
psList, err := c.Apps().StatefulSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
ExpectNoError(err)
// Scale down each statefulset, then delete it completely.
// Deleting a pvc without doing this will leak volumes, #25101.
errList := []string{}
for _, ps := range psList.Items {
framework.Logf("Scaling statefulset %v to 0", ps.Name)
if err := pst.scale(&ps, 0); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))
}
pst.waitForStatus(&ps, 0)
framework.Logf("Deleting statefulset %v", ps.Name)
if err := c.Apps().StatefulSets(ps.Namespace).Delete(ps.Name, nil); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))
}
}
// pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs.
pvNames := sets.NewString()
// TODO: Don't assume all pvcs in the ns belong to a statefulset
pvcPollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
pvcList, err := c.Core().PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
if err != nil {
framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
return false, nil
}
for _, pvc := range pvcList.Items {
pvNames.Insert(pvc.Spec.VolumeName)
// TODO: Double check that there are no pods referencing the pvc
framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
if err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
return false, nil
}
}
return true, nil
})
if pvcPollErr != nil {
errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
}
pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
pvList, err := c.Core().PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()})
if err != nil {
framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
return false, nil
}
waitingFor := []string{}
for _, pv := range pvList.Items {
if pvNames.Has(pv.Name) {
waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
}
}
if len(waitingFor) == 0 {
return true, nil
}
framework.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
return false, nil
})
if pollErr != nil {
errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
}
if len(errList) != 0 {
ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
}
}
Example 7:
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
dumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
deleteAllStatefulSets(c, ns)
})
It("should provide basic identity [Feature:StatefulSet]", func() {
By("creating statefulset " + psName + " in namespace " + ns)
petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := newStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
setInitializedAnnotation(ps, "false")
_, err := c.Apps().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())
pst := statefulSetTester{c: c}
By("Saturating pet set " + ps.Name)
pst.saturate(ps)
By("Verifying statefulset mounted data directory is usable")
ExpectNoError(pst.checkMount(ps, "/data"))
By("Verifying statefulset provides a stable hostname for each pod")
ExpectNoError(pst.checkHostname(ps))
cmd := "echo $(hostname) > /data/hostname; sync;"
By("Running " + cmd + " in all pets")
Example 8: verifyExpectedRcsExistAndGetExpectedPods
// verifyExpectedRcsExistAndGetExpectedPods checks that exactly one controller
// (RC, deployment, or stateful set) exists for each entry in rcLabels and
// returns the UIDs of their non-terminating pods.
func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, error) {
expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify those replication controllers. A label is preferred over an
	// explicit name because names typically carry a version suffix, e.g.
	// heapster-monitoring-v1, which changes after a rolling update, e.g. to
	// heapster-monitoring-v2. A label query also detects the situation where both
	// a heapster-monitoring-v1 and a heapster-monitoring-v2 replication controller
	// are running, which is an error except during a rolling update.
for _, rcLabel := range rcLabels {
selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
options := api.ListOptions{LabelSelector: selector}
deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
rcList, err := c.Core().ReplicationControllers(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
psList, err := c.Apps().StatefulSets(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
		if (len(rcList.Items) + len(deploymentList.Items) + len(psList.Items)) != 1 {
			return nil, fmt.Errorf("expected to find exactly one RC, deployment, or stateful set with label %s but got %d RCs, %d deployments, %d stateful sets",
				rcLabel, len(rcList.Items), len(deploymentList.Items), len(psList.Items))
		}
// Check all the replication controllers.
for _, rc := range rcList.Items {
selector := labels.Set(rc.Spec.Selector).AsSelector()
options := api.ListOptions{LabelSelector: selector}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
for _, pod := range podList.Items {
if pod.DeletionTimestamp != nil {
continue
}
expectedPods = append(expectedPods, string(pod.UID))
}
}
		// Do the same for all deployments.
		for _, d := range deploymentList.Items {
			selector := labels.Set(d.Spec.Selector.MatchLabels).AsSelector()
options := api.ListOptions{LabelSelector: selector}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
for _, pod := range podList.Items {
if pod.DeletionTimestamp != nil {
continue
}
expectedPods = append(expectedPods, string(pod.UID))
}
}
		// And the same for stateful sets.
for _, ps := range psList.Items {
selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
options := api.ListOptions{LabelSelector: selector}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
}
for _, pod := range podList.Items {
if pod.DeletionTimestamp != nil {
continue
}
expectedPods = append(expectedPods, string(pod.UID))
}
}
}
return expectedPods, nil
}
Example 9:
})
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
dumpDebugInfo(c, ns)
}
framework.Logf("Deleting all petset in ns %v", ns)
deleteAllPetSets(c, ns)
})
It("should provide basic identity [Feature:PetSet]", func() {
By("creating petset " + psName + " in namespace " + ns)
petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := newPetSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.Apps().PetSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())
pst := petSetTester{c: c}
By("Saturating pet set " + ps.Name)
pst.saturate(ps)
By("Verifying petset mounted data directory is usable")
ExpectNoError(pst.checkMount(ps, "/data"))
cmd := "echo $(hostname) > /data/hostname; sync;"
By("Running " + cmd + " in all pets")
ExpectNoError(pst.execInPets(ps, cmd))
By("Restarting pet set " + ps.Name)