This page collects typical usage examples of the Golang function StartPods from k8s.io/kubernetes/test/e2e/framework. If you are wondering what StartPods does, how to use it, or what it looks like in real code, the curated examples below should help.
Four code examples of the StartPods function are shown below, ordered by popularity by default.
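Before the examples, here is a minimal sketch of how StartPods is typically invoked, inferred purely from the call sites on this page. The exact signature (and whether it returns an error) varies across Kubernetes releases, and the helper name startExamplePods is hypothetical.

// A minimal sketch of a StartPods invocation, with the parameter order inferred
// from the examples below; details may differ between Kubernetes releases.
import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"
)

// startExamplePods is a hypothetical helper that replicates a pause-container
// pod template `replicas` times in the test namespace.
func startExamplePods(f *framework.Framework, replicas int) {
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   "example",
			Labels: map[string]string{"name": "example"},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: "pause", Image: framework.GetPauseImageName(f.Client)},
			},
		},
	}
	// Pod names get the "example" prefix; the trailing bool asks StartPods to
	// wait until the created pods are running.
	framework.StartPods(f.Client, replicas, f.Namespace.Name, "example", pod, true)
}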
Example 1: SpreadServiceOrFail
// Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
	// First create the service
	serviceName := "test-service"
	serviceSpec := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: f.Namespace.Name,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{
				"service": serviceName,
			},
			Ports: []api.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(80),
			}},
		},
	}
	_, err := f.Client.Services(f.Namespace.Name).Create(serviceSpec)
	Expect(err).NotTo(HaveOccurred())
	// Now create some pods behind the service
	podSpec := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   serviceName,
			Labels: map[string]string{"service": serviceName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "test",
					Image: framework.GetPauseImageName(f.Client),
				},
			},
		},
	}
	// Caution: StartPods requires at least one pod to replicate.
	// Based on the callers, replicas is always a positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
	// Thus, there is no need to test for it here. If the precondition ever allows zero replicas,
	// test for replicaCount > 0; otherwise, StartPods panics.
	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
	// Wait for all of them to be scheduled
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
	pods, err := framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())
	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}
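The caution comment in Example 1 explains that StartPods panics when asked to replicate zero pods. If the replica count could legitimately be zero, a guard along the following lines (mirroring what Examples 3 and 4 below do) would avoid the panic; this snippet is an illustrative variation, not part of the original example.

// Hypothetical guarded variant of the call in Example 1: skip StartPods entirely
// when there is nothing to replicate, since it panics on a non-positive count.
if replicaCount > 0 {
	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
}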
Example 2: SpreadServiceOrFail
// Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
	// First create the service
	serviceName := "test-service"
	serviceSpec := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: f.Namespace.Name,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{
				"service": serviceName,
			},
			Ports: []api.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(80),
			}},
		},
	}
	_, err := f.Client.Services(f.Namespace.Name).Create(serviceSpec)
	Expect(err).NotTo(HaveOccurred())
	// Now create some pods behind the service
	podSpec := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   serviceName,
			Labels: map[string]string{"service": serviceName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "test",
					Image: "gcr.io/google_containers/pause-amd64:3.0",
				},
			},
		},
	}
	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
	// Wait for all of them to be scheduled
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
	pods, err := framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())
	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}
Example 3:
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
// As the pods are distributed randomly among nodes,
// it can easily happen that all nodes are saturated
// and there is no need to create additional pods.
// StartPods requires at least one pod to replicate.
if podsNeededForSaturation > 0 {
	framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: api.ObjectMeta{
			Name:   "",
			Labels: map[string]string{"name": ""},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "",
					Image: framework.GetPauseImageName(f.Client),
				},
			},
		},
	}, true)
}
podName := "additional-pod"
_, err := c.Pods(ns).Create(&api.Pod{
	TypeMeta: unversioned.TypeMeta{
		Kind: "Pod",
	},
	ObjectMeta: api.ObjectMeta{
Example 4:
	totalPodCapacity += podCapacity.Value()
}
currentlyScheduledPods := framework.WaitForStableCluster(c, masterNodes)
podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
// As the pods are distributed randomly among nodes,
// it can easily happen that all nodes are saturated
// and there is no need to create additional pods.
// StartPods requires at least one pod to replicate.
if podsNeededForSaturation > 0 {
	framework.StartPods(c, podsNeededForSaturation, ns, "maxp",
		*initPausePod(f, pausePodConfig{
			Name:   "",
			Labels: map[string]string{"name": ""},
		}), true)
}
podName := "additional-pod"
createPausePod(f, pausePodConfig{
	Name:   podName,
	Labels: map[string]string{"name": "additional"},
})
waitForScheduler()
verifyResult(c, podsNeededForSaturation, 1, ns)
})
// This test verifies we don't allow scheduling of pods in a way that sum of limits of pods is greater than machines capacity.
// It assumes that cluster add-on pods stay stable, and it cannot be run in parallel with any other test that touches Nodes or Pods.
// This is because we need precise control over what's running in the cluster.
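For illustration, the kind of pod such a resource-limits test would create, i.e. a pause container with an explicit CPU limit, might look like the following hypothetical spec; the actual helper used by that test is not shown on this page, and the snippet assumes the same api packages as the examples above plus k8s.io/kubernetes/pkg/api/resource for resource.MustParse.

// Hypothetical pod template with an explicit CPU limit, of the sort a
// limits-vs-capacity scheduling test could create; the name and limit value
// are illustrative only.
limitPod := api.Pod{
	ObjectMeta: api.ObjectMeta{
		Name:   "limit-pod",
		Labels: map[string]string{"name": "limit-pod"},
	},
	Spec: api.PodSpec{
		Containers: []api.Container{
			{
				Name:  "pause",
				Image: framework.GetPauseImageName(f.Client),
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						api.ResourceCPU: resource.MustParse("500m"),
					},
				},
			},
		},
	},
}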