This article collects typical usage examples of the Golang method github.com/qingyuancloud/QingYuan/pkg/client.Client.ReplicationControllers. If you are wondering what exactly Client.ReplicationControllers does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help. You can also read further about the containing type, github.com/qingyuancloud/QingYuan/pkg/client.Client.
Nine code examples of Client.ReplicationControllers are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
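All nine examples share the same call pattern: obtain a *client.Client, pick a namespace with c.ReplicationControllers(namespace), and then call Get/List/Create/Delete on the returned interface. Before the examples, here is a minimal, self-contained sketch of that pattern; the client.New call, the apiserver address, and the API version string are assumptions for illustration and may differ in your setup.

package main

import (
    "fmt"

    "github.com/qingyuancloud/QingYuan/pkg/client"
    "github.com/qingyuancloud/QingYuan/pkg/labels"
)

func main() {
    // Assumed apiserver address and API version; adjust for your cluster.
    c, err := client.New(&client.Config{Host: "http://127.0.0.1:8080", Version: "v1beta3"})
    if err != nil {
        panic(err)
    }
    // Namespaced handle to the replication controller API.
    rcs := c.ReplicationControllers("default")
    // List every replication controller in the namespace.
    list, err := rcs.List(labels.Everything())
    if err != nil {
        panic(err)
    }
    for _, rc := range list.Items {
        fmt.Printf("%s: %d replicas desired\n", rc.Name, rc.Spec.Replicas)
    }
}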
Example 1: StartPods
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving the pods behind.
func StartPods(numPods int, host string, restClient *client.Client) error {
    start := time.Now()
    defer func() {
        glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
    }()
    hostField := fields.OneTermEqualSelector(client.PodHost, host)
    pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
    if err != nil || len(pods.Items) == numPods {
        return err
    }
    glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
    // For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
    controller := RCFromManifest(TestRCManifest)
    // Make the rc unique to the given host.
    controller.Spec.Replicas = numPods
    controller.Spec.Template.Spec.NodeName = host
    controller.Name = controller.Name + host
    controller.Spec.Selector["host"] = host
    controller.Spec.Template.Labels["host"] = host
    if rc, err := StartRC(controller, restClient); err != nil {
        return err
    } else {
        // Delete the rc, otherwise when we restart master components for the next benchmark
        // the rc controller will race with the pods controller in the rc manager.
        return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
    }
}
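A hypothetical call site for the helper above might look like the following; the node name and pod count are made-up values, and restClient is assumed to be an already constructed *client.Client as in the sketch at the top of this article.

// prePopulatePods is a hypothetical wrapper showing how StartPods might be invoked
// from benchmark setup; the node name and pod count are illustrative.
func prePopulatePods(restClient *client.Client) {
    if err := StartPods(30, "127.0.0.1", restClient); err != nil {
        glog.Fatalf("failed to pre-populate pods: %v", err)
    }
}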
Example 2: verifyExpectedRcsExistAndGetExpectedPods
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
    expectedPods := []string{}
    // Iterate over the labels that identify the replication controllers that we
    // want to check. rcLabels contains the values for the k8s-app key that
    // identify the replication controllers we want to check. Using a label
    // rather than an explicit name is preferred because the names will typically have
    // a version suffix, e.g. heapster-monitoring-v1, and this will change after a rolling
    // update, e.g. to heapster-monitoring-v2. By using a label query we can check for the
    // situation where both a heapster-monitoring-v1 and a heapster-monitoring-v2 replication
    // controller are running (which would be an error except during a rolling update).
    for _, rcLabel := range rcLabels {
        rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
        if err != nil {
            return nil, err
        }
        if len(rcList.Items) != 1 {
            return nil, fmt.Errorf("expected to find one replica for RC with label %s but got %d",
                rcLabel, len(rcList.Items))
        }
        for _, rc := range rcList.Items {
            podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
            if err != nil {
                return nil, err
            }
            for _, pod := range podList.Items {
                expectedPods = append(expectedPods, string(pod.UID))
            }
        }
    }
    return expectedPods, nil
}
Example 3: CreateNewControllerFromCurrentController
func CreateNewControllerFromCurrentController(c *client.Client, namespace, oldName, newName, image, deploymentKey string) (*api.ReplicationController, error) {
    // Load the old RC into the "new" RC.
    newRc, err := c.ReplicationControllers(namespace).Get(oldName)
    if err != nil {
        return nil, err
    }
    if len(newRc.Spec.Template.Spec.Containers) > 1 {
        // TODO: support multi-container image update.
        return nil, goerrors.New("Image update is not supported for multi-container pods")
    }
    if len(newRc.Spec.Template.Spec.Containers) == 0 {
        return nil, fmt.Errorf("pod has no containers! (%v)", newRc)
    }
    newRc.Spec.Template.Spec.Containers[0].Image = image
    newHash, err := api.HashObject(newRc, c.Codec)
    if err != nil {
        return nil, err
    }
    if len(newName) == 0 {
        newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
    }
    newRc.Name = newName
    newRc.Spec.Selector[deploymentKey] = newHash
    newRc.Spec.Template.Labels[deploymentKey] = newHash
    // Clear resource version after hashing so that identical updates get different hashes.
    newRc.ResourceVersion = ""
    return newRc, nil
}
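The function above only builds the new controller in memory; the caller still has to submit it. A hedged sketch of that follow-up step, pushing the result through Client.ReplicationControllers (the namespace, names, image, and deployment key below are placeholder values):

// createNextController derives the next RC from an existing one and creates it
// server-side. The names and image are illustrative, not taken from the examples.
func createNextController(c *client.Client) (*api.ReplicationController, error) {
    newRc, err := CreateNewControllerFromCurrentController(c, "default", "frontend-v1", "", "example/frontend:v2", "deployment")
    if err != nil {
        return nil, err
    }
    // Submit the in-memory controller to the apiserver.
    return c.ReplicationControllers("default").Create(newRc)
}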
Example 4: LoadExistingNextReplicationController
func LoadExistingNextReplicationController(c *client.Client, namespace, newName string) (*api.ReplicationController, error) {
    if len(newName) == 0 {
        return nil, nil
    }
    newRc, err := c.ReplicationControllers(namespace).Get(newName)
    if err != nil && errors.IsNotFound(err) {
        return nil, nil
    }
    return newRc, err
}
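Because this helper swallows IsNotFound and returns (nil, nil), callers must treat a nil controller as "not created yet" rather than as an error. A typical caller combines Examples 3 and 4: try to load the "next" controller first, and only build a fresh one when it does not exist. A hedged sketch of that pattern, with placeholder namespace, names, image, and deployment key:

// getOrBuildNextRC returns the existing next RC if present, otherwise derives one
// from the current RC in memory. All names below are illustrative.
func getOrBuildNextRC(c *client.Client) (*api.ReplicationController, error) {
    newRc, err := LoadExistingNextReplicationController(c, "default", "frontend-next")
    if err != nil {
        return nil, err
    }
    if newRc != nil {
        return newRc, nil
    }
    // Nothing to resume: build a new controller from the current one (see Example 3).
    return CreateNewControllerFromCurrentController(c, "default", "frontend", "frontend-next", "example/frontend:v2", "deployment")
}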
Example 5: StartRC
// StartRC creates the given rc if it doesn't already exist, then updates it via qingctl's scaler.
func StartRC(controller *api.ReplicationController, restClient *client.Client) (*api.ReplicationController, error) {
    created, err := restClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
    if err != nil {
        glog.Infof("Rc %v doesn't exist, creating", controller.Name)
        created, err = restClient.ReplicationControllers(controller.Namespace).Create(controller)
        if err != nil {
            return nil, err
        }
    }
    // If we just created an rc, wait till it creates its replicas.
    return ScaleRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
}
Example 6: ScaleRC
// ScaleRC scales the given rc to the given number of replicas.
func ScaleRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
    scaler, err := qingctl.ScalerFor("ReplicationController", qingctl.NewScalerClient(restClient))
    if err != nil {
        return nil, err
    }
    retry := &qingctl.RetryParams{50 * time.Millisecond, DefaultTimeout}
    waitForReplicas := &qingctl.RetryParams{50 * time.Millisecond, DefaultTimeout}
    err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
    if err != nil {
        return nil, err
    }
    scaled, err := restClient.ReplicationControllers(ns).Get(name)
    if err != nil {
        return nil, err
    }
    return scaled, nil
}
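Combining Examples 5 and 6, a call site that brings a controller up and later resizes it might look like the following sketch; the wrapper name is hypothetical, and the controller manifest and replica count are assumed to come from the caller.

// startAndScale is a hypothetical wrapper: it creates (or reuses) the RC described
// by controller via StartRC, then scales it to the requested size via ScaleRC.
func startAndScale(controller *api.ReplicationController, replicas int, restClient *client.Client) error {
    rc, err := StartRC(controller, restClient)
    if err != nil {
        return err
    }
    _, err = ScaleRC(rc.Name, rc.Namespace, replicas, restClient)
    return err
}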
Example 7: runReplicationControllerTest
func runReplicationControllerTest(c *client.Client) {
    clientAPIVersion := c.APIVersion()
    data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
    if err != nil {
        glog.Fatalf("Unexpected error: %v", err)
    }
    var controller api.ReplicationController
    if err := api.Scheme.DecodeInto(data, &controller); err != nil {
        glog.Fatalf("Unexpected error: %v", err)
    }
    glog.Infof("Creating replication controllers")
    updated, err := c.ReplicationControllers("test").Create(&controller)
    if err != nil {
        glog.Fatalf("Unexpected error: %v", err)
    }
    glog.Infof("Done creating replication controllers")
    // In practice the controller doesn't need 60s to create a handful of pods, but network latencies on CI
    // systems have been observed to vary unpredictably, so give the controller enough time to create pods.
    // Our e2e scalability tests will catch controllers that are *actually* slow.
    if err := wait.Poll(time.Second, time.Second*60, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
        glog.Fatalf("FAILED: pods never created %v", err)
    }
    // Poll till we can retrieve the status of all pods matching the given label selector from their minions.
    // This involves 3 operations:
    //   - The scheduler must assign all pods to a minion.
    //   - The assignment must be reflected in a `List` operation against the apiserver, for labels matching the selector.
    //   - We need to be able to query the qinglet on that minion for information about the pod.
    if err := wait.Poll(
        time.Second, time.Second*30, podsOnMinions(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
        glog.Fatalf("FAILED: pods never started running %v", err)
    }
    glog.Infof("Pods created")
}
Example 8:
    nsForTesting, err := createTestingNS("density", c)
    ns = nsForTesting.Name
    expectNoError(err)
    uuid = string(util.NewUUID())
    expectNoError(resetMetrics(c))
    expectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+"/%s", uuid), 0777))
    expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "before"))
})
AfterEach(func() {
    // Remove any remaining pods from this test if the replication controller
    // still exists and the replica count isn't 0. This means the controller
    // wasn't cleaned up during the test, so clean it up here.
    rc, err := c.ReplicationControllers(ns).Get(RCName)
    if err == nil && rc.Spec.Replicas != 0 {
        By("Cleaning up the replication controller")
        err := DeleteRC(c, ns, RCName)
        expectNoError(err)
    }
    By("Removing additional pods if any")
    for i := 1; i <= minionCount; i++ {
        name := additionalPodsPrefix + "-" + strconv.Itoa(i)
        c.Pods(ns).Delete(name, nil)
    }
    By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
    if err := c.Namespaces().Delete(ns); err != nil {
        Failf("Couldn't delete ns %s", err)
    }
})
Example 9: ServeImageOrFail
// A basic test to check the deployment of an image using a replication
// controller. The image serves its hostname, which is checked for each replica.
func ServeImageOrFail(c *client.Client, test string, image string) {
    ns := api.NamespaceDefault
    name := "my-hostname-" + test + "-" + string(util.NewUUID())
    replicas := 2
    // Create a replication controller for a service that serves its hostname.
    // The source for the Docker container qingyuan/serve_hostname is
    // in contrib/for-demos/serve_hostname.
    By(fmt.Sprintf("Creating replication controller %s", name))
    controller, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
        ObjectMeta: api.ObjectMeta{
            Name: name,
        },
        Spec: api.ReplicationControllerSpec{
            Replicas: replicas,
            Selector: map[string]string{
                "name": name,
            },
            Template: &api.PodTemplateSpec{
                ObjectMeta: api.ObjectMeta{
                    Labels: map[string]string{"name": name},
                },
                Spec: api.PodSpec{
                    Containers: []api.Container{
                        {
                            Name:  name,
                            Image: image,
                            Ports: []api.ContainerPort{{ContainerPort: 9376}},
                        },
                    },
                },
            },
        },
    })
    Expect(err).NotTo(HaveOccurred())
    // Clean up the replication controller when we are done.
    defer func() {
        // Resize the replication controller to zero to get rid of pods.
        By("Cleaning up the replication controller")
        rcReaper, err := qingctl.ReaperFor("ReplicationController", c)
        if err != nil {
            Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
            return
        }
        if _, err = rcReaper.Stop(ns, controller.Name, 0, nil); err != nil {
            Logf("Failed to stop replication controller %v: %v.", controller.Name, err)
        }
    }()
    // List the pods, making sure we observe all the replicas.
    listTimeout := time.Minute
    label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
    pods, err := c.Pods(ns).List(label, fields.Everything())
    Expect(err).NotTo(HaveOccurred())
    t := time.Now()
    for {
        Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
        if len(pods.Items) == replicas {
            break
        }
        if time.Since(t) > listTimeout {
            Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds",
                name, replicas, len(pods.Items), time.Since(t).Seconds())
        }
        time.Sleep(5 * time.Second)
        pods, err = c.Pods(ns).List(label, fields.Everything())
        Expect(err).NotTo(HaveOccurred())
    }
    By("Ensuring each pod is running")
    // Wait for the pods to enter the running state. Waiting loops until the pods
    // are running, so non-running pods cause a timeout for this test.
    for _, pod := range pods.Items {
        err = waitForPodRunning(c, pod.Name)
        Expect(err).NotTo(HaveOccurred())
    }
    // Verify that something is listening.
    By("Trying to dial each unique pod")
    retryTimeout := 2 * time.Minute
    retryInterval := 5 * time.Second
    err = wait.Poll(retryInterval, retryTimeout, podResponseChecker{c, ns, label, name, true, pods}.checkAllResponses)
    if err != nil {
        Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
    }
}