This article collects typical usage examples of the Golang function GetOldReplicaSets from k8s.io/kubernetes/pkg/util/deployment. If you are wondering what GetOldReplicaSets does and how to call it, the curated code examples below may help.
Seven code examples of the GetOldReplicaSets function are shown below, ordered by popularity by default.
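Before the numbered examples, here is a minimal usage sketch (not taken from any of the listings below) of how GetOldReplicaSets is typically called. It assumes the Kubernetes 1.2/1.3-era API that these examples target, in which the function returns two slices — the old replica sets that still own pods, and all old replica sets — plus an error. The exact signature (deployment passed by value or by pointer) and the clientset import path changed between releases, so treat the import paths and the printOldReplicaSets helper as illustrative assumptions rather than the canonical API.

package example

import (
	"fmt"

	// Import paths are approximate for the Kubernetes release these examples come from.
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
)

// printOldReplicaSets is a hypothetical helper, not part of Kubernetes.
// It fetches a deployment and prints its old replica sets.
func printOldReplicaSets(c clientset.Interface, namespace, name string) error {
	deployment, err := c.Extensions().Deployments(namespace).Get(name)
	if err != nil {
		return err
	}
	// First result: old replica sets that still have pods.
	// Second result: all old replica sets, including empty ones.
	// Some releases take the deployment by value (*deployment) instead; see Examples 2, 6 and 7.
	oldRSs, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
	if err != nil {
		return err
	}
	fmt.Printf("%d of %d old replica sets still have pods\n", len(oldRSs), len(allOldRSs))
	for _, rs := range allOldRSs {
		fmt.Println(" -", rs.Name)
	}
	return nil
}

The oldRSs/allOldRSs distinction is what the examples below rely on: the e2e tests count allOldRSs to assert how many old replica sets exist, while the kubectl history viewers iterate over allOldRSs plus the new replica set to build the revision history.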
Example 1: testDeploymentLabelAdopted
func testDeploymentLabelAdopted(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	podName := "nginx"
	podLabels := map[string]string{"name": podName}
	rsName := "test-adopted-controller"
	replicas := 3
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, podLabels, podName, podName))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, podName, false, 3)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// Create a nginx deployment to adopt the old rs.
	deploymentName := "test-adopted-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, podLabels, podName, podName, extensions.RollingUpdateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)
	// Wait for it to be updated to revision 1
	err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", "nginx")
	Expect(err).NotTo(HaveOccurred())
	// The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	// There should be no old RSs (overlapping RS)
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	oldRSs, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(oldRSs)).Should(Equal(0))
	Expect(len(allOldRSs)).Should(Equal(0))
	// New RS should contain pod-template-hash in its selector, label, and template label
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(newRS.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(newRS.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	// All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	options := api.ListOptions{LabelSelector: selector}
	pods, err := c.Core().Pods(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	for _, pod := range pods.Items {
		Expect(len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	}
	Expect(len(pods.Items)).Should(Equal(replicas))
}
Example 2: History
// History returns a revision-to-replicaset map as the revision history of a deployment
func (h *DeploymentHistoryViewer) History(namespace, name string) (HistoryInfo, error) {
	historyInfo := HistoryInfo{
		RevisionToTemplate: make(map[int64]*api.PodTemplateSpec),
	}
	deployment, err := h.c.Extensions().Deployments(namespace).Get(name)
	if err != nil {
		return historyInfo, fmt.Errorf("failed to retrieve deployment %s: %v", name, err)
	}
	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(*deployment, h.c)
	if err != nil {
		return historyInfo, fmt.Errorf("failed to retrieve old replica sets from deployment %s: %v", name, err)
	}
	newRS, err := deploymentutil.GetNewReplicaSet(*deployment, h.c)
	if err != nil {
		return historyInfo, fmt.Errorf("failed to retrieve new replica set from deployment %s: %v", name, err)
	}
	allRSs := append(allOldRSs, newRS)
	for _, rs := range allRSs {
		v, err := deploymentutil.Revision(rs)
		if err != nil {
			continue
		}
		historyInfo.RevisionToTemplate[v] = rs.Spec.Template
		changeCause := getChangeCause(rs)
		if historyInfo.RevisionToTemplate[v].Annotations == nil {
			historyInfo.RevisionToTemplate[v].Annotations = make(map[string]string)
		}
		historyInfo.RevisionToTemplate[v].Annotations[ChangeCauseAnnotation] = changeCause
	}
	return historyInfo, nil
}
Example 3: testRollingUpdateDeployment
func testRollingUpdateDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod"}
	rsPodLabels := map[string]string{
		"name": "sample-pod",
		"pod":  nginxImageName,
	}
	rsName := "test-rolling-update-controller"
	replicas := 3
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-rolling-update-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)
	// Wait for it to be updated to revision 1
	err = waitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
	Expect(err).NotTo(HaveOccurred())
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	// There should be 1 old RS (nginx-controller, which is adopted)
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(allOldRSs)).Should(Equal(1))
	// The old RS should contain pod-template-hash in its selector, label, and template label
	Expect(len(allOldRSs[0].Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
}
Example 4: testPausedDeployment
func testPausedDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expects a unversioned#Client.
	unversionedClient := f.Client
	c := adapter.FromUnversionedClient(unversionedClient)
	deploymentName := "test-paused-deployment"
	podLabels := map[string]string{"name": nginxImageName}
	d := newDeployment(deploymentName, 1, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
	d.Spec.Paused = true
	tgps := int64(20)
	d.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps
	framework.Logf("Creating paused deployment %s", deploymentName)
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)
	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// Verify that there is no latest state realized for the new deployment.
	rs, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	if rs != nil {
		err = fmt.Errorf("unexpected new rs/%s for deployment/%s", rs.Name, deployment.Name)
		Expect(err).NotTo(HaveOccurred())
	}
	// Update the deployment to run
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Paused = false
	})
	Expect(err).NotTo(HaveOccurred())
	// Use observedGeneration to determine if the controller noticed the resume.
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		Expect(err).NotTo(HaveOccurred())
	}
	opts := api.ListOptions{LabelSelector: selector}
	w, err := c.Extensions().ReplicaSets(ns).Watch(opts)
	Expect(err).NotTo(HaveOccurred())
	select {
	case <-w.ResultChan():
		// this is it
	case <-time.After(time.Minute):
		err = fmt.Errorf("expected a new replica set to be created")
		Expect(err).NotTo(HaveOccurred())
	}
	// Pause the deployment and delete the replica set.
	// The paused deployment shouldn't recreate a new one.
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Paused = true
	})
	Expect(err).NotTo(HaveOccurred())
	// Use observedGeneration to determine if the controller noticed the pause.
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	// Update the deployment template - the new replicaset should stay the same
	framework.Logf("Updating paused deployment %q", deploymentName)
	newTGPS := int64(40)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.TerminationGracePeriodSeconds = &newTGPS
	})
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Looking for new replicaset for paused deployment %q (there should be none)", deploymentName)
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	if newRS != nil {
		err = fmt.Errorf("No replica set should match the deployment template but there is %q", newRS.Name)
		Expect(err).NotTo(HaveOccurred())
	}
	_, allOldRs, err := deploymentutil.GetOldReplicaSets(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	if len(allOldRs) != 1 {
		err = fmt.Errorf("expected an old replica set")
		Expect(err).NotTo(HaveOccurred())
	}
	framework.Logf("Comparing deployment diff with old replica set %q", allOldRs[0].Name)
	if *allOldRs[0].Spec.Template.Spec.TerminationGracePeriodSeconds == newTGPS {
		err = fmt.Errorf("TerminationGracePeriodSeconds on the replica set should be %d but is %d", tgps, newTGPS)
		Expect(err).NotTo(HaveOccurred())
	}
}
Example 5: ViewHistory
// ViewHistory prints the revision history of a deployment
func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) {
	deployment, err := h.c.Extensions().Deployments(namespace).Get(name)
	if err != nil {
		return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err)
	}
	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, h.c)
	if err != nil {
		return "", fmt.Errorf("failed to retrieve old replica sets from deployment %s: %v", name, err)
	}
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, h.c)
	if err != nil {
		return "", fmt.Errorf("failed to retrieve new replica set from deployment %s: %v", name, err)
	}
	historyInfo := make(map[int64]*api.PodTemplateSpec)
	for _, rs := range append(allOldRSs, newRS) {
		v, err := deploymentutil.Revision(rs)
		if err != nil {
			continue
		}
		historyInfo[v] = &rs.Spec.Template
		changeCause := getChangeCause(rs)
		if historyInfo[v].Annotations == nil {
			historyInfo[v].Annotations = make(map[string]string)
		}
		if len(changeCause) > 0 {
			historyInfo[v].Annotations[ChangeCauseAnnotation] = changeCause
		}
	}
	if len(historyInfo) == 0 {
		return "No rollout history found.", nil
	}
	if revision > 0 {
		// Print details of a specific revision
		template, ok := historyInfo[revision]
		if !ok {
			return "", fmt.Errorf("unable to find the specified revision")
		}
		buf := bytes.NewBuffer([]byte{})
		DescribePodTemplate(template, buf)
		return buf.String(), nil
	}
	// Sort the revisionToChangeCause map by revision
	var revisions []string
	for k := range historyInfo {
		revisions = append(revisions, strconv.FormatInt(k, 10))
	}
	sort.Strings(revisions)
	return tabbedString(func(out io.Writer) error {
		fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n")
		errs := []error{}
		for _, r := range revisions {
			// Find the change-cause of revision r
			r64, err := strconv.ParseInt(r, 10, 64)
			if err != nil {
				errs = append(errs, err)
				continue
			}
			changeCause := historyInfo[r64].Annotations[ChangeCauseAnnotation]
			if len(changeCause) == 0 {
				changeCause = "<none>"
			}
			fmt.Fprintf(out, "%s\t%s\n", r, changeCause)
		}
		return errors.NewAggregate(errs)
	})
}
Example 6: testRollbackDeploymentRSNoRevision
// testRollbackDeploymentRSNoRevision tests that a deployment supports rollback even when there is an old replica set without a revision.
// An old replica set without a revision is created first, then a deployment is created (v1). The deployment shouldn't add a revision
// annotation to the old replica set. Rolling the deployment back to the last revision should then fail and emit a related event.
// Updating the deployment to v2 and rolling it back to v1 should succeed and emit a related event; the deployment then
// becomes v3. Rolling the deployment back to v10 (which doesn't exist in its history) should fail and emit a related event.
// Finally, rolling the deployment (v3) back to v3 should be a no-op and emit a related event.
func testRollbackDeploymentRSNoRevision(f *Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(f.Client)
	podName := "nginx"
	deploymentPodLabels := map[string]string{"name": podName}
	rsPodLabels := map[string]string{
		"name": podName,
		"pod":  "nginx",
	}
	rsName := "nginx-controller"
	rsReplicas := 0
	rs := newRS(rsName, rsReplicas, rsPodLabels, "nginx", "nginx")
	rs.Annotations = make(map[string]string)
	rs.Annotations["make"] = "difference"
	_, err := c.Extensions().ReplicaSets(ns).Create(rs)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("deleting replica set %s", rsName)
		Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
	}()
	// Create a deployment to create nginx pods, which have different template than the replica set created above.
	deploymentName, deploymentImageName := "nginx-deployment", "nginx"
	deploymentReplicas := 1
	deploymentImage := "nginx"
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	Logf("Creating deployment %s", deploymentName)
	d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
	_, err = c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete replica sets with deployment
		newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
		oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		for _, oldRS := range oldRSs {
			Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
		}
	}()
	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "nginx", false, deploymentReplicas)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// DeploymentStatus should be appropriately updated.
	Expect(deployment.Status.Replicas).Should(Equal(deploymentReplicas))
	Expect(deployment.Status.UpdatedReplicas).Should(Equal(deploymentReplicas))
	// Check if it's updated to revision 1 correctly
	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
	// Check that the replica set we created still doesn't contain revision information
	rs, err = c.Extensions().ReplicaSets(ns).Get(rsName)
	Expect(err).NotTo(HaveOccurred())
	Expect(rs.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))
	// Update the deploymentRollback to rollback to last revision
	// Since there's only 1 revision in history, it should stay as revision 1
	revision := int64(0)
	Logf("rolling back deployment %s to last revision", deploymentName)
	rollback := newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	// There should be revision not found event since there's no last revision
	waitForEvents(unversionedClient, ns, deployment, 2)
	events, err := c.Events(ns).Search(deployment)
	Expect(err).NotTo(HaveOccurred())
	Expect(events.Items[1].Reason).Should(Equal(deploymentutil.RollbackRevisionNotFound))
	// Check if it's still revision 1
	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
	// Update the deployment to create redis pods.
	updatedDeploymentImage := "redis"
	updatedDeploymentImageName := "redis"
	d.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
	d.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	Logf("updating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Update(d)
	//......... (the rest of this example is omitted here) .........
Example 7: testRollbackDeployment
// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), then
// rolled back to revision 1 (which should restore the revision-1 template and record it as revision 3),
// and finally rolled back to the last revision.
func testRollbackDeployment(f *Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	podName := "nginx"
	deploymentPodLabels := map[string]string{"name": podName}
	// Create a deployment to create nginx pods.
	deploymentName, deploymentImageName := "nginx-deployment", "nginx"
	deploymentReplicas := 1
	deploymentImage := "nginx"
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	Logf("Creating deployment %s", deploymentName)
	d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete replica sets with deployment
		newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
		oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		for _, oldRS := range oldRSs {
			Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
		}
	}()
	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "nginx", false, deploymentReplicas)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// DeploymentStatus should be appropriately updated.
	Expect(deployment.Status.Replicas).Should(Equal(deploymentReplicas))
	Expect(deployment.Status.UpdatedReplicas).Should(Equal(deploymentReplicas))
	// Check if it's updated to revision 1 correctly
	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
	// Update the deployment to create redis pods.
	updatedDeploymentImage := "redis"
	updatedDeploymentImageName := "redis"
	d.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
	d.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	Logf("updating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Update(d)
	Expect(err).NotTo(HaveOccurred())
	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	// Check if it's updated to revision 2 correctly
	checkDeploymentRevision(c, ns, deploymentName, "2", updatedDeploymentImageName, updatedDeploymentImage)
	// Update the deploymentRollback to rollback to revision 1
	revision := int64(1)
	Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback := newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	// Check if it's updated to revision 3 correctly
	checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage)
	// Update the deploymentRollback to rollback to last revision
	revision = 0
	Logf("rolling back deployment %s to last revision", deploymentName)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())
	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
	Expect(err).NotTo(HaveOccurred())
	// Check if it's updated to revision 4 correctly
	checkDeploymentRevision(c, ns, deploymentName, "4", updatedDeploymentImageName, updatedDeploymentImage)
}