This article collects typical usage examples of the Golang function FromUnversionedClient from k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2. If you have been wondering what exactly FromUnversionedClient does and how to use it, the curated example code below should help.
Twelve FromUnversionedClient code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang examples.
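Before working through the examples, here is a minimal sketch of the pattern they all share: construct the old-style unversioned client first, then wrap it with FromUnversionedClient to obtain the generated, versioned clientset. The restclient.Config literal and host address below are illustrative assumptions (real callers typically load a kubeconfig, as Example 1 does via createClientFromFile), and the import paths assume the Kubernetes 1.2-era source tree.

package main

import (
	"log"

	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
	"k8s.io/kubernetes/pkg/client/restclient"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

func main() {
	// Illustrative config; real code usually builds this from a kubeconfig
	// file or from in-cluster credentials.
	config := &restclient.Config{Host: "http://127.0.0.1:8080"}

	// Create the old-style unversioned client first.
	c, err := client.New(config)
	if err != nil {
		log.Fatalf("failed to create unversioned client: %v", err)
	}

	// Wrap it in the generated clientset; it exposes typed group clients
	// such as Legacy()/Core() and Extensions(), used throughout the examples.
	clientset := release_1_2.FromUnversionedClient(c)
	_ = clientset
}

Note that FromUnversionedClient builds the clientset from an already-configured client rather than from a config, which is why several examples below keep both handles around: older helpers such as verifyPods still take the unversioned Client directly.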
Example 1: main
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
config := HollowNodeConfig{}
config.addFlags(pflag.CommandLine)
util.InitFlags()
if !knownMorphs.Has(config.Morph) {
glog.Fatal("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
}
// Create a client to communicate with the API server.
cl, err := createClientFromFile(config.KubeconfigPath)
if err != nil {
glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
}
clientset := clientset.FromUnversionedClient(cl)
if config.Morph == "kubelet" {
cadvisorInterface := new(cadvisor.Fake)
containerManager := cm.NewStubContainerManager()
fakeDockerClient := dockertools.NewFakeDockerClient()
fakeDockerClient.VersionInfo = docker.Env{"Version=1.1.3", "ApiVersion=1.18"}
fakeDockerClient.EnableSleep = true
hollowKubelet := kubemark.NewHollowKubelet(
config.NodeName,
clientset,
cadvisorInterface,
fakeDockerClient,
config.KubeletPort,
config.KubeletReadOnlyPort,
containerManager,
)
hollowKubelet.Run()
}
if config.Morph == "proxy" {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})
iptInterface := fakeiptables.NewFake()
serviceConfig := proxyconfig.NewServiceConfig()
serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})
endpointsConfig := proxyconfig.NewEndpointsConfig()
endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})
hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
hollowProxy.Run()
}
}
Example 2: beforeEach
// beforeEach gets a client and makes a namespace.
func (f *Framework) beforeEach() {
// The fact that we need this feels like a bug in ginkgo.
// https://github.com/onsi/ginkgo/issues/222
f.cleanupHandle = AddCleanupAction(f.afterEach)
By("Creating a kubernetes client")
config, err := loadConfig()
Expect(err).NotTo(HaveOccurred())
config.QPS = f.options.clientQPS
config.Burst = f.options.clientBurst
c, err := loadClientFromConfig(config)
Expect(err).NotTo(HaveOccurred())
f.Client = c
f.Clientset_1_2 = release_1_2.FromUnversionedClient(c)
By("Building a namespace api object")
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
"e2e-framework": f.BaseName,
})
Expect(err).NotTo(HaveOccurred())
f.Namespace = namespace
if testContext.VerifyServiceAccount {
By("Waiting for a default service account to be provisioned in namespace")
err = waitForDefaultServiceAccountInNamespace(c, namespace.Name)
Expect(err).NotTo(HaveOccurred())
} else {
Logf("Skipping waiting for service account")
}
if testContext.GatherKubeSystemResourceUsageData {
f.gatherer, err = NewResourceUsageGatherer(c)
if err != nil {
Logf("Error while creating NewResourceUsageGatherer: %v", err)
} else {
go f.gatherer.startGatheringData()
}
}
if testContext.GatherLogsSizes {
f.logsSizeWaitGroup = sync.WaitGroup{}
f.logsSizeWaitGroup.Add(1)
f.logsSizeCloseChannel = make(chan bool)
f.logsSizeVerifier = NewLogsVerifier(c, f.logsSizeCloseChannel)
go func() {
f.logsSizeVerifier.Run()
f.logsSizeWaitGroup.Done()
}()
}
}
Example 3: testRollingUpdateDeployment
func testRollingUpdateDeployment(f *Framework) {
ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expect an unversioned Client.
unversionedClient := f.Client
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod"}
rcPodLabels := map[string]string{
"name": "sample-pod",
"pod": "nginx",
}
rcName := "nginx-controller"
replicas := 3
_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
if err != nil {
Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "redis-deployment"
Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
Expect(err).NotTo(HaveOccurred())
defer func() {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
}()
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
Expect(err).NotTo(HaveOccurred())
// Check if it's updated to revision 1 correctly
checkDeploymentRevision(c, ns, deploymentName, "1", "redis", "redis")
}
Example 4: testNewDeployment
func testNewDeployment(f *Framework) {
ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expect an unversioned Client.
unversionedClient := f.Client
c := clientset.FromUnversionedClient(f.Client)
deploymentName := "nginx-deployment"
podLabels := map[string]string{"name": "nginx"}
replicas := 1
Logf("Creating simple deployment %s", deploymentName)
d := newDeployment(deploymentName, replicas, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil)
d.Annotations = map[string]string{"test": "should-copy-to-RC", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-RC"}
_, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer func() {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
}()
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "nginx", false, replicas)
if err != nil {
Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// DeploymentStatus should be appropriately updated.
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Expect(deployment.Status.Replicas).Should(Equal(replicas))
Expect(deployment.Status.UpdatedReplicas).Should(Equal(replicas))
// Check if it's updated to revision 1 correctly
_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", "nginx", "nginx")
// Check other annotations
Expect(newRC.Annotations["test"]).Should(Equal("should-copy-to-RC"))
Expect(newRC.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal(""))
Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-RC"))
Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-RC"))
}
Example 5: testDeploymentCleanUpPolicy
// testDeploymentCleanUpPolicy tests that a deployment honors its cleanup policy (revisionHistoryLimit)
func testDeploymentCleanUpPolicy(f *Framework) {
ns := f.Namespace.Name
unversionedClient := f.Client
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
rcPodLabels := map[string]string{
"name": "cleanup-pod",
"pod": "nginx",
}
rcName := "nginx-controller"
replicas := 1
revisionHistoryLimit := new(int)
*revisionHistoryLimit = 0
_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
if err != nil {
Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "redis-deployment"
Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
Expect(err).NotTo(HaveOccurred())
defer func() {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
}()
err = waitForDeploymentOldRCsNum(c, ns, deploymentName, *revisionHistoryLimit)
Expect(err).NotTo(HaveOccurred())
}
Example 6: beforeEach
// beforeEach gets a client and makes a namespace.
func (f *Framework) beforeEach() {
By("Creating a kubernetes client")
c, err := loadClient()
Expect(err).NotTo(HaveOccurred())
f.Client = c
f.Clientset_1_2 = release_1_2.FromUnversionedClient(c)
By("Building a namespace api object")
namespace, err := f.nsCreateFunc(f.BaseName, f.Client, map[string]string{
"e2e-framework": f.BaseName,
})
Expect(err).NotTo(HaveOccurred())
f.Namespace = namespace
if testContext.VerifyServiceAccount {
By("Waiting for a default service account to be provisioned in namespace")
err = waitForDefaultServiceAccountInNamespace(c, namespace.Name)
Expect(err).NotTo(HaveOccurred())
} else {
Logf("Skipping waiting for service account")
}
if testContext.GatherKubeSystemResourceUsageData {
f.gatherer.startGatheringData(c, resourceDataGatheringPeriodSeconds*time.Second)
}
if testContext.GatherLogsSizes {
f.logsSizeWaitGroup = sync.WaitGroup{}
f.logsSizeWaitGroup.Add(1)
f.logsSizeCloseChannel = make(chan bool)
f.logsSizeVerifier = NewLogsVerifier(c, f.logsSizeCloseChannel)
go func() {
f.logsSizeVerifier.Run()
f.logsSizeWaitGroup.Done()
}()
}
}
Example 7: testRollbackDeploymentRCNoRevision
// testRollbackDeploymentRCNoRevision tests that a deployment supports rollback even when an old RC lacks a revision
// annotation. An old RC without a revision is created first, and then a deployment (v1); the deployment shouldn't add
// a revision annotation to the old RC. Rolling the deployment back to the last revision should then fail and emit a
// related event. Updating the deployment to v2 and rolling it back to v1 should succeed and emit a related event,
// making the deployment v3. Rolling back to v10 (which doesn't exist in history) should fail and emit a related event.
// Finally, rolling the deployment (v3) back to v3 should be a no-op and emit a related event.
func testRollbackDeploymentRCNoRevision(f *Framework) {
ns := f.Namespace.Name
unversionedClient := f.Client
c := clientset.FromUnversionedClient(f.Client)
podName := "nginx"
deploymentPodLabels := map[string]string{"name": podName}
rcPodLabels := map[string]string{
"name": podName,
"pod": "nginx",
}
rcName := "nginx-controller"
rcReplicas := 0
rc := newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx")
rc.Annotations = make(map[string]string)
rc.Annotations["make"] = "difference"
_, err := c.Legacy().ReplicationControllers(ns).Create(rc)
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
}()
// Create a deployment to create nginx pods, which have different template than the rc created above.
deploymentName, deploymentImageName := "nginx-deployment", "nginx"
deploymentReplicas := 1
deploymentImage := "nginx"
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
Logf("Creating deployment %s", deploymentName)
d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
_, err = c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer func() {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
Expect(err).NotTo(HaveOccurred())
for _, oldRC := range oldRCs {
Expect(c.Legacy().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
}
}()
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "nginx", false, deploymentReplicas)
if err != nil {
Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
}
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// DeploymentStatus should be appropriately updated.
Expect(deployment.Status.Replicas).Should(Equal(deploymentReplicas))
Expect(deployment.Status.UpdatedReplicas).Should(Equal(deploymentReplicas))
// Check if it's updated to revision 1 correctly
checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
// Check that the rc we created still doesn't contain revision information
rc, err = c.Legacy().ReplicationControllers(ns).Get(rcName)
Expect(err).NotTo(HaveOccurred())
Expect(rc.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))
// Update the deploymentRollback to rollback to last revision
// Since there's only 1 revision in history, it should stay as revision 1
revision := int64(0)
Logf("rolling back deployment %s to last revision", deploymentName)
rollback := newDeploymentRollback(deploymentName, nil, revision)
err = c.Extensions().Deployments(ns).Rollback(rollback)
Expect(err).NotTo(HaveOccurred())
// There should be a revision-not-found event, since there is no last revision
waitForEvents(unversionedClient, ns, deployment, 2)
events, err := c.Legacy().Events(ns).Search(deployment)
Expect(err).NotTo(HaveOccurred())
Expect(events.Items[1].Reason).Should(Equal(deploymentutil.RollbackRevisionNotFound))
// Check if it's still revision 1
checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
// Update the deployment to create redis pods.
updatedDeploymentImage := "redis"
updatedDeploymentImageName := "redis"
d.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
d.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
Logf("updating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Update(d)
//.........some code omitted here.........
Example 8: testRollbackDeployment
// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), then rolled back
// to revision 1 (which restores the revision-1 template as a new revision 3), and finally rolled back to the last
// revision (producing revision 4).
func testRollbackDeployment(f *Framework) {
ns := f.Namespace.Name
unversionedClient := f.Client
c := clientset.FromUnversionedClient(unversionedClient)
podName := "nginx"
deploymentPodLabels := map[string]string{"name": podName}
// Create a deployment to create nginx pods.
deploymentName, deploymentImageName := "nginx-deployment", "nginx"
deploymentReplicas := 1
deploymentImage := "nginx"
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
Logf("Creating deployment %s", deploymentName)
d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
_, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer func() {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
Expect(err).NotTo(HaveOccurred())
for _, oldRC := range oldRCs {
Expect(c.Legacy().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
}
}()
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "nginx", false, deploymentReplicas)
if err != nil {
Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
}
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// DeploymentStatus should be appropriately updated.
Expect(deployment.Status.Replicas).Should(Equal(deploymentReplicas))
Expect(deployment.Status.UpdatedReplicas).Should(Equal(deploymentReplicas))
// Check if it's updated to revision 1 correctly
checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
// Update the deployment to create redis pods.
updatedDeploymentImage := "redis"
updatedDeploymentImageName := "redis"
d.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
d.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
Logf("updating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Update(d)
Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred())
// Check if it's updated to revision 2 correctly
checkDeploymentRevision(c, ns, deploymentName, "2", updatedDeploymentImageName, updatedDeploymentImage)
// Update the deploymentRollback to rollback to revision 1
revision := int64(1)
Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback := newDeploymentRollback(deploymentName, nil, revision)
err = c.Extensions().Deployments(ns).Rollback(rollback)
Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred())
// Check if it's updated to revision 3 correctly
checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage)
// Update the deploymentRollback to rollback to last revision
revision = 0
Logf("rolling back deployment %s to last revision", deploymentName)
rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.Extensions().Deployments(ns).Rollback(rollback)
Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
Expect(err).NotTo(HaveOccurred())
// Check if it's updated to revision 4 correctly
checkDeploymentRevision(c, ns, deploymentName, "4", updatedDeploymentImageName, updatedDeploymentImage)
}
Example 9: testPausedDeployment
func testPausedDeployment(f *Framework) {
ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expect an unversioned Client.
unversionedClient := f.Client
c := clientset.FromUnversionedClient(unversionedClient)
deploymentName := "nginx"
podLabels := map[string]string{"name": "nginx"}
d := newDeployment(deploymentName, 1, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil)
d.Spec.Paused = true
Logf("Creating paused deployment %s", deploymentName)
_, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer func() {
_, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
}()
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Verify that there is no latest state realized for the new deployment.
rc, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
if rc != nil {
err = fmt.Errorf("unexpected new rc/%s for deployment/%s", rc.Name, deployment.Name)
Expect(err).NotTo(HaveOccurred())
}
// Update the deployment to run
deployment.Spec.Paused = false
deployment, err = c.Extensions().Deployments(ns).Update(deployment)
Expect(err).NotTo(HaveOccurred())
opts := api.ListOptions{LabelSelector: labels.Set(deployment.Spec.Selector).AsSelector()}
w, err := c.Legacy().ReplicationControllers(ns).Watch(opts)
Expect(err).NotTo(HaveOccurred())
select {
case <-w.ResultChan():
// this is it
case <-time.After(time.Minute):
err = fmt.Errorf("expected a new rc to be created")
Expect(err).NotTo(HaveOccurred())
}
// Pause the deployment and delete the replication controller.
// The paused deployment shouldn't recreate a new one.
deployment.Spec.Paused = true
deployment.ResourceVersion = ""
deployment, err = c.Extensions().Deployments(ns).Update(deployment)
Expect(err).NotTo(HaveOccurred())
newRC, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
if !deployment.Spec.Paused {
err = fmt.Errorf("deployment %q should be paused", deployment.Name)
Expect(err).NotTo(HaveOccurred())
}
shouldBeNil, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
if shouldBeNil != nil {
err = fmt.Errorf("deployment %q shouldn't have a rc but there is %q", deployment.Name, shouldBeNil.Name)
Expect(err).NotTo(HaveOccurred())
}
}
Example 10: testRolloverDeployment
// testRolloverDeployment tests that deployment supports rollover.
// i.e. we can change the desired state and kick off a rolling update, then change the desired state again before the first update finishes.
func testRolloverDeployment(f *Framework) {
ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expect an unversioned Client.
unversionedClient := f.Client
c := clientset.FromUnversionedClient(unversionedClient)
podName := "rollover-pod"
deploymentPodLabels := map[string]string{"name": podName}
rcPodLabels := map[string]string{
"name": podName,
"pod": "nginx",
}
rcName := "nginx-controller"
rcReplicas := 4
_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, podName, false, rcReplicas)
if err != nil {
Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
deploymentName, deploymentImageName := "redis-deployment", "redis-slave"
deploymentReplicas := 4
deploymentImage := "gcr.io/google_samples/gb-redisslave:v1"
deploymentMinReadySeconds := 5
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
Logf("Creating deployment %s", deploymentName)
newDeployment := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
newDeployment.Spec.MinReadySeconds = deploymentMinReadySeconds
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
MaxUnavailable: intstr.FromInt(1),
MaxSurge: intstr.FromInt(1),
}
_, err = c.Extensions().Deployments(ns).Create(newDeployment)
Expect(err).NotTo(HaveOccurred())
defer func() {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
}()
// Verify that the pods were scaled up and down as expected. We use events to verify that.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Make sure the deployment starts to scale up and down RCs
waitForPartialEvents(unversionedClient, ns, deployment, 2)
// Check if it's updated to revision 1 correctly
_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
// Before the deployment finishes, update it to roll over the above two RCs and bring up redis pods.
// If the deployment already finished here, the test would fail. When this happens, increase its minReadySeconds or replicas to prevent it.
Expect(newRC.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
updatedDeploymentImage := "redis"
newDeployment.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImage
newDeployment.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
Logf("updating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Update(newDeployment)
Expect(err).NotTo(HaveOccurred())
err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, deploymentMinReadySeconds)
Expect(err).NotTo(HaveOccurred())
// Check if it's updated to revision 2 correctly
checkDeploymentRevision(c, ns, deploymentName, "2", updatedDeploymentImage, updatedDeploymentImage)
}
Example 11: testRecreateDeployment
func testRecreateDeployment(f *Framework) {
ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expect an unversioned Client.
unversionedClient := f.Client
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
rcPodLabels := map[string]string{
"name": "sample-pod-3",
"pod": "nginx",
}
rcName := "nginx-controller"
replicas := 3
_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
if err != nil {
Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "redis-deployment-3"
Logf("Creating deployment %s", deploymentName)
_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RecreateDeploymentStrategyType, nil))
Expect(err).NotTo(HaveOccurred())
defer func() {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
}()
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
if err != nil {
deployment, _ := c.Extensions().Deployments(ns).Get(deploymentName)
Logf("deployment = %+v", deployment)
}
Expect(err).NotTo(HaveOccurred())
// Verify that the pods were scaled up and down as expected. We use events to verify that.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
waitForEvents(unversionedClient, ns, deployment, 2)
events, err := c.Legacy().Events(ns).Search(deployment)
if err != nil {
Logf("error in listing events: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// There should be 2 events: one scaling down the old RC to 0, then one scaling up the new RC to 3.
Expect(len(events.Items)).Should(Equal(2))
newRC, err := deploymentutil.GetNewRC(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(newRC).NotTo(BeNil())
Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down rc %s to 0", rcName)))
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up rc %s to 3", newRC.Name)))
// Check if it's updated to revision 1 correctly
checkDeploymentRevision(c, ns, deploymentName, "1", "redis", "redis")
}
Example 12: NewFactory
//.........some code omitted here.........
if !t.Spec.Paused {
return true, nil
}
t.Spec.Paused = false
_, err := c.Extensions().Deployments(t.Namespace).Update(t)
return false, err
default:
gvk, err := api.Scheme.ObjectKind(object)
if err != nil {
return false, err
}
return false, fmt.Errorf("cannot resume %v", gvk)
}
},
Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
mappingVersion := mapping.GroupVersionKind.GroupVersion()
client, err := clients.ClientForVersion(&mappingVersion)
if err != nil {
return nil, err
}
return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
},
Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
mappingVersion := mapping.GroupVersionKind.GroupVersion()
client, err := clients.ClientForVersion(&mappingVersion)
if err != nil {
return nil, err
}
return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
},
HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
mappingVersion := mapping.GroupVersionKind.GroupVersion()
client, err := clients.ClientForVersion(&mappingVersion)
if err != nil {
return nil, err
}
clientset := clientset.FromUnversionedClient(client)
return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
},
Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
mappingVersion := mapping.GroupVersionKind.GroupVersion()
client, err := clients.ClientForVersion(&mappingVersion)
if err != nil {
return nil, err
}
return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
},
Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
if validate {
client, err := clients.ClientForVersion(nil)
if err != nil {
return nil, err
}
dir := cacheDir
if len(dir) > 0 {
version, err := client.ServerVersion()
if err != nil {
return nil, err
}
dir = path.Join(cacheDir, version.String())
}
return &clientSwaggerSchema{
c: client,
cacheDir: dir,
mapper: api.RESTMapper,
}, nil