This article collects typical usage examples of the Golang method github.com/openshift/origin/test/extended/util.CLI.SetOutputDir. If you are wondering what CLI.SetOutputDir does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also consult the documentation of the containing type, github.com/openshift/origin/test/extended/util.CLI.
Three code examples of the CLI.SetOutputDir method are shown below, ordered by popularity by default.
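Before the full examples, here is a minimal sketch of the common call pattern, assuming the conventions of the origin extended test suite (the exutil alias for the util package, and a CLI fixture created with exutil.NewCLI; the describe/it text below is illustrative only):

import (
	g "github.com/onsi/ginkgo"
	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[sample] CLI.SetOutputDir", func() {
	// NewCLI creates a CLI fixture bound to a fresh test project.
	oc := exutil.NewCLI("setoutputdir-sample", exutil.KubeConfigPath())

	g.It("routes generated artifacts into the test output directory", func() {
		// From this point on, temporary files produced through this CLI
		// instance are written under the shared test output directory.
		oc.SetOutputDir(exutil.TestContext.OutputDir)
	})
})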
Example 1: testPruneImages
func testPruneImages(oc *exutil.CLI, schemaVersion int) {
	var mediaType string
	switch schemaVersion {
	case 1:
		mediaType = schema1.MediaTypeManifest
	case 2:
		mediaType = schema2.MediaTypeManifest
	default:
		g.Fail(fmt.Sprintf("unexpected schema version %d", schemaVersion))
	}

	oc.SetOutputDir(exutil.TestContext.OutputDir)
	outSink := g.GinkgoWriter

	cleanUp := cleanUpContainer{}
	defer tearDownPruneImagesTest(oc, &cleanUp)

	dClient, err := testutil.NewDockerClient()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By(fmt.Sprintf("build two images using Docker and push them as schema %d", schemaVersion))
	imgPruneName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgPruneName)
	pruneSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	imgKeepName, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "prune", "latest", testImageSize, 2, outSink, true)
	o.Expect(err).NotTo(o.HaveOccurred())
	cleanUp.imageNames = append(cleanUp.imageNames, imgKeepName)
	keepSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(pruneSize < keepSize).To(o.BeTrue())

	g.By(fmt.Sprintf("ensure uploaded image is of schema %d", schemaVersion))
	imgPrune, err := oc.AsAdmin().Client().Images().Get(imgPruneName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgPrune.DockerImageManifestMediaType).To(o.Equal(mediaType))
	imgKeep, err := oc.AsAdmin().Client().Images().Get(imgKeepName)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(imgKeep.DockerImageManifestMediaType).To(o.Equal(mediaType))

	g.By("prune the first image uploaded (dry-run)")
	output, err := oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("verify images, layers and configs about to be pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		// schema 1 manifests carry no config blob, so the image ID must not be listed
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		// assert only when a layer is missing, so a failure carries a useful message
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		// assert only when a kept layer unexpectedly shows up in the prune output
		if strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	noConfirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(noConfirmSize).To(o.Equal(keepSize))

	g.By("prune the first image uploaded (confirm)")
	output, err = oc.WithoutNamespace().Run("adm").Args("prune", "images", "--keep-tag-revisions=1", "--keep-younger-than=0", "--confirm").Output()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("verify images, layers and configs that were pruned")
	o.Expect(output).To(o.ContainSubstring(imgPruneName))
	if schemaVersion == 1 {
		o.Expect(output).NotTo(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	} else {
		o.Expect(output).To(o.ContainSubstring(imgPrune.DockerImageMetadata.ID))
	}
	for _, layer := range imgPrune.DockerImageLayers {
		if !strings.Contains(output, layer.Name) {
			o.Expect(output).To(o.ContainSubstring(layer.Name))
		}
	}

	o.Expect(output).NotTo(o.ContainSubstring(imgKeepName))
	o.Expect(output).NotTo(o.ContainSubstring(imgKeep.DockerImageMetadata.ID))
	for _, layer := range imgKeep.DockerImageLayers {
		if strings.Contains(output, layer.Name) {
			o.Expect(output).NotTo(o.ContainSubstring(layer.Name))
		}
	}

	confirmSize, err := getRegistryStorageSize(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	g.By(fmt.Sprintf("confirming storage size: sizeOfKeepImage=%d <= sizeAfterPrune=%d < beforePruneSize=%d", imgKeep.DockerImageMetadata.Size, confirmSize, keepSize))
	o.Expect(confirmSize >= imgKeep.DockerImageMetadata.Size).To(o.BeTrue())
	o.Expect(confirmSize < keepSize).To(o.BeTrue())
	g.By(fmt.Sprintf("confirming pruned size: sizeOfPruneImage=%d <= (sizeBeforePrune=%d - sizeAfterPrune=%d)", imgPrune.DockerImageMetadata.Size, keepSize, confirmSize))
	o.Expect(imgPrune.DockerImageMetadata.Size <= keepSize-confirmSize).To(o.BeTrue())
//......... remainder of the code omitted .........
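The cleanUpContainer type and the tearDownPruneImagesTest helper live elsewhere in the same test file and are not shown in this listing. Based purely on how they are used above, a minimal compatible sketch could look like the following (hypothetical, inferred from usage, not the original source):

// Hypothetical sketch inferred from the usage above, not the original source.
type cleanUpContainer struct {
	imageNames []string // images pushed during the test, removed in teardown
}

func tearDownPruneImagesTest(oc *exutil.CLI, cleanUp *cleanUpContainer) {
	for _, image := range cleanUp.imageNames {
		// best-effort cleanup: errors are ignored so teardown never fails the test
		oc.AsAdmin().WithoutNamespace().Run("delete").Args("image", image).Execute()
	}
}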
Example 2: replicationTestFactory
func replicationTestFactory(oc *exutil.CLI, template string) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", "templates", true)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", template).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test whether we can query as root
			err := oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())
			err = helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with a unique name
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through the master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure the data is present on the master
			err = exutil.WaitForQueryOutput(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure the data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutput(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}
			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		g.By("after slave is scaled to 0 and then back to 4 replicas")
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
	}
}
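Because the factory returns a plain func(), it plugs directly into a Ginkgo spec as the body of an It block. A typical registration might look like the following (illustrative only; the describe text and template path are placeholders, not taken from the original):

var _ = g.Describe("[images][mysql] MySQL replication", func() {
	// CLI fixture shared by all specs in this describe block.
	oc := exutil.NewCLI("mysql-replication", exutil.KubeConfigPath())

	// The factory's returned closure becomes the spec body.
	g.It("should replicate data from the master to the slaves",
		replicationTestFactory(oc, "examples/db-templates/mysql-replica-template.json"))
})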
Example 3: replicationTestFactory
func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
	return func() {
		oc.SetOutputDir(exutil.TestContext.OutputDir)
		defer cleanup(oc)

		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
		o.Expect(err).NotTo(o.HaveOccurred())

		err = testutil.WaitForPolicyUpdate(oc.Client(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
		o.Expect(err).NotTo(o.HaveOccurred())

		exutil.CheckOpenShiftNamespaceImageStreams(oc)
		err = oc.Run("new-app").Args("-f", tc.TemplatePath).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with
		// WaitForADeploymentToComplete, which does have a timeout, since in most cases a failure
		// of the service to come up stems from a failed deployment.
		g.By("waiting for the deployment to complete")
		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), helperName, oc)
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("waiting for an endpoint")
		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
		o.Expect(err).NotTo(o.HaveOccurred())

		tableCounter := 0
		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
			tableCounter++
			table := fmt.Sprintf("table_%0.2d", tableCounter)

			g.By("creating replication helpers")
			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeClient().Core().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

			// Test whether we can query as root
			g.By("wait for mysql-master endpoint")
			err := oc.KubeFramework().WaitForAnEndpoint("mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())
			err = helper.TestRemoteLogin(oc, "mysql-master")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Create a new table with a unique name
			g.By("create new table")
			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Write new data to the table through the master
			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure the data is present on the master
			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
			o.Expect(err).NotTo(o.HaveOccurred())

			// Make sure the data was replicated to all slaves
			for _, slave := range slaves {
				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
				o.Expect(err).NotTo(o.HaveOccurred())
			}
			return master, slaves, helper
		}

		g.By("after initial deployment")
		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

		if tc.SkipReplication {
			return
		}

		g.By("after master is restarted by changing the Deployment Config")
		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after master is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		g.By("after slave is restarted by deleting the pod")
		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
		o.Expect(err).NotTo(o.HaveOccurred())
		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
		o.Expect(err).NotTo(o.HaveOccurred())
		o.Expect(len(pods.Items)).To(o.Equal(1))

		// NOTE: Commented out, the current template does not support multiple replicas.
		/*
			g.By("after slave is scaled to 0 and then back to 4 replicas")
//......... remainder of the code omitted .........
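The testCase parameter is defined elsewhere in the file and does not appear in this listing. From the two fields the code above references (TemplatePath and SkipReplication), a minimal compatible definition would be the following sketch (inferred from usage, not necessarily the original):

// Sketch of testCase inferred from the fields used above.
type testCase struct {
	// TemplatePath is passed to `oc new-app -f` to deploy the database.
	TemplatePath string
	// SkipReplication ends the test after the initial deployment check,
	// for templates that do not support replication.
	SkipReplication bool
}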