This page collects typical usage examples of the Golang function k8s.io/kubernetes/test/e2e/framework.LookForStringInLog. If you are wondering what LookForStringInLog does, how to call it, or where to find real-world usages, the curated code samples below should help.
The page shows 5 code examples of LookForStringInLog, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
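Judging from the calls in the examples below, LookForStringInLog takes a namespace, a pod name, a container name, the substring to wait for, and a timeout, and returns the matching log output together with an error. The minimal sketch below shows that call pattern in isolation; the package, namespace, pod, container, and marker string are placeholders, not values taken from any of the examples.

package example // hypothetical package name for this sketch

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForReadyMarker is a hypothetical helper: it polls the "app" container's log
// in pod "my-pod" (namespace "my-ns") until the marker string shows up or the
// 30-second timeout expires, then returns any error from the lookup.
func waitForReadyMarker() error {
	logOutput, err := framework.LookForStringInLog("my-ns", "my-pod", "app", "ready to serve", 30*time.Second)
	if err != nil {
		return err
	}
	framework.Logf("found marker in log output: %v", logOutput)
	return nil
}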
Example 1: RunLogPodsWithSleepOf
// RunLogPodsWithSleepOf creates a pod on every node, logs continuously (with "sleep" pauses), and verifies that the log string
// was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {
	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	totalPods := len(nodes.Items)
	Expect(totalPods).NotTo(Equal(0))

	kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text.

	appName := "logging-soak" + podname
	podlables := f.CreatePodsPerNodeForSimpleApp(
		appName,
		func(n v1.Node) v1.PodSpec {
			return v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "logging-soak",
					Image: "gcr.io/google_containers/busybox:1.24",
					Args: []string{
						"/bin/sh",
						"-c",
						fmt.Sprintf("while true ; do echo %v ; sleep %v; done", kilobyte, sleep.Seconds()),
					},
				}},
				NodeName:      n.Name,
				RestartPolicy: v1.RestartPolicyAlways,
			}
		},
		totalPods,
	)

	logSoakVerification := f.NewClusterVerification(
		f.Namespace,
		framework.PodStateVerification{
			Selectors:   podlables,
			ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
			// we don't validate total log data, since there is no guarantee all logs will be stored forever.
			// instead, we just validate that some logs are being created on stdout.
			Verify: func(p v1.Pod) (bool, error) {
				s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
				return s != "", err
			},
		},
	)

	largeClusterForgiveness := time.Duration(len(nodes.Items)/5) * time.Second // i.e. a 100 node cluster gets an extra 20 seconds to complete.
	pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)
	if err != nil {
		framework.Failf("Error in wait... %v", err)
	} else if len(pods) < totalPods {
		framework.Failf("Only got %v out of %v", len(pods), totalPods)
	}
}
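For context, a soak test could drive this helper from a Ginkgo block roughly as sketched below, assuming the usual framework.KubeDescribe / framework.NewDefaultFramework setup used elsewhere in the e2e suite; the test description, sleep interval, pod-name suffix, and timeout are illustrative values, not the ones used in the Kubernetes test suite.

package example // hypothetical package name for this sketch

import (
	"time"

	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Logging soak [Slow]", func() {
	f := framework.NewDefaultFramework("logging-soak")

	// Illustrative parameters: emit a 1KB log line every 10 seconds and allow
	// up to 5 minutes for every pod to show the marker string.
	It("should keep producing logs from a pod on every node", func() {
		RunLogPodsWithSleepOf(f, 10*time.Second, "sleepy", 5*time.Minute)
	})
})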
Example 2:
	framework.PodStateVerification{
		Selectors:   iperfClientPodLabels,
		ValidPhases: []api.PodPhase{api.PodSucceeded},
	},
)
pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout)
if err2 != nil {
	framework.Failf("Error in wait...")
} else if len(pods) < expectedCli {
	framework.Failf("IPerf results : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
} else {
	// ForEach builds up a collection of IPerfRecords
	iperfClusterVerification.ForEach(
		func(p api.Pod) {
			resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
			if err == nil {
				framework.Logf(resultS)
				iperfResults.Add(NewIPerf(resultS))
			} else {
				framework.Failf("Unexpected error, %v when running forEach on the pods.", err)
			}
		})
}
fmt.Println("[begin] Node,Bandwidth CSV")
fmt.Println(iperfResults.ToTSV())
fmt.Println("[end] Node,Bandwidth CSV")
for ipClient, bandwidth := range iperfResults.BandwidthMap {
	framework.Logf("%v had bandwidth %v. Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
}
Example 3:
sentinelControllerYaml := mkpath("redis-sentinel-controller.yaml")
controllerYaml := mkpath("redis-controller.yaml")
bootstrapPodName := "redis-master"
redisRC := "redis"
sentinelRC := "redis-sentinel"
nsFlag := fmt.Sprintf("--namespace=%v", ns)
expectedOnServer := "The server is now ready to accept connections"
expectedOnSentinel := "+monitor master"
By("starting redis bootstrap")
framework.RunKubectlOrDie("create", "-f", bootstrapYaml, nsFlag)
err := framework.WaitForPodNameRunningInNamespace(c, bootstrapPodName, ns)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, bootstrapPodName, "master", expectedOnServer, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, bootstrapPodName, "sentinel", expectedOnSentinel, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
By("setting up services and controllers")
framework.RunKubectlOrDie("create", "-f", sentinelServiceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", sentinelControllerYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{sentinelRC: "true"}))
err = framework.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
label = labels.SelectorFromSet(labels.Set(map[string]string{"name": redisRC}))
err = framework.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
Example 4: runClientServerBandwidthMeasurement
func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int, maxBandwidthBits int64) {
	// TODO: Make this a function parameter, once we distribute iperf endpoints, possibly via session affinity.
	numServer := 1

	It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
		totalPods := len(nodes.Items)
		// for a single service, we expect to divide bandwidth between the network. Very crude estimate.
		expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
		Expect(totalPods).NotTo(Equal(0))
		appName := "iperf-e2e"
		err, _ := f.CreateServiceForSimpleAppWithPods(
			8001,
			8002,
			appName,
			func(n api.Node) api.PodSpec {
				return api.PodSpec{
					Containers: []api.Container{{
						Name:  "iperf-server",
						Image: "gcr.io/google_containers/iperf:e2e",
						Args: []string{
							"/bin/sh",
							"-c",
							"/usr/local/bin/iperf -s -p 8001 ",
						},
						Ports: []api.ContainerPort{{ContainerPort: 8001}},
					}},
					NodeName:      n.Name,
					RestartPolicy: api.RestartPolicyOnFailure,
				}
			},
			// this will be used to generate the -service name which all iperf clients point at.
			numServer, // Generally should be 1 server unless we do affinity or use a version of iperf that supports LB
			true,      // Make sure we wait, otherwise all the clients will die and need to restart.
		)
		if err != nil {
			framework.Failf("Fatal error waiting for iperf server endpoint : %v", err)
		}

		iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
			"iperf-e2e-cli",
			func(n api.Node) api.PodSpec {
				return api.PodSpec{
					Containers: []api.Container{
						{
							Name:  "iperf-client",
							Image: "gcr.io/google_containers/iperf:e2e",
							Args: []string{
								"/bin/sh",
								"-c",
								"/usr/local/bin/iperf -c service-for-" + appName + " -p 8002 --reportstyle C && sleep 5",
							},
						},
					},
					RestartPolicy: api.RestartPolicyOnFailure, // let them successfully die.
				}
			},
			numClient,
		)

		framework.Logf("Reading all perf results to stdout.")
		framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")

		// Calculate expected number of clients based on total nodes.
		expectedCli := func() int {
			nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
			return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
		}()

		// Extra 1/10 second per client.
		iperfTimeout := smallClusterTimeout + (time.Duration(expectedCli/10) * time.Second)

		iperfResults := &IPerfResults{}

		iperfClusterVerification := f.NewClusterVerification(
			framework.PodStateVerification{
				Selectors:   iperfClientPodLabels,
				ValidPhases: []api.PodPhase{api.PodSucceeded},
			},
		)

		pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout)
		if err2 != nil {
			framework.Failf("Error in wait...")
		} else if len(pods) < expectedCli {
			framework.Failf("IPerf results : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
		} else {
			// ForEach builds up a collection of IPerfRecords
			iperfClusterVerification.ForEach(
				func(p api.Pod) {
					resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
					if err == nil {
						framework.Logf(resultS)
						iperfResults.Add(NewIPerf(resultS))
					} else {
						framework.Failf("Unexpected error, %v when running forEach on the pods.", err)
					}
				})
		}
		fmt.Println("[begin] Node,Bandwidth CSV")
		//......... (remaining code omitted) .........
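Since the clients run iperf with --reportstyle C, each line harvested by LookForStringInLog is a CSV record in the field order logged above (date, cli, cliPort, server, serverPort, id, interval, transferBits, bandwidthBits), and the "0-" search string presumably matches the interval field (e.g. "0.0-10.0"). The repository's NewIPerf parser is not shown here; the stand-alone sketch below merely illustrates what parsing one such record could look like, assuming the field layout matches the header logged in the example.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// iperfRecord holds the fields of one iperf CSV report line, assuming the
// layout logged in the example above.
type iperfRecord struct {
	Client        string
	Server        string
	Interval      string
	TransferBits  int64
	BandwidthBits int64
}

// parseIperfCSV splits a single CSV line such as
// "20170101120000,10.0.0.2,51234,10.0.0.3,8002,3,0.0-10.0,1073741824,858993459".
func parseIperfCSV(line string) (*iperfRecord, error) {
	fields := strings.Split(strings.TrimSpace(line), ",")
	if len(fields) != 9 {
		return nil, fmt.Errorf("expected 9 fields, got %d", len(fields))
	}
	transfer, err := strconv.ParseInt(fields[7], 10, 64)
	if err != nil {
		return nil, err
	}
	bandwidth, err := strconv.ParseInt(fields[8], 10, 64)
	if err != nil {
		return nil, err
	}
	return &iperfRecord{
		Client:        fields[1],
		Server:        fields[3],
		Interval:      fields[6],
		TransferBits:  transfer,
		BandwidthBits: bandwidth,
	}, nil
}

func main() {
	rec, err := parseIperfCSV("20170101120000,10.0.0.2,51234,10.0.0.3,8002,3,0.0-10.0,1073741824,858993459")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %s: %d bits/sec over %s\n", rec.Client, rec.Server, rec.BandwidthBits, rec.Interval)
}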
Example 5: getNsCmdFlag
	queryDns := fmt.Sprintf(queryDnsPythonTemplate, backendSvcName+"."+namespaces[0].Name)
	_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
	Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")

	updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.cluster.local", fmt.Sprintf("dns-backend.%s.svc.cluster.local", namespaces[0].Name))

	// create a pod in each namespace
	for _, ns := range namespaces {
		framework.NewKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie()
	}

	// wait until the pods have been scheduled, i.e. are not Pending anymore. Remember
	// that we cannot wait for the pods to be running because our pods terminate by themselves.
	for _, ns := range namespaces {
		err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName)
		framework.ExpectNoError(err)
	}

	// wait for pods to print their result
	for _, ns := range namespaces {
		_, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
		Expect(err).NotTo(HaveOccurred())
	}
})
})

func getNsCmdFlag(ns *api.Namespace) string {
	return fmt.Sprintf("--namespace=%v", ns.Name)
}