This article collects typical usage examples of the Golang function framework.PrintPerfData from k8s.io/kubernetes/test/e2e/framework. If you are wondering what PrintPerfData does, how to call it, or what real call sites look like, the selected examples below should help.
The following shows 7 code examples of PrintPerfData, ordered by popularity by default.
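Before the collected examples, here is a minimal sketch (not taken from the examples below) of the value PrintPerfData consumes: a *perftype.PerfData whose DataItems carry metric values, a unit, and optional labels. The field names follow the test/e2e/perftype package in the Kubernetes releases these examples come from; the package name, metric names, and values here are purely illustrative.

package e2enode // illustrative package name

import (
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/perftype"
)

// logCustomPerfData shows the shape of the data PrintPerfData expects.
// PrintPerfData JSON-encodes the PerfData and writes it to the test log in
// the format that the perfdash dashboard scrapes.
func logCustomPerfData() {
    framework.PrintPerfData(&perftype.PerfData{
        Version: "v1",
        DataItems: []perftype.DataItem{
            {
                Data:   map[string]float64{"Perc50": 12.3, "Perc90": 45.6}, // illustrative values
                Unit:   "ms",
                Labels: map[string]string{"Metric": "example_latency"},
            },
        },
    })
}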
Example 1: verifyResource
// verifyResource verifies whether resource usage satisfies the limit.
func verifyResource(f *framework.Framework, cpuLimits framework.ContainersCPUSummary,
    memLimits framework.ResourceUsagePerContainer, rc *ResourceCollector) {
    nodeName := framework.TestContext.NodeName

    // Obtain memory PerfData
    usagePerContainer, err := rc.GetLatest()
    Expect(err).NotTo(HaveOccurred())
    framework.Logf("%s", formatResourceUsageStats(usagePerContainer))

    usagePerNode := make(framework.ResourceUsagePerNode)
    usagePerNode[nodeName] = usagePerContainer

    // Obtain CPU PerfData
    cpuSummary := rc.GetCPUSummary()
    framework.Logf("%s", formatCPUSummary(cpuSummary))

    cpuSummaryPerNode := make(framework.NodesCPUSummary)
    cpuSummaryPerNode[nodeName] = cpuSummary

    // Log resource usage
    framework.PrintPerfData(framework.ResourceUsageToPerfData(usagePerNode))
    framework.PrintPerfData(framework.CPUUsageToPerfData(cpuSummaryPerNode))

    // Verify resource usage
    verifyMemoryLimits(f.Client, memLimits, usagePerNode)
    verifyCPULimits(cpuLimits, cpuSummaryPerNode)
}
Example 2: logAndVerifyResource
// logAndVerifyResource prints the resource usage as perf data and verifies whether resource usage satisfies the limit.
func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimits framework.ContainersCPUSummary,
    memLimits framework.ResourceUsagePerContainer, testInfo map[string]string, isVerify bool) {
    nodeName := framework.TestContext.NodeName

    // Obtain memory PerfData
    usagePerContainer, err := rc.GetLatest()
    Expect(err).NotTo(HaveOccurred())
    framework.Logf("%s", formatResourceUsageStats(usagePerContainer))

    usagePerNode := make(framework.ResourceUsagePerNode)
    usagePerNode[nodeName] = usagePerContainer

    // Obtain CPU PerfData
    cpuSummary := rc.GetCPUSummary()
    framework.Logf("%s", formatCPUSummary(cpuSummary))

    cpuSummaryPerNode := make(framework.NodesCPUSummary)
    cpuSummaryPerNode[nodeName] = cpuSummary

    // Print resource usage
    framework.PrintPerfData(framework.ResourceUsageToPerfDataWithLabels(usagePerNode, testInfo))
    framework.PrintPerfData(framework.CPUUsageToPerfDataWithLabels(cpuSummaryPerNode, testInfo))

    // Verify resource usage
    if isVerify {
        verifyMemoryLimits(f.Client, memLimits, usagePerNode)
        verifyCPULimits(cpuLimits, cpuSummaryPerNode)
    }
}
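The only difference from Example 1 is that the *WithLabels converters attach the testInfo map as labels on every emitted data item, so perfdash can tell test configurations apart. A hypothetical call site might look like the following; the testInfo keys and values are illustrative, not required by the framework.

testInfo := map[string]string{
    "test": "density_batch_10pods", // illustrative label values
    "node": framework.TestContext.NodeName,
}
logAndVerifyResource(f, rc, cpuLimits, memLimits, testInfo, true /* isVerify */)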
Example 3: verifyResource
// verifyResource logs the latest memory and CPU usage as perf data and
// verifies it against the limits specified in testArg.
func verifyResource(f *framework.Framework, testArg DensityTest, rc *ResourceCollector) {
    nodeName := framework.TestContext.NodeName

    // Verify and log memory
    usagePerContainer, err := rc.GetLatest()
    Expect(err).NotTo(HaveOccurred())
    framework.Logf("%s", formatResourceUsageStats(usagePerContainer))

    usagePerNode := make(framework.ResourceUsagePerNode)
    usagePerNode[nodeName] = usagePerContainer
    memPerfData := framework.ResourceUsageToPerfData(usagePerNode)
    framework.PrintPerfData(memPerfData)
    verifyMemoryLimits(f.Client, testArg.memLimits, usagePerNode)

    // Verify and log CPU
    cpuSummary := rc.GetCPUSummary()
    framework.Logf("%s", formatCPUSummary(cpuSummary))

    cpuSummaryPerNode := make(framework.NodesCPUSummary)
    cpuSummaryPerNode[nodeName] = cpuSummary
    cpuPerfData := framework.CPUUsageToPerfData(cpuSummaryPerNode)
    framework.PrintPerfData(cpuPerfData)
    verifyCPULimits(testArg.cpuLimits, cpuSummaryPerNode)
}
Example 4: runResourceTrackingTest
// runResourceTrackingTest creates a replication controller of pause pods, lets
// the resource monitor track node resource usage for monitoringTime, logs the
// results as perf data, and verifies them against the expected CPU/memory limits.
func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
    expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
    numNodes := nodeNames.Len()
    totalPods := podsPerNode * numNodes
    By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
    rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))

    // TODO: Use a more realistic workload
    Expect(framework.RunRC(testutils.RCConfig{
        Client:         f.ClientSet,
        InternalClient: f.InternalClientset,
        Name:           rcName,
        Namespace:      f.Namespace.Name,
        Image:          framework.GetPauseImageName(f.ClientSet),
        Replicas:       totalPods,
    })).NotTo(HaveOccurred())

    // Log once and flush the stats.
    rm.LogLatest()
    rm.Reset()

    By("Start monitoring resource usage")
    // Periodically dump the CPU summary until the deadline is met.
    // Note that without calling framework.ResourceMonitor.Reset(), the stats
    // would occupy increasingly more memory. This should be fine
    // for the current test duration, but we should reclaim the
    // entries if we plan to monitor longer (e.g., 8 hours).
    deadline := time.Now().Add(monitoringTime)
    for time.Now().Before(deadline) {
        timeLeft := deadline.Sub(time.Now())
        framework.Logf("Still running...%v left", timeLeft)
        if timeLeft < reportingPeriod {
            time.Sleep(timeLeft)
        } else {
            time.Sleep(reportingPeriod)
        }
        logPodsOnNodes(f.ClientSet, nodeNames.List())
    }

    By("Reporting overall resource usage")
    logPodsOnNodes(f.ClientSet, nodeNames.List())
    usageSummary, err := rm.GetLatest()
    Expect(err).NotTo(HaveOccurred())
    // TODO(random-liu): Remove the original log when we migrate to new perfdash
    framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
    // Log perf result
    framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
    verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)

    cpuSummary := rm.GetCPUSummary()
    framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
    // Log perf result
    framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
    verifyCPULimits(expectedCPU, cpuSummary)

    By("Deleting the RC")
    framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}
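A hypothetical call site, loosely sketched after the kubelet resource-usage e2e tests: the expected maps are keyed by system container name, and the limits below are placeholders rather than recommended thresholds. f, nodeNames, and rm are assumed to be set up by the surrounding suite.

It("should not exceed expected resource usage", func() {
    expectedCPU := map[string]map[float64]float64{
        // container name -> percentile -> CPU cores (placeholder limits)
        "kubelet": {0.50: 0.10, 0.95: 0.20},
    }
    expectedMemory := framework.ResourceUsagePerContainer{
        // placeholder: 100 MB RSS limit for the kubelet container
        "kubelet": &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
    }
    runResourceTrackingTest(f, 10 /* podsPerNode */, nodeNames, rm, expectedCPU, expectedMemory)
})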
Example 5: logAndVerifyLatency
// logAndVerifyLatency logs pod creation latency as perf data and verifies
// whether it satisfies the limits.
func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyData, podStartupLimits framework.LatencyMetric,
    podBatchStartupLimit time.Duration, testName string, isVerify bool) {
    framework.PrintLatencies(e2eLags, "worst client e2e total latencies")

    // TODO(coufon): do not trust 'kubelet' metrics since they are not reset!
    latencyMetrics, _ := getPodStartLatency(kubeletAddr)
    framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics))

    podCreateLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLags)}

    // Log latency perf data
    framework.PrintPerfData(getLatencyPerfData(podCreateLatency.Latency, testName))

    if isVerify {
        // Check whether e2e pod startup time is acceptable.
        framework.ExpectNoError(verifyPodStartupLatency(podStartupLimits, podCreateLatency.Latency))
        // Check batch pod creation latency.
        if podBatchStartupLimit > 0 {
            Expect(batchLag <= podBatchStartupLimit).To(Equal(true), "Batch creation startup time %v exceeds limit %v",
                batchLag, podBatchStartupLimit)
        }
    }
}
Example 6: logPodCreateThroughput
// logPodCreateThroughput calculates pod creation throughput from the batch lag
// and per-pod e2e latencies, and logs it as perf data.
func logPodCreateThroughput(batchLag time.Duration, e2eLags []framework.PodLatencyData, podsNr int, testName string) {
    framework.PrintPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testName))
}
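getThroughputPerfData is a local helper in the node e2e tests rather than part of the framework package. As a purely illustrative sketch (not the upstream implementation), a helper that turns a batch duration into a PerfData item could compute throughput as pods created per minute:

// buildThroughputPerfData is a hypothetical helper for illustration only.
func buildThroughputPerfData(batchLag time.Duration, podsNr int, testName string) *perftype.PerfData {
    return &perftype.PerfData{
        Version: "v1",
        DataItems: []perftype.DataItem{
            {
                Data: map[string]float64{
                    // pods created per minute over the whole batch
                    "batch_pod_creation_throughput": float64(podsNr) / batchLag.Minutes(),
                },
                Unit:   "pods/min",
                Labels: map[string]string{"Test": testName},
            },
        },
    }
}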
Example 7:
By("Reporting overall resource usage")
logPodsOnNode(f.Client)
usagePerContainer, err := rc.GetLatest()
Expect(err).NotTo(HaveOccurred())
// TODO(random-liu): Remove the original log when we migrate to new perfdash
nodeName := framework.TestContext.NodeName
framework.Logf("%s", formatResourceUsageStats(usagePerContainer))
// Log perf result
usagePerNode := make(framework.ResourceUsagePerNode)
usagePerNode[nodeName] = usagePerContainer
framework.PrintPerfData(framework.ResourceUsageToPerfData(usagePerNode))
verifyMemoryLimits(f.Client, expectedMemory, usagePerNode)
cpuSummary := rc.GetCPUSummary()
framework.Logf("%s", formatCPUSummary(cpuSummary))
// Log perf result
cpuSummaryPerNode := make(framework.NodesCPUSummary)
cpuSummaryPerNode[nodeName] = cpuSummary
framework.PrintPerfData(framework.CPUUsageToPerfData(cpuSummaryPerNode))
verifyCPULimits(expectedCPU, cpuSummaryPerNode)
})
}
})
})