This article collects typical usage examples of the Go function PrettyPrintJSON from k8s.io/kubernetes/test/e2e/framework. If you have been wondering what PrettyPrintJSON does and how to call it, the curated examples below should help.
Four code examples of PrettyPrintJSON are shown, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Go code samples.
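Before the examples, here is a minimal, self-contained sketch of what a PrettyPrintJSON-style helper typically boils down to: marshalling its argument into indented JSON and returning the result as a string. The lowercase helper name prettyPrintJSON and the sample payload are illustrative assumptions, not the framework's actual code.

package main

import (
	"encoding/json"
	"fmt"
)

// prettyPrintJSON marshals v into indented JSON, returning "" on failure.
// (Sketch only; the real framework helper may handle errors differently.)
func prettyPrintJSON(v interface{}) string {
	b, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		return ""
	}
	return string(b)
}

func main() {
	// Hypothetical payload, similar in shape to the time-series data logged below.
	sample := map[string][]int64{
		"create":  {1, 2, 3},
		"running": {4, 5, 6},
	}
	fmt.Println(prettyPrintJSON(sample))
}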
Example 1: logDensityTimeSeries
// logDensityTimeSeries logs the time series data of operation and resource usage
func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]unversioned.Time, testInfo map[string]string) {
	timeSeries := &NodeTimeSeries{
		Labels:  testInfo,
		Version: currentDataVersion,
	}
	// Attach operation time series.
	timeSeries.OperationData = map[string][]int64{
		"create":  getCumulatedPodTimeSeries(create),
		"running": getCumulatedPodTimeSeries(watch),
	}
	// Attach resource time series.
	timeSeries.ResourceData = rc.GetResourceTimeSeries()
	// Log time series with tags.
	framework.Logf("%s %s\n%s", TimeSeriesTag, framework.PrettyPrintJSON(timeSeries), TimeSeriesEnd)
}
Example 2: verifyLatency
// verifyLatency verifies whether pod creation latency satisfies the limit.
func verifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyData, testArg densityTest) {
	framework.PrintLatencies(e2eLags, "worst client e2e total latencies")

	// Zhou: do not trust `kubelet' metrics since they are not reset!
	latencyMetrics, _ := getPodStartLatency(kubeletAddr)
	framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics))

	// Check whether e2e pod startup time is acceptable.
	podCreateLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLags)}
	framework.Logf("Pod create latency: %s", framework.PrettyPrintJSON(podCreateLatency))
	framework.ExpectNoError(verifyPodStartupLatency(testArg.podStartupLimits, podCreateLatency.Latency))

	// Check batch pod creation latency.
	if testArg.podBatchStartupLimit > 0 {
		Expect(batchLag <= testArg.podBatchStartupLimit).To(Equal(true), "Batch creation startup time %v exceeds limit %v",
			batchLag, testArg.podBatchStartupLimit)
	}

	// Calculate and log throughput.
	throughputBatch := float64(testArg.podsNr) / batchLag.Minutes()
	framework.Logf("Batch creation throughput is %.1f pods/min", throughputBatch)
	throughputSequential := 1.0 / e2eLags[len(e2eLags)-1].Latency.Minutes()
	framework.Logf("Sequential creation throughput is %.1f pods/min", throughputSequential)
}
Example 3: logAndVerifyLatency
// logAndVerifyLatency logs pod creation latency and, when isVerify is true, verifies that it satisfies the limit.
func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyData, podStartupLimits framework.LatencyMetric,
	podBatchStartupLimit time.Duration, testName string, isVerify bool) {
	framework.PrintLatencies(e2eLags, "worst client e2e total latencies")

	// TODO(coufon): do not trust 'kubelet' metrics since they are not reset!
	latencyMetrics, _ := getPodStartLatency(kubeletAddr)
	framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics))

	podCreateLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLags)}

	// Log latency perf data.
	framework.PrintPerfData(getLatencyPerfData(podCreateLatency.Latency, testName))

	if isVerify {
		// Check whether e2e pod startup time is acceptable.
		framework.ExpectNoError(verifyPodStartupLatency(podStartupLimits, podCreateLatency.Latency))

		// Check batch pod creation latency.
		if podBatchStartupLimit > 0 {
			Expect(batchLag <= podBatchStartupLimit).To(Equal(true), "Batch creation startup time %v exceeds limit %v",
				batchLag, podBatchStartupLimit)
		}
	}
}
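Examples 2 and 3 both derive throughput as the number of pods divided by the elapsed time in minutes. A minimal standalone sketch of that arithmetic follows; the sample values are made up for illustration.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical numbers: 50 pods created in a 2.5-minute batch,
	// with the slowest pod taking 12 seconds end to end.
	podsNr := 50
	batchLag := 150 * time.Second
	slowestPodLatency := 12 * time.Second

	// Batch throughput: pods created per minute of batch wall time.
	throughputBatch := float64(podsNr) / batchLag.Minutes()
	fmt.Printf("Batch creation throughput is %.1f pods/min\n", throughputBatch)

	// Sequential throughput: pods per minute implied by the slowest pod's latency.
	throughputSequential := 1.0 / slowestPodLatency.Minutes()
	fmt.Printf("Sequential creation throughput is %.1f pods/min\n", throughputSequential)
}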
Example 4:
var masters sets.String

// Gathers data prior to framework namespace teardown
AfterEach(func() {
	saturationThreshold := time.Duration((totalPods / MinPodsPerSecondThroughput)) * time.Second
	if saturationThreshold < MinSaturationThreshold {
		saturationThreshold = MinSaturationThreshold
	}
	Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
	saturationData := framework.SaturationTime{
		TimeToSaturate: e2eStartupTime,
		NumberOfNodes:  nodeCount,
		NumberOfPods:   totalPods,
		Throughput:     float32(totalPods) / float32(e2eStartupTime/time.Second),
	}
	framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))

	// Verify latency metrics.
	highLatencyRequests, err := framework.HighLatencyRequests(c)
	framework.ExpectNoError(err)
	Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")

	// Verify scheduler metrics.
	// TODO: Reset metrics at the beginning of the test.
	// We should do something similar to how we do it for APIserver.
	framework.ExpectNoError(framework.VerifySchedulerLatency(c))
})

// Explicitly put here, to delete namespace at the end of the test
// (after measuring latency metrics, etc.).
f := framework.NewDefaultFramework("density")