This article summarizes typical usage of the Go function ExponentialBuckets from github.com/prometheus/client_golang/prometheus. If you have been wondering what ExponentialBuckets does and how to call it, the curated code examples below should help.
The following presents 13 code examples of the ExponentialBuckets function, ordered by popularity.
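Before going through the examples, a quick note on what the function actually returns: prometheus.ExponentialBuckets(start, factor, count) produces a slice of count upper bounds, beginning at start and multiplying by factor at each step (the +Inf bucket is added implicitly by the histogram). The short program below is illustrative only and is not taken from any of the projects quoted here:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Same arguments as Example 1 below: 0.001, 0.002, ..., 8.192 (14 upper bounds, in seconds).
	fmt.Println(prometheus.ExponentialBuckets(0.001, 2, 14))
}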
Example 1: init
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package backend
import "github.com/prometheus/client_golang/prometheus"
var (
commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "disk",
Name: "backend_commit_duration_seconds",
Help: "The latency distributions of commit called by backend.",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
})
)
func init() {
prometheus.MustRegister(commitDurations)
}
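The call site that feeds commitDurations is not part of this excerpt. As a hedged sketch (the helper name and the added "time" import are assumptions, not etcd code), a histogram with _seconds in its name is observed in seconds:

// Hypothetical helper in the same package; assumes `import "time"`.
func observeCommit(commit func()) {
	start := time.Now()
	commit() // perform the actual backend commit
	// Report seconds, matching the 0.001s to 8.192s exponential buckets above.
	commitDurations.Observe(time.Since(start).Seconds())
}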
Example 2:
Namespace: "tidb",
Subsystem: "ddl",
Name: "waiting_jobs",
Help: "Gauge of jobs.",
}, []string{"type", "action"})
// handle job result state.
handleJobSucc = "handle_job_succ"
handleJobFailed = "handle_job_failed"
handleJobHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "ddl",
Name: "handle_job_duration_seconds",
Help: "Bucketed histogram of processing time (s) of handle jobs",
Buckets: prometheus.ExponentialBuckets(0.01, 2, 20),
}, []string{"type", "action", "result_state"})
// handle batch data type.
batchAddCol = "batch_add_col"
batchAddIdx = "batch_add_idx"
batchDelData = "batch_del_data"
batchHandleDataHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "ddl",
Name: "batch_add_or_del_data_succ",
Help: "Bucketed histogram of processing time (s) of batch handle data",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 20),
}, []string{"handle_data_type"})
)
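The excerpt stops at the variable declarations; as a hedged sketch (the helper name and "time" import are assumptions, not TiDB code), a HistogramVec like handleJobHistogram is fed through WithLabelValues:

// Hypothetical call site; assumes `import "time"`.
func observeHandleJob(jobType, action, resultState string, start time.Time) {
	// Seconds, matching the 0.01s to ~5243s exponential buckets above.
	handleJobHistogram.WithLabelValues(jobType, action, resultState).
		Observe(time.Since(start).Seconds())
}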
Example 3: init
promFileSyncs = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "torus_server_file_syncs",
Help: "Number of times a file has been synced on this server",
}, []string{"volume"})
promFileChangedSyncs = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "torus_server_file_changed_syncs",
Help: "Number of times a file has been synced on this server, and the file has changed underneath it",
}, []string{"volume"})
promFileWrittenBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "torus_server_file_written_bytes",
Help: "Number of bytes written to a file on this server",
}, []string{"volume"})
promFileBlockRead = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "torus_server_file_block_read_us",
Help: "Histogram of ms taken to read a block through the layers and into the file abstraction",
Buckets: prometheus.ExponentialBuckets(50.0, 2, 20),
})
promFileBlockWrite = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "torus_server_file_block_write_us",
Help: "Histogram of ms taken to write a block through the layers and into the file abstraction",
Buckets: prometheus.ExponentialBuckets(50.0, 2, 20),
})
)
func init() {
prometheus.MustRegister(promOpenINodes)
prometheus.MustRegister(promOpenFiles)
prometheus.MustRegister(promFileSyncs)
prometheus.MustRegister(promFileChangedSyncs)
prometheus.MustRegister(promFileWrittenBytes)
prometheus.MustRegister(promFileBlockRead)
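As a hedged sketch (the helper below is illustrative, not torus code, and assumes an added "time" import), a histogram whose name ends in _us would be observed in microseconds:

// Hypothetical call site for the block-read histogram.
func observeBlockRead(start time.Time) {
	// Microseconds, matching the _us suffix and the 50µs to ~26s exponential buckets.
	promFileBlockRead.Observe(float64(time.Since(start)) / float64(time.Microsecond))
}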
Example 4: Register
var (
// TODO(a-robinson): Add unit tests for the handling of these metrics once
// the upstream library supports it.
requestCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "apiserver_request_count",
Help: "Counter of apiserver requests broken out for each verb, API resource, client, and HTTP response code.",
},
[]string{"verb", "resource", "client", "code"},
)
requestLatencies = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "apiserver_request_latencies",
Help: "Response latency distribution in microseconds for each verb, resource and client.",
// Use buckets ranging from 125 ms to 8 seconds.
Buckets: prometheus.ExponentialBuckets(125000, 2.0, 7),
},
[]string{"verb", "resource"},
)
requestLatenciesSummary = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "apiserver_request_latencies_summary",
Help: "Response latency summary in microseconds for each verb and resource.",
},
[]string{"verb", "resource"},
)
)
// Register all metrics.
func Register() {
prometheus.MustRegister(requestCounter)
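The remaining registrations and the instrumentation call sites are outside this excerpt. As a hedged sketch (the helper name is illustrative, not the apiserver's own instrumentation, and assumes `import "time"`), the counter and latency vectors are typically fed together per request:

// Hypothetical per-request recording helper.
func monitorRequest(verb, resource, client, code string, elapsed time.Duration) {
	requestCounter.WithLabelValues(verb, resource, client, code).Inc()
	// Microseconds, per the 125 ms to 8 s exponential buckets above.
	requestLatencies.WithLabelValues(verb, resource).
		Observe(float64(elapsed) / float64(time.Microsecond))
	requestLatenciesSummary.WithLabelValues(verb, resource).
		Observe(float64(elapsed) / float64(time.Microsecond))
}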
Example 5: init
"github.com/prometheus/client_golang/prometheus"
)
var (
port = flag.String("port", "8000", "Port to listen on.")
)
var (
requests = prometheus.NewCounter(prometheus.CounterOpts{
Name: "requests", Help: "total requests received"})
errors = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "errors", Help: "total errors served"}, []string{"code"})
latency_ms = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "latency_ms",
Help: "request latency in milliseconds",
Buckets: prometheus.ExponentialBuckets(1, 2, 20)})
backend_latency_ms = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "backend_latency_ms",
Help: "request latency in milliseconds",
Buckets: prometheus.ExponentialBuckets(1, 2, 20)})
)
func init() {
prometheus.MustRegister(requests)
prometheus.MustRegister(errors)
prometheus.MustRegister(latency_ms)
prometheus.MustRegister(backend_latency_ms)
}
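A hedged sketch of how these collectors might be wired into an HTTP handler (the wrapper is illustrative and assumes `import ("net/http"; "time")`):

// Hypothetical middleware that counts requests and records latency in ms.
func instrument(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		requests.Inc()
		start := time.Now()
		h(w, r)
		// Milliseconds, matching the 1 ms to ~524 s exponential buckets (1 * 2^19 ms).
		latency_ms.Observe(float64(time.Since(start)) / float64(time.Millisecond))
	}
}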
var (
randLock sync.Mutex
Example 6: Query
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
"time"
"github.com/prometheus/client_golang/prometheus"
)
var (
queryMetric = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "query",
Name: "handle_query_duration_seconds",
Help: "Bucketed histogram of processing time (s) of handled queries.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
})
)
// Query is used for add query cost time into metrics.
func Query(costTime time.Duration) {
queryMetric.Observe(costTime.Seconds()) // convert time.Duration to seconds to match the _seconds buckets above
}
func init() {
prometheus.MustRegister(queryMetric)
}
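A hedged usage sketch (the wrapper is illustrative, not TiDB code; "time" is already imported in this package):

// Hypothetical caller: time a query and hand the elapsed duration to Query.
func runAndMeasure(run func()) {
	start := time.Now()
	run()
	Query(time.Since(start))
}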
Example 7:
pendingEventsGauge = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: "etcd",
Subsystem: "storage",
Name: "pending_events_total",
Help: "Total number of pending events to be sent.",
})
indexCompactionPauseDurations = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "storage",
Name: "index_compaction_pause_duration_milliseconds",
Help: "Bucketed histogram of index compaction pause duration.",
// 0.5ms -> 1second
Buckets: prometheus.ExponentialBuckets(0.5, 2, 12),
})
dbCompactionPauseDurations = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "storage",
Name: "db_compaction_pause_duration_milliseconds",
Help: "Bucketed histogram of db compaction pause duration.",
// 1ms -> 4second
Buckets: prometheus.ExponentialBuckets(1, 2, 13),
})
dbCompactionTotalDurations = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "etcd",
Example 8:
"time"
"github.com/prometheus/client_golang/prometheus"
)
const schedulerSubsystem = "scheduler"
var BindingSaturationReportInterval = 1 * time.Second
var (
E2eSchedulingLatency = prometheus.NewHistogram(
prometheus.HistogramOpts{
Subsystem: schedulerSubsystem,
Name: "e2e_scheduling_latency_microseconds",
Help: "E2e scheduling latency (scheduling algorithm + binding)",
Buckets: prometheus.ExponentialBuckets(1000, 2, 15),
},
)
SchedulingAlgorithmLatency = prometheus.NewHistogram(
prometheus.HistogramOpts{
Subsystem: schedulerSubsystem,
Name: "scheduling_algorithm_latency_microseconds",
Help: "Scheduling algorithm latency",
Buckets: prometheus.ExponentialBuckets(1000, 2, 15),
},
)
BindingLatency = prometheus.NewHistogram(
prometheus.HistogramOpts{
Subsystem: schedulerSubsystem,
Name: "binding_latency_microseconds",
Help: "Binding latency",
Example 9: init
package lucky
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
RequestsHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "lucky_requests_ms",
Help: "Lucky requests",
Buckets: prometheus.ExponentialBuckets(1, 5, 6),
},
[]string{"backend", "method"})
BackendsGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "lucky_backends",
Help: "Lucky backends",
},
[]string{"backend"})
FrontendsGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "lucky_frontends",
Help: "Lucky frontends",
},
[]string{"frontend", "type"})
)
func init() {
prometheus.MustRegister(RequestsHistogram)
prometheus.MustRegister(BackendsGauge)
prometheus.MustRegister(FrontendsGauge)
}
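A hedged sketch of feeding RequestsHistogram (the helper name and "time" import are assumptions, not part of the excerpt):

// Hypothetical call site; the metric name suggests milliseconds.
func observeRequest(backend, method string, start time.Time) {
	// Milliseconds, matching the 1, 5, 25, 125, 625, 3125 ms exponential buckets (factor 5).
	RequestsHistogram.WithLabelValues(backend, method).
		Observe(float64(time.Since(start)) / float64(time.Millisecond))
}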
Example 10:
txnCmdCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "tidb",
Subsystem: "tikvclient",
Name: "txn_cmd_total",
Help: "Counter of txn commands.",
}, []string{"type"})
txnCmdHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "tikvclient",
Name: "txn_cmd_seconds",
Help: "Bucketed histogram of processing time of txn cmds.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 18),
}, []string{"type"})
backoffCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "tidb",
Subsystem: "tikvclient",
Name: "backoff_total",
Help: "Counter of backoff.",
}, []string{"type"})
backoffHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "tikvclient",
Name: "backoff_seconds",
Example 11:
copBuildTaskHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "tikvclient",
Name: "cop_buildtask_seconds",
Help: "Coprocessor buildTask cost time.",
})
copTaskLenHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "tikvclient",
Name: "cop_task_len",
Help: "Coprocessor task length.",
Buckets: prometheus.ExponentialBuckets(1, 2, 11),
})
coprocessorCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "tidb",
Subsystem: "tikvclient",
Name: "coprocessor_actions_total",
Help: "Counter of coprocessor actions.",
}, []string{"type"})
gcWorkerCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "tidb",
Subsystem: "tikvclient",
Name: "gc_worker_actions_total",
Example 12: init
"github.com/tylerb/graceful"
"github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/middleware"
)
func init() {
prometheus.MustRegister(promResponseDurationMilliseconds)
recordResponseDuration("action", nil, time.Second)
}
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "chihaya_http_response_duration_milliseconds",
Help: "The duration of time it takes to receive and write a response to an API request",
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
},
[]string{"action", "error"},
)
// recordResponseDuration records the duration of time to respond to a Request
// in milliseconds.
func recordResponseDuration(action string, err error, duration time.Duration) {
var errString string
if err != nil {
errString = err.Error()
}
promResponseDurationMilliseconds.
WithLabelValues(action, errString).
Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
Example 13: handler
// class (1xx, 2xx, ...). This creates a fair amount of time series on
// the Prometheus server. Usually, you would track the duration of
// serving HTTP request without partitioning by outcome. Do something
// like this only if needed. Also note how only status classes are
// tracked, not every single status code. The latter would create an
// even larger amount of time series. Request counters partitioned by
// status code are usually OK as each counter only creates one time
// series. Histograms are way more expensive, so partition with care and
// only where you really need separate latency tracking. Partitioning by
// status class is only an example. In concrete cases, other partitions
// might make more sense.
apiRequestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "api_request_duration_seconds",
Help: "Histogram for the request duration of the public API, partitioned by status class.",
Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
},
[]string{"status_class"},
)
)
func handler(w http.ResponseWriter, r *http.Request) {
status := http.StatusOK
// The ObserverFunc gets called by the deferred ObserveDuration and
// decides which Histogram's Observe method is called.
timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
switch {
case status >= 500: // Server error.
apiRequestDuration.WithLabelValues("5xx").Observe(v)
case status >= 400: // Client error.
apiRequestDuration.WithLabelValues("4xx").Observe(v)