This page collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/pkg/util/metric.NewCounter. If you have been wondering what the Golang NewCounter function does, how to call it, or where to find real examples of it in use, the hand-picked code samples below may help.
The following shows 15 code examples of the NewCounter function, sorted by popularity by default.
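Before the examples, here is a minimal sketch of the pattern that almost all of them share: describe the metric with a metric.Metadata, create the counter with metric.NewCounter, add it to a metric.Registry, and increment it. This is only an illustrative sketch: the metric name "requests.success" and the variable names are invented, and the Count() read at the end assumes the Counter exposes the usual go-metrics-style accessor.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/util/metric"
)

func main() {
	// Metadata describes the metric; at minimum it carries a name.
	meta := metric.Metadata{Name: "requests.success"}

	// NewCounter returns a *metric.Counter that can be incremented atomically.
	c := metric.NewCounter(meta)

	// Counters are normally collected in a Registry so a server can export
	// them; the examples below use Registry.AddMetric and AddMetricStruct.
	reg := metric.NewRegistry()
	reg.AddMetric(c)

	// Record three events.
	c.Inc(3)

	// Assumption: Count() reports the accumulated value.
	fmt.Println(c.Count())
}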
Example 1: NewNodeLiveness
// NewNodeLiveness returns a new instance of NodeLiveness configured
// with the specified gossip instance.
func NewNodeLiveness(
	ambient log.AmbientContext,
	clock *hlc.Clock,
	db *client.DB,
	g *gossip.Gossip,
	livenessThreshold time.Duration,
	heartbeatInterval time.Duration,
) *NodeLiveness {
	nl := &NodeLiveness{
		ambientCtx: ambient,
		clock: clock,
		db: db,
		gossip: g,
		livenessThreshold: livenessThreshold,
		heartbeatInterval: heartbeatInterval,
		stopHeartbeat: make(chan struct{}),
		metrics: LivenessMetrics{
			HeartbeatSuccesses: metric.NewCounter(metaHeartbeatSuccesses),
			HeartbeatFailures: metric.NewCounter(metaHeartbeatFailures),
			EpochIncrements: metric.NewCounter(metaEpochIncrements),
		},
	}
	nl.mu.nodes = map[roachpb.NodeID]Liveness{}
	livenessRegex := gossip.MakePrefixPattern(gossip.KeyNodeLivenessPrefix)
	nl.gossip.RegisterCallback(livenessRegex, nl.livenessGossipUpdate)
	return nl
}
Example 2: NewNodeLiveness
// NewNodeLiveness returns a new instance of NodeLiveness configured
// with the specified gossip instance.
func NewNodeLiveness(
	ambient log.AmbientContext,
	clock *hlc.Clock,
	db *client.DB,
	g *gossip.Gossip,
	livenessThreshold time.Duration,
	renewalDuration time.Duration,
) *NodeLiveness {
	nl := &NodeLiveness{
		ambientCtx: ambient,
		clock: clock,
		db: db,
		gossip: g,
		livenessThreshold: livenessThreshold,
		heartbeatInterval: livenessThreshold - renewalDuration,
	}
	nl.metrics = LivenessMetrics{
		LiveNodes: metric.NewFunctionalGauge(metaLiveNodes, nl.numLiveNodes),
		HeartbeatSuccesses: metric.NewCounter(metaHeartbeatSuccesses),
		HeartbeatFailures: metric.NewCounter(metaHeartbeatFailures),
		EpochIncrements: metric.NewCounter(metaEpochIncrements),
	}
	nl.pauseHeartbeat.Store(false)
	nl.mu.nodes = map[roachpb.NodeID]Liveness{}
	livenessRegex := gossip.MakePrefixPattern(gossip.KeyNodeLivenessPrefix)
	nl.gossip.RegisterCallback(livenessRegex, nl.livenessGossipUpdate)
	return nl
}
Example 3: makeMetrics
// makeMetrics makes a new metrics object with rates.
func makeMetrics() Metrics {
	return Metrics{
		BytesReceived: metric.NewCounter(MetaBytesReceivedRates),
		BytesSent: metric.NewCounter(MetaBytesSentRates),
		InfosReceived: metric.NewCounter(MetaInfosReceivedRates),
		InfosSent: metric.NewCounter(MetaInfosSentRates),
	}
}
Example 4: makeNodeMetrics
func makeNodeMetrics(reg *metric.Registry, sampleInterval time.Duration) nodeMetrics {
	nm := nodeMetrics{
		Latency: metric.NewLatency(metaExecLatency, sampleInterval),
		Success: metric.NewCounter(metaExecSuccess),
		Err: metric.NewCounter(metaExecError),
	}
	reg.AddMetricStruct(nm)
	return nm
}
Example 5: makeServerMetrics
func makeServerMetrics(internalMemMetrics *sql.MemoryMetrics) ServerMetrics {
	return ServerMetrics{
		Conns: metric.NewCounter(MetaConns),
		BytesInCount: metric.NewCounter(MetaBytesIn),
		BytesOutCount: metric.NewCounter(MetaBytesOut),
		ConnMemMetrics: sql.MakeMemMetrics("conns"),
		SQLMemMetrics: sql.MakeMemMetrics("client"),
		internalMemMetrics: internalMemMetrics,
	}
}
Example 6: BenchmarkDecodeBinaryDecimal
func BenchmarkDecodeBinaryDecimal(b *testing.B) {
	wbuf := writeBuffer{bytecount: metric.NewCounter(metric.Metadata{Name: ""})}
	expected := new(parser.DDecimal)
	s := "-1728718718271827121233.1212121212"
	if _, ok := expected.SetString(s); !ok {
		b.Fatalf("could not set %q on decimal", s)
	}
	wbuf.writeBinaryDatum(expected, nil)
	rbuf := readBuffer{msg: wbuf.wrapped.Bytes()}
	plen, err := rbuf.getUint32()
	if err != nil {
		b.Fatal(err)
	}
	bytes, err := rbuf.getBytes(int(plen))
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		got, err := decodeOidDatum(oid.T_numeric, formatBinary, bytes)
		b.StopTimer()
		if err != nil {
			b.Fatal(err)
		} else if got.Compare(expected) != 0 {
			b.Fatalf("expected %s, got %s", expected, got)
		}
	}
}
Example 7: MakeMemMetrics
// MakeMemMetrics instantiates the metric objects for an SQL endpoint.
func MakeMemMetrics(endpoint string) MemoryMetrics {
	prefix := "sql.mon." + endpoint
	MetaMemMaxBytes := metric.Metadata{Name: prefix + ".max"}
	MetaMemCurBytes := metric.Metadata{Name: prefix + ".cur"}
	MetaMemMaxTxnBytes := metric.Metadata{Name: prefix + ".txn.max"}
	MetaMemTxnCurBytes := metric.Metadata{Name: prefix + ".txn.cur"}
	MetaMemMaxSessionBytes := metric.Metadata{Name: prefix + ".session.max"}
	MetaMemSessionCurBytes := metric.Metadata{Name: prefix + ".session.cur"}
	return MemoryMetrics{
		MaxBytesHist: metric.NewHistogram(MetaMemMaxBytes, time.Minute, log10int64times1000, 3),
		CurBytesCount: metric.NewCounter(MetaMemCurBytes),
		TxnMaxBytesHist: metric.NewHistogram(MetaMemMaxTxnBytes, time.Minute, log10int64times1000, 3),
		TxnCurBytesCount: metric.NewCounter(MetaMemTxnCurBytes),
		SessionMaxBytesHist: metric.NewHistogram(MetaMemMaxSessionBytes, time.Minute, log10int64times1000, 3),
		SessionCurBytesCount: metric.NewCounter(MetaMemSessionCurBytes),
	}
}
Example 8: makeDistSenderMetrics
func makeDistSenderMetrics() DistSenderMetrics {
	return DistSenderMetrics{
		BatchCount: metric.NewCounter(metaDistSenderBatchCount),
		PartialBatchCount: metric.NewCounter(metaDistSenderPartialBatchCount),
		SentCount: metric.NewCounter(metaTransportSentCount),
		LocalSentCount: metric.NewCounter(metaTransportLocalSentCount),
		SendNextTimeoutCount: metric.NewCounter(metaDistSenderSendNextTimeoutCount),
		NextReplicaErrCount: metric.NewCounter(metaDistSenderNextReplicaErrCount),
		NotLeaseHolderErrCount: metric.NewCounter(metaDistSenderNotLeaseHolderErrCount),
	}
}
Example 9: benchmarkWriteType
func benchmarkWriteType(b *testing.B, d parser.Datum, format formatCode) {
	buf := writeBuffer{bytecount: metric.NewCounter(metric.Metadata{Name: ""})}
	writeMethod := buf.writeTextDatum
	if format == formatBinary {
		writeMethod = buf.writeBinaryDatum
	}
	// Warm up the buffer.
	writeMethod(d, nil)
	buf.wrapped.Reset()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Starting and stopping the timer in each loop iteration causes this
		// to take much longer. See http://stackoverflow.com/a/37624250/3435257.
		// buf.wrapped.Reset() should be fast enough to be negligible.
		writeMethod(d, nil)
		buf.wrapped.Reset()
	}
}
Example 10: BenchmarkWriteBinaryDecimal
func BenchmarkWriteBinaryDecimal(b *testing.B) {
	buf := writeBuffer{bytecount: metric.NewCounter(metric.Metadata{Name: ""})}
	dec := new(parser.DDecimal)
	s := "-1728718718271827121233.1212121212"
	if _, ok := dec.SetString(s); !ok {
		b.Fatalf("could not set %q on decimal", s)
	}
	// Warm up the buffer.
	buf.writeBinaryDatum(dec, nil)
	buf.wrapped.Reset()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		buf.writeBinaryDatum(dec, nil)
		b.StopTimer()
		buf.wrapped.Reset()
	}
}
Example 11: testBinaryDatumType
func testBinaryDatumType(t *testing.T, typ string, datumConstructor func(val string) parser.Datum) {
	var tests []binaryTest
	f, err := os.Open(filepath.Join("testdata", fmt.Sprintf("%s_test.json", typ)))
	if err != nil {
		t.Fatal(err)
	}
	if err := json.NewDecoder(f).Decode(&tests); err != nil {
		t.Fatal(err)
	}
	f.Close()
	buf := writeBuffer{bytecount: metric.NewCounter(metric.Metadata{})}
	for _, test := range tests {
		buf.wrapped.Reset()
		d := datumConstructor(test.In)
		oid, _ := sql.DatumToOid(d.ResolvedType())
		func() {
			defer func() {
				if r := recover(); r != nil {
					fmt.Printf("%q: %s", test.In, r)
					panic(r)
				}
			}()
			buf.writeBinaryDatum(d, time.UTC)
			if buf.err != nil {
				t.Fatal(buf.err)
			}
			if got := buf.wrapped.Bytes(); !bytes.Equal(got, test.Expect) {
				t.Errorf("%q:\n\t%v found,\n\t%v expected", test.In, got, test.Expect)
			} else if datum, err := decodeOidDatum(oid, formatBinary, got[4:]); err != nil {
				t.Fatalf("unable to decode %v: %s", got[4:], err)
			} else if d.Compare(datum) != 0 {
				t.Errorf("expected %s, got %s", d, datum)
			}
		}()
	}
}
Example 12: newStoreMetrics
func newStoreMetrics(sampleInterval time.Duration) *StoreMetrics {
	storeRegistry := metric.NewRegistry()
	sm := &StoreMetrics{
		registry: storeRegistry,
		// Replica metrics.
		ReplicaCount: metric.NewCounter(metaReplicaCount),
		ReservedReplicaCount: metric.NewCounter(metaReservedReplicaCount),
		RaftLeaderCount: metric.NewGauge(metaRaftLeaderCount),
		RaftLeaderNotLeaseHolderCount: metric.NewGauge(metaRaftLeaderNotLeaseHolderCount),
		LeaseHolderCount: metric.NewGauge(metaLeaseHolderCount),
		QuiescentCount: metric.NewGauge(metaQuiescentCount),
		// Replica CommandQueue metrics.
		MaxCommandQueueSize: metric.NewGauge(metaMaxCommandQueueSize),
		MaxCommandQueueWriteCount: metric.NewGauge(metaMaxCommandQueueWriteCount),
		MaxCommandQueueReadCount: metric.NewGauge(metaMaxCommandQueueReadCount),
		MaxCommandQueueTreeSize: metric.NewGauge(metaMaxCommandQueueTreeSize),
		MaxCommandQueueOverlaps: metric.NewGauge(metaMaxCommandQueueOverlaps),
		CombinedCommandQueueSize: metric.NewGauge(metaCombinedCommandQueueSize),
		CombinedCommandWriteCount: metric.NewGauge(metaCombinedCommandWriteCount),
		CombinedCommandReadCount: metric.NewGauge(metaCombinedCommandReadCount),
		// Range metrics.
		RangeCount: metric.NewGauge(metaRangeCount),
		UnavailableRangeCount: metric.NewGauge(metaUnavailableRangeCount),
		UnderReplicatedRangeCount: metric.NewGauge(metaUnderReplicatedRangeCount),
		// Lease request metrics.
		LeaseRequestSuccessCount: metric.NewCounter(metaLeaseRequestSuccessCount),
		LeaseRequestErrorCount: metric.NewCounter(metaLeaseRequestErrorCount),
		// Storage metrics.
		LiveBytes: metric.NewGauge(metaLiveBytes),
		KeyBytes: metric.NewGauge(metaKeyBytes),
		ValBytes: metric.NewGauge(metaValBytes),
		IntentBytes: metric.NewGauge(metaIntentBytes),
		LiveCount: metric.NewGauge(metaLiveCount),
		KeyCount: metric.NewGauge(metaKeyCount),
		ValCount: metric.NewGauge(metaValCount),
		IntentCount: metric.NewGauge(metaIntentCount),
		IntentAge: metric.NewGauge(metaIntentAge),
		GcBytesAge: metric.NewGauge(metaGcBytesAge),
		LastUpdateNanos: metric.NewGauge(metaLastUpdateNanos),
		Capacity: metric.NewGauge(metaCapacity),
		Available: metric.NewGauge(metaAvailable),
		Reserved: metric.NewCounter(metaReserved),
		SysBytes: metric.NewGauge(metaSysBytes),
		SysCount: metric.NewGauge(metaSysCount),
		// RocksDB metrics.
		RdbBlockCacheHits: metric.NewGauge(metaRdbBlockCacheHits),
		RdbBlockCacheMisses: metric.NewGauge(metaRdbBlockCacheMisses),
		RdbBlockCacheUsage: metric.NewGauge(metaRdbBlockCacheUsage),
		RdbBlockCachePinnedUsage: metric.NewGauge(metaRdbBlockCachePinnedUsage),
		RdbBloomFilterPrefixChecked: metric.NewGauge(metaRdbBloomFilterPrefixChecked),
		RdbBloomFilterPrefixUseful: metric.NewGauge(metaRdbBloomFilterPrefixUseful),
		RdbMemtableHits: metric.NewGauge(metaRdbMemtableHits),
		RdbMemtableMisses: metric.NewGauge(metaRdbMemtableMisses),
		RdbMemtableTotalSize: metric.NewGauge(metaRdbMemtableTotalSize),
		RdbFlushes: metric.NewGauge(metaRdbFlushes),
		RdbCompactions: metric.NewGauge(metaRdbCompactions),
		RdbTableReadersMemEstimate: metric.NewGauge(metaRdbTableReadersMemEstimate),
		RdbReadAmplification: metric.NewGauge(metaRdbReadAmplification),
		RdbNumSSTables: metric.NewGauge(metaRdbNumSSTables),
		// Range event metrics.
		RangeSplits: metric.NewCounter(metaRangeSplits),
		RangeAdds: metric.NewCounter(metaRangeAdds),
		RangeRemoves: metric.NewCounter(metaRangeRemoves),
		RangeSnapshotsGenerated: metric.NewCounter(metaRangeSnapshotsGenerated),
		RangeSnapshotsNormalApplied: metric.NewCounter(metaRangeSnapshotsNormalApplied),
		RangeSnapshotsPreemptiveApplied: metric.NewCounter(metaRangeSnapshotsPreemptiveApplied),
		// Raft processing metrics.
		RaftTicks: metric.NewCounter(metaRaftTicks),
		RaftWorkingDurationNanos: metric.NewCounter(metaRaftWorkingDurationNanos),
		RaftTickingDurationNanos: metric.NewCounter(metaRaftTickingDurationNanos),
		// Raft message metrics.
		RaftRcvdMsgProp: metric.NewCounter(metaRaftRcvdProp),
		RaftRcvdMsgApp: metric.NewCounter(metaRaftRcvdApp),
		RaftRcvdMsgAppResp: metric.NewCounter(metaRaftRcvdAppResp),
		RaftRcvdMsgVote: metric.NewCounter(metaRaftRcvdVote),
		RaftRcvdMsgVoteResp: metric.NewCounter(metaRaftRcvdVoteResp),
		RaftRcvdMsgPreVote: metric.NewCounter(metaRaftRcvdPreVote),
		RaftRcvdMsgPreVoteResp: metric.NewCounter(metaRaftRcvdPreVoteResp),
		RaftRcvdMsgSnap: metric.NewCounter(metaRaftRcvdSnap),
		RaftRcvdMsgHeartbeat: metric.NewCounter(metaRaftRcvdHeartbeat),
		RaftRcvdMsgHeartbeatResp: metric.NewCounter(metaRaftRcvdHeartbeatResp),
		RaftRcvdMsgTransferLeader: metric.NewCounter(metaRaftRcvdTransferLeader),
		RaftRcvdMsgTimeoutNow: metric.NewCounter(metaRaftRcvdTimeoutNow),
		RaftRcvdMsgDropped: metric.NewCounter(metaRaftRcvdDropped),
		raftRcvdMessages: make(map[raftpb.MessageType]*metric.Counter, len(raftpb.MessageType_name)),
		RaftEnqueuedPending: metric.NewGauge(metaRaftEnqueuedPending),
		// This Gauge measures the number of heartbeats queued up just before
		// the queue is cleared, to avoid flapping wildly.
		RaftCoalescedHeartbeatsPending: metric.NewGauge(metaRaftCoalescedHeartbeatsPending),
//......... the rest of this code has been omitted .........
Example 13: makeReplicaGCQueueMetrics
func makeReplicaGCQueueMetrics() ReplicaGCQueueMetrics {
	return ReplicaGCQueueMetrics{
		RemoveReplicaCount: metric.NewCounter(metaReplicaGCQueueRemoveReplicaCount),
	}
}
Example 14: TestMetricsRecorder
//......... the beginning of this code has been omitted .........
					TimestampNanos: time,
					Value: float64(val),
				},
			},
		}
		expected = append(expected, expect)
		// Generate status summary data.
		if isNode {
			expectedNodeSummaryMetrics[prefix+name] = float64(val)
		} else {
			// This can overwrite the previous value, but this is expected as
			// all stores in our tests have identical values; when comparing
			// status summaries, the same map is used as expected data for all
			// stores.
			expectedStoreSummaryMetrics[prefix+name] = float64(val)
		}
	}
	for _, reg := range regList {
		for _, data := range metricNames {
			switch data.typ {
			case "gauge":
				g := metric.NewGauge(metric.Metadata{Name: reg.prefix + data.name})
				reg.reg.AddMetric(g)
				g.Update(data.val)
				addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
			case "floatgauge":
				g := metric.NewGaugeFloat64(metric.Metadata{Name: reg.prefix + data.name})
				reg.reg.AddMetric(g)
				g.Update(float64(data.val))
				addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
			case "counter":
				c := metric.NewCounter(metric.Metadata{Name: reg.prefix + data.name})
				reg.reg.AddMetric(c)
				c.Inc((data.val))
				addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
			case "counterwithrates":
				r := metric.NewCounterWithRates(metric.Metadata{Name: reg.prefix + data.name})
				reg.reg.AddMetric(r)
				r.Inc(data.val)
				addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
			case "histogram":
				h := metric.NewHistogram(metric.Metadata{Name: reg.prefix + data.name}, time.Second, 1000, 2)
				reg.reg.AddMetric(h)
				h.RecordValue(data.val)
				for _, q := range recordHistogramQuantiles {
					addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val, reg.isNode)
				}
			case "latency":
				l := metric.NewLatency(metric.Metadata{Name: reg.prefix + data.name}, time.Hour)
				reg.reg.AddMetric(l)
				l.RecordValue(data.val)
				// Latency is simply three histograms (at different resolution
				// time scales).
				for _, q := range recordHistogramQuantiles {
					addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val, reg.isNode)
				}
			default:
				t.Fatalf("unexpected: %+v", data)
			}
		}
	}
	// ========================================
	// Verify time series data
Example 15: NewExecutor
// NewExecutor creates an Executor and registers a callback on the
// system config.
func NewExecutor(
	cfg ExecutorConfig, stopper *stop.Stopper, startupMemMetrics *MemoryMetrics,
) *Executor {
	exec := &Executor{
		cfg: cfg,
		reCache: parser.NewRegexpCache(512),
		Latency: metric.NewLatency(MetaLatency, cfg.MetricsSampleInterval),
		TxnBeginCount: metric.NewCounter(MetaTxnBegin),
		TxnCommitCount: metric.NewCounter(MetaTxnCommit),
		TxnAbortCount: metric.NewCounter(MetaTxnAbort),
		TxnRollbackCount: metric.NewCounter(MetaTxnRollback),
		SelectCount: metric.NewCounter(MetaSelect),
		UpdateCount: metric.NewCounter(MetaUpdate),
		InsertCount: metric.NewCounter(MetaInsert),
		DeleteCount: metric.NewCounter(MetaDelete),
		DdlCount: metric.NewCounter(MetaDdl),
		MiscCount: metric.NewCounter(MetaMisc),
		QueryCount: metric.NewCounter(MetaQuery),
	}
	exec.systemConfigCond = sync.NewCond(exec.systemConfigMu.RLocker())
	gossipUpdateC := cfg.Gossip.RegisterSystemConfigChannel()
	stopper.RunWorker(func() {
		for {
			select {
			case <-gossipUpdateC:
				sysCfg, _ := cfg.Gossip.GetSystemConfig()
				exec.updateSystemConfig(sysCfg)
			case <-stopper.ShouldStop():
				return
			}
		}
	})
	ctx := log.WithLogTag(context.Background(), "startup", nil)
	startupSession := NewSession(ctx, SessionArgs{}, exec, nil, startupMemMetrics)
	if err := exec.virtualSchemas.init(&startupSession.planner); err != nil {
		log.Fatal(ctx, err)
	}
	startupSession.Finish(exec)
	return exec
}