本文整理汇总了Golang中github.com/cockroachdb/cockroach/util/metric.NewRegistry函数的典型用法代码示例。如果您正苦于以下问题:Golang NewRegistry函数的具体用法?Golang NewRegistry怎么用?Golang NewRegistry使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewRegistry函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: TestGossipRaceLogStatus
// TestGossipRaceLogStatus races concurrent LogStatus calls against
// AddInfo-driven gossip updates, giving the race detector a chance to
// flag any unsynchronized access between the two paths.
func TestGossipRaceLogStatus(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	// startClient is called with local's mutex held; the peer is started
	// inside the critical section before the client connection to it.
	local.mu.Lock()
	peer := startGossip(2, stopper, t, metric.NewRegistry())
	local.startClient(&peer.is.NodeAddr)
	local.mu.Unlock()
	// Race gossiping against LogStatus.
	gun := make(chan struct{})
	for i := uint8(0); i < 10; i++ {
		go func() {
			// Block until fired so each LogStatus overlaps the AddInfo below.
			<-gun
			local.LogStatus()
		}()
		// The unbuffered send ensures the goroutine is unblocked before
		// (and therefore races with) the following AddInfo.
		gun <- struct{}{}
		if err := local.AddInfo(
			strconv.FormatUint(uint64(i), 10),
			[]byte{i},
			time.Hour,
		); err != nil {
			t.Fatal(err)
		}
	}
	close(gun)
}
示例2: TestClientGossip
// TestClientGossip verifies a client can gossip a delta to the server.
func TestClientGossip(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	src := startGossip(1, stopper, t, metric.NewRegistry())
	dst := startGossip(2, stopper, t, metric.NewRegistry())

	// Seed each node with one info so that propagation can be observed in
	// both directions.
	if err := src.AddInfo("local-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}
	if err := dst.AddInfo("remote-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}

	disconnCh := make(chan *client, 1)
	cl := newClient(&dst.is.NodeAddr, makeMetrics(metric.NewRegistry()))
	defer func() {
		// Stopping the stopper tears down the connection; the disconnect
		// notification must name our client.
		stopper.Stop()
		if cl != <-disconnCh {
			t.Errorf("expected client disconnect after remote close")
		}
	}()

	gossipSucceedsSoon(t, stopper, disconnCh, map[*client]*Gossip{
		cl: src,
	}, func() error {
		// Each seeded info must become visible on the opposite node.
		if _, err := dst.GetInfo("local-key"); err != nil {
			return err
		}
		_, err := src.GetInfo("remote-key")
		return err
	})
}
示例3: TestClientDisallowMultipleConns
// TestClientDisallowMultipleConns verifies that the server disallows
// multiple connections from the same client node ID.
func TestClientDisallowMultipleConns(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())

	// startClient requires the gossip mutexes to be held. The RPC client
	// cache is disabled via the context, so the two startClient calls below
	// open two distinct outgoing connections to the same remote.
	local.mu.Lock()
	remote.mu.Lock()
	remoteAddr := remote.is.NodeAddr
	local.startClient(&remoteAddr)
	local.startClient(&remoteAddr)
	local.mu.Unlock()
	remote.mu.Unlock()
	local.manage()
	remote.manage()

	util.SucceedsSoon(t, func() error {
		// The remote server must converge on a single incoming connection
		// and the local server on a single outgoing one.
		local.mu.Lock()
		remote.mu.Lock()
		nOut := local.outgoing.len()
		nIn := remote.incoming.len()
		local.mu.Unlock()
		remote.mu.Unlock()
		if nOut == 1 && nIn == 1 && verifyServerMaps(local, 0) && verifyServerMaps(remote, 1) {
			return nil
		}
		return errors.Errorf("incorrect number of incoming (%d) or outgoing (%d) connections", nIn, nOut)
	})
}
示例4: TestClientDisconnectRedundant
// TestClientDisconnectRedundant verifies that the gossip server
// will drop an outgoing client connection that is already an
// inbound client connection of another node.
func TestClientDisconnectRedundant(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())

	// startClient requires locks are held, so acquire here. Connect the two
	// nodes to each other so that one of the two links is redundant.
	local.mu.Lock()
	remote.mu.Lock()
	remoteAddr := remote.mu.is.NodeAddr
	localAddr := local.mu.is.NodeAddr
	local.startClient(&remoteAddr, remote.mu.is.NodeID)
	remote.startClient(&localAddr, local.mu.is.NodeID)
	local.mu.Unlock()
	remote.mu.Unlock()
	local.manage()
	remote.manage()

	util.SucceedsSoon(t, func() error {
		// Check which of the clients is connected to the other.
		localToRemote := local.findClient(func(c *client) bool { return c.addr.String() == remoteAddr.String() }) != nil
		remoteToLocal := remote.findClient(func(c *client) bool { return c.addr.String() == localAddr.String() }) != nil
		switch {
		case localToRemote && remoteToLocal:
			// We expect node 2 to disconnect; if both are still connected,
			// it's possible that node 1 gossiped before node 2 connected, in
			// which case we have to gossip from node 1 to trigger the
			// disconnect redundant client code.
			if err := local.AddInfo("local-key", nil, time.Second); err != nil {
				t.Fatal(err)
			}
		case localToRemote && !remoteToLocal && verifyServerMaps(local, 0) && verifyServerMaps(remote, 1):
			return nil
		}
		return errors.New("local client to remote not yet closed as redundant")
	})
}
示例5: TestClientRetryBootstrap
// TestClientRetryBootstrap verifies that an initial failure to connect
// to a bootstrap host doesn't stall the bootstrapping process in the
// absence of any additional activity. This can happen during acceptance
// tests if the DNS can't lookup hostnames when gossip is started.
func TestClientRetryBootstrap(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())

	remote.mu.Lock()
	remoteAddr := remote.is.NodeAddr
	remote.mu.Unlock()

	if err := local.AddInfo("local-key", []byte("hello"), 0*time.Second); err != nil {
		t.Fatal(err)
	}

	// Install a resolver that fails a few times before succeeding; with a
	// short bootstrap interval the retry loop should get past the failures
	// quickly and still reach the remote.
	local.SetBootstrapInterval(10 * time.Millisecond)
	local.SetResolvers([]resolver.Resolver{
		&testResolver{addr: remoteAddr.String(), numFails: 3, numSuccesses: 1},
	})
	local.bootstrap()
	local.manage()

	util.SucceedsSoon(t, func() error {
		_, err := remote.GetInfo("local-key")
		return err
	})
}
示例6: TestClientForwardUnresolved
// TestClientForwardUnresolved verifies that a client does not resolve a forward
// address prematurely.
func TestClientForwardUnresolved(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	const nodeID = 1
	local := startGossip(nodeID, stopper, t, metric.NewRegistry())
	local.mu.Lock()
	localAddr := local.is.NodeAddr
	local.mu.Unlock()

	cl := newClient(&localAddr, makeMetrics(metric.NewRegistry())) // never started

	// The forward target is deliberately left as an unresolved address.
	forwardTo := util.UnresolvedAddr{
		NetworkField: "tcp",
		AddressField: "localhost:2345",
	}
	resp := &Response{
		NodeID:          nodeID,
		Addr:            localAddr,
		AlternateNodeID: nodeID + 1,
		AlternateAddr:   &forwardTo,
	}
	// Handling a forward response must surface a "received forward" error...
	if err := cl.handleResponse(local, resp); !testutils.IsError(err, "received forward") {
		t.Fatal(err)
	}
	// ...and store the alternate address without resolving it.
	if !proto.Equal(cl.forwardAddr, &forwardTo) {
		t.Fatalf("unexpected forward address %v, expected %v", cl.forwardAddr, &forwardTo)
	}
}
示例7: TestClientGossipMetrics
// TestClientGossipMetrics verifies that gossip stats are generated.
func TestClientGossipMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	localRegistry := metric.NewRegistry()
	local := startGossip(1, stopper, t, localRegistry)
	remoteRegistry := metric.NewRegistry()
	remote := startGossip(2, stopper, t, remoteRegistry)
	gossipSucceedsSoon(
		t, stopper, make(chan *client, 2),
		map[*client]*Gossip{
			newClient(&local.is.NodeAddr, makeMetrics(metric.NewRegistry())):  remote,
			newClient(&remote.is.NodeAddr, makeMetrics(metric.NewRegistry())): local,
		},
		func() error {
			// Add an info on each node so every retry of this closure
			// generates fresh gossip traffic.
			if err := local.AddInfo("local-key", nil, time.Hour); err != nil {
				t.Fatal(err)
			}
			if err := remote.AddInfo("remote-key", nil, time.Hour); err != nil {
				t.Fatal(err)
			}
			// Infos/Bytes Sent/Received should not be zero.
			for i, reg := range []*metric.Registry{localRegistry, remoteRegistry} {
				for _, ratesName := range []string{
					InfosSentRatesName,
					InfosReceivedRatesName,
					BytesSentRatesName,
					BytesReceivedRatesName,
				} {
					counterName := ratesName + "-count"
					counter := reg.GetCounter(counterName)
					if counter == nil {
						return errors.Errorf("%d: missing counter %q", i, counterName)
					}
					if count := counter.Count(); count <= 0 {
						return errors.Errorf("%d: expected metrics counter %q > 0; = %d", i, counterName, count)
					}
				}
			}
			// NOTE(review): an earlier version also ranged over an empty
			// []string{} of gauge names intending to verify the incoming and
			// outgoing connection gauges; since the slice was empty the loop
			// body was unreachable dead code and has been removed. Restore it
			// with real gauge names if connection-count coverage is wanted.
			return nil
		})
}
示例8: TestGossipOrphanedStallDetection
// TestGossipOrphanedStallDetection exercises stall detection: when the only
// peer goes away the connection must be dropped, and when the peer returns at
// the same address the node must reconnect.
func TestGossipOrphanedStallDetection(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	local.SetStallInterval(5 * time.Millisecond)
	// Make sure we have the sentinel to ensure that its absence is not the
	// cause of stall detection.
	if err := local.AddInfo(KeySentinel, nil, time.Hour); err != nil {
		t.Fatal(err)
	}

	peerStopper := stop.NewStopper()
	peer := startGossip(2, peerStopper, t, metric.NewRegistry())
	peerNodeID := peer.GetNodeID()
	peerAddr := peer.GetNodeAddr()
	local.startClient(peerAddr, peerNodeID)

	// connectedToPeer reports whether local currently lists the peer among
	// its outgoing connections.
	connectedToPeer := func() bool {
		for _, id := range local.Outgoing() {
			if id == peerNodeID {
				return true
			}
		}
		return false
	}

	util.SucceedsSoon(t, func() error {
		if connectedToPeer() {
			return nil
		}
		return errors.Errorf("%d not yet connected", peerNodeID)
	})
	local.bootstrap()
	local.manage()

	// Kill the peer; stall detection should drop the orphaned connection.
	peerStopper.Stop()
	util.SucceedsSoon(t, func() error {
		if connectedToPeer() {
			return errors.Errorf("%d still connected", peerNodeID)
		}
		return nil
	})

	// Restart the peer at the same address and expect a reconnect.
	peerStopper = stop.NewStopper()
	defer peerStopper.Stop()
	peer = startGossipAtAddr(peerNodeID, peerAddr, peerStopper, t, metric.NewRegistry())
	util.SucceedsSoon(t, func() error {
		if connectedToPeer() {
			return nil
		}
		return errors.Errorf("%d not yet connected", peerNodeID)
	})
}
示例9: TestClientGossipMetrics
// TestClientGossipMetrics verifies that gossip stats are generated.
func TestClientGossipMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	if err := local.AddInfo("local-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}
	if err := remote.AddInfo("remote-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}
	gossipSucceedsSoon(
		t, stopper, make(chan *client, 2),
		map[*client]*Gossip{
			// A single client link between the two nodes; metrics accrue on
			// both the client and the server side of this connection.
			newClient(context.TODO(), local.GetNodeAddr(), remote.nodeMetrics): remote,
		},
		func() error {
			// Infos/Bytes Sent/Received should not be zero.
			for i, s := range []*server{local.server, remote.server} {
				for _, rate := range []metric.Rates{
					s.nodeMetrics.InfosSent,
					s.nodeMetrics.InfosReceived,
					s.nodeMetrics.BytesSent,
					s.nodeMetrics.BytesReceived,
				} {
					counter := rate.Counter
					if count := counter.Count(); count <= 0 {
						return errors.Errorf("%d: expected metrics counter %q > 0; = %d", i, counter.GetName(), count)
					}
				}
			}
			// Since there are two gossip nodes, there should be exactly one incoming
			// or outgoing connection due to gossip's connection de-duplication.
			for i, g := range []*Gossip{local, remote} {
				g.mu.Lock()
				// NOTE(review): this defer fires when the closure returns, not
				// at the end of the loop iteration, so both nodes' mutexes are
				// held simultaneously during the second iteration. Harmless
				// for two nodes, but worth knowing if the list ever grows.
				defer g.mu.Unlock()
				count := int64(0)
				for _, gauge := range []*metric.Gauge{g.mu.incoming.gauge, g.outgoing.gauge} {
					if gauge == nil {
						return errors.Errorf("%d: missing gauge", i)
					}
					count += gauge.Value()
				}
				const expected = 1
				if count != expected {
					return errors.Errorf("%d: expected metrics incoming + outgoing connection count == %d; = %d", i, expected, count)
				}
			}
			return nil
		})
}
示例10: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	// Push the background scan intervals far into the future so they don't
	// interfere with the test.
	ctx.ScanInterval = 10 * time.Hour
	ctx.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCtx := makeTestContext()
	g := gossip.New(
		context.Background(),
		nodeRPCContext,
		grpcServer,
		serverCtx.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry())
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(ln.Addr())
	}
	ctx.Gossip = g
	// Tie retries to stopper shutdown so a quiescing stopper doesn't leave
	// retry loops hanging.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(&kv.DistSenderConfig{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	ctx.Ctx = tracing.WithTracer(context.Background(), tracing.NewTracer())
	sender := kv.NewTxnCoordSender(ctx.Ctx, distSender, ctx.Clock, false, stopper,
		kv.MakeTxnMetrics())
	ctx.DB = client.NewDB(sender)
	// Callers of this helper don't exercise Raft, so a dummy transport
	// suffices.
	ctx.Transport = storage.NewDummyRaftTransport()
	node := NewNode(ctx, status.NewMetricsRecorder(ctx.Clock), metric.NewRegistry(), stopper,
		kv.MakeTxnMetrics(), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	return grpcServer, ln.Addr(), ctx.Clock, node, stopper
}
示例11: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)
	// The wrapped sender echoes the batch back with a cloned txn marked as
	// writing, standing in for a successful single-round-trip commit.
	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		br := ba.CreateReply()
		txnClone := ba.Txn.Clone()
		br.Txn = &txnClone
		br.Txn.Writing = true
		return br, nil
	}), clock, false, tracing.NewTracer(), stopper, NewTxnMetrics(metric.NewRegistry()))
	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()
	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	// If a heartbeat goroutine were (incorrectly) started, Send would fail
	// because the stopper is already stopped; success here proves none was.
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
示例12: NewStoreStatusMonitor
// NewStoreStatusMonitor constructs a StoreStatusMonitor with the given ID.
func NewStoreStatusMonitor(id roachpb.StoreID, metaRegistry *metric.Registry) *StoreStatusMonitor {
	reg := metric.NewRegistry()
	// Format as `cr.store.<metric>.<id>` in output, in analogy to the time
	// series data written; the parent registry substitutes each metric's
	// name for the "%s" placeholder.
	metaRegistry.MustAdd(storeTimeSeriesPrefix+"%s."+id.String(), reg)

	m := &StoreStatusMonitor{
		ID:       id,
		registry: reg,
	}
	// Range counts.
	m.rangeCount = reg.Counter("ranges")
	m.leaderRangeCount = reg.Gauge("ranges.leader")
	m.replicatedRangeCount = reg.Gauge("ranges.replicated")
	m.availableRangeCount = reg.Gauge("ranges.available")
	// MVCC stats.
	m.liveBytes = reg.Gauge("livebytes")
	m.keyBytes = reg.Gauge("keybytes")
	m.valBytes = reg.Gauge("valbytes")
	m.intentBytes = reg.Gauge("intentbytes")
	m.liveCount = reg.Gauge("livecount")
	m.keyCount = reg.Gauge("keycount")
	m.valCount = reg.Gauge("valcount")
	m.intentCount = reg.Gauge("intentcount")
	m.intentAge = reg.Gauge("intentage")
	m.gcBytesAge = reg.Gauge("gcbytesage")
	m.lastUpdateNanos = reg.Gauge("lastupdatenanos")
	// Capacity.
	m.capacity = reg.Gauge("capacity")
	m.available = reg.Gauge("capacity.available")
	return m
}
示例13: NewExecutor
// NewExecutor creates an Executor and registers a callback on the
// system config.
func NewExecutor(db client.DB, gossip *gossip.Gossip, leaseMgr *LeaseManager, stopper *stop.Stopper) *Executor {
	reg := metric.NewRegistry()
	e := &Executor{
		db:       db,
		reCache:  parser.NewRegexpCache(512),
		leaseMgr: leaseMgr,
		registry: reg,
		// Per-statement metrics, all registered up front.
		latency:       reg.Latency("latency"),
		txnBeginCount: reg.Counter("transaction.begincount"),
		selectCount:   reg.Counter("select.count"),
		updateCount:   reg.Counter("update.count"),
		insertCount:   reg.Counter("insert.count"),
		deleteCount:   reg.Counter("delete.count"),
		ddlCount:      reg.Counter("ddl.count"),
		miscCount:     reg.Counter("misc.count"),
	}
	e.systemConfigCond = sync.NewCond(&e.systemConfigMu)

	// Keep the cached system config fresh on every gossip update until the
	// stopper signals shutdown.
	updateCh := gossip.RegisterSystemConfigChannel()
	stopper.RunWorker(func() {
		for {
			select {
			case <-updateCh:
				cfg := gossip.GetSystemConfig()
				e.updateSystemConfig(cfg)
			case <-stopper.ShouldStop():
				return
			}
		}
	})
	return e
}
示例14: TestMultiRangeScanWithMaxResults
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		splitKeys []roachpb.Key
		keys      []roachpb.Key
	}{
		{[]roachpb.Key{roachpb.Key("m")},
			[]roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
		{[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
			[]roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
				roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
	}
	for i, tc := range testCases {
		// Run each case inside a closure so the deferred server shutdown
		// fires at the end of the iteration. Previously the defer was placed
		// directly in the loop body, which kept every test server alive
		// until the whole test returned.
		func() {
			s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
			defer s.Stopper().Stop()
			ts := s.(*TestServer)
			retryOpts := base.DefaultRetryOptions()
			retryOpts.Closer = ts.stopper.ShouldDrain()
			ds := kv.NewDistSender(&kv.DistSenderContext{
				Clock:           s.Clock(),
				RPCContext:      s.RPCContext(),
				RPCRetryOptions: &retryOpts,
			}, ts.Gossip())
			tds := kv.NewTxnCoordSender(ds, ts.Clock(), ts.Ctx.Linearizable, tracing.NewTracer(),
				ts.stopper, kv.NewTxnMetrics(metric.NewRegistry()))
			// Split the key space and write all keys.
			for _, sk := range tc.splitKeys {
				if err := ts.node.ctx.DB.AdminSplit(sk); err != nil {
					t.Fatal(err)
				}
			}
			for _, k := range tc.keys {
				put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
				if _, err := client.SendWrapped(tds, nil, put); err != nil {
					t.Fatal(err)
				}
			}
			// Try every possible ScanRequest startKey.
			for start := 0; start < len(tc.keys); start++ {
				// Try every possible maxResults, from 1 to beyond the size of key array.
				for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
					scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
						int64(maxResults))
					reply, err := client.SendWrapped(tds, nil, scan)
					if err != nil {
						t.Fatal(err)
					}
					rows := reply.(*roachpb.ScanResponse).Rows
					// A scan that fits within the key space returns exactly
					// maxResults rows; one that runs off the end returns
					// maxResults-1 (the end key itself is exclusive).
					if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
						t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
					} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
						t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
					}
				}
			}
		}()
	}
}
示例15: makeTestV3Conn
// makeTestV3Conn wraps a raw network connection in a v3Conn backed by a
// dummy SQL executor, a fresh metrics registry and zero-value session args.
func makeTestV3Conn(c net.Conn) v3Conn {
	executor := sql.NewDummyExecutor()
	metrics := newServerMetrics(metric.NewRegistry())
	var args sql.SessionArgs
	return makeV3Conn(c, executor, metrics, args)
}