This page collects typical usage examples of the Golang function metric.NewRegistry from github.com/cockroachdb/cockroach/pkg/util/metric. If you are unsure what NewRegistry does, how to call it, or what it looks like in real code, the curated examples below may help.
The 15 code examples of NewRegistry shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
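Before the full examples, here is a minimal sketch of the basic pattern they all share: create a registry, register a metric with it, and read the metric back. The metric name and the use of NewCounter/AddMetric/Each are illustrative assumptions about this version of the metric package, not code taken from the examples below.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/util/metric"
)

func main() {
	// A Registry is a container that tracks a set of named metrics.
	reg := metric.NewRegistry()

	// Register a counter with the registry and bump it. The metric
	// name "requests.count" is a made-up placeholder.
	counter := metric.NewCounter(metric.Metadata{Name: "requests.count"})
	reg.AddMetric(counter)
	counter.Inc(1)

	// Walk every metric in the registry, the same pattern that
	// MustGetSQLNetworkCounter uses in Example 14 below.
	reg.Each(func(name string, v interface{}) {
		fmt.Printf("%s = %d\n", name, v.(*metric.Counter).Count())
	})
}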
Example 1: TestClientDisconnectRedundant
// TestClientDisconnectRedundant verifies that the gossip server
// will drop an outgoing client connection that is already an
// inbound client connection of another node.
func TestClientDisconnectRedundant(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	// startClient requires that the locks be held, so acquire them here.
	local.mu.Lock()
	remote.mu.Lock()
	rAddr := remote.mu.is.NodeAddr
	lAddr := local.mu.is.NodeAddr
	local.startClient(&rAddr, remote.NodeID.Get())
	remote.startClient(&lAddr, local.NodeID.Get())
	local.mu.Unlock()
	remote.mu.Unlock()
	local.manage()
	remote.manage()
	util.SucceedsSoon(t, func() error {
		// Check which of the clients is connected to the other.
		ok1 := local.findClient(func(c *client) bool { return c.addr.String() == rAddr.String() }) != nil
		ok2 := remote.findClient(func(c *client) bool { return c.addr.String() == lAddr.String() }) != nil
		// We expect node 2 to disconnect; if both are still connected,
		// it's possible that node 1 gossiped before node 2 connected, in
		// which case we have to gossip from node 1 to trigger the
		// disconnect redundant client code.
		if ok1 && ok2 {
			if err := local.AddInfo("local-key", nil, time.Second); err != nil {
				t.Fatal(err)
			}
		} else if ok1 && !ok2 && verifyServerMaps(local, 0) && verifyServerMaps(remote, 1) {
			return nil
		}
		return errors.New("local client to remote not yet closed as redundant")
	})
}
Example 2: TestGossipRaceLogStatus
func TestGossipRaceLogStatus(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	local.mu.Lock()
	peer := startGossip(2, stopper, t, metric.NewRegistry())
	local.startClient(&peer.mu.is.NodeAddr)
	local.mu.Unlock()
	// Race gossiping against LogStatus.
	gun := make(chan struct{})
	for i := uint8(0); i < 10; i++ {
		go func() {
			<-gun
			local.LogStatus()
			gun <- struct{}{}
		}()
		gun <- struct{}{}
		if err := local.AddInfo(
			strconv.FormatUint(uint64(i), 10),
			[]byte{i},
			time.Hour,
		); err != nil {
			t.Fatal(err)
		}
		<-gun
	}
	close(gun)
}
Example 3: TestClientGossip
// TestClientGossip verifies a client can gossip a delta to the server.
func TestClientGossip(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	disconnected := make(chan *client, 1)
	c := newClient(log.AmbientContext{}, remote.GetNodeAddr(), makeMetrics())
	defer func() {
		stopper.Stop()
		if c != <-disconnected {
			t.Errorf("expected client disconnect after remote close")
		}
	}()
	if err := local.AddInfo("local-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}
	if err := remote.AddInfo("remote-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}
	gossipSucceedsSoon(t, stopper, disconnected, map[*client]*Gossip{
		c: local,
	}, func() error {
		if _, err := remote.GetInfo("local-key"); err != nil {
			return err
		}
		if _, err := local.GetInfo("remote-key"); err != nil {
			return err
		}
		return nil
	})
}
Example 4: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(
	addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	cfg := storage.StoreConfig{}
	stopper := stop.NewStopper()
	cfg.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
	cfg.ScanInterval = 10 * time.Hour
	cfg.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCfg := makeTestConfig()
	cfg.Gossip = gossip.NewTest(
		0,
		nodeRPCContext,
		grpcServer,
		serverCfg.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry(),
	)
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		cfg.Gossip.SetResolvers([]resolver.Resolver{r})
		cfg.Gossip.Start(ln.Addr())
	}
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(kv.DistSenderConfig{
		Clock:           cfg.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, cfg.Gossip)
	cfg.AmbientCtx.Tracer = tracing.NewTracer()
	sender := kv.NewTxnCoordSender(
		cfg.AmbientCtx,
		distSender,
		cfg.Clock,
		false,
		stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval),
	)
	cfg.DB = client.NewDB(sender)
	cfg.Transport = storage.NewDummyRaftTransport()
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
Example 5: TestClientDisallowMultipleConns
// TestClientDisallowMultipleConns verifies that the server disallows
// multiple connections from the same client node ID.
func TestClientDisallowMultipleConns(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	local.mu.Lock()
	remote.mu.Lock()
	rAddr := remote.mu.is.NodeAddr
	// Start two clients from local to remote. RPC client cache is
	// disabled via the context, so we'll start two different outgoing
	// connections.
	local.startClient(&rAddr, remote.NodeID.Get())
	local.startClient(&rAddr, remote.NodeID.Get())
	local.mu.Unlock()
	remote.mu.Unlock()
	local.manage()
	remote.manage()
	util.SucceedsSoon(t, func() error {
		// Verify that the remote server has only a single incoming
		// connection and the local server has only a single outgoing
		// connection.
		local.mu.Lock()
		remote.mu.Lock()
		outgoing := local.outgoing.len()
		incoming := remote.mu.incoming.len()
		local.mu.Unlock()
		remote.mu.Unlock()
		if outgoing == 1 && incoming == 1 && verifyServerMaps(local, 0) && verifyServerMaps(remote, 1) {
			return nil
		}
		return errors.Errorf("incorrect number of incoming (%d) or outgoing (%d) connections", incoming, outgoing)
	})
}
Example 6: TestClientGossipMetrics
// TestClientGossipMetrics verifies that gossip stats are generated.
func TestClientGossipMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	if err := local.AddInfo("local-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}
	if err := remote.AddInfo("remote-key", nil, time.Hour); err != nil {
		t.Fatal(err)
	}
	gossipSucceedsSoon(
		t, stopper, make(chan *client, 2),
		map[*client]*Gossip{
			newClient(log.AmbientContext{}, local.GetNodeAddr(), remote.nodeMetrics): remote,
		},
		func() error {
			// Infos/Bytes Sent/Received should not be zero.
			for i, s := range []*server{local.server, remote.server} {
				for _, counter := range []*metric.Counter{
					s.nodeMetrics.InfosSent,
					s.nodeMetrics.InfosReceived,
					s.nodeMetrics.BytesSent,
					s.nodeMetrics.BytesReceived,
				} {
					if count := counter.Count(); count <= 0 {
						return errors.Errorf("%d: expected metrics counter %q > 0; = %d", i, counter.GetName(), count)
					}
				}
			}
			// Since there are two gossip nodes, there should be exactly one incoming
			// or outgoing connection due to gossip's connection de-duplication.
			for i, g := range []*Gossip{local, remote} {
				g.mu.Lock()
				defer g.mu.Unlock()
				count := int64(0)
				for _, gauge := range []*metric.Gauge{g.mu.incoming.gauge, g.outgoing.gauge} {
					if gauge == nil {
						return errors.Errorf("%d: missing gauge", i)
					}
					count += gauge.Value()
				}
				const expected = 1
				if count != expected {
					return errors.Errorf("%d: expected metrics incoming + outgoing connection count == %d; = %d", i, expected, count)
				}
			}
			return nil
		})
}
Example 7: startFakeServerGossips
// startFakeServerGossips creates a local gossip instance and a remote
// faked gossip instance. The remote instance launches its faked gossip
// service just to check the client's messages.
func startFakeServerGossips(
	t *testing.T, localNodeID roachpb.NodeID,
) (*Gossip, *fakeGossipServer, *stop.Stopper) {
	stopper := stop.NewStopper()
	lRPCContext := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper)
	lserver := rpc.NewServer(lRPCContext)
	local := NewTest(localNodeID, lRPCContext, lserver, nil, stopper, metric.NewRegistry())
	lln, err := netutil.ListenAndServeGRPC(stopper, lserver, util.IsolatedTestAddr)
	if err != nil {
		t.Fatal(err)
	}
	local.start(lln.Addr())
	rRPCContext := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper)
	rserver := rpc.NewServer(rRPCContext)
	rln, err := netutil.ListenAndServeGRPC(stopper, rserver, util.IsolatedTestAddr)
	if err != nil {
		t.Fatal(err)
	}
	remote := newFakeGossipServer(rserver, stopper)
	addr := rln.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())
	return local, remote, stopper
}
Example 8: TestClientForwardUnresolved
// TestClientForwardUnresolved verifies that a client does not resolve a forward
// address prematurely.
func TestClientForwardUnresolved(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	const nodeID = 1
	local := startGossip(nodeID, stopper, t, metric.NewRegistry())
	addr := local.GetNodeAddr()
	client := newClient(log.AmbientContext{}, addr, makeMetrics()) // never started
	newAddr := util.UnresolvedAddr{
		NetworkField: "tcp",
		AddressField: "localhost:2345",
	}
	reply := &Response{
		NodeID:          nodeID,
		Addr:            *addr,
		AlternateNodeID: nodeID + 1,
		AlternateAddr:   &newAddr,
	}
	if err := client.handleResponse(
		context.TODO(), local, reply,
	); !testutils.IsError(err, "received forward") {
		t.Fatal(err)
	}
	if !proto.Equal(client.forwardAddr, &newAddr) {
		t.Fatalf("unexpected forward address %v, expected %v", client.forwardAddr, &newAddr)
	}
}
Example 9: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to a maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Config, initSender InitSenderFn) {
	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	nc := &base.NodeIDContainer{}
	ambient.AddLogTag("n", nc)
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(ambient, baseCtx, ltc.Clock, ltc.Stopper)
	server := rpc.NewServer(rpcContext) // never started
	ltc.Gossip = gossip.New(ambient, nc, rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20)
	ltc.Stopper.AddCloser(ltc.Eng)
	ltc.Stores = storage.NewStores(ambient, ltc.Clock)
	ltc.Sender = initSender(nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	if ltc.DBContext == nil {
		dbCtx := client.DefaultDBContext()
		ltc.DBContext = &dbCtx
	}
	ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
	transport := storage.NewDummyRaftTransport()
	cfg := storage.TestStoreConfig()
	if ltc.RangeRetryOptions != nil {
		cfg.RangeRetryOptions = *ltc.RangeRetryOptions
	}
	cfg.AmbientCtx = ambient
	cfg.Clock = ltc.Clock
	cfg.DB = ltc.DB
	cfg.Gossip = ltc.Gossip
	cfg.Transport = transport
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	ltc.Store = storage.NewStore(cfg, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	nc.Set(context.TODO(), nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Example 10: TestGossipCullNetwork
// TestGossipCullNetwork verifies that a client will be culled from
// the network periodically (at cullInterval duration intervals).
func TestGossipCullNetwork(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	local.SetCullInterval(5 * time.Millisecond)
	local.mu.Lock()
	for i := 0; i < minPeers; i++ {
		peer := startGossip(roachpb.NodeID(i+2), stopper, t, metric.NewRegistry())
		local.startClient(peer.GetNodeAddr())
	}
	local.mu.Unlock()
	const slowGossipDuration = time.Minute
	if err := util.RetryForDuration(slowGossipDuration, func() error {
		if peers := len(local.Outgoing()); peers != minPeers {
			return errors.Errorf("%d of %d peers connected", peers, minPeers)
		}
		return nil
	}); err != nil {
		t.Fatalf("condition failed to evaluate within %s: %s", slowGossipDuration, err)
	}
	local.manage()
	if err := util.RetryForDuration(slowGossipDuration, func() error {
		// Verify that a client is closed within the cull interval.
		if peers := len(local.Outgoing()); peers != minPeers-1 {
			return errors.Errorf("%d of %d peers connected", peers, minPeers-1)
		}
		return nil
	}); err != nil {
		t.Fatalf("condition failed to evaluate within %s: %s", slowGossipDuration, err)
	}
}
Example 11: TestClientRetryBootstrap
// TestClientRetryBootstrap verifies that an initial failure to connect
// to a bootstrap host doesn't stall the bootstrapping process in the
// absence of any additional activity. This can happen during acceptance
// tests if the DNS can't look up hostnames when gossip is started.
func TestClientRetryBootstrap(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	local := startGossip(1, stopper, t, metric.NewRegistry())
	remote := startGossip(2, stopper, t, metric.NewRegistry())
	if err := local.AddInfo("local-key", []byte("hello"), 0*time.Second); err != nil {
		t.Fatal(err)
	}
	local.SetBootstrapInterval(10 * time.Millisecond)
	local.SetResolvers([]resolver.Resolver{
		&testResolver{addr: remote.GetNodeAddr().String(), numFails: 3, numSuccesses: 1},
	})
	local.bootstrap()
	local.manage()
	util.SucceedsSoon(t, func() error {
		_, err := remote.GetInfo("local-key")
		return err
	})
}
Example 12: newRaftTransportTestContext
func newRaftTransportTestContext(t testing.TB) *raftTransportTestContext {
	rttc := &raftTransportTestContext{
		t:          t,
		stopper:    stop.NewStopper(),
		transports: map[roachpb.NodeID]*storage.RaftTransport{},
	}
	rttc.nodeRPCContext = rpc.NewContext(
		log.AmbientContext{}, testutils.NewNodeTestBaseContext(), nil, rttc.stopper,
	)
	server := rpc.NewServer(rttc.nodeRPCContext) // never started
	rttc.gossip = gossip.NewTest(
		1, rttc.nodeRPCContext, server, nil, rttc.stopper, metric.NewRegistry(),
	)
	return rttc
}
Example 13: TestClientRegisterWithInitNodeID
// TestClientRegisterWithInitNodeID verifies two clients' gossip requests with NodeID 0.
func TestClientRegisterWithInitNodeID(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	// Create three gossip nodes, and connect to the first with NodeID 0.
	var g []*Gossip
	var gossipAddr string
	for i := 0; i < 3; i++ {
		RPCContext := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper)
		server := rpc.NewServer(RPCContext)
		ln, err := netutil.ListenAndServeGRPC(stopper, server, util.IsolatedTestAddr)
		if err != nil {
			t.Fatal(err)
		}
		// Connect to the first gossip node.
		if gossipAddr == "" {
			gossipAddr = ln.Addr().String()
		}
		var resolvers []resolver.Resolver
		resolver, err := resolver.NewResolver(gossipAddr)
		if err != nil {
			t.Fatal(err)
		}
		resolvers = append(resolvers, resolver)
		// The node ID must be non-zero.
		gnode := NewTest(
			roachpb.NodeID(i+1), RPCContext, server, resolvers, stopper, metric.NewRegistry(),
		)
		g = append(g, gnode)
		gnode.Start(ln.Addr())
	}
	util.SucceedsSoon(t, func() error {
		// The first gossip node should have two gossip client addresses
		// in its nodeMap once all three gossip nodes have registered
		// successfully.
		g[0].mu.Lock()
		defer g[0].mu.Unlock()
		if a, e := len(g[0].mu.nodeMap), 2; a != e {
			return errors.Errorf("expected %s to contain %d nodes, got %d", g[0].mu.nodeMap, e, a)
		}
		return nil
	})
}
Example 14: MustGetSQLNetworkCounter
// MustGetSQLNetworkCounter implements TestServerInterface.
func (ts *TestServer) MustGetSQLNetworkCounter(name string) int64 {
	var c int64
	var found bool
	reg := metric.NewRegistry()
	reg.AddMetricStruct(ts.pgServer.Metrics())
	reg.Each(func(n string, v interface{}) {
		if name == n {
			c = v.(*metric.Counter).Count()
			found = true
		}
	})
	if !found {
		panic(fmt.Sprintf("couldn't find metric %s", name))
	}
	return c
}
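A test would call this accessor with the registered name of a pgwire metric; for instance (the metric name below is an assumed placeholder, not taken from this page):

	// Hypothetical usage in a test; "sql.bytesin" is an assumed metric name.
	if n := ts.MustGetSQLNetworkCounter("sql.bytesin"); n <= 0 {
		t.Fatalf("expected nonzero SQL network traffic, got %d", n)
	}

Note the design choice in the example above: rather than exposing the pgwire metrics struct directly, it registers the struct with a throwaway registry created by NewRegistry and then scans it by name via Each.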
Example 15: TestGossipInfoStore
// TestGossipInfoStore verifies the operation of a gossip instance's infostore.
func TestGossipInfoStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	rpcContext := newInsecureRPCContext(stopper)
	g := NewTest(1, rpcContext, rpc.NewServer(rpcContext), nil, stopper, metric.NewRegistry())
	slice := []byte("b")
	if err := g.AddInfo("s", slice, time.Hour); err != nil {
		t.Fatal(err)
	}
	if val, err := g.GetInfo("s"); !bytes.Equal(val, slice) || err != nil {
		t.Errorf("error fetching string: %v", err)
	}
	if _, err := g.GetInfo("s2"); err == nil {
		t.Errorf("expected error fetching nonexistent key \"s2\"")
	}
}