本文整理汇总了Golang中github.com/cockroachdb/cockroach/pkg/rpc.NewServer函数的典型用法代码示例。如果您正苦于以下问题:Golang NewServer函数的具体用法?Golang NewServer怎么用?Golang NewServer使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewServer函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: startFakeServerGossips
// startFakeServerGossips creates a real local gossip instance and a
// faked remote one. The remote instance serves a stub gossip endpoint
// whose only purpose is to let tests inspect the client's messages.
func startFakeServerGossips(
	t *testing.T, localNodeID roachpb.NodeID,
) (*Gossip, *fakeGossipServer, *stop.Stopper) {
	stopper := stop.NewStopper()

	// Local side: a fully functional gossip instance.
	localCtx := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper)
	localServer := rpc.NewServer(localCtx)
	local := NewTest(localNodeID, localCtx, localServer, nil, stopper, metric.NewRegistry())
	localLn, err := netutil.ListenAndServeGRPC(stopper, localServer, util.IsolatedTestAddr)
	if err != nil {
		t.Fatal(err)
	}
	local.start(localLn.Addr())

	// Remote side: only the fake gossip service is registered.
	remoteCtx := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper)
	remoteServer := rpc.NewServer(remoteCtx)
	remoteLn, err := netutil.ListenAndServeGRPC(stopper, remoteServer, util.IsolatedTestAddr)
	if err != nil {
		t.Fatal(err)
	}
	remote := newFakeGossipServer(remoteServer, stopper)
	remoteAddr := remoteLn.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(remoteAddr.Network(), remoteAddr.String())

	return local, remote, stopper
}
示例2: startGossipAtAddr
// startGossipAtAddr starts a test gossip instance listening on addr,
// registers its node descriptor, and returns it once running.
func startGossipAtAddr(
	nodeID roachpb.NodeID,
	addr net.Addr,
	stopper *stop.Stopper,
	t *testing.T,
	registry *metric.Registry,
) *Gossip {
	rpcCtx := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper)
	grpcServer := rpc.NewServer(rpcCtx)
	g := NewTest(nodeID, rpcCtx, grpcServer, nil, stopper, registry)

	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	// The listener may have bound a different port (e.g. for a :0 spec),
	// so use its actual address from here on.
	addr = ln.Addr()

	desc := &roachpb.NodeDescriptor{
		NodeID:  nodeID,
		Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
	}
	if err := g.SetNodeDescriptor(desc); err != nil {
		t.Fatal(err)
	}
	g.start(addr)
	// Give the gossip instance a moment to begin operating.
	time.Sleep(time.Millisecond)
	return g
}
示例3: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(
addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
cfg := storage.StoreConfig{}
stopper := stop.NewStopper()
cfg.Clock = hlc.NewClock(hlc.UnixNano)
nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
// Effectively disable periodic scans and consistency checks for the
// duration of the test.
cfg.ScanInterval = 10 * time.Hour
cfg.ConsistencyCheckInterval = 10 * time.Hour
grpcServer := rpc.NewServer(nodeRPCContext)
serverCfg := makeTestConfig()
// NodeID 0 lets gossip allocate/learn the real node ID later.
cfg.Gossip = gossip.NewTest(
0,
nodeRPCContext,
grpcServer,
serverCfg.GossipBootstrapResolvers,
stopper,
metric.NewRegistry(),
)
ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
if err != nil {
t.Fatal(err)
}
if gossipBS != nil {
// Handle possibility of a :0 port specification.
if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
gossipBS = ln.Addr()
}
r, err := resolver.NewResolverFromAddress(gossipBS)
if err != nil {
t.Fatalf("bad gossip address %s: %s", gossipBS, err)
}
cfg.Gossip.SetResolvers([]resolver.Resolver{r})
cfg.Gossip.Start(ln.Addr())
}
retryOpts := base.DefaultRetryOptions()
// Abort RPC retries once the stopper begins quiescing.
retryOpts.Closer = stopper.ShouldQuiesce()
distSender := kv.NewDistSender(kv.DistSenderConfig{
Clock: cfg.Clock,
RPCContext: nodeRPCContext,
RPCRetryOptions: &retryOpts,
}, cfg.Gossip)
cfg.AmbientCtx.Tracer = tracing.NewTracer()
sender := kv.NewTxnCoordSender(
cfg.AmbientCtx,
distSender,
cfg.Clock,
false,
stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval),
)
cfg.DB = client.NewDB(sender)
// Raft traffic is not exercised here, so a dummy transport suffices.
cfg.Transport = storage.NewDummyRaftTransport()
cfg.MetricsSampleInterval = metric.TestSampleInterval
node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
// Expose the node's internal KV service on the gRPC server.
roachpb.RegisterInternalServer(grpcServer, node)
return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
示例4: newTestServer
// newTestServer constructs a gRPC server on a fresh test address and
// returns it together with its listener.
func newTestServer(t *testing.T, ctx *rpc.Context) (*grpc.Server, net.Listener) {
	server := rpc.NewServer(ctx)
	listener, err := netutil.ListenAndServeGRPC(ctx.Stopper, server, util.TestAddr)
	if err != nil {
		t.Fatal(err)
	}
	return server, listener
}
示例5: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Config, initSender InitSenderFn) {
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
nc := &base.NodeIDContainer{}
// Tag log output with the node ID once it is known.
ambient.AddLogTag("n", nc)
nodeID := roachpb.NodeID(1)
nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
ltc.tester = t
ltc.Manual = hlc.NewManualClock(0)
ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
ltc.Stopper = stop.NewStopper()
rpcContext := rpc.NewContext(ambient, baseCtx, ltc.Clock, ltc.Stopper)
server := rpc.NewServer(rpcContext) // never started
ltc.Gossip = gossip.New(ambient, nc, rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
// 50 MB in-memory engine, closed together with the stopper.
ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20)
ltc.Stopper.AddCloser(ltc.Eng)
ltc.Stores = storage.NewStores(ambient, ltc.Clock)
// The caller chooses the sender implementation under test.
ltc.Sender = initSender(nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
ltc.Gossip)
if ltc.DBContext == nil {
dbCtx := client.DefaultDBContext()
ltc.DBContext = &dbCtx
}
ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
transport := storage.NewDummyRaftTransport()
cfg := storage.TestStoreConfig()
if ltc.RangeRetryOptions != nil {
cfg.RangeRetryOptions = *ltc.RangeRetryOptions
}
cfg.AmbientCtx = ambient
cfg.Clock = ltc.Clock
cfg.DB = ltc.DB
cfg.Gossip = ltc.Gossip
cfg.Transport = transport
cfg.MetricsSampleInterval = metric.TestSampleInterval
ltc.Store = storage.NewStore(cfg, ltc.Eng, nodeDesc)
// Bootstrap the store, its first range, and start it, failing the test
// on any error.
if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
ltc.Stores.AddStore(ltc.Store)
if err := ltc.Store.BootstrapRange(nil); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
// Publish the node ID to the log-tag container and to gossip.
nc.Set(context.TODO(), nodeDesc.NodeID)
if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
t.Fatalf("unable to set node descriptor: %s", err)
}
}
示例6: newRaftTransportTestContext
// newRaftTransportTestContext sets up the shared state for raft
// transport tests: a stopper, an RPC context, and a gossip instance
// whose backing gRPC server is never started.
func newRaftTransportTestContext(t testing.TB) *raftTransportTestContext {
	tc := &raftTransportTestContext{
		t:          t,
		stopper:    stop.NewStopper(),
		transports: map[roachpb.NodeID]*storage.RaftTransport{},
	}
	tc.nodeRPCContext = rpc.NewContext(
		log.AmbientContext{}, testutils.NewNodeTestBaseContext(), nil, tc.stopper,
	)
	// Gossip requires a grpc.Server, but this one is never started.
	unstartedServer := rpc.NewServer(tc.nodeRPCContext)
	tc.gossip = gossip.NewTest(
		1, tc.nodeRPCContext, unstartedServer, nil, tc.stopper, metric.NewRegistry(),
	)
	return tc
}
示例7: TestClientRegisterWithInitNodeID
// TestClientRegisterWithInitNodeID verifies two clients' gossip requests
// with NodeID 0 against a common bootstrap node.
func TestClientRegisterWithInitNodeID(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	// Create three gossip nodes, all bootstrapping off the first one's
	// address.
	var g []*Gossip
	var gossipAddr string
	for i := 0; i < 3; i++ {
		rpcCtx := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper)
		server := rpc.NewServer(rpcCtx)
		ln, err := netutil.ListenAndServeGRPC(stopper, server, util.IsolatedTestAddr)
		if err != nil {
			t.Fatal(err)
		}
		// Remember the first node's address for everyone to connect to.
		if gossipAddr == "" {
			gossipAddr = ln.Addr().String()
		}
		r, err := resolver.NewResolver(gossipAddr)
		if err != nil {
			t.Fatal(err)
		}
		resolvers := []resolver.Resolver{r}
		// Node IDs must be non-zero.
		gnode := NewTest(
			roachpb.NodeID(i+1), rpcCtx, server, resolvers, stopper, metric.NewRegistry(),
		)
		g = append(g, gnode)
		gnode.Start(ln.Addr())
	}

	util.SucceedsSoon(t, func() error {
		// Once registration succeeds, the first node's nodeMap should
		// contain the two other gossip clients.
		g[0].mu.Lock()
		defer g[0].mu.Unlock()
		if a, e := len(g[0].mu.nodeMap), 2; a != e {
			return errors.Errorf("expected %s to contain %d nodes, got %d", g[0].mu.nodeMap, e, a)
		}
		return nil
	})
}
示例8: AddNodeWithoutGossip
// AddNodeWithoutGossip registers a node with the cluster. Nodes must
// be added before they can be used in other methods of
// raftTransportTestContext. Unless you are testing the effects of
// delaying gossip, use AddNode instead.
func (rttc *raftTransportTestContext) AddNodeWithoutGossip(
	nodeID roachpb.NodeID,
) (*storage.RaftTransport, net.Addr) {
	server := rpc.NewServer(rttc.nodeRPCContext)
	listener, err := netutil.ListenAndServeGRPC(rttc.stopper, server, util.TestAddr)
	if err != nil {
		rttc.t.Fatal(err)
	}
	transport := storage.NewRaftTransport(
		log.AmbientContext{},
		storage.GossipAddressResolver(rttc.gossip),
		server,
		rttc.nodeRPCContext,
	)
	rttc.transports[nodeID] = transport
	return transport, listener.Addr()
}
示例9: TestGossipOverwriteNode
// TestGossipOverwriteNode verifies that if a new node is added with the same
// address as an old node, that old node is removed from the cluster.
func TestGossipOverwriteNode(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	rpcContext := newInsecureRPCContext(stopper)
	g := NewTest(1, rpcContext, rpc.NewServer(rpcContext), nil, stopper, metric.NewRegistry())

	node1 := &roachpb.NodeDescriptor{NodeID: 1, Address: util.MakeUnresolvedAddr("tcp", "1.1.1.1:1")}
	node2 := &roachpb.NodeDescriptor{NodeID: 2, Address: util.MakeUnresolvedAddr("tcp", "2.2.2.2:2")}
	for _, desc := range []*roachpb.NodeDescriptor{node1, node2} {
		if err := g.SetNodeDescriptor(desc); err != nil {
			t.Fatal(err)
		}
	}
	// Both descriptors should be retrievable by node ID.
	for _, desc := range []*roachpb.NodeDescriptor{node1, node2} {
		if val, err := g.GetNodeDescriptor(desc.NodeID); err != nil {
			t.Error(err)
		} else if val.NodeID != desc.NodeID {
			t.Errorf("expected node %d, got %+v", desc.NodeID, val)
		}
	}

	// Give node3 the same address as node1, which should cause node1 to be
	// removed from the cluster.
	node3 := &roachpb.NodeDescriptor{NodeID: 3, Address: node1.Address}
	if err := g.SetNodeDescriptor(node3); err != nil {
		t.Fatal(err)
	}
	if val, err := g.GetNodeDescriptor(node3.NodeID); err != nil {
		t.Error(err)
	} else if val.NodeID != node3.NodeID {
		t.Errorf("expected node %d, got %+v", node3.NodeID, val)
	}

	// Quiesce the stopper now to ensure that the update has propagated before
	// checking whether node 1 has been removed from the infoStore.
	stopper.Quiesce()
	expectedErr := "unable to look up descriptor for node"
	if val, err := g.GetNodeDescriptor(node1.NodeID); !testutils.IsError(err, expectedErr) {
		t.Errorf("expected error %q fetching node %d; got error %v and node %+v",
			expectedErr, node1.NodeID, err, val)
	}
}
示例10: TestGossipInfoStore
// TestGossipInfoStore verifies operation of gossip instance infostore.
func TestGossipInfoStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	rpcContext := newInsecureRPCContext(stopper)
	g := NewTest(1, rpcContext, rpc.NewServer(rpcContext), nil, stopper, metric.NewRegistry())

	// A stored value should round-trip through the infostore.
	want := []byte("b")
	if err := g.AddInfo("s", want, time.Hour); err != nil {
		t.Fatal(err)
	}
	got, err := g.GetInfo("s")
	if err != nil || !bytes.Equal(got, want) {
		t.Errorf("error fetching string: %v", err)
	}
	// Unknown keys must report an error.
	if _, err := g.GetInfo("s2"); err == nil {
		t.Errorf("expected error fetching nonexistent key \"s2\"")
	}
}
示例11: CreateNode
// CreateNode creates a simulation node and starts an RPC server for it.
func (n *Network) CreateNode() (*Node, error) {
server := rpc.NewServer(n.rpcContext)
ln, err := net.Listen(util.TestAddr.Network(), util.TestAddr.String())
if err != nil {
return nil, err
}
node := &Node{Server: server, Listener: ln, Registry: metric.NewRegistry()}
// NodeID 0: the gossip instance learns its real ID later.
node.Gossip = gossip.NewTest(0, n.rpcContext, server, nil, n.Stopper, node.Registry)
// Tear the node down in lockstep with the stopper: close the listener
// on quiesce, then stop the gRPC server and the simulation cycler once
// a full stop has been requested.
n.Stopper.RunWorker(func() {
<-n.Stopper.ShouldQuiesce()
netutil.FatalIfUnexpected(ln.Close())
<-n.Stopper.ShouldStop()
server.Stop()
node.Gossip.EnableSimulationCycler(false)
})
n.Nodes = append(n.Nodes, node)
return node, nil
}
示例12: createTestStorePool
// createTestStorePool creates a stopper, gossip and storePool for use in
// tests. Stopper must be stopped by the caller.
func createTestStorePool(
	timeUntilStoreDead time.Duration,
) (*stop.Stopper, *gossip.Gossip, *hlc.ManualClock, *StorePool) {
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	rpcContext := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, clock, stopper)
	// The gRPC server is required by gossip but is never started here.
	g := gossip.NewTest(1, rpcContext, rpc.NewServer(rpcContext), nil, stopper, metric.NewRegistry())
	pool := NewStorePool(
		log.AmbientContext{},
		g,
		clock,
		rpcContext,
		timeUntilStoreDead,
		stopper,
	)
	return stopper, g, manual, pool
}
示例13: createTestStorePool
// createTestStorePool creates a stopper, gossip and storePool for use in
// tests. Stopper must be stopped by the caller.
func createTestStorePool(
	timeUntilStoreDead time.Duration, deterministic bool, defaultNodeLiveness bool,
) (*stop.Stopper, *gossip.Gossip, *hlc.ManualClock, *StorePool, *mockNodeLiveness) {
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	rpcContext := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, clock, stopper)
	// The gRPC server is required by gossip but is never started here.
	g := gossip.NewTest(1, rpcContext, rpc.NewServer(rpcContext), nil, stopper, metric.NewRegistry())
	liveness := newMockNodeLiveness(defaultNodeLiveness)
	pool := NewStorePool(
		log.AmbientContext{},
		g,
		clock,
		liveness.nodeLivenessFunc,
		timeUntilStoreDead,
		deterministic,
	)
	return stopper, g, manual, pool, liveness
}
示例14: TestGossipGetNextBootstrapAddress
// TestGossipGetNextBootstrapAddress verifies that bootstrap addresses are
// returned in the order of the configured resolvers.
func TestGossipGetNextBootstrapAddress(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	// Build resolvers from the spec strings, silently dropping failures.
	resolverSpecs := []string{
		"127.0.0.1:9000",
		"127.0.0.1:9001",
		"localhost:9004",
	}
	resolvers := []resolver.Resolver{}
	for _, spec := range resolverSpecs {
		if r, err := resolver.NewResolver(spec); err == nil {
			resolvers = append(resolvers, r)
		}
	}
	if len(resolvers) != 3 {
		t.Errorf("expected 3 resolvers; got %d", len(resolvers))
	}
	server := rpc.NewServer(
		rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper),
	)
	g := NewTest(0, nil, server, resolvers, stop.NewStopper(), metric.NewRegistry())

	// Using specified resolvers, fetch bootstrap addresses 3 times
	// and verify the results match expected addresses.
	expAddresses := []string{
		"127.0.0.1:9000",
		"127.0.0.1:9001",
		"localhost:9004",
	}
	for i, exp := range expAddresses {
		addr := g.getNextBootstrapAddress()
		if addr == nil {
			t.Errorf("%d: unexpected nil addr when expecting %s", i, exp)
		} else if got := addr.String(); got != exp {
			t.Errorf("%d: expected addr %s; got %s", i, exp, got)
		}
	}
}
示例15: createCluster
// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(
stopper *stop.Stopper,
nodeCount int,
epochWriter, actionWriter io.Writer,
script Script,
rand *rand.Rand,
) *Cluster {
clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
rpcContext := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, clock, stopper)
server := rpc.NewServer(rpcContext)
// We set the node ID to MaxInt32 for the cluster Gossip instance to prevent
// conflicts with real node IDs.
g := gossip.NewTest(math.MaxInt32, rpcContext, server, nil, stopper, metric.NewRegistry())
// Set the store pool to deterministic so that a run with the exact same
// input will always produce the same output.
storePool := storage.NewStorePool(
log.AmbientContext{},
g,
clock,
rpcContext,
storage.TestTimeUntilStoreDeadOff,
stopper,
/* deterministic */ true,
)
c := &Cluster{
stopper: stopper,
clock: clock,
rpc: rpcContext,
gossip: g,
storePool: storePool,
allocator: storage.MakeAllocator(storePool, storage.AllocatorOptions{
AllowRebalance: true,
}),
storeGossiper: gossiputil.NewStoreGossiper(g),
nodes: make(map[roachpb.NodeID]*Node),
stores: make(map[roachpb.StoreID]*Store),
ranges: make(map[roachpb.RangeID]*Range),
rangeIDsByStore: make(map[roachpb.StoreID]roachpb.RangeIDSlice),
rand: rand,
// Tab-align the epoch and action logs for readability.
epochWriter: tabwriter.NewWriter(epochWriter, 8, 1, 2, ' ', 0),
actionWriter: tabwriter.NewWriter(actionWriter, 8, 1, 2, ' ', 0),
script: script,
epoch: -1,
}
// Add the nodes.
for i := 0; i < nodeCount; i++ {
c.addNewNodeWithStore()
}
// Add a single range and add to this first node's first store.
firstRange := c.addRange()
firstRange.addReplica(c.stores[0])
c.calculateRangeIDsByStore()
// Output the first epoch header.
c.epoch = 0
c.OutputEpochHeader()
c.OutputEpoch()
c.flush()
return c
}