This article collects typical usage examples of the Golang StoreContext type from github.com/cockroachdb/cockroach/storage. If you have been wondering what exactly StoreContext does, how to use it, or where to find working examples, the curated code samples below should help.
The following presents 11 code examples of the StoreContext type, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang examples.
Example 1: BootstrapCluster
// BootstrapCluster bootstraps multiple stores using the provided engines and
// cluster ID. The first bootstrapped store contains a single range spanning
// all keys. Initial range lookup metadata is populated for the range.
//
// Returns a KV client for unit-test purposes. The caller should close the
// returned client.
func BootstrapCluster(clusterID string, engines []engine.Engine, stopper *stop.Stopper) (*client.DB, error) {
	ctx := storage.StoreContext{}
	ctx.ScanInterval = 10 * time.Minute
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	// Create a KV DB with a local sender.
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, ctx.Clock, false, nil, stopper)
	ctx.DB = client.NewDB(sender)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	for i, eng := range engines {
		sIdent := roachpb.StoreIdent{
			ClusterID: clusterID,
			NodeID:    1,
			StoreID:   roachpb.StoreID(i + 1),
		}
		// The bootstrapping store will not connect to other nodes so its
		// StoreConfig doesn't really matter.
		s := storage.NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})
		// Verify the store isn't already part of a cluster.
		if len(s.Ident.ClusterID) > 0 {
			return nil, util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
		}
		// Bootstrap store to persist the store ident.
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			return nil, err
		}
		// Create first range, writing directly to engine. Note this does
		// not create the range, just its data. Only do this if this is the
		// first store.
		if i == 0 {
			// TODO(marc): this is better than having storage/ import sql, but still
			// not great. Find a better place to keep those.
			initialValues := sql.GetInitialSystemValues()
			if err := s.BootstrapRange(initialValues); err != nil {
				return nil, err
			}
		}
		if err := s.Start(stopper); err != nil {
			return nil, err
		}
		lSender.AddStore(s)
		// Initialize node and store ids. Only initialize the node once.
		if i == 0 {
			if nodeID, err := allocateNodeID(ctx.DB); nodeID != sIdent.NodeID || err != nil {
				return nil, util.Errorf("expected to initialize node id allocator to %d, got %d: %s",
					sIdent.NodeID, nodeID, err)
			}
		}
		if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, ctx.DB); storeID != sIdent.StoreID || err != nil {
			return nil, util.Errorf("expected to initialize store id allocator to %d, got %d: %s",
				sIdent.StoreID, storeID, err)
		}
	}
	return ctx.DB, nil
}
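A minimal sketch of how a test might call the helper above. The in-memory engine constructor engine.NewInMem and its arguments are assumptions added for illustration; they are not part of the example.

stopper := stop.NewStopper()
defer stopper.Stop()
// Assumed in-memory engine constructor; the exact signature may differ
// in this version of the codebase.
eng := engine.NewInMem(roachpb.Attributes{}, 1<<20)
db, err := BootstrapCluster("test-cluster", []engine.Engine{eng}, stopper)
if err != nil {
	t.Fatal(err)
}
_ = db // use the returned KV client in the test; see the doc comment about closing it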
Example 2: createTestStoreWithEngine
// createTestStoreWithEngine creates a test store using the given engine and clock.
// The caller is responsible for closing the store on exit.
func createTestStoreWithEngine(t *testing.T, eng engine.Engine, clock *hlc.Clock,
	bootstrap bool, context *storage.StoreContext) (*storage.Store, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	if context == nil {
		// make a copy
		ctx := storage.TestStoreContext
		context = &ctx
	}
	context.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, clock, false, nil, stopper)
	context.Clock = clock
	context.DB = client.NewDB(sender)
	context.Transport = multiraft.NewLocalRPCTransport(stopper)
	// TODO(bdarnell): arrange to have the transport closed.
	store := storage.NewStore(*context, eng, &proto.NodeDescriptor{NodeID: 1})
	if bootstrap {
		if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
			t.Fatal(err)
		}
	}
	lSender.AddStore(store)
	if bootstrap {
		if err := store.BootstrapRange(nil); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(stopper); err != nil {
		t.Fatal(err)
	}
	return store, stopper
}
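A brief usage sketch for this test helper. The engine.NewInMem call and its arguments are assumed for illustration and may not match this version's API.

clock := hlc.NewClock(hlc.UnixNano)
// Assumed in-memory engine constructor, for illustration only.
eng := engine.NewInMem(proto.Attributes{}, 10<<20)
store, stopper := createTestStoreWithEngine(t, eng, clock, true /* bootstrap */, nil /* default StoreContext */)
defer stopper.Stop()
_ = store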
Example 3: createTestStoreWithEngine
// createTestStoreWithEngine creates a test store using the given engine and clock.
// The caller is responsible for closing the store on exit.
func createTestStoreWithEngine(t *testing.T, eng engine.Engine, clock *hlc.Clock,
	bootstrap bool, sCtx *storage.StoreContext) (*storage.Store, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(&base.Context{}, clock, stopper)
	if sCtx == nil {
		// make a copy
		ctx := storage.TestStoreContext
		sCtx = &ctx
	}
	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	sCtx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	localSender := kv.NewLocalSender()
	rpcSend := func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		call := proto.Call{
			Args:  getArgs(nil /* net.Addr */).(proto.Request),
			Reply: getReply().(proto.Response),
		}
		localSender.Send(context.Background(), call)
		return []gogoproto.Message{call.Reply}, call.Reply.Header().GoError()
	}
	// Mostly makes sure that we don't see a warning per request.
	{
		if err := sCtx.Gossip.AddInfoProto(gossip.MakeNodeIDKey(nodeDesc.NodeID), nodeDesc, time.Hour); err != nil {
			t.Fatal(err)
		}
		if err := sCtx.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
			t.Fatal(err)
		}
	}
	distSender := kv.NewDistSender(&kv.DistSenderContext{
		Clock:             clock,
		RPCSend:           rpcSend,     // defined above
		RangeDescriptorDB: localSender, // for descriptor lookup
	}, sCtx.Gossip)
	sender := kv.NewTxnCoordSender(distSender, clock, false, nil, stopper)
	sCtx.Clock = clock
	sCtx.DB = client.NewDB(sender)
	sCtx.Transport = multiraft.NewLocalRPCTransport(stopper)
	// TODO(bdarnell): arrange to have the transport closed.
	store := storage.NewStore(*sCtx, eng, nodeDesc)
	if bootstrap {
		if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
			t.Fatal(err)
		}
	}
	localSender.AddStore(store)
	if bootstrap {
		if err := store.BootstrapRange(sql.GetInitialSystemValues()); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(stopper); err != nil {
		t.Fatal(err)
	}
	return store, stopper
}
Example 4: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(addr, nodeRPCContext)
	if err := rpcServer.Start(); err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipInterval, testContext.GossipBootstrapResolvers)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS == addr {
			gossipBS = rpcServer.Addr()
		}
		g.SetResolvers([]resolver.Resolver{resolver.NewResolverFromAddress(gossipBS)})
		g.Start(rpcServer, stopper)
	}
	ctx.Gossip = g
	sender := kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock}, g)
	ctx.DB = client.NewDB(sender)
	// TODO(bdarnell): arrange to have the transport closed.
	// (or attach LocalRPCTransport.Close to the stopper)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	node := NewNode(ctx)
	return rpcServer, ctx.Clock, node, stopper
}
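A short sketch of a typical call site for this variant. The util.CreateTestAddr helper is an assumption made for illustration and is not part of the example above.

// Hypothetical helper returning an unused local address for the test server.
addr := util.CreateTestAddr("tcp")
server, clock, node, stopper := createTestNode(addr, nil /* engines */, nil /* gossipBS */, t)
defer stopper.Stop()
_, _, _ = server, clock, node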
Example 5: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	ctx.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCtx := makeTestContext()
	g := gossip.New(
		context.Background(),
		nodeRPCContext,
		grpcServer,
		serverCtx.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry())
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(ln.Addr())
	}
	ctx.Gossip = g
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(&kv.DistSenderConfig{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	ctx.Ctx = tracing.WithTracer(context.Background(), tracing.NewTracer())
	sender := kv.NewTxnCoordSender(ctx.Ctx, distSender, ctx.Clock, false, stopper,
		kv.MakeTxnMetrics())
	ctx.DB = client.NewDB(sender)
	ctx.Transport = storage.NewDummyRaftTransport()
	node := NewNode(ctx, status.NewMetricsRecorder(ctx.Clock), metric.NewRegistry(), stopper,
		kv.MakeTxnMetrics(), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	return grpcServer, ln.Addr(), ctx.Clock, node, stopper
}
Example 6: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(nodeRPCContext)
	grpcServer := grpc.NewServer()
	tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipBootstrapResolvers, stopper)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(grpcServer, ln.Addr())
	}
	ctx.Gossip = g
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	distSender := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	tracer := tracing.NewTracer()
	sender := kv.NewTxnCoordSender(distSender, ctx.Clock, false, tracer, stopper)
	ctx.DB = client.NewDB(sender)
	// TODO(bdarnell): arrange to have the transport closed.
	// (or attach LocalRPCTransport.Close to the stopper)
	ctx.Transport = storage.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	ctx.Tracer = tracer
	node := NewNode(ctx, metric.NewRegistry(), stopper, nil)
	return rpcServer, ln.Addr(), ctx.Clock, node, stopper
}
Example 7: makeContext
func (m *multiTestContext) makeContext(i int) storage.StoreContext {
	var ctx storage.StoreContext
	if m.storeContext != nil {
		ctx = *m.storeContext
	} else {
		ctx = storage.TestStoreContext
	}
	ctx.Clock = m.clocks[i]
	ctx.DB = m.db
	ctx.Gossip = m.gossip
	ctx.StorePool = m.storePool
	ctx.Transport = m.transport
	ctx.EventFeed = m.feed
	return ctx
}
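A hedged sketch of how the per-store context built above might be consumed by the multi-test harness; the m.engines field and the node descriptor values are assumptions for illustration, not taken from the example.

ctx := m.makeContext(i)
// Hypothetical: build the i-th store from the derived context.
store := storage.NewStore(ctx, m.engines[i], &proto.NodeDescriptor{NodeID: proto.NodeID(i + 1)})
_ = store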
Example 8: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(nodeRPCContext)
	tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	ln, err := util.ListenAndServe(stopper, rpcServer, addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipBootstrapResolvers)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS == addr {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(rpcServer, ln.Addr(), stopper)
	}
	ctx.Gossip = g
	sender := kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock, RPCContext: nodeRPCContext}, g)
	ctx.DB = client.NewDB(sender)
	// TODO(bdarnell): arrange to have the transport closed.
	// (or attach LocalRPCTransport.Close to the stopper)
	ctx.Transport = storage.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	node := NewNode(ctx, metric.NewRegistry(), stopper)
	return rpcServer, ln.Addr(), ctx.Clock, node, stopper
}
Example 9: bootstrapCluster
// bootstrapCluster bootstraps multiple stores using the provided
// engines and cluster ID. The first bootstrapped store contains a
// single range spanning all keys. Initial range lookup metadata is
// populated for the range. Returns the cluster ID.
func bootstrapCluster(engines []engine.Engine) (uuid.UUID, error) {
	clusterID := uuid.MakeV4()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	ctx := storage.StoreContext{}
	ctx.ScanInterval = 10 * time.Minute
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	ctx.Tracer = tracing.NewTracer()
	// Create a KV DB with a local sender.
	stores := storage.NewStores(ctx.Clock)
	sender := kv.NewTxnCoordSender(stores, ctx.Clock, false, ctx.Tracer, stopper)
	ctx.DB = client.NewDB(sender)
	ctx.Transport = storage.NewLocalRPCTransport(stopper)
	for i, eng := range engines {
		sIdent := roachpb.StoreIdent{
			ClusterID: clusterID,
			NodeID:    1,
			StoreID:   roachpb.StoreID(i + 1),
		}
		// The bootstrapping store will not connect to other nodes so its
		// StoreConfig doesn't really matter.
		s := storage.NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})
		// Verify the store isn't already part of a cluster.
		if s.Ident.ClusterID != *uuid.EmptyUUID {
			return uuid.UUID{}, util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
		}
		// Bootstrap store to persist the store ident.
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			return uuid.UUID{}, err
		}
		// Create first range, writing directly to engine. Note this does
		// not create the range, just its data. Only do this if this is the
		// first store.
		if i == 0 {
			initialValues := GetBootstrapSchema().GetInitialValues()
			if err := s.BootstrapRange(initialValues); err != nil {
				return uuid.UUID{}, err
			}
		}
		if err := s.Start(stopper); err != nil {
			return uuid.UUID{}, err
		}
		stores.AddStore(s)
		// Initialize node and store ids. Only initialize the node once.
		if i == 0 {
			if nodeID, err := allocateNodeID(ctx.DB); nodeID != sIdent.NodeID || err != nil {
				return uuid.UUID{}, util.Errorf("expected to initialize node id allocator to %d, got %d: %s",
					sIdent.NodeID, nodeID, err)
			}
		}
		if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, ctx.DB); storeID != sIdent.StoreID || err != nil {
			return uuid.UUID{}, util.Errorf("expected to initialize store id allocator to %d, got %d: %s",
				sIdent.StoreID, storeID, err)
		}
	}
	return clusterID, nil
}
Example 10: NewServer
//......... part of the code is omitted here .........
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}
	s.distSender = kv.NewDistSender(&distSenderCfg, s.gossip)
	txnMetrics := kv.MakeTxnMetrics()
	s.registry.AddMetricStruct(txnMetrics)
	s.txnCoordSender = kv.NewTxnCoordSender(s.Ctx(), s.distSender, s.clock, srvCtx.Linearizable,
		s.stopper, txnMetrics)
	s.db = client.NewDB(s.txnCoordSender)
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
	s.kvDB = kv.NewDBServer(s.ctx.Context, s.txnCoordSender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)
	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if srvCtx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *srvCtx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	// Set up the DistSQL server
	distSQLCfg := distsql.ServerConfig{
		Context:    s.Ctx(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCfg)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)
	// Set up Executor
	execCfg := sql.ExecutorConfig{
		Context:      s.Ctx(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if srvCtx.TestingKnobs.SQLExecutor != nil {
		execCfg.TestingKnobs = srvCtx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		execCfg.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}
	s.sqlExecutor = sql.NewExecutor(execCfg, s.stopper)
	s.registry.AddMetricStruct(s.sqlExecutor)
	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor)
	s.registry.AddMetricStruct(s.pgServer.Metrics())
	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Ctx:                            s.Ctx(),
		Clock:                          s.clock,
		DB:                             s.db,
		Gossip:                         s.gossip,
		Transport:                      s.raftTransport,
		RaftTickInterval:               s.ctx.RaftTickInterval,
		ScanInterval:                   s.ctx.ScanInterval,
		ScanMaxIdleTime:                s.ctx.ScanMaxIdleTime,
		ConsistencyCheckInterval:       s.ctx.ConsistencyCheckInterval,
		ConsistencyCheckPanicOnFailure: s.ctx.ConsistencyCheckPanicOnFailure,
		StorePool:                      s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
		},
	}
	if srvCtx.TestingKnobs.Store != nil {
		nCtx.TestingKnobs = *srvCtx.TestingKnobs.Store.(*storage.StoreTestingKnobs)
	}
	s.recorder = status.NewMetricsRecorder(s.clock)
	s.registry.AddMetricStruct(s.rpcContext.RemoteClocks.Metrics())
	s.runtime = status.MakeRuntimeStatSampler(s.clock)
	s.registry.AddMetricStruct(s.runtime)
	s.node = NewNode(nCtx, s.recorder, s.registry, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr))
	roachpb.RegisterInternalServer(s.grpc, s.node)
	storage.RegisterStoresServer(s.grpc, s.node.storesServer)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.MakeServer(s.tsDB)
	s.admin = makeAdminServer(s)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx.Context, s.rpcContext, s.node.stores)
	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		gw.RegisterService(s.grpc)
	}
	return s, nil
}
Example 11: NewServer
//......... part of the code is omitted here .........
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus, the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	s.distSender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	txnMetrics := kv.NewTxnMetrics(s.registry)
	sender := kv.NewTxnCoordSender(s.distSender, s.clock, ctx.Linearizable, s.Tracer,
		s.stopper, txnMetrics)
	s.db = client.NewDB(sender)
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
	s.kvDB = kv.NewDBServer(s.ctx.Context, sender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)
	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if ctx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *ctx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	// Set up the DistSQL server
	distSQLCtx := distsql.ServerContext{
		Context:    context.Background(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCtx)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)
	// Set up Executor
	eCtx := sql.ExecutorContext{
		Context:      context.Background(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if ctx.TestingKnobs.SQLExecutor != nil {
		eCtx.TestingKnobs = ctx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		eCtx.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}
	s.sqlExecutor = sql.NewExecutor(eCtx, s.stopper, s.registry)
	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor, s.registry)
	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:                          s.clock,
		DB:                             s.db,
		Gossip:                         s.gossip,
		Transport:                      s.raftTransport,
		RaftTickInterval:               s.ctx.RaftTickInterval,
		ScanInterval:                   s.ctx.ScanInterval,
		ScanMaxIdleTime:                s.ctx.ScanMaxIdleTime,
		ConsistencyCheckInterval:       s.ctx.ConsistencyCheckInterval,
		ConsistencyCheckPanicOnFailure: s.ctx.ConsistencyCheckPanicOnFailure,
		Tracer:                         s.Tracer,
		StorePool:                      s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
		},
	}
	if ctx.TestingKnobs.Store != nil {
		nCtx.TestingKnobs = *ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs)
	}
	s.recorder = status.NewMetricsRecorder(s.clock)
	s.rpcContext.RemoteClocks.RegisterMetrics(s.registry)
	s.runtime = status.MakeRuntimeStatSampler(s.clock, s.registry)
	s.node = NewNode(nCtx, s.recorder, s.registry, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr))
	roachpb.RegisterInternalServer(s.grpc, s.node)
	roachpb.RegisterInternalStoresServer(s.grpc, s.node.InternalStoresServer)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.MakeServer(s.tsDB)
	s.admin = makeAdminServer(s)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx.Context, s.rpcContext, s.node.stores)
	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		gw.RegisterService(s.grpc)
	}
	return s, nil
}