This page collects typical usage examples of the Go function NewTracer from github.com/cockroachdb/cockroach/util/tracing. If you are wondering what NewTracer does, how to call it, or how it is used in practice, the curated examples here should help.
The following 15 code examples of NewTracer are drawn from real-world projects and are ordered by popularity.
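Before the examples, here is a minimal standalone sketch of the typical pattern. It uses only calls that appear in the examples below (NewTracer, StartSpan/Finish, and WithTracer) and assumes the import path named above; treat it as an illustration rather than part of the collected examples.

package main

import (
	"golang.org/x/net/context"

	"github.com/cockroachdb/cockroach/util/tracing"
)

func main() {
	// NewTracer returns an opentracing-compatible Tracer.
	tracer := tracing.NewTracer()

	// Start a span for an operation and finish it when the work is done.
	sp := tracer.StartSpan("example operation")
	defer sp.Finish()

	// Several of the APIs below take a context that carries the tracer.
	ctx := tracing.WithTracer(context.Background(), tracer)
	_ = ctx
}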
Example 1: TestTxnCoordSenderErrorWithIntent
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)
	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		txn := ba.Txn.Clone()
		txn.Writing = true
		pErr := roachpb.NewError(roachpb.NewTransactionRetryError())
		pErr.SetTxn(&txn)
		return nil, pErr
	}), clock, false, tracing.NewTracer(), stopper)
	defer stopper.Stop()
	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	if _, pErr := ts.Send(context.Background(), ba); !testutils.IsPError(pErr, "retry txn") {
		t.Fatalf("unexpected error: %v", pErr)
	}
	defer teardownHeartbeats(ts)
	ts.Lock()
	defer ts.Unlock()
	if len(ts.txns) != 1 {
		t.Fatalf("expected transaction to be tracked")
	}
}
Example 2: TestMultiRangeScanWithMaxResults
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		splitKeys []roachpb.Key
		keys      []roachpb.Key
	}{
		{[]roachpb.Key{roachpb.Key("m")},
			[]roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
		{[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
			[]roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
				roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
	}
	for i, tc := range testCases {
		s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
		defer s.Stopper().Stop()
		ts := s.(*TestServer)
		retryOpts := base.DefaultRetryOptions()
		retryOpts.Closer = ts.stopper.ShouldDrain()
		ds := kv.NewDistSender(&kv.DistSenderContext{
			Clock:           s.Clock(),
			RPCContext:      s.RPCContext(),
			RPCRetryOptions: &retryOpts,
		}, ts.Gossip())
		tds := kv.NewTxnCoordSender(ds, ts.Clock(), ts.Ctx.Linearizable, tracing.NewTracer(),
			ts.stopper, kv.NewTxnMetrics(metric.NewRegistry()))
		for _, sk := range tc.splitKeys {
			if err := ts.node.ctx.DB.AdminSplit(sk); err != nil {
				t.Fatal(err)
			}
		}
		for _, k := range tc.keys {
			put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
			if _, err := client.SendWrapped(tds, nil, put); err != nil {
				t.Fatal(err)
			}
		}
		// Try every possible ScanRequest startKey.
		for start := 0; start < len(tc.keys); start++ {
			// Try every possible maxResults, from 1 to beyond the size of key array.
			for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
				scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
					int64(maxResults))
				reply, err := client.SendWrapped(tds, nil, scan)
				if err != nil {
					t.Fatal(err)
				}
				rows := reply.(*roachpb.ScanResponse).Rows
				if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
					t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
				} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
					t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
				}
			}
		}
	}
}
Example 3: TestSendToOneClient
// TestSendToOneClient verifies that Send correctly sends a request
// to one server using the heartbeat RPC.
func TestSendToOneClient(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	ctx := newNodeTestContext(nil, stopper)
	s, ln := newTestServer(t, ctx)
	registerBatch(t, s, 0)
	sp := tracing.NewTracer().StartSpan("node test")
	defer sp.Finish()
	opts := SendOptions{
		Ordering:        orderStable,
		SendNextTimeout: 1 * time.Second,
		Timeout:         10 * time.Second,
		Trace:           sp,
	}
	reply, err := sendBatch(opts, []net.Addr{ln.Addr()}, ctx)
	if err != nil {
		t.Fatal(err)
	}
	if reply == nil {
		t.Errorf("expected reply")
	}
}
Example 4: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)
	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		br := ba.CreateReply()
		txnClone := ba.Txn.Clone()
		br.Txn = &txnClone
		br.Txn.Writing = true
		return br, nil
	}), clock, false, tracing.NewTracer(), stopper, NewTxnMetrics(metric.NewRegistry()))
	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()
	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
Example 5: NewDistSender
// NewDistSender returns a batch.Sender instance which connects to the
// Cockroach cluster via the supplied gossip instance. Supplying a
// DistSenderContext or the fields within is optional. For omitted values, sane
// defaults will be used.
func NewDistSender(ctx *DistSenderContext, gossip *gossip.Gossip) *DistSender {
	if ctx == nil {
		ctx = &DistSenderContext{}
	}
	clock := ctx.Clock
	if clock == nil {
		clock = hlc.NewClock(hlc.UnixNano)
	}
	ds := &DistSender{
		clock:  clock,
		gossip: gossip,
	}
	if ctx.nodeDescriptor != nil {
		atomic.StorePointer(&ds.nodeDescriptor, unsafe.Pointer(ctx.nodeDescriptor))
	}
	rcSize := ctx.RangeDescriptorCacheSize
	if rcSize <= 0 {
		rcSize = defaultRangeDescriptorCacheSize
	}
	rdb := ctx.RangeDescriptorDB
	if rdb == nil {
		rdb = ds
	}
	ds.rangeCache = newRangeDescriptorCache(rdb, int(rcSize))
	lcSize := ctx.LeaderCacheSize
	if lcSize <= 0 {
		lcSize = defaultLeaderCacheSize
	}
	ds.leaderCache = newLeaderCache(int(lcSize))
	if ctx.RangeLookupMaxRanges <= 0 {
		ds.rangeLookupMaxRanges = defaultRangeLookupMaxRanges
	}
	if ctx.TransportFactory != nil {
		ds.transportFactory = ctx.TransportFactory
	}
	ds.rpcRetryOptions = base.DefaultRetryOptions()
	if ctx.RPCRetryOptions != nil {
		ds.rpcRetryOptions = *ctx.RPCRetryOptions
	}
	if ctx.RPCContext != nil {
		ds.rpcContext = ctx.RPCContext
		if ds.rpcRetryOptions.Closer == nil {
			ds.rpcRetryOptions.Closer = ds.rpcContext.Stopper.ShouldDrain()
		}
	}
	if ctx.Tracer != nil {
		ds.Tracer = ctx.Tracer
	} else {
		ds.Tracer = tracing.NewTracer()
	}
	if ctx.SendNextTimeout != 0 {
		ds.sendNextTimeout = ctx.SendNextTimeout
	} else {
		ds.sendNextTimeout = defaultSendNextTimeout
	}
	return ds
}
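Since every DistSenderContext field is optional here, a caller that only wants a non-default tracer can set that one field and take the constructor's defaults for everything else. A minimal sketch of such a call, assuming a *gossip.Gossip instance g is already available:

	// Sketch: override only the tracer; the clock, caches, and retry
	// options fall back to the defaults chosen inside NewDistSender.
	// g (*gossip.Gossip) is assumed to be initialized elsewhere.
	ds := kv.NewDistSender(&kv.DistSenderContext{
		Tracer: tracing.NewTracer(),
	}, g)
	_ = ds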
Example 6: TestRetryableError
// TestRetryableError verifies that Send returns a retryable error
// when it hits an RPC error.
func TestRetryableError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	clientStopper := stop.NewStopper()
	defer clientStopper.Stop()
	clientContext := newNodeTestContext(nil, clientStopper)
	serverStopper := stop.NewStopper()
	serverContext := newNodeTestContext(nil, serverStopper)
	s, ln := newTestServer(t, serverContext)
	roachpb.RegisterInternalServer(s, Node(0))
	conn, err := clientContext.GRPCDial(ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	waitForConnState := func(desiredState grpc.ConnectivityState) {
		clientState, err := conn.State()
		for clientState != desiredState {
			if err != nil {
				t.Fatal(err)
			}
			if clientState == grpc.Shutdown {
				t.Fatalf("%v has unexpectedly shut down", conn)
			}
			clientState, err = conn.WaitForStateChange(ctx, clientState)
		}
	}
	// Wait until the client becomes healthy and shut down the server.
	waitForConnState(grpc.Ready)
	serverStopper.Stop()
	// Wait until the client becomes unhealthy.
	waitForConnState(grpc.TransientFailure)
	sp := tracing.NewTracer().StartSpan("node test")
	defer sp.Finish()
	opts := SendOptions{
		Ordering:        orderStable,
		SendNextTimeout: 100 * time.Millisecond,
		Timeout:         100 * time.Millisecond,
		Trace:           sp,
	}
	if _, err := sendBatch(opts, []net.Addr{ln.Addr()}, clientContext); err != nil {
		retryErr, ok := err.(retry.Retryable)
		if !ok {
			t.Fatalf("Unexpected error type: %v", err)
		}
		if !retryErr.CanRetry() {
			t.Errorf("Expected retryable error: %v", retryErr)
		}
	} else {
		t.Fatalf("Unexpected success")
	}
}
Example 7: setupMetricsTest
// setupMetricsTest returns a TxnCoordSender and ManualClock pointing to a newly created
// LocalTestCluster. Also returns a cleanup function to be executed at the end of the
// test.
func setupMetricsTest(t *testing.T) (*hlc.ManualClock, *TxnCoordSender, func()) {
	s, testSender := createTestDB(t)
	reg := metric.NewRegistry()
	txnMetrics := NewTxnMetrics(reg)
	sender := NewTxnCoordSender(testSender.wrapped, s.Clock, false, tracing.NewTracer(), s.Stopper, txnMetrics)
	return s.Manual, sender, func() {
		teardownHeartbeats(sender)
		s.Stop()
	}
}
Example 8: setupMetricsTest
// setupMetricsTest returns a TxnCoordSender and ManualClock pointing to a newly created
// LocalTestCluster. Also returns a cleanup function to be executed at the end of the
// test.
func setupMetricsTest(t *testing.T) (*hlc.ManualClock, *TxnCoordSender, func()) {
	s, testSender := createTestDB(t)
	txnMetrics := MakeTxnMetrics()
	ctx := tracing.WithTracer(context.Background(), tracing.NewTracer())
	sender := NewTxnCoordSender(ctx, testSender.wrapped, s.Clock, false, s.Stopper, txnMetrics)
	return s.Manual, sender, func() {
		teardownHeartbeats(sender)
		s.Stop()
	}
}
Example 9: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	ctx.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCtx := makeTestContext()
	g := gossip.New(
		context.Background(),
		nodeRPCContext,
		grpcServer,
		serverCtx.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry())
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(ln.Addr())
	}
	ctx.Gossip = g
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(&kv.DistSenderConfig{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	ctx.Ctx = tracing.WithTracer(context.Background(), tracing.NewTracer())
	sender := kv.NewTxnCoordSender(ctx.Ctx, distSender, ctx.Clock, false, stopper,
		kv.MakeTxnMetrics())
	ctx.DB = client.NewDB(sender)
	ctx.Transport = storage.NewDummyRaftTransport()
	node := NewNode(ctx, status.NewMetricsRecorder(ctx.Clock), metric.NewRegistry(), stopper,
		kv.MakeTxnMetrics(), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	return grpcServer, ln.Addr(), ctx.Clock, node, stopper
}
Example 10: TestTxnCoordSenderErrorWithIntent
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)
	testCases := []struct {
		roachpb.Error
		errMsg string
	}{
		{*roachpb.NewError(roachpb.NewTransactionRetryError()), "retry txn"},
		{*roachpb.NewError(roachpb.NewTransactionPushError(roachpb.Transaction{
			TxnMeta: enginepb.TxnMeta{
				ID: uuid.NewV4(),
			}})), "failed to push"},
		{*roachpb.NewErrorf("testError"), "testError"},
	}
	for i, test := range testCases {
		func() {
			senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
				txn := ba.Txn.Clone()
				txn.Writing = true
				pErr := &roachpb.Error{}
				*pErr = test.Error
				pErr.SetTxn(&txn)
				return nil, pErr
			}
			ctx := tracing.WithTracer(context.Background(), tracing.NewTracer())
			ts := NewTxnCoordSender(ctx, senderFn(senderFunc), clock, false, stopper, MakeTxnMetrics())
			var ba roachpb.BatchRequest
			key := roachpb.Key("test")
			ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
			ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
			ba.Add(&roachpb.EndTransactionRequest{})
			ba.Txn = &roachpb.Transaction{Name: "test"}
			_, pErr := ts.Send(context.Background(), ba)
			if !testutils.IsPError(pErr, test.errMsg) {
				t.Errorf("%d: error did not match %s: %v", i, test.errMsg, pErr)
			}
			defer teardownHeartbeats(ts)
			ts.Lock()
			defer ts.Unlock()
			if len(ts.txns) != 1 {
				t.Errorf("%d: expected transaction to be tracked", i)
			}
		}()
	}
}
Example 11: setupMetricsTest
// setupMetricsTest returns a TxnCoordSender and ManualClock pointing to a newly created
// LocalTestCluster. Also returns a cleanup function to be executed at the end of the
// test.
func setupMetricsTest(t *testing.T) (*hlc.ManualClock, *TxnCoordSender, func()) {
	s := createTestDB(t)
	reg := metric.NewRegistry()
	txnMetrics := NewTxnMetrics(reg)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	sender := NewTxnCoordSender(s.distSender, clock, false, tracing.NewTracer(), s.Stopper, txnMetrics)
	return manual, sender, func() {
		teardownHeartbeats(sender)
		s.Stop()
	}
}
Example 12: NewDistSender
// NewDistSender returns a batch.Sender instance which connects to the
// Cockroach cluster via the supplied gossip instance. Supplying a
// DistSenderContext or the fields within is optional. For omitted values, sane
// defaults will be used.
func NewDistSender(ctx *DistSenderContext, gossip *gossip.Gossip) *DistSender {
	if ctx == nil {
		ctx = &DistSenderContext{}
	}
	clock := ctx.Clock
	if clock == nil {
		clock = hlc.NewClock(hlc.UnixNano)
	}
	ds := &DistSender{
		clock:  clock,
		gossip: gossip,
	}
	if ctx.nodeDescriptor != nil {
		atomic.StorePointer(&ds.nodeDescriptor, unsafe.Pointer(ctx.nodeDescriptor))
	}
	rcSize := ctx.RangeDescriptorCacheSize
	if rcSize <= 0 {
		rcSize = defaultRangeDescriptorCacheSize
	}
	rdb := ctx.RangeDescriptorDB
	if rdb == nil {
		rdb = ds
	}
	ds.rangeCache = newRangeDescriptorCache(rdb, int(rcSize))
	lcSize := ctx.LeaderCacheSize
	if lcSize <= 0 {
		lcSize = defaultLeaderCacheSize
	}
	ds.leaderCache = newLeaderCache(int(lcSize))
	if ctx.RangeLookupMaxRanges <= 0 {
		ds.rangeLookupMaxRanges = defaultRangeLookupMaxRanges
	}
	ds.rpcSend = send
	if ctx.RPCSend != nil {
		ds.rpcSend = ctx.RPCSend
	}
	if ctx.RPCContext != nil {
		ds.rpcContext = ctx.RPCContext
	}
	ds.rpcRetryOptions = defaultRPCRetryOptions
	if ctx.RPCRetryOptions != nil {
		ds.rpcRetryOptions = *ctx.RPCRetryOptions
	}
	if ctx.Tracer != nil {
		ds.Tracer = ctx.Tracer
	} else {
		ds.Tracer = tracing.NewTracer()
	}
	return ds
}
Example 13: TestRetryableError
// TestRetryableError verifies that Send returns a retryable error
// when it hits an RPC error.
func TestRetryableError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	clientStopper := stop.NewStopper()
	defer clientStopper.Stop()
	clientContext := newNodeTestContext(nil, clientStopper)
	clientContext.HeartbeatTimeout = 10 * clientContext.HeartbeatInterval
	serverStopper := stop.NewStopper()
	serverContext := newNodeTestContext(nil, serverStopper)
	s, ln := newTestServer(t, serverContext)
	registerBatch(t, s, 0)
	c := rpc.NewClient(ln.Addr(), clientContext)
	// Wait until the client becomes healthy and shut down the server.
	<-c.Healthy()
	serverStopper.Stop()
	// Wait until the client becomes unhealthy.
	func() {
		for r := retry.Start(retry.Options{}); r.Next(); {
			select {
			case <-c.Healthy():
			case <-time.After(1 * time.Nanosecond):
				return
			}
		}
	}()
	sp := tracing.NewTracer().StartSpan("node test")
	defer sp.Finish()
	opts := SendOptions{
		Ordering:        orderStable,
		SendNextTimeout: 100 * time.Millisecond,
		Timeout:         100 * time.Millisecond,
		Trace:           sp,
	}
	if _, err := sendBatch(opts, []net.Addr{ln.Addr()}, clientContext); err != nil {
		retryErr, ok := err.(retry.Retryable)
		if !ok {
			t.Fatalf("Unexpected error type: %v", err)
		}
		if !retryErr.CanRetry() {
			t.Errorf("Expected retryable error: %v", retryErr)
		}
	} else {
		t.Fatalf("Unexpected success")
	}
}
Example 14: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(nodeRPCContext)
	grpcServer := grpc.NewServer()
	tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipBootstrapResolvers, stopper)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(grpcServer, ln.Addr())
	}
	ctx.Gossip = g
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	distSender := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	tracer := tracing.NewTracer()
	sender := kv.NewTxnCoordSender(distSender, ctx.Clock, false, tracer, stopper)
	ctx.DB = client.NewDB(sender)
	// TODO(bdarnell): arrange to have the transport closed.
	// (or attach LocalRPCTransport.Close to the stopper)
	ctx.Transport = storage.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	ctx.Tracer = tracer
	node := NewNode(ctx, metric.NewRegistry(), stopper, nil)
	return rpcServer, ln.Addr(), ctx.Clock, node, stopper
}
Example 15: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Context, initSender InitSenderFn) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	tracer := tracing.NewTracer()
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(baseCtx, ltc.Clock, ltc.Stopper)
	server := rpc.NewServer(rpcContext) // never started
	ltc.Gossip = gossip.New(
		context.Background(), rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)
	ltc.Stores = storage.NewStores(ltc.Clock)
	ltc.Sender = initSender(nodeDesc, tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	if ltc.DBContext == nil {
		dbCtx := client.DefaultDBContext()
		ltc.DBContext = &dbCtx
	}
	ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	if ltc.RangeRetryOptions != nil {
		ctx.RangeRetryOptions = *ltc.RangeRetryOptions
	}
	ctx.Ctx = tracing.WithTracer(context.Background(), tracer)
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
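A hedged sketch of how a test might drive Start, per the doc comment above (Stop() is the documented shutdown call); the sender function and base context names below are placeholders for whatever the test already has, with the setupMetricsTest helpers above constructing comparable senders:

	// Hypothetical usage: mySender is some InitSenderFn and baseCtx some
	// *base.Context supplied by the test; both names are placeholders.
	ltc := &LocalTestCluster{}
	ltc.Start(t, baseCtx, mySender)
	defer ltc.Stop()
	// ltc.DB, ltc.Store, etc. are now ready for use by the test.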