本文整理汇总了Golang中github.com/cockroachdb/cockroach/pkg/util/tracing.NewTracer函数的典型用法代码示例。如果您正苦于以下问题:Golang NewTracer函数的具体用法?Golang NewTracer怎么用?Golang NewTracer使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewTracer函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()

	stopper := stop.NewStopper()
	manualClock := hlc.NewManualClock(123)
	clock := hlc.NewClock(manualClock.UnixNano, 20*time.Nanosecond)

	// The mock sender simply echoes the request's transaction back,
	// marked as Writing.
	replyingSender := func(_ context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		resp := args.CreateReply()
		clonedTxn := args.Txn.Clone()
		clonedTxn.Writing = true
		resp.Txn = &clonedTxn
		return resp, nil
	}

	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	coord := NewTxnCoordSender(
		ambient, senderFn(replyingSender), clock, false, stopper, MakeTxnMetrics(metric.TestSampleInterval),
	)

	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()

	var req roachpb.BatchRequest
	testKey := roachpb.Key("test")
	req.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: testKey}})
	req.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: testKey}})
	req.Add(&roachpb.EndTransactionRequest{})
	req.Txn = &roachpb.Transaction{Name: "test"}
	if _, pErr := coord.Send(context.Background(), req); pErr != nil {
		t.Fatal(pErr)
	}
}
示例2: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(
addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
cfg := storage.StoreConfig{}
stopper := stop.NewStopper()
cfg.Clock = hlc.NewClock(hlc.UnixNano)
nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
// Very long intervals effectively disable background scanning and
// consistency checking for the duration of the test.
cfg.ScanInterval = 10 * time.Hour
cfg.ConsistencyCheckInterval = 10 * time.Hour
grpcServer := rpc.NewServer(nodeRPCContext)
serverCfg := makeTestConfig()
cfg.Gossip = gossip.NewTest(
0,
nodeRPCContext,
grpcServer,
serverCfg.GossipBootstrapResolvers,
stopper,
metric.NewRegistry(),
)
ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
if err != nil {
t.Fatal(err)
}
if gossipBS != nil {
// Handle possibility of a :0 port specification.
if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
gossipBS = ln.Addr()
}
r, err := resolver.NewResolverFromAddress(gossipBS)
if err != nil {
t.Fatalf("bad gossip address %s: %s", gossipBS, err)
}
cfg.Gossip.SetResolvers([]resolver.Resolver{r})
cfg.Gossip.Start(ln.Addr())
}
// Use the stopper's quiescence channel as the retry Closer so that pending
// retries are abandoned promptly when the test shuts down.
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = stopper.ShouldQuiesce()
distSender := kv.NewDistSender(kv.DistSenderConfig{
Clock: cfg.Clock,
RPCContext: nodeRPCContext,
RPCRetryOptions: &retryOpts,
}, cfg.Gossip)
cfg.AmbientCtx.Tracer = tracing.NewTracer()
// Stack a TxnCoordSender on top of the DistSender to form the KV client DB.
sender := kv.NewTxnCoordSender(
cfg.AmbientCtx,
distSender,
cfg.Clock,
false,
stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval),
)
cfg.DB = client.NewDB(sender)
cfg.Transport = storage.NewDummyRaftTransport()
cfg.MetricsSampleInterval = metric.TestSampleInterval
node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
roachpb.RegisterInternalServer(grpcServer, node)
return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
示例3: NewDummyExecutor
// NewDummyExecutor creates an empty Executor that is used for certain tests.
func NewDummyExecutor() *Executor {
	cfg := ExecutorConfig{
		AmbientCtx: log.AmbientContext{Tracer: tracing.NewTracer()},
	}
	return &Executor{cfg: cfg}
}
示例4: TestTxnCoordSenderErrorWithIntent
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
defer leaktest.AfterTest(t)()
stopper := stop.NewStopper()
defer stopper.Stop()
manual := hlc.NewManualClock(0)
clock := hlc.NewClock(manual.UnixNano)
clock.SetMaxOffset(20)
// Each case pairs a prototype error returned by the mock sender with the
// message Send is expected to surface.
testCases := []struct {
roachpb.Error
errMsg string
}{
{*roachpb.NewError(roachpb.NewTransactionRetryError()), "retry txn"},
{*roachpb.NewError(roachpb.NewTransactionPushError(roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{
ID: uuid.NewV4(),
}})), "failed to push"},
{*roachpb.NewErrorf("testError"), "testError"},
}
for i, test := range testCases {
// Each case runs in its own closure so the deferred teardown and unlock
// fire at the end of every iteration, not at function exit.
func() {
// Return the case's error, but attach a Writing txn to it so the
// coordinator should still begin tracking the transaction.
senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
txn := ba.Txn.Clone()
txn.Writing = true
pErr := &roachpb.Error{}
*pErr = test.Error
pErr.SetTxn(&txn)
return nil, pErr
}
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
ts := NewTxnCoordSender(
ambient,
senderFn(senderFunc),
clock,
false,
stopper,
MakeTxnMetrics(metric.TestSampleInterval),
)
var ba roachpb.BatchRequest
key := roachpb.Key("test")
ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
ba.Add(&roachpb.EndTransactionRequest{})
ba.Txn = &roachpb.Transaction{Name: "test"}
_, pErr := ts.Send(context.Background(), ba)
if !testutils.IsPError(pErr, test.errMsg) {
t.Errorf("%d: error did not match %s: %v", i, test.errMsg, pErr)
}
// Despite the error, the Writing transaction must be tracked in the
// coordinator's txns map.
defer teardownHeartbeats(ts)
ts.Lock()
defer ts.Unlock()
if len(ts.txns) != 1 {
t.Errorf("%d: expected transaction to be tracked", i)
}
}()
}
}
示例5: setupMetricsTest
// setupMetricsTest returns a TxnCoordSender and ManualClock pointing to a newly created
// LocalTestCluster. Also returns a cleanup function to be executed at the end of the
// test.
func setupMetricsTest(t *testing.T) (*hlc.ManualClock, *TxnCoordSender, func()) {
	cluster, testSender := createTestDB(t)
	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	coord := NewTxnCoordSender(
		ambient,
		testSender.wrapped,
		cluster.Clock,
		false,
		cluster.Stopper,
		MakeTxnMetrics(metric.TestSampleInterval),
	)
	cleanup := func() {
		teardownHeartbeats(coord)
		cluster.Stop()
	}
	return cluster.Manual, coord, cleanup
}
示例6: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Config, initSender InitSenderFn) {
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
// Tag log output with the node ID; the container is filled in below once
// the descriptor is known.
nc := &base.NodeIDContainer{}
ambient.AddLogTag("n", nc)
nodeID := roachpb.NodeID(1)
nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
ltc.tester = t
ltc.Manual = hlc.NewManualClock(0)
ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
ltc.Stopper = stop.NewStopper()
rpcContext := rpc.NewContext(ambient, baseCtx, ltc.Clock, ltc.Stopper)
server := rpc.NewServer(rpcContext) // never started
ltc.Gossip = gossip.New(ambient, nc, rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
// 50 MiB in-memory engine, closed together with the stopper.
ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20)
ltc.Stopper.AddCloser(ltc.Eng)
ltc.Stores = storage.NewStores(ambient, ltc.Clock)
// initSender is supplied by the caller and selects the KV sender
// implementation under test.
ltc.Sender = initSender(nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
ltc.Gossip)
if ltc.DBContext == nil {
dbCtx := client.DefaultDBContext()
ltc.DBContext = &dbCtx
}
ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
transport := storage.NewDummyRaftTransport()
cfg := storage.TestStoreConfig()
if ltc.RangeRetryOptions != nil {
cfg.RangeRetryOptions = *ltc.RangeRetryOptions
}
cfg.AmbientCtx = ambient
cfg.Clock = ltc.Clock
cfg.DB = ltc.DB
cfg.Gossip = ltc.Gossip
cfg.Transport = transport
cfg.MetricsSampleInterval = metric.TestSampleInterval
// Bootstrap the store and its first range before starting it.
ltc.Store = storage.NewStore(cfg, ltc.Eng, nodeDesc)
if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
ltc.Stores.AddStore(ltc.Store)
if err := ltc.Store.BootstrapRange(nil); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
// Now that everything is up, publish the node ID to the log tag and gossip.
nc.Set(context.TODO(), nodeDesc.NodeID)
if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
t.Fatalf("unable to set node descriptor: %s", err)
}
}
示例7: makeTestV3Conn
// makeTestV3Conn wraps the given net.Conn in a v3Conn backed by a fresh
// executor, metrics and an unlimited memory monitor, for use in tests.
func makeTestV3Conn(c net.Conn) v3Conn {
	ctx := context.Background()
	metrics := makeServerMetrics(nil)
	memMonitor := mon.MakeUnlimitedMonitor(ctx, "test", nil, nil, 1000)
	executor := sql.NewExecutor(
		sql.ExecutorConfig{
			AmbientCtx:            log.AmbientContext{Tracer: tracing.NewTracer()},
			MetricsSampleInterval: metric.TestSampleInterval,
		},
		nil, /* stopper */
	)
	return makeV3Conn(ctx, c, &metrics, &memMonitor, executor)
}
示例8: TestPollSource
// TestPollSource verifies that polled data sources are called as expected.
func TestPollSource(t *testing.T) {
defer leaktest.AfterTest(t)()
tm := newTestModel(t)
tm.Start()
defer tm.Stop()
// The model source is seeded with two datasets, so the poller is expected
// to call it exactly twice before its stopper stops.
testSource := modelDataSource{
model: tm,
r: Resolution10s,
stopper: stop.NewStopper(),
datasets: [][]tspb.TimeSeriesData{
{
{
Name: "test.metric.float",
Source: "cpu01",
Datapoints: []tspb.TimeSeriesDatapoint{
datapoint(1428713843000000000, 100.0),
datapoint(1428713843000000001, 50.2),
datapoint(1428713843000000002, 90.9),
},
},
{
Name: "test.metric.float",
Source: "cpu02",
Datapoints: []tspb.TimeSeriesDatapoint{
datapoint(1428713843000000000, 900.8),
datapoint(1428713843000000001, 30.12),
datapoint(1428713843000000002, 72.324),
},
},
},
{
{
Name: "test.metric",
Datapoints: []tspb.TimeSeriesDatapoint{
datapoint(-446061360000000000, 100),
},
},
},
},
}
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
tm.DB.PollSource(ambient, &testSource, time.Millisecond, Resolution10s, testSource.stopper)
// Block until the source's stopper reports it has stopped polling.
<-testSource.stopper.IsStopped()
if a, e := testSource.calledCount, 2; a != e {
t.Errorf("testSource was called %d times, expected %d", a, e)
}
tm.assertKeyCount(3)
tm.assertModelCorrect()
}
示例9: TestAdminAPIUsers
// TestAdminAPIUsers verifies that the admin "users" endpoint returns the
// rows inserted into system.users.
func TestAdminAPIUsers(t *testing.T) {
defer leaktest.AfterTest(t)()
s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop()
ts := s.(*TestServer)
// Create sample users.
ac := log.AmbientContext{Tracer: tracing.NewTracer()}
ctx, span := ac.AnnotateCtxWithSpan(context.Background(), "test")
defer span.Finish()
// Open a root SQL session to insert the sample rows directly.
session := sql.NewSession(
ctx, sql.SessionArgs{User: security.RootUser}, ts.sqlExecutor, nil, &sql.MemoryMetrics{})
session.StartUnlimitedMonitor()
defer session.Finish(ts.sqlExecutor)
query := `
INSERT INTO system.users (username, hashedPassword)
VALUES ('admin', 'abc'), ('bob', 'xyz')`
res := ts.sqlExecutor.ExecuteStatements(session, query, nil)
defer res.Close()
if a, e := len(res.ResultList), 1; a != e {
t.Fatalf("len(results) %d != %d", a, e)
} else if res.ResultList[0].Err != nil {
t.Fatal(res.ResultList[0].Err)
}
// Query the API for users.
var resp serverpb.UsersResponse
if err := getAdminJSONProto(s, "users", &resp); err != nil {
t.Fatal(err)
}
expResult := serverpb.UsersResponse{
Users: []serverpb.UsersResponse_User{
{Username: "admin"},
{Username: "bob"},
},
}
// Verify results.
// Sort both slices so the comparison is order-independent.
const sortKey = "Username"
testutils.SortStructs(resp.Users, sortKey)
testutils.SortStructs(expResult.Users, sortKey)
if !reflect.DeepEqual(resp, expResult) {
t.Fatalf("result %v != expected %v", resp, expResult)
}
}
示例10: TestTxnCoordSenderTxnUpdatedOnError
// TestTxnCoordSenderTxnUpdatedOnError verifies that errors adjust the
// response transaction's timestamp and priority as appropriate.
func TestTxnCoordSenderTxnUpdatedOnError(t *testing.T) {
defer leaktest.AfterTest(t)()
origTS := makeTS(123, 0)
plus10 := origTS.Add(10, 10)
plus20 := plus10.Add(10, 0)
testCases := []struct {
pErr *roachpb.Error
expEpoch uint32
expPri int32
expTS, expOrigTS hlc.Timestamp
nodeSeen bool
}{
{
// No error, so nothing interesting either.
pErr: nil,
expEpoch: 0,
expPri: 1,
expTS: origTS,
expOrigTS: origTS,
},
{
// On uncertainty error, new epoch begins and node is seen.
// Timestamp moves ahead of the existing write.
pErr: func() *roachpb.Error {
pErr := roachpb.NewErrorWithTxn(
roachpb.NewReadWithinUncertaintyIntervalError(hlc.ZeroTimestamp, hlc.ZeroTimestamp),
&roachpb.Transaction{})
const nodeID = 1
pErr.GetTxn().UpdateObservedTimestamp(nodeID, plus10)
pErr.OriginNode = nodeID
return pErr
}(),
expEpoch: 1,
expPri: 1,
expTS: plus10,
expOrigTS: plus10,
nodeSeen: true,
},
{
// On abort, nothing changes but we get a new priority to use for
// the next attempt.
pErr: roachpb.NewErrorWithTxn(&roachpb.TransactionAbortedError{},
&roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{Timestamp: plus20, Priority: 10},
}),
expPri: 10,
},
{
// On failed push, new epoch begins just past the pushed timestamp.
// Additionally, priority ratchets up to just below the pusher's.
pErr: roachpb.NewErrorWithTxn(&roachpb.TransactionPushError{
PusheeTxn: roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{Timestamp: plus10, Priority: int32(10)},
},
},
&roachpb.Transaction{}),
expEpoch: 1,
expPri: 9,
expTS: plus10,
expOrigTS: plus10,
},
{
// On retry, restart with new epoch, timestamp and priority.
pErr: roachpb.NewErrorWithTxn(&roachpb.TransactionRetryError{},
&roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{Timestamp: plus10, Priority: int32(10)},
},
),
expEpoch: 1,
expPri: 10,
expTS: plus10,
expOrigTS: plus10,
},
}
for i, test := range testCases {
stopper := stop.NewStopper()
manual := hlc.NewManualClock(origTS.WallTime)
clock := hlc.NewClock(manual.UnixNano, 20*time.Nanosecond)
senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
var reply *roachpb.BatchResponse
if test.pErr == nil {
reply = ba.CreateReply()
}
return reply, test.pErr
}
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
ts := NewTxnCoordSender(
ambient,
senderFn(senderFunc),
clock,
false,
stopper,
MakeTxnMetrics(metric.TestSampleInterval),
)
db := client.NewDB(ts)
//.........这里部分代码省略.........
示例11: TestTxnCoordSenderNoDuplicateIntents
// TestTxnCoordSenderNoDuplicateIntents verifies that TxnCoordSender does not
// generate duplicate intents and that it merges intents for overlapping ranges.
func TestTxnCoordSenderNoDuplicateIntents(t *testing.T) {
defer leaktest.AfterTest(t)()
stopper := stop.NewStopper()
manual := hlc.NewManualClock(123)
clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
var expectedIntents []roachpb.Span
// The mock sender checks the intent spans attached to EndTransaction
// against expectedIntents, which is populated later by the test.
senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (
*roachpb.BatchResponse, *roachpb.Error) {
if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
et := rArgs.(*roachpb.EndTransactionRequest)
if !reflect.DeepEqual(et.IntentSpans, expectedIntents) {
t.Errorf("Invalid intents: %+v; expected %+v", et.IntentSpans, expectedIntents)
}
}
br := ba.CreateReply()
txnClone := ba.Txn.Clone()
br.Txn = &txnClone
br.Txn.Writing = true
return br, nil
}
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
ts := NewTxnCoordSender(
ambient,
senderFn(senderFunc),
clock,
false,
stopper,
MakeTxnMetrics(metric.TestSampleInterval),
)
defer stopper.Stop()
defer teardownHeartbeats(ts)
db := client.NewDB(ts)
txn := client.NewTxn(context.Background(), *db)
// Write to a, b, u-w before the final batch.
pErr := txn.Put(roachpb.Key("a"), []byte("value"))
if pErr != nil {
t.Fatal(pErr)
}
pErr = txn.Put(roachpb.Key("b"), []byte("value"))
if pErr != nil {
t.Fatal(pErr)
}
pErr = txn.DelRange(roachpb.Key("u"), roachpb.Key("w"))
if pErr != nil {
t.Fatal(pErr)
}
// The final batch overwrites key a and overlaps part of the u-w range.
b := txn.NewBatch()
b.Put(roachpb.Key("b"), []byte("value"))
b.Put(roachpb.Key("c"), []byte("value"))
b.DelRange(roachpb.Key("v"), roachpb.Key("z"), false)
// The expected intents are a, b, c, and u-z.
// Note: b appears once despite two writes, and u-w/v-z are merged to u-z.
expectedIntents = []roachpb.Span{
{Key: roachpb.Key("a"), EndKey: nil},
{Key: roachpb.Key("b"), EndKey: nil},
{Key: roachpb.Key("c"), EndKey: nil},
{Key: roachpb.Key("u"), EndKey: roachpb.Key("z")},
}
pErr = txn.CommitInBatch(b)
if pErr != nil {
t.Fatal(pErr)
}
}
示例12: testAdminAPITableDetailsInner
func testAdminAPITableDetailsInner(t *testing.T, dbName, tblName string) {
s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop()
ts := s.(*TestServer)
escDBName := parser.Name(dbName).String()
escTblName := parser.Name(tblName).String()
ac := log.AmbientContext{Tracer: tracing.NewTracer()}
ctx, span := ac.AnnotateCtxWithSpan(context.Background(), "test")
defer span.Finish()
session := sql.NewSession(
ctx, sql.SessionArgs{User: security.RootUser}, ts.sqlExecutor, nil, &sql.MemoryMetrics{})
session.StartUnlimitedMonitor()
defer session.Finish(ts.sqlExecutor)
setupQueries := []string{
fmt.Sprintf("CREATE DATABASE %s", escDBName),
fmt.Sprintf(`CREATE TABLE %s.%s (
nulls_allowed INT,
nulls_not_allowed INT NOT NULL DEFAULT 1000,
default2 INT DEFAULT 2,
string_default STRING DEFAULT 'default_string'
)`, escDBName, escTblName),
fmt.Sprintf("GRANT SELECT ON %s.%s TO readonly", escDBName, escTblName),
fmt.Sprintf("GRANT SELECT,UPDATE,DELETE ON %s.%s TO app", escDBName, escTblName),
fmt.Sprintf("CREATE INDEX descIdx ON %s.%s (default2 DESC)", escDBName, escTblName),
}
for _, q := range setupQueries {
res := ts.sqlExecutor.ExecuteStatements(session, q, nil)
defer res.Close()
if res.ResultList[0].Err != nil {
t.Fatalf("error executing '%s': %s", q, res.ResultList[0].Err)
}
}
// Perform API call.
var resp serverpb.TableDetailsResponse
url := fmt.Sprintf("databases/%s/tables/%s", dbName, tblName)
if err := getAdminJSONProto(s, url, &resp); err != nil {
t.Fatal(err)
}
// Verify columns.
expColumns := []serverpb.TableDetailsResponse_Column{
{Name: "nulls_allowed", Type: "INT", Nullable: true, DefaultValue: ""},
{Name: "nulls_not_allowed", Type: "INT", Nullable: false, DefaultValue: "1000"},
{Name: "default2", Type: "INT", Nullable: true, DefaultValue: "2"},
{Name: "string_default", Type: "STRING", Nullable: true, DefaultValue: "'default_string'"},
{Name: "rowid", Type: "INT", Nullable: false, DefaultValue: "unique_rowid()"},
}
testutils.SortStructs(expColumns, "Name")
testutils.SortStructs(resp.Columns, "Name")
if a, e := len(resp.Columns), len(expColumns); a != e {
t.Fatalf("# of result columns %d != expected %d (got: %#v)", a, e, resp.Columns)
}
for i, a := range resp.Columns {
e := expColumns[i]
if a.String() != e.String() {
t.Fatalf("mismatch at column %d: actual %#v != %#v", i, a, e)
}
}
// Verify grants.
expGrants := []serverpb.TableDetailsResponse_Grant{
{User: security.RootUser, Privileges: []string{"ALL"}},
{User: "app", Privileges: []string{"DELETE", "SELECT", "UPDATE"}},
{User: "readonly", Privileges: []string{"SELECT"}},
}
testutils.SortStructs(expGrants, "User")
testutils.SortStructs(resp.Grants, "User")
if a, e := len(resp.Grants), len(expGrants); a != e {
t.Fatalf("# of grant columns %d != expected %d (got: %#v)", a, e, resp.Grants)
}
for i, a := range resp.Grants {
e := expGrants[i]
sort.Strings(a.Privileges)
sort.Strings(e.Privileges)
if a.String() != e.String() {
t.Fatalf("mismatch at index %d: actual %#v != %#v", i, a, e)
}
}
// Verify indexes.
expIndexes := []serverpb.TableDetailsResponse_Index{
{Name: "primary", Column: "rowid", Direction: "ASC", Unique: true, Seq: 1},
{Name: "descIdx", Column: "default2", Direction: "DESC", Unique: false, Seq: 1},
}
testutils.SortStructs(expIndexes, "Column")
testutils.SortStructs(resp.Indexes, "Column")
for i, a := range resp.Indexes {
e := expIndexes[i]
if a.String() != e.String() {
t.Fatalf("mismatch at index %d: actual %#v != %#v", i, a, e)
}
}
// Verify range count.
if a, e := resp.RangeCount, int64(1); a != e {
//.........这里部分代码省略.........
示例13: NewServer
// NewServer creates a Server from a server.Context.
func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) {
if _, err := net.ResolveTCPAddr("tcp", cfg.AdvertiseAddr); err != nil {
return nil, errors.Errorf("unable to resolve RPC address %q: %v", cfg.AdvertiseAddr, err)
}
if cfg.AmbientCtx.Tracer == nil {
cfg.AmbientCtx.Tracer = tracing.NewTracer()
}
// Try loading the TLS configs before anything else.
if _, err := cfg.GetServerTLSConfig(); err != nil {
return nil, err
}
if _, err := cfg.GetClientTLSConfig(); err != nil {
return nil, err
}
s := &Server{
mux: http.NewServeMux(),
clock: hlc.NewClock(hlc.UnixNano, cfg.MaxOffset),
stopper: stopper,
cfg: cfg,
}
// Add a dynamic log tag value for the node ID.
//
// We need to pass an ambient context to the various server components, but we
// won't know the node ID until we Start(). At that point it's too late to
// change the ambient contexts in the components (various background processes
// will have already started using them).
//
// NodeIDContainer allows us to add the log tag to the context now and update
// the value asynchronously. It's not significantly more expensive than a
// regular tag since it's just doing an (atomic) load when a log/trace message
// is constructed. The node ID is set by the Store if this host was
// bootstrapped; otherwise a new one is allocated in Node.
s.cfg.AmbientCtx.AddLogTag("n", &s.nodeIDContainer)
ctx := s.AnnotateCtx(context.Background())
if s.cfg.Insecure {
log.Warning(ctx, "running in insecure mode, this is strongly discouraged. See --insecure.")
}
s.rpcContext = rpc.NewContext(s.cfg.AmbientCtx, s.cfg.Config, s.clock, s.stopper)
s.rpcContext.HeartbeatCB = func() {
if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
log.Fatal(ctx, err)
}
}
s.grpc = rpc.NewServer(s.rpcContext)
s.registry = metric.NewRegistry()
s.gossip = gossip.New(
s.cfg.AmbientCtx,
&s.nodeIDContainer,
s.rpcContext,
s.grpc,
s.cfg.GossipBootstrapResolvers,
s.stopper,
s.registry,
)
s.storePool = storage.NewStorePool(
s.cfg.AmbientCtx,
s.gossip,
s.clock,
s.rpcContext,
s.cfg.TimeUntilStoreDead,
s.stopper,
/* deterministic */ false,
)
// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
// the Closer. This prevents infinite retry loops from occurring during
// graceful server shutdown
//
// Such a loop loop occurs with the DistSender attempts a connection to the
// local server during shutdown, and receives an internal server error (HTTP
// Code 5xx). This is the correct error for a server to return when it is
// shutting down, and is normally retryable in a cluster environment.
// However, on a single-node setup (such as a test), retries will never
// succeed because the only server has been shut down; thus, thus the
// DistSender needs to know that it should not retry in this situation.
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = s.stopper.ShouldQuiesce()
distSenderCfg := kv.DistSenderConfig{
AmbientCtx: s.cfg.AmbientCtx,
Clock: s.clock,
RPCContext: s.rpcContext,
RPCRetryOptions: &retryOpts,
}
s.distSender = kv.NewDistSender(distSenderCfg, s.gossip)
txnMetrics := kv.MakeTxnMetrics(s.cfg.MetricsSampleInterval)
s.registry.AddMetricStruct(txnMetrics)
s.txnCoordSender = kv.NewTxnCoordSender(
s.cfg.AmbientCtx,
s.distSender,
s.clock,
s.cfg.Linearizable,
s.stopper,
//.........这里部分代码省略.........
示例14: bootstrapCluster
// bootstrapCluster bootstraps a multiple stores using the provided
// engines and cluster ID. The first bootstrapped store contains a
// single range spanning all keys. Initial range lookup metadata is
// populated for the range. Returns the cluster ID.
func bootstrapCluster(engines []engine.Engine, txnMetrics kv.TxnMetrics) (uuid.UUID, error) {
clusterID := uuid.MakeV4()
stopper := stop.NewStopper()
defer stopper.Stop()
cfg := storage.StoreConfig{}
// Long intervals effectively disable background scanning and consistency
// checks during bootstrap.
cfg.ScanInterval = 10 * time.Minute
cfg.MetricsSampleInterval = time.Duration(math.MaxInt64)
cfg.ConsistencyCheckInterval = 10 * time.Minute
cfg.Clock = hlc.NewClock(hlc.UnixNano)
cfg.AmbientCtx.Tracer = tracing.NewTracer()
// Create a KV DB with a local sender.
stores := storage.NewStores(cfg.AmbientCtx, cfg.Clock)
sender := kv.NewTxnCoordSender(cfg.AmbientCtx, stores, cfg.Clock, false, stopper, txnMetrics)
cfg.DB = client.NewDB(sender)
cfg.Transport = storage.NewDummyRaftTransport()
// Bootstrap each engine as its own store; only the first store seeds the
// initial range and the node ID allocator.
for i, eng := range engines {
sIdent := roachpb.StoreIdent{
ClusterID: clusterID,
NodeID: FirstNodeID,
StoreID: roachpb.StoreID(i + 1),
}
// The bootstrapping store will not connect to other nodes so its
// StoreConfig doesn't really matter.
s := storage.NewStore(cfg, eng, &roachpb.NodeDescriptor{NodeID: FirstNodeID})
// Verify the store isn't already part of a cluster.
if s.Ident.ClusterID != *uuid.EmptyUUID {
return uuid.UUID{}, errors.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
}
// Bootstrap store to persist the store ident.
if err := s.Bootstrap(sIdent); err != nil {
return uuid.UUID{}, err
}
// Create first range, writing directly to engine. Note this does
// not create the range, just its data. Only do this if this is the
// first store.
if i == 0 {
initialValues := GetBootstrapSchema().GetInitialValues()
if err := s.BootstrapRange(initialValues); err != nil {
return uuid.UUID{}, err
}
}
if err := s.Start(context.Background(), stopper); err != nil {
return uuid.UUID{}, err
}
stores.AddStore(s)
ctx := context.TODO()
// Initialize node and store ids. Only initialize the node once.
if i == 0 {
if nodeID, err := allocateNodeID(ctx, cfg.DB); nodeID != sIdent.NodeID || err != nil {
return uuid.UUID{}, errors.Errorf("expected to initialize node id allocator to %d, got %d: %s",
sIdent.NodeID, nodeID, err)
}
}
if storeID, err := allocateStoreIDs(ctx, sIdent.NodeID, 1, cfg.DB); storeID != sIdent.StoreID || err != nil {
return uuid.UUID{}, errors.Errorf("expected to initialize store id allocator to %d, got %d: %s",
sIdent.StoreID, storeID, err)
}
}
return clusterID, nil
}
示例15: TestClusterFlow
func TestClusterFlow(t *testing.T) {
defer leaktest.AfterTest(t)()
const numRows = 100
args := base.TestClusterArgs{ReplicationMode: base.ReplicationManual}
tc := serverutils.StartTestCluster(t, 3, args)
defer tc.Stopper().Stop()
sumDigitsFn := func(row int) parser.Datum {
sum := 0
for row > 0 {
sum += row % 10
row /= 10
}
return parser.NewDInt(parser.DInt(sum))
}
sqlutils.CreateTable(t, tc.ServerConn(0), "t",
"num INT PRIMARY KEY, digitsum INT, numstr STRING, INDEX s (digitsum)",
numRows,
sqlutils.ToRowFn(sqlutils.RowIdxFn, sumDigitsFn, sqlutils.RowEnglishFn))
kvDB := tc.Server(0).KVClient().(*client.DB)
desc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
makeIndexSpan := func(start, end int) TableReaderSpan {
var span roachpb.Span
prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(desc, desc.Indexes[0].ID))
span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
span.EndKey = append(span.EndKey, prefix...)
span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
return TableReaderSpan{Span: span}
}
// Set up table readers on three hosts feeding data into a join reader on
// the third host. This is a basic test for the distributed flow
// infrastructure, including local and remote streams.
//
// Note that the ranges won't necessarily be local to the table readers, but
// that doesn't matter for the purposes of this test.
// Start a span (useful to look at spans using Lighstep).
sp, err := tracing.JoinOrNew(tracing.NewTracer(), nil, "cluster test")
if err != nil {
t.Fatal(err)
}
ctx := opentracing.ContextWithSpan(context.Background(), sp)
defer sp.Finish()
tr1 := TableReaderSpec{
Table: *desc,
IndexIdx: 1,
OutputColumns: []uint32{0, 1},
Spans: []TableReaderSpan{makeIndexSpan(0, 8)},
}
tr2 := TableReaderSpec{
Table: *desc,
IndexIdx: 1,
OutputColumns: []uint32{0, 1},
Spans: []TableReaderSpan{makeIndexSpan(8, 12)},
}
tr3 := TableReaderSpec{
Table: *desc,
IndexIdx: 1,
OutputColumns: []uint32{0, 1},
Spans: []TableReaderSpan{makeIndexSpan(12, 100)},
}
jr := JoinReaderSpec{
Table: *desc,
OutputColumns: []uint32{2},
}
txn := client.NewTxn(ctx, *kvDB)
fid := FlowID{uuid.MakeV4()}
req1 := &SetupFlowRequest{Txn: txn.Proto}
req1.Flow = FlowSpec{
FlowID: fid,
Processors: []ProcessorSpec{{
Core: ProcessorCoreUnion{TableReader: &tr1},
Output: []OutputRouterSpec{{
Type: OutputRouterSpec_MIRROR,
Streams: []StreamEndpointSpec{
{StreamID: 0, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
},
}},
}},
}
req2 := &SetupFlowRequest{Txn: txn.Proto}
req2.Flow = FlowSpec{
FlowID: fid,
Processors: []ProcessorSpec{{
Core: ProcessorCoreUnion{TableReader: &tr2},
Output: []OutputRouterSpec{{
Type: OutputRouterSpec_MIRROR,
Streams: []StreamEndpointSpec{
{StreamID: 1, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
//.........这里部分代码省略.........