This article collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/testutils.NewNodeTestBaseContext. If you are wondering what exactly the NewNodeTestBaseContext function does, how to call it, or where to find real examples of it in use, the curated code samples here should help.
A total of 15 code examples of the NewNodeTestBaseContext function are shown below, sorted by popularity by default.
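Before diving into the examples, here is a minimal sketch of the pattern nearly all of them share: testutils.NewNodeTestBaseContext() builds a base context preloaded with the embedded test node certificates, and that base context is then wrapped in an rpc.Context together with a clock and a stopper. The helper name newTestRPCContext and the import paths are assumptions for illustration (they mirror the pre-"pkg/" package layout the examples below appear to use), not an API defined by CockroachDB.

package example

// Assumed imports, matching the package layout used by the examples below.
import (
	"github.com/cockroachdb/cockroach/rpc"
	"github.com/cockroachdb/cockroach/testutils"
	"github.com/cockroachdb/cockroach/util/hlc"
	"github.com/cockroachdb/cockroach/util/stop"
)

// newTestRPCContext (hypothetical helper) wires together the pieces that
// recur throughout the examples: a manual clock, a stopper, and a node
// test base context.
func newTestRPCContext() (*rpc.Context, *stop.Stopper) {
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	// NewNodeTestBaseContext returns a base context configured with the
	// embedded test node certificates, so the resulting rpc.Context can
	// serve and dial TLS connections in tests.
	baseCtx := testutils.NewNodeTestBaseContext()
	return rpc.NewContext(baseCtx, clock, stopper), stopper
}

Examples 1, 2, 4, and 15 below follow exactly this shape; the gossip and raft-transport examples pass a nil clock instead.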
Example 1: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)
	ltc.lSender = newRetryableLocalSender(NewLocalSender())
	ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, nil, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)
	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.lSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
Example 2: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20, ltc.Stopper)
	ltc.localSender = NewLocalSender()
	var rpcSend rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		// TODO(tschottdorf): remove getReply().
		br, pErr := ltc.localSender.Send(context.Background(), *getArgs(nil).(*proto.BatchRequest))
		if br == nil {
			br = &proto.BatchResponse{}
		}
		if br.Error != nil {
			panic(proto.ErrorUnexpectedlySet(ltc.localSender, br))
		}
		br.Error = pErr
		return []gogoproto.Message{br}, nil
	}
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock:                    ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &defaultRPCRetryOptions,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,         // defined above
		RangeDescriptorDB:        ltc.localSender, // for descriptor lookup
	}, ltc.Gossip)
	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, nil /* tracer */, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)
	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.localSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
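As a hedged usage sketch (not part of the original listing): the doc comment above says Stop() shuts the cluster down, so a test driving this version of LocalTestCluster would presumably look roughly like the following. The test name and body are placeholders.

// Sketch only: assumes LocalTestCluster exposes Stop(), as its Start()
// comment indicates, and that *testing.T satisfies util.Tester.
func TestWithLocalCluster(t *testing.T) {
	ltc := &LocalTestCluster{}
	ltc.Start(t)
	defer ltc.Stop()
	// ltc.DB is a *client.DB backed by the single in-memory store.
	_ = ltc.DB
}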
Example 3: createTestDBWithContext
func createTestDBWithContext(
	t testing.TB, dbCtx client.DBContext,
) (*localtestcluster.LocalTestCluster, *TxnCoordSender) {
	s := &localtestcluster.LocalTestCluster{
		DBContext: &dbCtx,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	return s, s.Sender.(*TxnCoordSender)
}
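A hypothetical call site for this helper, reusing client.DefaultDBContext() the same way Example 9 does; the test name and any DBContext tweaks are placeholders for illustration.

func TestWithCustomDBContext(t *testing.T) {
	dbCtx := client.DefaultDBContext()
	// Adjust dbCtx here (e.g. TxnRetryOptions, as checkConcurrency does
	// in Example 9) before handing it to the helper.
	s, sender := createTestDBWithContext(t, dbCtx)
	defer s.Stop()
	_ = sender
}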
Example 4: newNodeTestContext
// newNodeTestContext returns an rpc.Context for testing.
// It is meant to be used by nodes.
func newNodeTestContext(clock *hlc.Clock, stopper *stop.Stopper) *Context {
	if clock == nil {
		clock = hlc.NewClock(hlc.UnixNano)
	}
	ctx := NewContext(testutils.NewNodeTestBaseContext(), clock, stopper)
	ctx.heartbeatInterval = 10 * time.Millisecond
	ctx.heartbeatTimeout = 2 * defaultHeartbeatInterval
	return ctx
}
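A minimal sketch of how such a helper tends to be consumed inside a test; the surrounding test function is illustrative, not from the original source.

func TestHeartbeatSketch(t *testing.T) {
	stopper := stop.NewStopper()
	defer stopper.Stop()
	// Passing a nil clock makes newNodeTestContext create a wall-clock
	// hlc.Clock, per the branch above.
	ctx := newNodeTestContext(nil, stopper)
	_ = ctx // hand ctx to the server/client under test
}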
Example 5: TestHTTPSenderSend
// TestHTTPSenderSend verifies sending posts.
func TestHTTPSenderSend(t *testing.T) {
	defer leaktest.AfterTest(t)
	server, addr := startTestHTTPServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Make sure SSL certs were properly specified.
		authenticationHook, err := security.AuthenticationHook(false /* !insecure */, r.TLS)
		if err != nil {
			t.Error(err)
		}
		if r.Method != "POST" {
			t.Errorf("expected method POST; got %s", r.Method)
		}
		if r.URL.Path != KVDBEndpoint+"Put" {
			t.Errorf("expected url %s; got %s", KVDBEndpoint+"Put", r.URL.Path)
		}
		// Unmarshal the request.
		reqBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Errorf("unexpected error reading body: %s", err)
		}
		args := &proto.PutRequest{}
		if err := util.UnmarshalRequest(r, reqBody, args, util.AllEncodings); err != nil {
			t.Errorf("unexpected error unmarshalling request: %s", err)
		}
		// Validate request against incoming user.
		if err := authenticationHook(args, false /*not public*/); err != nil {
			t.Error(err)
		}
		if !args.Key.Equal(testPutReq.Key) || !args.Timestamp.Equal(testPutReq.Timestamp) {
			t.Errorf("expected parsed %+v to equal %+v", args, testPutReq)
		}
		body, contentType, err := util.MarshalResponse(r, testPutResp, util.AllEncodings)
		if err != nil {
			t.Errorf("failed to marshal response: %s", err)
		}
		w.Header().Set(util.ContentTypeHeader, contentType)
		w.Write(body)
	}))
	defer server.Close()
	sender, err := newHTTPSender(addr, testutils.NewNodeTestBaseContext(), defaultRetryOptions)
	if err != nil {
		t.Fatal(err)
	}
	reply := &proto.PutResponse{}
	sender.Send(context.Background(), proto.Call{Args: testPutReq, Reply: reply})
	if reply.GoError() != nil {
		t.Errorf("expected success; got %s", reply.GoError())
	}
	if !reply.Timestamp.Equal(testPutResp.Timestamp) {
		t.Errorf("expected received %+v to equal %+v", reply, testPutResp)
	}
}
Example 6: newRaftTransportTestContext
func newRaftTransportTestContext(t testing.TB) *raftTransportTestContext {
	rttc := &raftTransportTestContext{
		t:          t,
		stopper:    stop.NewStopper(),
		transports: map[roachpb.NodeID]*storage.RaftTransport{},
	}
	rttc.nodeRPCContext = rpc.NewContext(testutils.NewNodeTestBaseContext(), nil, rttc.stopper)
	server := rpc.NewServer(rttc.nodeRPCContext) // never started
	rttc.gossip = gossip.New(rttc.nodeRPCContext, server, nil, rttc.stopper, metric.NewRegistry())
	rttc.gossip.SetNodeID(1)
	return rttc
}
Example 7: TestHTTPSenderRetryHTTPSendError
// TestHTTPSenderRetryHTTPSendError verifies that send is retried
// on all errors sending HTTP requests.
func TestHTTPSenderRetryHTTPSendError(t *testing.T) {
	defer leaktest.AfterTest(t)
	retryOptions := defaultRetryOptions
	retryOptions.InitialBackoff = 1 * time.Millisecond
	testCases := []func(*httptest.Server, http.ResponseWriter){
		// Send back an unparseable response but a success code on first try.
		func(s *httptest.Server, w http.ResponseWriter) {
			fmt.Fprintf(w, "\xff\xfe\x23\x44")
		},
		// Close the client connection.
		func(s *httptest.Server, w http.ResponseWriter) {
			s.CloseClientConnections()
		},
	}
	for i, testFunc := range testCases {
		count := 0
		var s *httptest.Server
		server, addr := startTestHTTPServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			count++
			if count == 1 {
				// On first retry, invoke the error function.
				testFunc(s, w)
				return
			}
			// Success on second try.
			body, contentType, err := util.MarshalResponse(r, testPutResp, util.AllEncodings)
			if err != nil {
				t.Errorf("%d: failed to marshal response: %s", i, err)
			}
			w.Header().Set(util.ContentTypeHeader, contentType)
			w.Write(body)
		}))
		s = server
		sender, err := newHTTPSender(addr, testutils.NewNodeTestBaseContext(), retryOptions)
		if err != nil {
			t.Fatal(err)
		}
		reply := &proto.PutResponse{}
		sender.Send(context.Background(), proto.Call{Args: testPutReq, Reply: reply})
		if reply.GoError() != nil {
			t.Errorf("%d: expected success; got %s", i, reply.GoError())
		}
		if count != 2 {
			t.Errorf("%d: expected retry", i)
		}
		server.Close()
	}
}
Example 8: startTestHTTPServer
func startTestHTTPServer(handler http.Handler) (*httptest.Server, string) {
	ctx := testutils.NewNodeTestBaseContext()
	httpServer := httptest.NewUnstartedServer(handler)
	tlsConfig, err := ctx.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}
	httpServer.TLS = tlsConfig
	httpServer.StartTLS()
	addr := httpServer.Listener.Addr().String()
	return httpServer, addr
}
Example 9: checkConcurrency
// checkConcurrency creates a history verifier, starts a new database
// and runs the verifier.
func checkConcurrency(
	name string,
	isolations []enginepb.IsolationType,
	txns []string,
	verify *verifier,
	t *testing.T,
) {
	verifier := newHistoryVerifier(name, txns, verify, t)
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = correctnessTestRetryOptions
	s := &localtestcluster.LocalTestCluster{
		DBContext:         &dbCtx,
		RangeRetryOptions: &correctnessTestRetryOptions,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	verifier.run(isolations, s.DB, t)
}
Example 10: benchmarkSingleRoundtripWithLatency
// benchmarkSingleRoundtripWithLatency runs a number of transactions writing to
// the same key back to back in a single round-trip. Latency is simulated
// by pausing before each RPC is sent.
func benchmarkSingleRoundtripWithLatency(b *testing.B, latency time.Duration) {
	s := &localtestcluster.LocalTestCluster{}
	s.Latency = latency
	s.Start(b, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	defer b.StopTimer()
	key := roachpb.Key("key")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if tErr := s.DB.Txn(func(txn *client.Txn) error {
			b := txn.NewBatch()
			b.Put(key, fmt.Sprintf("value-%d", i))
			return txn.CommitInBatch(b)
		}); tErr != nil {
			b.Fatal(tErr)
		}
	}
}
Example 11: TestSendAndReceive
func TestSendAndReceive(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeRPCContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), nil, stopper)
	g := gossip.New(nodeRPCContext, nil, stopper)
	g.SetNodeID(roachpb.NodeID(1))
	// Create several servers, each of which has two stores (a raft
	// node ID addresses a store). Node 1 has stores 1 and 2, node 2 has
	// stores 3 and 4, etc.
	//
	// We suppose that range 1 is replicated across the odd-numbered
	// stores in reverse order to ensure that the various IDs are not
	// equal: replica 1 is store 5, replica 2 is store 3, and replica 3
	// is store 1.
	const numNodes = 3
	const storesPerNode = 2
	nextNodeID := roachpb.NodeID(2)
	nextStoreID := roachpb.StoreID(2)
	// Per-node state.
	transports := map[roachpb.NodeID]*storage.RaftTransport{}
	// Per-store state.
	storeNodes := map[roachpb.StoreID]roachpb.NodeID{}
	channels := map[roachpb.StoreID]channelServer{}
	replicaIDs := map[roachpb.StoreID]roachpb.ReplicaID{
		1: 3,
		3: 2,
		5: 1,
	}
	messageTypes := []raftpb.MessageType{
		raftpb.MsgSnap,
		raftpb.MsgHeartbeat,
	}
	for nodeIndex := 0; nodeIndex < numNodes; nodeIndex++ {
		nodeID := nextNodeID
		nextNodeID++
		grpcServer := rpc.NewServer(nodeRPCContext)
		ln, err := util.ListenAndServeGRPC(stopper, grpcServer, util.TestAddr)
		if err != nil {
			t.Fatal(err)
		}
		addr := ln.Addr()
		// Have to call g.SetNodeID before calling g.AddInfo.
		g.ResetNodeID(roachpb.NodeID(nodeID))
		if err := g.AddInfoProto(gossip.MakeNodeIDKey(nodeID),
			&roachpb.NodeDescriptor{
				Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
			},
			time.Hour); err != nil {
			t.Fatal(err)
		}
		transport := storage.NewRaftTransport(storage.GossipAddressResolver(g), grpcServer, nodeRPCContext)
		transports[nodeID] = transport
		for storeIndex := 0; storeIndex < storesPerNode; storeIndex++ {
			storeID := nextStoreID
			nextStoreID++
			storeNodes[storeID] = nodeID
			channel := newChannelServer(numNodes*storesPerNode*len(messageTypes), 0)
			transport.Listen(storeID, channel.RaftMessage)
			channels[storeID] = channel
		}
	}
	messageTypeCounts := make(map[roachpb.StoreID]map[raftpb.MessageType]int)
	// Each store sends one snapshot and one heartbeat to each store, including
	// itself.
	for toStoreID, toNodeID := range storeNodes {
		if _, ok := messageTypeCounts[toStoreID]; !ok {
			messageTypeCounts[toStoreID] = make(map[raftpb.MessageType]int)
		}
		for fromStoreID, fromNodeID := range storeNodes {
			baseReq := storage.RaftMessageRequest{
				Message: raftpb.Message{
					From: uint64(fromStoreID),
					To:   uint64(toStoreID),
				},
				FromReplica: roachpb.ReplicaDescriptor{
					NodeID:  fromNodeID,
					StoreID: fromStoreID,
				},
				ToReplica: roachpb.ReplicaDescriptor{
					NodeID:  toNodeID,
					StoreID: toStoreID,
				},
			}
			for _, messageType := range messageTypes {
				req := baseReq
				// ... (remainder of this example omitted) ...
Example 12: TestInOrderDelivery
// TestInOrderDelivery verifies that for a given pair of nodes, raft
// messages are delivered in order.
func TestInOrderDelivery(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeRPCContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), nil, stopper)
	g := gossip.New(nodeRPCContext, nil, stopper)
	grpcServer := rpc.NewServer(nodeRPCContext)
	ln, err := util.ListenAndServeGRPC(stopper, grpcServer, util.TestAddr)
	if err != nil {
		t.Fatal(err)
	}
	const numMessages = 100
	nodeID := roachpb.NodeID(roachpb.NodeID(2))
	serverTransport := storage.NewRaftTransport(storage.GossipAddressResolver(g), grpcServer, nodeRPCContext)
	serverChannel := newChannelServer(numMessages, 10*time.Millisecond)
	serverTransport.Listen(roachpb.StoreID(nodeID), serverChannel.RaftMessage)
	addr := ln.Addr()
	// Have to set gossip.NodeID before calling gossip.AddInfoXXX.
	g.SetNodeID(nodeID)
	if err := g.AddInfoProto(gossip.MakeNodeIDKey(nodeID),
		&roachpb.NodeDescriptor{
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		},
		time.Hour); err != nil {
		t.Fatal(err)
	}
	clientNodeID := roachpb.NodeID(2)
	clientTransport := storage.NewRaftTransport(storage.GossipAddressResolver(g), nil, nodeRPCContext)
	for i := 0; i < numMessages; i++ {
		req := &storage.RaftMessageRequest{
			GroupID: 1,
			Message: raftpb.Message{
				To:     uint64(nodeID),
				From:   uint64(clientNodeID),
				Commit: uint64(i),
			},
			ToReplica: roachpb.ReplicaDescriptor{
				NodeID:    nodeID,
				StoreID:   roachpb.StoreID(nodeID),
				ReplicaID: roachpb.ReplicaID(nodeID),
			},
			FromReplica: roachpb.ReplicaDescriptor{
				NodeID:    clientNodeID,
				StoreID:   roachpb.StoreID(clientNodeID),
				ReplicaID: roachpb.ReplicaID(clientNodeID),
			},
		}
		if err := clientTransport.Send(req); err != nil {
			t.Errorf("failed to send message %d: %s", i, err)
		}
	}
	for i := 0; i < numMessages; i++ {
		req := <-serverChannel.ch
		if req.Message.Commit != uint64(i) {
			t.Errorf("messages out of order: got %d while expecting %d", req.Message.Commit, i)
		}
	}
}
Example 13: createTestDB
// createTestDB creates a local test server and starts it. The caller
// is responsible for stopping the test server.
func createTestDB(t testing.TB) (*localtestcluster.LocalTestCluster, *TxnCoordSender) {
	s := &localtestcluster.LocalTestCluster{}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	return s, s.Sender.(*TxnCoordSender)
}
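As a hedged illustration of what a caller does with the returned cluster and sender, here is a sketch that reuses the Txn/Batch pattern from Example 10; the test name, key, and value are placeholders.

// Sketch only: runs one transactional write against the test cluster's DB.
func TestPutViaTestDB(t *testing.T) {
	s, _ := createTestDB(t)
	defer s.Stop()
	if err := s.DB.Txn(func(txn *client.Txn) error {
		b := txn.NewBatch()
		b.Put(roachpb.Key("test-key"), "test-value")
		return txn.CommitInBatch(b)
	}); err != nil {
		t.Fatal(err)
	}
}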
Example 14: TestUseCerts
// This is a fairly high-level test of CA and node certificates.
// We construct SSL server and clients and use the generated certs.
func TestUseCerts(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Do not mock cert access for this test.
	security.ResetReadFileFn()
	defer ResetTest()
	certsDir := util.CreateTempDir(t, "certs_test")
	defer util.CleanupDir(certsDir)
	err := security.RunCreateCACert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		512)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	err = security.RunCreateNodeCert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey),
		512, []string{"127.0.0.1"})
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	err = security.RunCreateClientCert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		filepath.Join(certsDir, security.EmbeddedRootCert),
		filepath.Join(certsDir, security.EmbeddedRootKey),
		512, security.RootUser)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	// Load TLS Configs. This is what TestServer and HTTPClient do internally.
	_, err = security.LoadServerTLSConfig(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey))
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	_, err = security.LoadClientTLSConfig(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey))
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	// Start a test server and override certs.
	// We use a real context since we want generated certs.
	params := base.TestServerArgs{
		SSLCA:      filepath.Join(certsDir, security.EmbeddedCACert),
		SSLCert:    filepath.Join(certsDir, security.EmbeddedNodeCert),
		SSLCertKey: filepath.Join(certsDir, security.EmbeddedNodeKey),
	}
	s, _, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	// Insecure mode.
	clientContext := testutils.NewNodeTestBaseContext()
	clientContext.Insecure = true
	httpClient, err := clientContext.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	req, err := http.NewRequest("GET", s.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err := httpClient.Do(req)
	if err == nil {
		resp.Body.Close()
		t.Fatalf("Expected SSL error, got success")
	}
	// Secure mode but no Certs: permissive config.
	clientContext = testutils.NewNodeTestBaseContext()
	clientContext.Insecure = false
	clientContext.SSLCert = ""
	httpClient, err = clientContext.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	// Endpoint that does not enforce client auth (see: server/authentication_test.go)
	req, err = http.NewRequest("GET", s.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err = httpClient.Do(req)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// ... (remainder of this example omitted) ...
Example 15: newNodeTestContext
// newNodeTestContext returns an rpc.Context for testing.
// It is meant to be used by nodes.
func newNodeTestContext(clock *hlc.Clock, stopper *stop.Stopper) *rpc.Context {
	ctx := rpc.NewContext(testutils.NewNodeTestBaseContext(), clock, stopper)
	ctx.HeartbeatInterval = 10 * time.Millisecond
	ctx.HeartbeatTimeout = 5 * time.Second
	return ctx
}