This page collects typical usage examples of the Go function NewDistSender from the github.com/cockroachdb/cockroach/kv package. If you are wondering what NewDistSender does, how to call it, or what it looks like in real code, the curated examples below should help.
The following 15 code examples of the NewDistSender function are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Go code examples.
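Across the examples that follow, NewDistSender is almost always wired up the same way: construct a DistSender from a clock and a gossip instance, wrap it in a TxnCoordSender for transaction coordination, and hand the result to client.NewDB. Below is a minimal sketch of that recurring pattern, assuming the older DistSenderContext API and the five-argument NewTxnCoordSender used by most examples here (later CockroachDB versions rename the struct to DistSenderConfig and change several signatures); the newDB helper itself is hypothetical, not part of the cockroach codebase.

import (
    "github.com/cockroachdb/cockroach/client"
    "github.com/cockroachdb/cockroach/gossip"
    "github.com/cockroachdb/cockroach/kv"
    "github.com/cockroachdb/cockroach/util/hlc"
    "github.com/cockroachdb/cockroach/util/stop"
)

// newDB wires a DistSender and a TxnCoordSender into a client.DB.
func newDB(clock *hlc.Clock, g *gossip.Gossip, stopper *stop.Stopper) *client.DB {
    // The DistSender routes each request to the range holding the addressed
    // keys, consulting gossip for range descriptor and node address lookups.
    ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, g)
    // The TxnCoordSender layers transaction coordination (heartbeats,
    // retries) on top; false disables linearizable mode, nil means no tracer.
    sender := kv.NewTxnCoordSender(ds, clock, false /* linearizable */, nil /* tracer */, stopper)
    return client.NewDB(sender)
}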
Example 1: TestMultiRangeScanReverseScanInconsistent
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
    defer leaktest.AfterTest(t)
    s, db := setupMultipleRanges(t, "b")
    defer s.Stop()
    // Write keys "a" and "b", the latter of which is the first key in the
    // second range.
    keys := []string{"a", "b"}
    ts := []time.Time{}
    for i, key := range keys {
        b := &client.Batch{}
        b.Put(key, "value")
        if err := db.Run(b); err != nil {
            t.Fatal(err)
        }
        ts = append(ts, b.Results[0].Rows[0].Timestamp())
        log.Infof("%d: %s", i, b.Results[0].Rows[0].Timestamp())
    }
    // Do an inconsistent Scan/ReverseScan from a new DistSender and verify
    // it does the read at its local clock and doesn't receive an
    // OpRequiresTxnError. We set the local clock just below the second
    // key's timestamp to verify that only key "a" is read.
    manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
    clock := hlc.NewClock(manual.UnixNano)
    ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())
    // Scan.
    sa := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ScanRequest)
    reply, err := client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
        ReadConsistency: roachpb.INCONSISTENT,
    }, sa)
    if err != nil {
        t.Fatal(err)
    }
    sr := reply.(*roachpb.ScanResponse)
    if l := len(sr.Rows); l != 1 {
        t.Fatalf("expected 1 row; got %d", l)
    }
    if key := string(sr.Rows[0].Key); keys[0] != key {
        t.Errorf("expected key %q; got %q", keys[0], key)
    }
    // ReverseScan.
    rsa := roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ReverseScanRequest)
    reply, err = client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
        ReadConsistency: roachpb.INCONSISTENT,
    }, rsa)
    if err != nil {
        t.Fatal(err)
    }
    rsr := reply.(*roachpb.ReverseScanResponse)
    if l := len(rsr.Rows); l != 1 {
        t.Fatalf("expected 1 row; got %d", l)
    }
    if key := string(rsr.Rows[0].Key); keys[0] != key {
        t.Errorf("expected key %q; got %q", keys[0], key)
    }
}
Example 2: createTestStoreWithEngine
// createTestStoreWithEngine creates a test store using the given engine and clock.
// The caller is responsible for closing the store on exit.
func createTestStoreWithEngine(t *testing.T, eng engine.Engine, clock *hlc.Clock,
    bootstrap bool, sCtx *storage.StoreContext) (*storage.Store, *stop.Stopper) {
    stopper := stop.NewStopper()
    rpcContext := rpc.NewContext(&base.Context{}, clock, stopper)
    if sCtx == nil {
        // make a copy
        ctx := storage.TestStoreContext
        sCtx = &ctx
    }
    nodeDesc := &proto.NodeDescriptor{NodeID: 1}
    sCtx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
    localSender := kv.NewLocalSender()
    rpcSend := func(_ rpc.Options, _ string, _ []net.Addr,
        getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
        _ *rpc.Context) ([]gogoproto.Message, error) {
        call := proto.Call{
            Args:  getArgs(nil /* net.Addr */).(proto.Request),
            Reply: getReply().(proto.Response),
        }
        localSender.Send(context.Background(), call)
        return []gogoproto.Message{call.Reply}, call.Reply.Header().GoError()
    }
    // Mostly makes sure that we don't see a warning per request.
    {
        if err := sCtx.Gossip.AddInfoProto(gossip.MakeNodeIDKey(nodeDesc.NodeID), nodeDesc, time.Hour); err != nil {
            t.Fatal(err)
        }
        if err := sCtx.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
            t.Fatal(err)
        }
    }
    distSender := kv.NewDistSender(&kv.DistSenderContext{
        Clock:             clock,
        RPCSend:           rpcSend,     // defined above
        RangeDescriptorDB: localSender, // for descriptor lookup
    }, sCtx.Gossip)
    sender := kv.NewTxnCoordSender(distSender, clock, false, nil, stopper)
    sCtx.Clock = clock
    sCtx.DB = client.NewDB(sender)
    sCtx.Transport = multiraft.NewLocalRPCTransport(stopper)
    // TODO(bdarnell): arrange to have the transport closed.
    store := storage.NewStore(*sCtx, eng, nodeDesc)
    if bootstrap {
        if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
            t.Fatal(err)
        }
    }
    localSender.AddStore(store)
    if bootstrap {
        if err := store.BootstrapRange(sql.GetInitialSystemValues()); err != nil {
            t.Fatal(err)
        }
    }
    if err := store.Start(stopper); err != nil {
        t.Fatal(err)
    }
    return store, stopper
}
Example 3: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
    *rpc.Server, *hlc.Clock, *Node, *stop.Stopper) {
    ctx := storage.StoreContext{}
    stopper := stop.NewStopper()
    ctx.Clock = hlc.NewClock(hlc.UnixNano)
    nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
    ctx.ScanInterval = 10 * time.Hour
    rpcServer := rpc.NewServer(addr, nodeRPCContext)
    if err := rpcServer.Start(); err != nil {
        t.Fatal(err)
    }
    g := gossip.New(nodeRPCContext, testContext.GossipInterval, testContext.GossipBootstrapResolvers)
    if gossipBS != nil {
        // Handle possibility of a :0 port specification.
        if gossipBS == addr {
            gossipBS = rpcServer.Addr()
        }
        g.SetResolvers([]resolver.Resolver{resolver.NewResolverFromAddress(gossipBS)})
        g.Start(rpcServer, stopper)
    }
    ctx.Gossip = g
    sender := kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock}, g)
    ctx.DB = client.NewDB(sender)
    // TODO(bdarnell): arrange to have the transport closed.
    // (or attach LocalRPCTransport.Close to the stopper)
    ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
    ctx.EventFeed = util.NewFeed(stopper)
    node := NewNode(ctx)
    return rpcServer, ctx.Clock, node, stopper
}
Example 4: TestMultiRangeScanWithMaxResults
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with the MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
    defer leaktest.AfterTest(t)()
    testCases := []struct {
        splitKeys []roachpb.Key
        keys      []roachpb.Key
    }{
        {[]roachpb.Key{roachpb.Key("m")},
            []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
        {[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
            []roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
                roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
    }
    for i, tc := range testCases {
        s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
        defer s.Stopper().Stop()
        ts := s.(*TestServer)
        retryOpts := base.DefaultRetryOptions()
        retryOpts.Closer = ts.stopper.ShouldDrain()
        ds := kv.NewDistSender(&kv.DistSenderContext{
            Clock:           s.Clock(),
            RPCContext:      s.RPCContext(),
            RPCRetryOptions: &retryOpts,
        }, ts.Gossip())
        tds := kv.NewTxnCoordSender(ds, ts.Clock(), ts.Ctx.Linearizable, tracing.NewTracer(),
            ts.stopper, kv.NewTxnMetrics(metric.NewRegistry()))
        for _, sk := range tc.splitKeys {
            if err := ts.node.ctx.DB.AdminSplit(sk); err != nil {
                t.Fatal(err)
            }
        }
        for _, k := range tc.keys {
            put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
            if _, err := client.SendWrapped(tds, nil, put); err != nil {
                t.Fatal(err)
            }
        }
        // Try every possible ScanRequest startKey.
        for start := 0; start < len(tc.keys); start++ {
            // Try every possible maxResults, from 1 to beyond the size of key array.
            for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
                scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
                    int64(maxResults))
                reply, err := client.SendWrapped(tds, nil, scan)
                if err != nil {
                    t.Fatal(err)
                }
                rows := reply.(*roachpb.ScanResponse).Rows
                if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
                    t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
                } else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
                    t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
                }
            }
        }
    }
}
Example 5: TestMultiRangeScanWithMaxResults
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with the MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
    defer leaktest.AfterTest(t)
    testCases := []struct {
        splitKeys []proto.Key
        keys      []proto.Key
    }{
        {[]proto.Key{proto.Key("m")},
            []proto.Key{proto.Key("a"), proto.Key("z")}},
        {[]proto.Key{proto.Key("h"), proto.Key("q")},
            []proto.Key{proto.Key("b"), proto.Key("f"), proto.Key("k"),
                proto.Key("r"), proto.Key("w"), proto.Key("y")}},
    }
    for i, tc := range testCases {
        s := StartTestServer(t)
        ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
        tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, s.stopper)
        for _, sk := range tc.splitKeys {
            if err := s.node.ctx.DB.AdminSplit(sk); err != nil {
                t.Fatal(err)
            }
        }
        var call proto.Call
        for _, k := range tc.keys {
            call = proto.PutCall(k, proto.Value{Bytes: k})
            call.Args.Header().User = storage.UserRoot
            tds.Send(context.Background(), call)
            if err := call.Reply.Header().GoError(); err != nil {
                t.Fatal(err)
            }
        }
        // Try every possible ScanRequest startKey.
        for start := 0; start < len(tc.keys); start++ {
            // Try every possible maxResults, from 1 to beyond the size of key array.
            for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
                scan := proto.ScanCall(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
                    int64(maxResults))
                scan.Args.Header().Timestamp = call.Reply.Header().Timestamp
                scan.Args.Header().User = storage.UserRoot
                tds.Send(context.Background(), scan)
                if err := scan.Reply.Header().GoError(); err != nil {
                    t.Fatal(err)
                }
                rows := scan.Reply.(*proto.ScanResponse).Rows
                if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
                    t.Fatalf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
                } else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
                    t.Fatalf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
                }
            }
        }
        defer s.Stop()
    }
}
Example 6: Start
func (m *multiTestContext) Start(t *testing.T, numStores int) {
    m.t = t
    if m.manualClock == nil {
        m.manualClock = hlc.NewManualClock(0)
    }
    if m.clock == nil {
        m.clock = hlc.NewClock(m.manualClock.UnixNano)
    }
    if m.gossip == nil {
        rpcContext := rpc.NewContext(&base.Context{}, m.clock, nil)
        m.gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
    }
    if m.clientStopper == nil {
        m.clientStopper = stop.NewStopper()
    }
    if m.transport == nil {
        m.transport = multiraft.NewLocalRPCTransport(m.clientStopper)
    }
    if m.storePool == nil {
        if m.timeUntilStoreDead == 0 {
            m.timeUntilStoreDead = storage.TestTimeUntilStoreDeadOff
        }
        m.storePool = storage.NewStorePool(m.gossip, m.timeUntilStoreDead, m.clientStopper)
    }
    // Always create the first sender.
    m.senders = append(m.senders, kv.NewLocalSender())
    rpcSend := func(_ rpc.Options, _ string, _ []net.Addr,
        getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
        _ *rpc.Context) ([]gogoproto.Message, error) {
        call := proto.Call{
            Args:  getArgs(nil /* net.Addr */).(proto.Request),
            Reply: getReply().(proto.Response),
        }
        m.senders[0].Send(context.Background(), call)
        return []gogoproto.Message{call.Reply}, call.Reply.Header().GoError()
    }
    if m.db == nil {
        distSender := kv.NewDistSender(&kv.DistSenderContext{
            Clock:             m.clock,
            RangeDescriptorDB: m.senders[0],
            RPCSend:           rpcSend,
        }, m.gossip)
        sender := kv.NewTxnCoordSender(distSender, m.clock, false, nil, m.clientStopper)
        m.db = client.NewDB(sender)
    }
    for i := 0; i < numStores; i++ {
        m.addStore()
    }
    if m.transportStopper == nil {
        m.transportStopper = stop.NewStopper()
    }
    m.transportStopper.AddCloser(m.transport)
}
Example 7: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
    *grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
    ctx := storage.StoreContext{}
    stopper := stop.NewStopper()
    ctx.Clock = hlc.NewClock(hlc.UnixNano)
    nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
    ctx.ScanInterval = 10 * time.Hour
    ctx.ConsistencyCheckInterval = 10 * time.Hour
    grpcServer := rpc.NewServer(nodeRPCContext)
    serverCtx := makeTestContext()
    g := gossip.New(
        context.Background(),
        nodeRPCContext,
        grpcServer,
        serverCtx.GossipBootstrapResolvers,
        stopper,
        metric.NewRegistry())
    ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
    if err != nil {
        t.Fatal(err)
    }
    if gossipBS != nil {
        // Handle possibility of a :0 port specification.
        if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
            gossipBS = ln.Addr()
        }
        r, err := resolver.NewResolverFromAddress(gossipBS)
        if err != nil {
            t.Fatalf("bad gossip address %s: %s", gossipBS, err)
        }
        g.SetResolvers([]resolver.Resolver{r})
        g.Start(ln.Addr())
    }
    ctx.Gossip = g
    retryOpts := base.DefaultRetryOptions()
    retryOpts.Closer = stopper.ShouldQuiesce()
    distSender := kv.NewDistSender(&kv.DistSenderConfig{
        Clock:           ctx.Clock,
        RPCContext:      nodeRPCContext,
        RPCRetryOptions: &retryOpts,
    }, g)
    ctx.Ctx = tracing.WithTracer(context.Background(), tracing.NewTracer())
    sender := kv.NewTxnCoordSender(ctx.Ctx, distSender, ctx.Clock, false, stopper,
        kv.MakeTxnMetrics())
    ctx.DB = client.NewDB(sender)
    ctx.Transport = storage.NewDummyRaftTransport()
    node := NewNode(ctx, status.NewMetricsRecorder(ctx.Clock), metric.NewRegistry(), stopper,
        kv.MakeTxnMetrics(), sql.MakeEventLogger(nil))
    roachpb.RegisterInternalServer(grpcServer, node)
    return grpcServer, ln.Addr(), ctx.Clock, node, stopper
}
Example 8: TestMultiRangeScanWithMaxResults
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with the MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
    defer leaktest.AfterTest(t)
    testCases := []struct {
        splitKeys []roachpb.Key
        keys      []roachpb.Key
    }{
        {[]roachpb.Key{roachpb.Key("m")},
            []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
        {[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
            []roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
                roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
    }
    for i, tc := range testCases {
        s := StartTestServer(t)
        ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
        tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, nil, s.stopper)
        for _, sk := range tc.splitKeys {
            if err := s.node.ctx.DB.AdminSplit(sk); err != nil {
                t.Fatal(err)
            }
        }
        for _, k := range tc.keys {
            put := roachpb.NewPut(k, roachpb.Value{Bytes: k})
            if _, err := client.SendWrapped(tds, nil, put); err != nil {
                t.Fatal(err)
            }
        }
        // Try every possible ScanRequest startKey.
        for start := 0; start < len(tc.keys); start++ {
            // Try every possible maxResults, from 1 to beyond the size of key array.
            for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
                scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
                    int64(maxResults))
                reply, err := client.SendWrapped(tds, nil, scan)
                if err != nil {
                    t.Fatal(err)
                }
                rows := reply.(*roachpb.ScanResponse).Rows
                if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
                    t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
                } else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
                    t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
                }
            }
        }
        defer s.Stop()
    }
}
Example 9: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
    *rpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
    ctx := storage.StoreContext{}
    stopper := stop.NewStopper()
    ctx.Clock = hlc.NewClock(hlc.UnixNano)
    nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
    ctx.ScanInterval = 10 * time.Hour
    rpcServer := rpc.NewServer(nodeRPCContext)
    grpcServer := grpc.NewServer()
    tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
    if err != nil {
        t.Fatal(err)
    }
    ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), addr, tlsConfig)
    if err != nil {
        t.Fatal(err)
    }
    g := gossip.New(nodeRPCContext, testContext.GossipBootstrapResolvers, stopper)
    if gossipBS != nil {
        // Handle possibility of a :0 port specification.
        if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
            gossipBS = ln.Addr()
        }
        r, err := resolver.NewResolverFromAddress(gossipBS)
        if err != nil {
            t.Fatalf("bad gossip address %s: %s", gossipBS, err)
        }
        g.SetResolvers([]resolver.Resolver{r})
        g.Start(grpcServer, ln.Addr())
    }
    ctx.Gossip = g
    retryOpts := kv.GetDefaultDistSenderRetryOptions()
    retryOpts.Closer = stopper.ShouldDrain()
    distSender := kv.NewDistSender(&kv.DistSenderContext{
        Clock:           ctx.Clock,
        RPCContext:      nodeRPCContext,
        RPCRetryOptions: &retryOpts,
    }, g)
    tracer := tracing.NewTracer()
    sender := kv.NewTxnCoordSender(distSender, ctx.Clock, false, tracer, stopper)
    ctx.DB = client.NewDB(sender)
    // TODO(bdarnell): arrange to have the transport closed.
    // (or attach LocalRPCTransport.Close to the stopper)
    ctx.Transport = storage.NewLocalRPCTransport(stopper)
    ctx.EventFeed = util.NewFeed(stopper)
    ctx.Tracer = tracer
    node := NewNode(ctx, metric.NewRegistry(), stopper, nil)
    return rpcServer, ln.Addr(), ctx.Clock, node, stopper
}
Example 10: TestMultiRangeScanInconsistent
// TestMultiRangeScanInconsistent verifies that a scan across ranges
// that doesn't require read consistency will set a timestamp using
// the clock local to the distributed sender.
func TestMultiRangeScanInconsistent(t *testing.T) {
    defer leaktest.AfterTest(t)
    s, db := setupMultipleRanges(t, "b")
    defer s.Stop()
    // Write keys "a" and "b", the latter of which is the first key in the
    // second range.
    keys := []string{"a", "b"}
    ts := []time.Time{}
    b := &client.Batch{}
    for _, key := range keys {
        b.Put(key, "value")
    }
    if err := db.Run(b); err != nil {
        t.Fatal(err)
    }
    for i := range keys {
        ts = append(ts, b.Results[i].Rows[0].Timestamp)
        log.Infof("%d: %s", i, b.Results[i].Rows[0].Timestamp)
    }
    // Do an inconsistent scan from a new dist sender and verify it does
    // the read at its local clock and doesn't receive an
    // OpRequiresTxnError. We set the local clock just below the second
    // key's timestamp to verify that only key "a" is read.
    manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
    clock := hlc.NewClock(manual.UnixNano)
    ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())
    call := proto.ScanCall(proto.Key("a"), proto.Key("c"), 0)
    sr := call.Reply.(*proto.ScanResponse)
    sa := call.Args.(*proto.ScanRequest)
    sa.ReadConsistency = proto.INCONSISTENT
    sa.User = storage.UserRoot
    ds.Send(context.Background(), call)
    if err := sr.GoError(); err != nil {
        t.Fatal(err)
    }
    if l := len(sr.Rows); l != 1 {
        t.Fatalf("expected 1 row; got %d", l)
    }
    if key := string(sr.Rows[0].Key); keys[0] != key {
        t.Errorf("expected key %q; got %q", keys[0], key)
    }
}
Example 11: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
    *rpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
    ctx := storage.StoreContext{}
    stopper := stop.NewStopper()
    ctx.Clock = hlc.NewClock(hlc.UnixNano)
    nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
    ctx.ScanInterval = 10 * time.Hour
    rpcServer := rpc.NewServer(nodeRPCContext)
    tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
    if err != nil {
        t.Fatal(err)
    }
    ln, err := util.ListenAndServe(stopper, rpcServer, addr, tlsConfig)
    if err != nil {
        t.Fatal(err)
    }
    g := gossip.New(nodeRPCContext, testContext.GossipBootstrapResolvers)
    if gossipBS != nil {
        // Handle possibility of a :0 port specification.
        if gossipBS == addr {
            gossipBS = ln.Addr()
        }
        r, err := resolver.NewResolverFromAddress(gossipBS)
        if err != nil {
            t.Fatalf("bad gossip address %s: %s", gossipBS, err)
        }
        g.SetResolvers([]resolver.Resolver{r})
        g.Start(rpcServer, ln.Addr(), stopper)
    }
    ctx.Gossip = g
    sender := kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock, RPCContext: nodeRPCContext}, g)
    ctx.DB = client.NewDB(sender)
    // TODO(bdarnell): arrange to have the transport closed.
    // (or attach LocalRPCTransport.Close to the stopper)
    ctx.Transport = storage.NewLocalRPCTransport(stopper)
    ctx.EventFeed = util.NewFeed(stopper)
    node := NewNode(ctx, metric.NewRegistry(), stopper)
    return rpcServer, ln.Addr(), ctx.Clock, node, stopper
}
Example 12: NewServer
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
    if ctx == nil {
        return nil, util.Error("ctx must not be null")
    }
    addr := ctx.Addr
    _, err := net.ResolveTCPAddr("tcp", addr)
    if err != nil {
        return nil, util.Errorf("unable to resolve RPC address %q: %v", addr, err)
    }
    if ctx.Insecure {
        log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
    }
    // Try loading the TLS configs before anything else.
    if _, err := ctx.GetServerTLSConfig(); err != nil {
        return nil, err
    }
    if _, err := ctx.GetClientTLSConfig(); err != nil {
        return nil, err
    }
    s := &Server{
        ctx:     ctx,
        mux:     http.NewServeMux(),
        clock:   hlc.NewClock(hlc.UnixNano),
        stopper: stopper,
    }
    s.clock.SetMaxOffset(ctx.MaxOffset)
    rpcContext := rpc.NewContext(&ctx.Context, s.clock, stopper)
    stopper.RunWorker(func() {
        rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
    })
    s.rpc = rpc.NewServer(util.MakeUnresolvedAddr("tcp", addr), rpcContext)
    s.stopper.AddCloser(s.rpc)
    s.gossip = gossip.New(rpcContext, s.ctx.GossipInterval, s.ctx.GossipBootstrapResolvers)
    feed := util.NewFeed(stopper)
    tracer := tracer.NewTracer(feed, addr)
    ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.clock}, s.gossip)
    sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, tracer, s.stopper)
    if s.db, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
        return nil, err
    }
    s.raftTransport, err = newRPCTransport(s.gossip, s.rpc, rpcContext)
    if err != nil {
        return nil, err
    }
    s.stopper.AddCloser(s.raftTransport)
    s.kvDB = kv.NewDBServer(&s.ctx.Context, sender)
    if s.ctx.ExperimentalRPCServer {
        if err = s.kvDB.RegisterRPC(s.rpc); err != nil {
            return nil, err
        }
    }
    s.sqlServer = sql.NewServer(&s.ctx.Context, s.db)
    // TODO(bdarnell): make StoreConfig configurable.
    nCtx := storage.StoreContext{
        Clock:           s.clock,
        DB:              s.db,
        Gossip:          s.gossip,
        Transport:       s.raftTransport,
        ScanInterval:    s.ctx.ScanInterval,
        ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
        EventFeed:       feed,
        Tracer:          tracer,
    }
    s.node = NewNode(nCtx)
    s.admin = newAdminServer(s.db, s.stopper)
    s.status = newStatusServer(s.db, s.gossip, ctx)
    s.tsDB = ts.NewDB(s.db)
    s.tsServer = ts.NewServer(s.tsDB)
    return s, nil
}
Example 13: NewServer
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
    if ctx == nil {
        return nil, util.Errorf("ctx must not be null")
    }
    if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
        return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
    }
    if ctx.Insecure {
        log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
    }
    // Try loading the TLS configs before anything else.
    if _, err := ctx.GetServerTLSConfig(); err != nil {
        return nil, err
    }
    if _, err := ctx.GetClientTLSConfig(); err != nil {
        return nil, err
    }
    s := &Server{
        ctx:     ctx,
        mux:     http.NewServeMux(),
        clock:   hlc.NewClock(hlc.UnixNano),
        stopper: stopper,
    }
    s.clock.SetMaxOffset(ctx.MaxOffset)
    s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
    stopper.RunWorker(func() {
        s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
    })
    s.rpc = crpc.NewServer(s.rpcContext)
    s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers)
    s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)
    feed := util.NewFeed(stopper)
    tracer := tracer.NewTracer(feed, ctx.Addr)
    ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.clock, RPCContext: s.rpcContext}, s.gossip)
    sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, tracer, s.stopper)
    s.db = client.NewDB(sender)
    var err error
    s.raftTransport, err = newRPCTransport(s.gossip, s.rpc, s.rpcContext)
    if err != nil {
        return nil, err
    }
    s.stopper.AddCloser(s.raftTransport)
    s.kvDB = kv.NewDBServer(&s.ctx.Context, sender)
    if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
        return nil, err
    }
    leaseMgr := sql.NewLeaseManager(0, *s.db, s.clock)
    leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
    s.sqlServer = sql.MakeServer(&s.ctx.Context, *s.db, s.gossip, leaseMgr)
    if err := s.sqlServer.RegisterRPC(s.rpc); err != nil {
        return nil, err
    }
    s.pgServer = pgwire.NewServer(&pgwire.Context{
        Context:  &s.ctx.Context,
        Executor: s.sqlServer.Executor,
        Stopper:  stopper,
    })
    // TODO(bdarnell): make StoreConfig configurable.
    nCtx := storage.StoreContext{
        Clock:           s.clock,
        DB:              s.db,
        Gossip:          s.gossip,
        Transport:       s.raftTransport,
        ScanInterval:    s.ctx.ScanInterval,
        ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
        EventFeed:       feed,
        Tracer:          tracer,
        StorePool:       s.storePool,
        AllocatorOptions: storage.AllocatorOptions{
            AllowRebalance: true,
            Mode:           s.ctx.BalanceMode,
        },
    }
    s.node = NewNode(nCtx)
    s.admin = newAdminServer(s.db, s.stopper)
    s.status = newStatusServer(s.db, s.gossip, ctx)
    s.tsDB = ts.NewDB(s.db)
    s.tsServer = ts.NewServer(s.tsDB)
    return s, nil
}
Example 14: NewServer
// NewServer creates a Server from a server.Context.
func NewServer(ctx Context, stopper *stop.Stopper) (*Server, error) {
    if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
        return nil, errors.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
    }
    if ctx.Insecure {
        log.Warning(context.TODO(), "running in insecure mode, this is strongly discouraged. See --insecure.")
    }
    // Try loading the TLS configs before anything else.
    if _, err := ctx.GetServerTLSConfig(); err != nil {
        return nil, err
    }
    if _, err := ctx.GetClientTLSConfig(); err != nil {
        return nil, err
    }
    s := &Server{
        Tracer:  tracing.NewTracer(),
        ctx:     ctx,
        mux:     http.NewServeMux(),
        clock:   hlc.NewClock(hlc.UnixNano),
        stopper: stopper,
    }
    s.clock.SetMaxOffset(ctx.MaxOffset)
    s.rpcContext = rpc.NewContext(ctx.Context, s.clock, s.stopper)
    s.rpcContext.HeartbeatCB = func() {
        if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
            log.Fatal(context.TODO(), err)
        }
    }
    s.grpc = rpc.NewServer(s.rpcContext)
    s.registry = metric.NewRegistry()
    s.gossip = gossip.New(s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
    s.storePool = storage.NewStorePool(
        s.gossip,
        s.clock,
        s.rpcContext,
        ctx.ReservationsEnabled,
        ctx.TimeUntilStoreDead,
        s.stopper,
    )
    // A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
    // the Closer. This prevents infinite retry loops from occurring during
    // graceful server shutdown.
    //
    // Such a loop occurs when the DistSender attempts a connection to the
    // local server during shutdown, and receives an internal server error (HTTP
    // Code 5xx). This is the correct error for a server to return when it is
    // shutting down, and is normally retryable in a cluster environment.
    // However, on a single-node setup (such as a test), retries will never
    // succeed because the only server has been shut down; thus, the
    // DistSender needs to know that it should not retry in this situation.
    retryOpts := base.DefaultRetryOptions()
    retryOpts.Closer = s.stopper.ShouldQuiesce()
    s.distSender = kv.NewDistSender(&kv.DistSenderContext{
        Clock:           s.clock,
        RPCContext:      s.rpcContext,
        RPCRetryOptions: &retryOpts,
    }, s.gossip)
    txnMetrics := kv.NewTxnMetrics(s.registry)
    sender := kv.NewTxnCoordSender(s.distSender, s.clock, ctx.Linearizable, s.Tracer,
        s.stopper, txnMetrics)
    s.db = client.NewDB(sender)
    s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
    s.kvDB = kv.NewDBServer(s.ctx.Context, sender, s.stopper)
    roachpb.RegisterExternalServer(s.grpc, s.kvDB)
    // Set up Lease Manager
    var lmKnobs sql.LeaseManagerTestingKnobs
    if ctx.TestingKnobs.SQLLeaseManager != nil {
        lmKnobs = *ctx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
    }
    s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
    s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
    // Set up the DistSQL server
    distSQLCtx := distsql.ServerContext{
        Context:    context.Background(),
        DB:         s.db,
        RPCContext: s.rpcContext,
    }
    s.distSQLServer = distsql.NewServer(distSQLCtx)
    distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)
    // Set up Executor
    eCtx := sql.ExecutorContext{
        Context:      context.Background(),
        DB:           s.db,
        Gossip:       s.gossip,
        LeaseManager: s.leaseMgr,
        Clock:        s.clock,
        DistSQLSrv:   s.distSQLServer,
    }
    if ctx.TestingKnobs.SQLExecutor != nil {
//.........rest of the code omitted here.........
Example 15: TestMultiRangeScanDeleteRange
// TestMultiRangeScanDeleteRange tests that commands which access
// multiple ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
    s := startTestServer(t)
    defer s.Stop()
    tds := kv.NewTxnCoordSender(kv.NewDistSender(s.Gossip()), s.Clock(), testContext.Linearizable)
    defer tds.Close()
    if err := s.node.db.Call(proto.AdminSplit,
        &proto.AdminSplitRequest{
            RequestHeader: proto.RequestHeader{
                Key: proto.Key("m"),
            },
            SplitKey: proto.Key("m"),
        }, &proto.AdminSplitResponse{}); err != nil {
        t.Fatal(err)
    }
    writes := []proto.Key{proto.Key("a"), proto.Key("z")}
    get := &client.Call{
        Method: proto.Get,
        Args:   proto.GetArgs(writes[0]),
        Reply:  &proto.GetResponse{},
    }
    get.Args.Header().User = storage.UserRoot
    get.Args.Header().EndKey = writes[len(writes)-1]
    tds.Send(get)
    if err := get.Reply.Header().GoError(); err == nil {
        t.Errorf("able to call Get with a key range: %v", get)
    }
    var call *client.Call
    for i, k := range writes {
        call = &client.Call{
            Method: proto.Put,
            Args:   proto.PutArgs(k, k),
            Reply:  &proto.PutResponse{},
        }
        call.Args.Header().User = storage.UserRoot
        tds.Send(call)
        if err := call.Reply.Header().GoError(); err != nil {
            t.Fatal(err)
        }
        scan := &client.Call{
            Method: proto.Scan,
            Args:   proto.ScanArgs(writes[0], writes[len(writes)-1].Next(), 0),
            Reply:  &proto.ScanResponse{},
        }
        // The Put ts may have been pushed by tsCache,
        // so make sure we see their values in our Scan.
        scan.Args.Header().Timestamp = call.Reply.Header().Timestamp
        scan.Args.Header().User = storage.UserRoot
        tds.Send(scan)
        if err := scan.Reply.Header().GoError(); err != nil {
            t.Fatal(err)
        }
        if scan.Reply.Header().Txn == nil {
            t.Errorf("expected Scan to be wrapped in a Transaction")
        }
        if rows := scan.Reply.(*proto.ScanResponse).Rows; len(rows) != i+1 {
            t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
        }
    }
    del := &client.Call{
        Method: proto.DeleteRange,
        Args: &proto.DeleteRangeRequest{
            RequestHeader: proto.RequestHeader{
                User:      storage.UserRoot,
                Key:       writes[0],
                EndKey:    proto.Key(writes[len(writes)-1]).Next(),
                Timestamp: call.Reply.Header().Timestamp,
            },
        },
        Reply: &proto.DeleteRangeResponse{},
    }
    tds.Send(del)
    if err := del.Reply.Header().GoError(); err != nil {
        t.Fatal(err)
    }
    if del.Reply.Header().Txn == nil {
        t.Errorf("expected DeleteRange to be wrapped in a Transaction")
    }
    if n := del.Reply.(*proto.DeleteRangeResponse).NumDeleted; n != int64(len(writes)) {
        t.Errorf("expected %d keys to be deleted, but got %d instead",
            len(writes), n)
    }
    scan := &client.Call{
        Method: proto.Scan,
        Args:   proto.ScanArgs(writes[0], writes[len(writes)-1].Next(), 0),
        Reply:  &proto.ScanResponse{},
    }
    scan.Args.Header().Timestamp = del.Reply.Header().Timestamp
    scan.Args.Header().User = storage.UserRoot
    scan.Args.Header().Txn = &proto.Transaction{Name: "MyTxn"}
    tds.Send(scan)
    if err := scan.Reply.Header().GoError(); err != nil {
        t.Fatal(err)
    }
    if txn := scan.Reply.Header().Txn; txn == nil || txn.Name != "MyTxn" {
        t.Errorf("wanted Txn to persist, but it changed to %v", txn)
    }
//.........rest of the code omitted here.........