本文整理匯總了Golang中github.com/cockroachdb/cockroach/kv.GetDefaultDistSenderRetryOptions函數的典型用法代碼示例。如果您正苦於以下問題:Golang GetDefaultDistSenderRetryOptions函數的具體用法?Golang GetDefaultDistSenderRetryOptions怎麽用?Golang GetDefaultDistSenderRetryOptions使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了GetDefaultDistSenderRetryOptions函數的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: TestMultiRangeScanWithMaxResults
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		splitKeys []roachpb.Key
		keys      []roachpb.Key
	}{
		{[]roachpb.Key{roachpb.Key("m")},
			[]roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
		{[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
			[]roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
				roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
	}
	for i, tc := range testCases {
		// Run each test case inside a closure so the deferred s.Stop() fires
		// at the end of the iteration. A bare defer in the loop body would
		// not run until the whole test returns, keeping every test server
		// alive simultaneously.
		func() {
			s := StartTestServer(t)
			defer s.Stop()
			// Use the stopper's drain signal as the retry Closer so retries
			// abort promptly during server shutdown instead of looping.
			retryOpts := kv.GetDefaultDistSenderRetryOptions()
			retryOpts.Closer = s.stopper.ShouldDrain()
			ds := kv.NewDistSender(&kv.DistSenderContext{
				Clock:           s.Clock(),
				RPCContext:      s.RPCContext(),
				RPCRetryOptions: &retryOpts,
			}, s.Gossip())
			tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, tracing.NewTracer(),
				s.stopper, kv.NewTxnMetrics(metric.NewRegistry()))
			// Split the key space so the scans below span multiple ranges.
			for _, sk := range tc.splitKeys {
				if err := s.node.ctx.DB.AdminSplit(sk); err != nil {
					t.Fatal(err)
				}
			}
			// Write each key, using the key bytes as the value.
			for _, k := range tc.keys {
				put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
				if _, err := client.SendWrapped(tds, nil, put); err != nil {
					t.Fatal(err)
				}
			}
			// Try every possible ScanRequest startKey.
			for start := 0; start < len(tc.keys); start++ {
				// Try every possible maxResults, from 1 to beyond the size of key array.
				for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
					scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next(),
						int64(maxResults))
					reply, err := client.SendWrapped(tds, nil, scan)
					if err != nil {
						t.Fatal(err)
					}
					rows := reply.(*roachpb.ScanResponse).Rows
					if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
						t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
					} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
						// The scan's end key excludes the last key itself, so
						// asking for one more row than remains yields one fewer.
						t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
					}
				}
			}
		}()
	}
}
示例2: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
//
// NOTE(review): engines is not referenced in the visible body; presumably it
// is consumed by node bootstrap elsewhere — confirm against callers.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
*rpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
ctx := storage.StoreContext{}
stopper := stop.NewStopper()
ctx.Clock = hlc.NewClock(hlc.UnixNano)
nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
// Scan interval is set very long so background scanning does not interfere
// with the test.
ctx.ScanInterval = 10 * time.Hour
rpcServer := rpc.NewServer(nodeRPCContext)
grpcServer := grpc.NewServer()
tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
if err != nil {
t.Fatal(err)
}
// Serve both gRPC and the legacy RPC server on the same listener.
ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), addr, tlsConfig)
if err != nil {
t.Fatal(err)
}
g := gossip.New(nodeRPCContext, testContext.GossipBootstrapResolvers, stopper)
if gossipBS != nil {
// Handle possibility of a :0 port specification.
if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
gossipBS = ln.Addr()
}
r, err := resolver.NewResolverFromAddress(gossipBS)
if err != nil {
t.Fatalf("bad gossip address %s: %s", gossipBS, err)
}
g.SetResolvers([]resolver.Resolver{r})
g.Start(grpcServer, ln.Addr())
}
ctx.Gossip = g
// Use the stopper's drain signal as the retry Closer so DistSender retries
// abort promptly during shutdown instead of retrying forever.
retryOpts := kv.GetDefaultDistSenderRetryOptions()
retryOpts.Closer = stopper.ShouldDrain()
distSender := kv.NewDistSender(&kv.DistSenderContext{
Clock: ctx.Clock,
RPCContext: nodeRPCContext,
RPCRetryOptions: &retryOpts,
}, g)
tracer := tracing.NewTracer()
// Linearizable mode is disabled (false) for the test sender.
sender := kv.NewTxnCoordSender(distSender, ctx.Clock, false, tracer, stopper)
ctx.DB = client.NewDB(sender)
// TODO(bdarnell): arrange to have the transport closed.
// (or attach LocalRPCTransport.Close to the stopper)
ctx.Transport = storage.NewLocalRPCTransport(stopper)
ctx.EventFeed = util.NewFeed(stopper)
ctx.Tracer = tracer
node := NewNode(ctx, metric.NewRegistry(), stopper, nil)
return rpcServer, ln.Addr(), ctx.Clock, node, stopper
}
示例3: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
//
// NOTE(review): engines is not referenced in the visible body; presumably it
// is consumed by node bootstrap elsewhere — confirm against callers.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
ctx := storage.StoreContext{}
stopper := stop.NewStopper()
ctx.Clock = hlc.NewClock(hlc.UnixNano)
nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
// Background scan/consistency-check intervals are set very long so they do
// not interfere with the test.
ctx.ScanInterval = 10 * time.Hour
ctx.ConsistencyCheckInterval = 10 * time.Hour
grpcServer := rpc.NewServer(nodeRPCContext)
ln, err := util.ListenAndServeGRPC(stopper, grpcServer, addr)
if err != nil {
t.Fatal(err)
}
serverCtx := NewTestContext()
g := gossip.New(nodeRPCContext, serverCtx.GossipBootstrapResolvers, stopper)
if gossipBS != nil {
// Handle possibility of a :0 port specification.
if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
gossipBS = ln.Addr()
}
r, err := resolver.NewResolverFromAddress(gossipBS)
if err != nil {
t.Fatalf("bad gossip address %s: %s", gossipBS, err)
}
g.SetResolvers([]resolver.Resolver{r})
g.Start(grpcServer, ln.Addr())
}
ctx.Gossip = g
// Use the stopper's drain signal as the retry Closer so DistSender retries
// abort promptly during shutdown instead of retrying forever.
retryOpts := kv.GetDefaultDistSenderRetryOptions()
retryOpts.Closer = stopper.ShouldDrain()
distSender := kv.NewDistSender(&kv.DistSenderContext{
Clock: ctx.Clock,
RPCContext: nodeRPCContext,
RPCRetryOptions: &retryOpts,
}, g)
tracer := tracing.NewTracer()
// Linearizable mode is disabled (false) for the test sender.
sender := kv.NewTxnCoordSender(distSender, ctx.Clock, false, tracer, stopper,
kv.NewTxnMetrics(metric.NewRegistry()))
ctx.DB = client.NewDB(sender)
// Raft traffic is not exercised here, so a dummy transport suffices.
ctx.Transport = storage.NewDummyRaftTransport()
ctx.Tracer = tracer
node := NewNode(ctx, status.NewMetricsRecorder(ctx.Clock), stopper,
kv.NewTxnMetrics(metric.NewRegistry()), sql.MakeEventLogger(nil))
roachpb.RegisterInternalServer(grpcServer, node)
return grpcServer, ln.Addr(), ctx.Clock, node, stopper
}
示例4: NewServer
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
if ctx == nil {
return nil, util.Errorf("ctx must not be null")
}
if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
}
if ctx.Insecure {
log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
}
// Try loading the TLS configs before anything else.
if _, err := ctx.GetServerTLSConfig(); err != nil {
return nil, err
}
if _, err := ctx.GetClientTLSConfig(); err != nil {
return nil, err
}
s := &Server{
Tracer: tracing.NewTracer(),
ctx: ctx,
mux: http.NewServeMux(),
clock: hlc.NewClock(hlc.UnixNano),
stopper: stopper,
}
s.clock.SetMaxOffset(ctx.MaxOffset)
s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
stopper.RunWorker(func() {
s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
})
s.rpc = crpc.NewServer(s.rpcContext)
s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers, stopper)
s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)
feed := util.NewFeed(stopper)
// A custom RetryOptions is created which uses stopper.ShouldDrain() as
// the Closer. This prevents infinite retry loops from occurring during
// graceful server shutdown.
//
// Such a loop occurs when the DistSender attempts a connection to the
// local server during shutdown, and receives an internal server error (HTTP
// Code 5xx). This is the correct error for a server to return when it is
// shutting down, and is normally retryable in a cluster environment.
// However, on a single-node setup (such as a test), retries will never
// succeed because the only server has been shut down; thus the
// DistSender needs to know that it should not retry in this situation.
retryOpts := kv.GetDefaultDistSenderRetryOptions()
retryOpts.Closer = stopper.ShouldDrain()
ds := kv.NewDistSender(&kv.DistSenderContext{
Clock: s.clock,
RPCContext: s.rpcContext,
RPCRetryOptions: &retryOpts,
}, s.gossip)
txnRegistry := metric.NewRegistry()
txnMetrics := kv.NewTxnMetrics(txnRegistry)
sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, s.Tracer, s.stopper, txnMetrics)
s.db = client.NewDB(sender)
s.grpc = grpc.NewServer()
s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
s.kvDB = kv.NewDBServer(&s.ctx.Context, sender, stopper)
if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
return nil, err
}
s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock)
s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
sqlRegistry := metric.NewRegistry()
s.sqlExecutor = sql.NewExecutor(*s.db, s.gossip, s.leaseMgr, s.stopper, sqlRegistry)
s.pgServer = pgwire.MakeServer(&s.ctx.Context, s.sqlExecutor, sqlRegistry)
// TODO(bdarnell): make StoreConfig configurable.
nCtx := storage.StoreContext{
Clock: s.clock,
DB: s.db,
Gossip: s.gossip,
Transport: s.raftTransport,
ScanInterval: s.ctx.ScanInterval,
ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
EventFeed: feed,
Tracer: s.Tracer,
StorePool: s.storePool,
SQLExecutor: sql.InternalExecutor{
LeaseManager: s.leaseMgr,
},
LogRangeEvents: true,
AllocatorOptions: storage.AllocatorOptions{
AllowRebalance: true,
Mode: s.ctx.BalanceMode,
},
}
//.........這裏部分代碼省略.........
示例5: TestMultiRangeScanDeleteRange
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := StartTestServer(t)
	defer s.Stop()
	// Use the stopper's drain signal as the retry Closer so retries abort
	// promptly during server shutdown instead of looping.
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = s.stopper.ShouldDrain()
	ds := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.Clock(),
		RPCContext:      s.RPCContext(),
		RPCRetryOptions: &retryOpts,
	}, s.Gossip())
	tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, tracing.NewTracer(),
		s.stopper, kv.NewTxnMetrics(metric.NewRegistry()))
	// Split at "m" so the writes at "a" and "z" land on different ranges.
	if err := s.node.ctx.DB.AdminSplit("m"); err != nil {
		t.Fatal(err)
	}
	writes := []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}
	// A Get with an EndKey (i.e. a key range) is invalid and must fail.
	get := &roachpb.GetRequest{
		Span: roachpb.Span{Key: writes[0]},
	}
	get.EndKey = writes[len(writes)-1]
	if _, err := client.SendWrapped(tds, nil, get); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}
	var delTS roachpb.Timestamp
	for i, k := range writes {
		put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
		reply, err := client.SendWrapped(tds, nil, put)
		if err != nil {
			t.Fatal(err)
		}
		scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*roachpb.ScanRequest)
		// The Put ts may have been pushed by tsCache,
		// so make sure we see their values in our Scan.
		delTS = reply.(*roachpb.PutResponse).Timestamp
		reply, err = client.SendWrappedWith(tds, nil, roachpb.Header{Timestamp: delTS}, scan)
		if err != nil {
			t.Fatal(err)
		}
		sr := reply.(*roachpb.ScanResponse)
		if sr.Txn != nil {
			// This was the other way around at some point in the past.
			// Same below for Delete, etc.
			t.Errorf("expected no transaction in response header")
		}
		if rows := sr.Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}
	// Delete the whole written span and verify the deleted keys are echoed.
	del := &roachpb.DeleteRangeRequest{
		Span: roachpb.Span{
			Key:    writes[0],
			EndKey: roachpb.Key(writes[len(writes)-1]).Next(),
		},
		ReturnKeys: true,
	}
	reply, err := client.SendWrappedWith(tds, nil, roachpb.Header{Timestamp: delTS}, del)
	if err != nil {
		t.Fatal(err)
	}
	dr := reply.(*roachpb.DeleteRangeResponse)
	if dr.Txn != nil {
		t.Errorf("expected no transaction in response header")
	}
	if !reflect.DeepEqual(dr.Keys, writes) {
		// Both operands are key slices, so format with %v rather than %d
		// (the original used %d, which go vet flags for non-integer args).
		t.Errorf("expected %v keys to be deleted, but got %v instead", writes, dr.Keys)
	}
	// A transactional scan after the delete must see no rows and must keep
	// the supplied transaction in the response header.
	scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next(), 0).(*roachpb.ScanRequest)
	txn := &roachpb.Transaction{Name: "MyTxn"}
	reply, err = client.SendWrappedWith(tds, nil, roachpb.Header{Txn: txn}, scan)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*roachpb.ScanResponse)
	if txn := sr.Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := sr.Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}