This page collects typical usage examples of the Golang Context type from github.com/cockroachdb/cockroach/pkg/rpc. If you are wondering what the Golang Context type is for, how to use it, or what it looks like in practice, the curated examples below may help.
Three code examples of the Context type are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
Example 1: NewSender
// NewSender returns an implementation of Sender which exposes the Key-Value
// database provided by a Cockroach cluster by connecting via RPC to a
// Cockroach node.
//
// This must not be used by server.Server or any of its components, only by
// clients talking to a Cockroach cluster through the external interface.
func NewSender(ctx *rpc.Context, target string) (Sender, error) {
	conn, err := ctx.GRPCDial(target)
	if err != nil {
		return nil, err
	}
	return sender{roachpb.NewExternalClient(conn)}, nil
}
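A minimal sketch of how an external client might use NewSender, assuming an already-constructed *rpc.Context and the Sender interface's Send(ctx, BatchRequest) signature; the helper name, target address, and request contents are illustrative and not part of the example above.

// dialAndGet is a hypothetical helper, not part of the CockroachDB source.
// It dials a node via NewSender and issues a single Get wrapped in a batch.
func dialAndGet(rpcCtx *rpc.Context, addr string, key roachpb.Key) (*roachpb.BatchResponse, error) {
	sender, err := NewSender(rpcCtx, addr) // addr, e.g. "node1:26257", is illustrative
	if err != nil {
		return nil, err
	}
	var ba roachpb.BatchRequest
	ba.Add(roachpb.NewGet(key))
	br, pErr := sender.Send(context.Background(), ba)
	if pErr != nil {
		return nil, pErr.GoError()
	}
	return br, nil
}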
Example 2: grpcTransportFactoryImpl
// grpcTransportFactoryImpl is the default TransportFactory, using GRPC.
// Do not use this directly - use grpcTransportFactory instead.
//
// During race builds, we wrap this to hold on to and read all obtained
// requests in a tight loop, exposing data races; see transport_race.go.
func grpcTransportFactoryImpl(
	opts SendOptions, rpcContext *rpc.Context, replicas ReplicaSlice, args roachpb.BatchRequest,
) (Transport, error) {
	clients := make([]batchClient, 0, len(replicas))
	for _, replica := range replicas {
		conn, err := rpcContext.GRPCDial(replica.NodeDesc.Address.String())
		if err != nil {
			return nil, err
		}
		argsCopy := args
		argsCopy.Replica = replica.ReplicaDescriptor
		remoteAddr := replica.NodeDesc.Address.String()
		clients = append(clients, batchClient{
			remoteAddr: remoteAddr,
			conn:       conn,
			client:     roachpb.NewInternalClient(conn),
			args:       argsCopy,
			healthy:    rpcContext.IsConnHealthy(remoteAddr),
		})
	}
	// Put known-unhealthy clients last.
	splitHealthy(clients)
	return &grpcTransport{
		opts:           opts,
		rpcContext:     rpcContext,
		orderedClients: clients,
	}, nil
}
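The splitHealthy helper called above is not shown on this page. A minimal sketch of the stable partition it implies (healthy connections first, known-unhealthy ones last, relative order preserved within each group) could look like the following, using the standard library sort package and the batchClient.healthy field as seen above; this is a sketch of the implied behavior, not necessarily the upstream implementation.

// splitHealthy reorders clients in place so that clients with a healthy
// connection come first; the sort is stable, so order within each group
// is preserved.
func splitHealthy(clients []batchClient) {
	sort.Stable(byHealth(clients))
}

// byHealth sorts healthy clients before unhealthy ones.
type byHealth []batchClient

func (h byHealth) Len() int           { return len(h) }
func (h byHealth) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h byHealth) Less(i, j int) bool { return h[i].healthy && !h[j].healthy }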
Example 3: start
// start dials the remote addr and commences gossip once connected. Upon exit,
// the client is sent on the disconnected channel. This method starts client
// processing in a goroutine and returns immediately.
func (c *client) start(
	g *Gossip,
	disconnected chan *client,
	rpcCtx *rpc.Context,
	stopper *stop.Stopper,
	nodeID roachpb.NodeID,
	breaker *circuit.Breaker,
) {
	stopper.RunWorker(func() {
		ctx, cancel := context.WithCancel(c.AnnotateCtx(context.Background()))
		var wg sync.WaitGroup
		defer func() {
			// This closes the outgoing stream, causing any attempt to send or
			// receive to return an error.
			//
			// Note: it is still possible for incoming gossip to be processed after
			// this point.
			cancel()
			// The stream is closed, but there may still be some incoming gossip
			// being processed. Wait until that is complete to avoid racing the
			// client's removal against the discovery of its remote's node ID.
			wg.Wait()
			disconnected <- c
		}()
		consecFailures := breaker.ConsecFailures()
		var stream Gossip_GossipClient
		if err := breaker.Call(func() error {
			// Note: avoid using `grpc.WithBlock` here. This code is already
			// asynchronous from the caller's perspective, so the only effect of
			// `WithBlock` here is blocking shutdown - at the time of this writing,
			// that ends up making `kv` tests take twice as long.
			conn, err := rpcCtx.GRPCDial(c.addr.String())
			if err != nil {
				return err
			}
			if stream, err = NewGossipClient(conn).Gossip(ctx); err != nil {
				return err
			}
			return c.requestGossip(g, stream)
		}, 0); err != nil {
			if consecFailures == 0 {
				log.Warningf(ctx, "node %d: failed to start gossip client: %s", nodeID, err)
			}
			return
		}
		// Start gossiping.
		log.Infof(ctx, "node %d: started gossip client to %s", nodeID, c.addr)
		if err := c.gossip(ctx, g, stream, stopper, &wg); err != nil {
			if !grpcutil.IsClosedConnection(err) {
				g.mu.Lock()
				if c.peerID != 0 {
					log.Infof(ctx, "node %d: closing client to node %d (%s): %s", nodeID, c.peerID, c.addr, err)
				} else {
					log.Infof(ctx, "node %d: closing client to %s: %s", nodeID, c.addr, err)
				}
				g.mu.Unlock()
			}
		}
	})
}
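A rough sketch of how a supervisor loop might drive start, assuming an already-constructed *client (c), *Gossip (g), *rpc.Context (rpcCtx), and *stop.Stopper (stopper); the breaker here is created fresh with circuit.NewBreaker for illustration, whereas the real gossip code maintains a breaker per remote address, and this is not the package's actual call site.

// Hypothetical call site (a sketch, not the gossip package's actual code).
disconnected := make(chan *client, 1)
breaker := circuit.NewBreaker() // illustrative; real code reuses a per-address breaker
c.start(g, disconnected, rpcCtx, stopper, nodeID, breaker)
// start returns immediately; the client runs in its own goroutine and, on exit
// for any reason, sends itself on disconnected, which a supervisor can use to
// decide whether to redial the remote node.
closed := <-disconnected
_ = closed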