This article collects typical usage examples of the Call.Method method from the Go package github.com/cockroachdb/cockroach/proto. If you are wondering what Call.Method does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing type, github.com/cockroachdb/cockroach/proto.Call.
Thirteen code examples of Call.Method are shown below, sorted by popularity by default.
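Before the examples, here is a minimal sketch, not taken from the CockroachDB sources, of the pattern they all share: a type implementing the client.Sender interface receives a proto.Call and uses call.Method() to label logs, retry tags, or trace events before handing the call to a wrapped sender. The loggingSender type, its wrapped field, and the import paths are assumptions made purely for illustration.
package example

import (
	"github.com/cockroachdb/cockroach/client"
	"github.com/cockroachdb/cockroach/proto"
	"github.com/cockroachdb/cockroach/util/log"
	"golang.org/x/net/context"
)

// loggingSender is a hypothetical client.Sender wrapper that tags every call
// with its method name before delegating to the wrapped sender.
type loggingSender struct {
	wrapped client.Sender
}

// Send implements the client.Sender interface.
func (ls *loggingSender) Send(ctx context.Context, call proto.Call) {
	log.Infof("sending %s for key %q", call.Method(), call.Args.Header().Key)
	ls.wrapped.Send(ctx, call)
	if err := call.Reply.Header().GoError(); err != nil {
		log.Warningf("%s failed: %s", call.Method(), err)
	}
}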
Example 1: Send
// Send implements the client.Sender interface.
func (rls *retryableLocalSender) Send(_ context.Context, call proto.Call) {
// Instant retry to handle the case of a range split, which is
// exposed here as a RangeKeyMismatchError.
retryOpts := retry.Options{
Tag: fmt.Sprintf("routing %s locally", call.Method()),
}
// In local tests, the RPCs are not actually sent over the wire. We
// need to clone the Txn in order to avoid unexpected sharing
// between TxnCoordSender and client.Txn.
if header := call.Args.Header(); header.Txn != nil {
header.Txn = gogoproto.Clone(header.Txn).(*proto.Transaction)
}
err := retry.WithBackoff(retryOpts, func() (retry.Status, error) {
call.Reply.Header().Error = nil
rls.LocalSender.Send(context.TODO(), call)
// Check for range key mismatch error (this could happen if
// range was split between lookup and execution). In this case,
// reset header.Replica and engage retry loop.
if err := call.Reply.Header().GoError(); err != nil {
if _, ok := err.(*proto.RangeKeyMismatchError); ok {
// Clear request replica.
call.Args.Header().Replica = proto.Replica{}
return retry.Continue, err
}
}
return retry.Break, nil
})
if err != nil {
panic(fmt.Sprintf("local sender did not succeed: %s", err))
}
}
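Example 1 tags its retry loop with call.Method(); the same retry contract recurs in several later examples: retry.WithBackoff re-invokes the closure until it returns retry.Break (optionally with a final error), retry.Continue requests another attempt after backing off, and retry.Reset (seen in Example 10) restarts the backoff schedule. A hedged sketch of that contract follows, assuming the same illustrative import setup as above plus the retry package; doWork and isTransient are hypothetical placeholders, not CockroachDB functions.
// retryIllustration shows the Break / Continue contract these examples rely on.
func retryIllustration(doWork func() error, isTransient func(error) bool) error {
	opts := retry.Options{Tag: "illustrative operation"}
	return retry.WithBackoff(opts, func() (retry.Status, error) {
		if err := doWork(); err != nil {
			if isTransient(err) {
				return retry.Continue, err // transient failure: back off, then try again
			}
			return retry.Break, err // permanent failure: stop and surface the error
		}
		return retry.Break, nil // success: stop retrying
	})
}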
Example 2: Send
// Send implements the client.Sender interface. If the call is part
// of a transaction, the coordinator will initialize the transaction
// if it's not nil but has an empty ID.
func (tc *TxnCoordSender) Send(ctx context.Context, call proto.Call) {
header := call.Args.Header()
tc.maybeBeginTxn(header)
header.CmdID = header.GetOrCreateCmdID(tc.clock.PhysicalNow())
// This is the earliest point at which the request has a ClientCmdID and/or
// TxnID (if applicable). Begin a Trace which follows this request.
trace := tc.tracer.NewTrace(call.Args.Header())
defer trace.Finalize()
defer trace.Epoch(fmt.Sprintf("sending %s", call.Method()))()
defer func() {
if err := call.Reply.Header().GoError(); err != nil {
trace.Event(fmt.Sprintf("reply error: %T", err))
}
}()
ctx = tracer.ToCtx(ctx, trace)
// Process batch specially; otherwise, send via wrapped sender.
switch args := call.Args.(type) {
case *proto.BatchRequest:
trace.Event("batch processing")
tc.sendBatch(ctx, args, call.Reply.(*proto.BatchResponse))
default:
// TODO(tschottdorf): should treat all calls as Batch. After all, that
// will be almost all calls.
tc.sendOne(ctx, call)
}
}
Example 3: sendOne
func (db *testSender) sendOne(call proto.Call) {
switch call.Args.(type) {
case *proto.EndTransactionRequest:
safeSetGoError(call.Reply, util.Errorf("%s method not supported", call.Method()))
return
}
// Lookup range and direct request.
header := call.Args.Header()
if rng := db.store.LookupRange(header.Key, header.EndKey); rng != nil {
header.RangeID = rng.Desc().RangeID
replica := rng.GetReplica()
if replica == nil {
safeSetGoError(call.Reply, util.Errorf("own replica missing in range"))
}
header.Replica = *replica
reply, err := db.store.ExecuteCmd(context.Background(), call.Args)
if reply != nil {
gogoproto.Merge(call.Reply, reply)
}
if call.Reply.Header().Error != nil {
panic(proto.ErrorUnexpectedlySet)
}
if err != nil {
call.Reply.Header().SetGoError(err)
}
} else {
safeSetGoError(call.Reply, proto.NewRangeKeyMismatchError(header.Key, header.EndKey, nil))
}
}
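The safeSetGoError helper used above is not part of this excerpt. A plausible reconstruction, consistent with how the other examples guard against overwriting an already-set error (compare the proto.ErrorUnexpectedlySet panic in this example), might look like the following; treat it as an assumption rather than the verbatim helper.
// safeSetGoError (reconstructed): set a Go error on the reply, panicking if an
// error is already present, mirroring the ErrorUnexpectedlySet checks used elsewhere.
func safeSetGoError(reply proto.Response, err error) {
	if reply.Header().Error != nil {
		panic(proto.ErrorUnexpectedlySet)
	}
	reply.Header().SetGoError(err)
}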
Example 4: post
// post posts the call using the HTTP client. The call's method is
// appended to KVDBEndpoint and set as the URL path. The call's arguments
// are protobuf-serialized and written as the POST body. The content
// type is set to application/x-protobuf.
//
// On success, the response body is unmarshalled into call.Reply.
func (s *httpSender) post(call proto.Call) error {
retryOpts := s.retryOpts
retryOpts.Tag = fmt.Sprintf("%s %s", s.context.RequestScheme(), call.Method())
// Marshal the args into a request body.
body, err := gogoproto.Marshal(call.Args)
if err != nil {
return err
}
url := s.context.RequestScheme() + "://" + s.server + KVDBEndpoint + call.Method().String()
return retry.WithBackoff(retryOpts, func() (retry.Status, error) {
req, err := http.NewRequest("POST", url, bytes.NewReader(body))
if err != nil {
return retry.Break, err
}
req.Header.Add(util.ContentTypeHeader, util.ProtoContentType)
req.Header.Add(util.AcceptHeader, util.ProtoContentType)
req.Header.Add(util.AcceptEncodingHeader, util.SnappyEncoding)
resp, err := s.client.Do(req)
if err != nil {
return retry.Continue, err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
// We're cool.
case http.StatusServiceUnavailable, http.StatusGatewayTimeout, StatusTooManyRequests:
// Retry on service unavailable and request timeout.
// TODO(spencer): consider respecting the Retry-After header for
// backoff / retry duration.
return retry.Continue, errors.New(resp.Status)
default:
// Can't recover from all other errors.
return retry.Break, errors.New(resp.Status)
}
if resp.Header.Get(util.ContentEncodingHeader) == util.SnappyEncoding {
resp.Body = &snappyReader{body: resp.Body}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return retry.Continue, err
}
if err := gogoproto.Unmarshal(b, call.Reply); err != nil {
return retry.Continue, err
}
return retry.Break, nil
})
}
Example 5: Send
// Send implements the client.Sender interface. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup through each store's LookupRange method.
func (ls *LocalSender) Send(ctx context.Context, call proto.Call) {
var err error
var store *storage.Store
trace := tracer.FromCtx(ctx)
// If we aren't given a Replica, then a little bending over
// backwards here. This case applies exclusively to unittests.
header := call.Args.Header()
if header.RaftID == 0 || header.Replica.StoreID == 0 {
var repl *proto.Replica
var raftID proto.RaftID
raftID, repl, err = ls.lookupReplica(header.Key, header.EndKey)
if err == nil {
header.RaftID = raftID
header.Replica = *repl
}
}
ctx = log.Add(ctx,
log.Method, call.Method(),
log.Key, header.Key,
log.RaftID, header.RaftID)
if err == nil {
store, err = ls.GetStore(header.Replica.StoreID)
}
var reply proto.Response
if err == nil {
// For calls that read data within a txn, we can avoid uncertainty
// related retries in certain situations. If the node is in
// "CertainNodes", we need not worry about uncertain reads any
// more. Setting MaxTimestamp=Timestamp for the operation
// accomplishes that. See proto.Transaction.CertainNodes for details.
if header.Txn != nil && header.Txn.CertainNodes.Contains(header.Replica.NodeID) {
// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
trace.Event("read has no clock uncertainty")
header.Txn.MaxTimestamp = header.Txn.Timestamp
}
reply, err = store.ExecuteCmd(ctx, call.Args)
}
if reply != nil {
gogoproto.Merge(call.Reply, reply)
}
if call.Reply.Header().Error != nil {
panic(proto.ErrorUnexpectedlySet)
}
if err != nil {
call.Reply.Header().SetGoError(err)
}
}
Example 6: Send
// Send implements the client.Sender interface. If the call is part
// of a transaction, the coordinator will initialize the transaction
// if it's not nil but has an empty ID.
func (tc *TxnCoordSender) Send(ctx context.Context, call proto.Call) {
header := call.Args.Header()
tc.maybeBeginTxn(header)
header.CmdID = header.GetOrCreateCmdID(tc.clock.PhysicalNow())
// This is the earliest point at which the request has a ClientCmdID and/or
// TxnID (if applicable). Begin a Trace which follows this request.
trace := tc.tracer.NewTrace(call.Args.Header())
defer trace.Finalize()
defer trace.Epoch(fmt.Sprintf("sending %s", call.Method()))()
defer func() {
if err := call.Reply.Header().GoError(); err != nil {
trace.Event(fmt.Sprintf("reply error: %T", err))
}
}()
ctx = tracer.ToCtx(ctx, trace)
// Process batch specially; otherwise, send via wrapped sender.
switch args := call.Args.(type) {
case *proto.InternalBatchRequest:
trace.Event("batch processing")
tc.sendBatch(ctx, args, call.Reply.(*proto.InternalBatchResponse))
case *proto.BatchRequest:
// Convert the batch request to internal-batch request.
internalArgs := &proto.InternalBatchRequest{RequestHeader: args.RequestHeader}
internalReply := &proto.InternalBatchResponse{}
for i := range args.Requests {
internalArgs.Add(args.Requests[i].GetValue().(proto.Request))
}
tc.sendBatch(ctx, internalArgs, internalReply)
reply := call.Reply.(*proto.BatchResponse)
reply.ResponseHeader = internalReply.ResponseHeader
// Convert from internal-batch response to batch response.
for i := range internalReply.Responses {
reply.Add(internalReply.Responses[i].GetValue().(proto.Response))
}
default:
tc.sendOne(ctx, call)
}
}
Example 7: Send
//......... (part of the code omitted here) .........
// Rearrange the replicas so that those replicas with long common
// prefix of attributes end up first. If there's no prefix, this is a
// no-op.
order := ds.optimizeReplicaOrder(replicas)
// If this request needs to go to a leader and we know who that is, move
// it to the front.
if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
leader.StoreID > 0 {
if i := replicas.FindReplica(leader.StoreID); i >= 0 {
replicas.MoveToFront(i)
order = rpc.OrderStable
}
}
return ds.sendRPC(desc.RaftID, replicas, order, args, curReply)
}()
if err != nil {
// For an RPC error to occur, we must've been unable to contact any
// replicas. In this case, likely all nodes are down (or not getting back
// to us within a reasonable amount of time).
// We may simply not be trying to talk to the up-to-date replicas, so
// clearing the descriptor here should be a good idea.
// TODO(tschottdorf): If a replica group goes dead, this will cause clients
// to put high read pressure on the first range, so there should be some
// rate limiting here.
ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
} else {
err = curReply.Header().GoError()
}
if err != nil {
if log.V(1) {
log.Warningf("failed to invoke %s: %s", call.Method(), err)
}
// If retryable, allow retry. For range not found or range
// key mismatch errors, we don't backoff on the retry,
// but reset the backoff loop so we can retry immediately.
switch tErr := err.(type) {
case *proto.RangeNotFoundError, *proto.RangeKeyMismatchError:
// Range descriptor might be out of date - evict it.
ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
// On addressing errors, don't backoff; retry immediately.
r.Reset()
if log.V(1) {
log.Warning(err)
}
continue
case *proto.NotLeaderError:
newLeader := tErr.GetLeader()
// Verify that leader is a known replica according to the
// descriptor. If not, we've got a stale replica; evict cache.
// Next, cache the new leader.
if newLeader != nil {
if i, _ := desc.FindReplica(newLeader.StoreID); i == -1 {
if log.V(1) {
log.Infof("error indicates unknown leader %s, expunging descriptor %s", newLeader, desc)
}
ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
}
} else {
newLeader = &proto.Replica{}
}
ds.updateLeaderCache(proto.RaftID(desc.RaftID), *newLeader)
if log.V(1) {
Example 8: sendOne
//......... (part of the code omitted here) .........
trace.Event("coordinator spawns")
txnMeta = &txnMetadata{
txn: *txn,
keys: cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
firstUpdateNanos: tc.clock.PhysicalNow(),
lastUpdateNanos: tc.clock.PhysicalNow(),
timeoutDuration: tc.clientTimeout,
txnEnd: make(chan struct{}),
}
tc.txns[id] = txnMeta
if !tc.stopper.RunAsyncTask(func() {
tc.heartbeatLoop(id)
}) {
// The system is already draining and we can't start the
// heartbeat. We refuse new transactions for now because
// they're likely not going to have all intents committed.
// In principle, we can relax this as needed though.
call.Reply.Header().SetGoError(&proto.NodeUnavailableError{})
tc.Unlock()
tc.unregisterTxn(id)
return
}
}
txnMeta.addKeyRange(header.Key, header.EndKey)
}
// Update our record of this transaction.
if txnMeta != nil {
txnMeta.txn = *txn
txnMeta.setLastUpdate(tc.clock.PhysicalNow())
}
}
tc.Unlock()
}
// Cleanup intents and transaction map if end of transaction.
switch t := call.Reply.Header().GoError().(type) {
case *proto.TransactionStatusError:
// Likely already committed or more obscure errors such as epoch or
// timestamp regressions; consider it dead.
tc.cleanupTxn(trace, t.Txn)
case *proto.TransactionAbortedError:
// If already aborted, cleanup the txn on this TxnCoordSender.
tc.cleanupTxn(trace, t.Txn)
case *proto.OpRequiresTxnError:
// Run a one-off transaction with that single command.
if log.V(1) {
log.Infof("%s: auto-wrapping in txn and re-executing", call.Method())
}
// TODO(tschottdorf): this part is awkward. Consider resending here
// without starting a new call, which is hard to trace. Plus, the
// below depends on default configuration.
tmpDB, err := client.Open(
fmt.Sprintf("//%s?priority=%d",
call.Args.Header().User, call.Args.Header().GetUserPriority()),
client.SenderOpt(tc))
if err != nil {
log.Warning(err)
return
}
call.Reply.Reset()
if err := tmpDB.Txn(func(txn *client.Txn) error {
txn.SetDebugName("auto-wrap", 0)
b := &client.Batch{}
b.InternalAddCall(call)
return txn.CommitInBatch(b)
}); err != nil {
log.Warning(err)
}
case nil:
if txn := call.Reply.Header().Txn; txn != nil {
if _, ok := call.Args.(*proto.EndTransactionRequest); ok {
// If the --linearizable flag is set, we want to make sure that
// all the clocks in the system are past the commit timestamp
// of the transaction. This is guaranteed if either
// - the commit timestamp is MaxOffset behind startNS
// - MaxOffset ns were spent in this function
// when returning to the client. Below we choose the option
// that involves less waiting, which is likely the first one
// unless a transaction commits with an odd timestamp.
if tsNS := txn.Timestamp.WallTime; startNS > tsNS {
startNS = tsNS
}
sleepNS := tc.clock.MaxOffset() -
time.Duration(tc.clock.PhysicalNow()-startNS)
if tc.linearizable && sleepNS > 0 {
defer func() {
if log.V(1) {
log.Infof("%v: waiting %s on EndTransaction for linearizability", txn.Short(), util.TruncateDuration(sleepNS, time.Millisecond))
}
time.Sleep(sleepNS)
}()
}
if txn.Status != proto.PENDING {
tc.cleanupTxn(trace, *txn)
}
}
}
}
}
Example 9: Send
// Send implements the client.Sender interface. It verifies
// permissions and looks up the appropriate range based on the
// supplied key and sends the RPC according to the specified options.
//
// If the request spans multiple ranges (which is possible for Scan or
// DeleteRange requests), Send sends requests to the individual ranges
// sequentially and combines the results transparently.
//
// This may temporarily adjust the request headers, so the proto.Call
// must not be used concurrently until Send has returned.
func (ds *DistSender) Send(_ context.Context, call proto.Call) {
args := call.Args
finalReply := call.Reply
endKey := args.Header().EndKey
// Verify permissions.
if err := ds.verifyPermissions(call.Args); err != nil {
call.Reply.Header().SetGoError(err)
return
}
// In the event that timestamp isn't set and read consistency isn't
// required, set the timestamp using the local clock.
if args.Header().ReadConsistency == proto.INCONSISTENT && args.Header().Timestamp.Equal(proto.ZeroTimestamp) {
// Make sure that after the call, args hasn't changed.
defer func(timestamp proto.Timestamp) {
args.Header().Timestamp = timestamp
}(args.Header().Timestamp)
args.Header().Timestamp = ds.clock.Now()
}
// If this is a bounded request, we will change its bound as we receive
// replies. This undoes that when we return.
boundedArgs, _ := args.(proto.Bounded)
if boundedArgs != nil {
defer func(n int64) {
boundedArgs.SetBound(n)
}(boundedArgs.GetBound())
}
// Retry logic for lookup of range by key and RPCs to range replicas.
retryOpts := ds.rpcRetryOptions
retryOpts.Tag = "routing " + call.Method().String() + " rpc"
curReply := finalReply
for {
call.Reply = curReply
curReply.Header().Reset()
var desc, descNext *proto.RangeDescriptor
err := retry.WithBackoff(retryOpts, func() (retry.Status, error) {
var err error
// Get range descriptor (or, when spanning range, descriptors).
// sendAttempt below may clear them on certain errors, so we
// refresh (likely from the cache) on every retry.
desc, descNext, err = ds.getDescriptors(call)
// getDescriptors may fail retryably if the first range isn't
// available via Gossip.
if err != nil {
if rErr, ok := err.(util.Retryable); ok && rErr.CanRetry() {
return retry.Continue, err
}
return retry.Break, err
}
// Truncate the request to our current range, making sure not to
// touch it unless we have to (it is illegal to send EndKey on
// commands which do not operate on ranges).
if descNext != nil {
args.Header().EndKey = desc.EndKey
defer func() {
// "Untruncate" EndKey to original.
args.Header().EndKey = endKey
}()
}
return ds.sendAttempt(desc, call)
})
// Immediately return if querying a range failed non-retryably.
// For multi-range requests, we return the failing range's reply.
if err != nil {
call.Reply.Header().SetGoError(err)
return
}
if finalReply != curReply {
// This was the second or later call in a multi-range request.
// Combine the new response with the existing one.
if cFinalReply, ok := finalReply.(proto.Combinable); ok {
cFinalReply.Combine(curReply)
} else {
// This should never apply in practice, as we'll only end up here
// for range-spanning requests.
call.Reply.Header().SetGoError(util.Errorf("multi-range request with non-combinable response type"))
return
}
}
// If this request has a bound, such as MaxResults in
// ScanRequest, check whether enough rows have been retrieved.
//......... (part of the code omitted here) .........
Example 10: sendAttempt
// sendAttempt is invoked by Send and handles retry logic and cache eviction
// for a call sent to a single range. It returns a retry status, which is Break
// on success and either Break, Continue or Reset depending on error condition.
// This method is expected to be invoked from within a backoff / retry loop to
// retry the send repeatedly (e.g. to continue processing after a critical node
// becomes available after downtime or the range descriptor is refreshed via
// lookup).
func (ds *DistSender) sendAttempt(desc *proto.RangeDescriptor, call proto.Call) (retry.Status, error) {
leader := ds.leaderCache.Lookup(proto.RaftID(desc.RaftID))
// Try to send the call.
replicas := newReplicaSlice(ds.gossip, desc)
// Rearrange the replicas so that those replicas with long common
// prefix of attributes end up first. If there's no prefix, this is a
// no-op.
order := ds.optimizeReplicaOrder(replicas)
args := call.Args
reply := call.Reply
// If this request needs to go to a leader and we know who that is, move
// it to the front.
if !(proto.IsRead(args) && args.Header().ReadConsistency == proto.INCONSISTENT) &&
leader.StoreID > 0 {
if i := replicas.FindReplica(leader.StoreID); i >= 0 {
replicas.MoveToFront(i)
order = rpc.OrderStable
}
}
err := ds.sendRPC(desc.RaftID, replicas, order, args, reply)
if err != nil {
// For an RPC error to occur, we must've been unable to contact any
// replicas. In this case, likely all nodes are down (or not getting back
// to us within a reasonable amount of time).
// We may simply not be trying to talk to the up-to-date replicas, so
// clearing the descriptor here should be a good idea.
// TODO(tschottdorf): If a replica group goes dead, this will cause clients
// to put high read pressure on the first range, so there should be some
// rate limiting here.
ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
} else {
err = reply.Header().GoError()
}
if err != nil {
if log.V(1) {
log.Warningf("failed to invoke %s: %s", call.Method(), err)
}
// If retryable, allow retry. For range not found or range
// key mismatch errors, we don't backoff on the retry,
// but reset the backoff loop so we can retry immediately.
switch tErr := err.(type) {
case *proto.RangeNotFoundError, *proto.RangeKeyMismatchError:
// Range descriptor might be out of date - evict it.
ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
// On addressing errors, don't backoff; retry immediately.
return retry.Reset, err
case *proto.NotLeaderError:
newLeader := tErr.GetLeader()
// Verify that leader is a known replica according to the
// descriptor. If not, we've got a stale replica; evict cache.
// Next, cache the new leader.
if newLeader != nil {
if i, _ := desc.FindReplica(newLeader.StoreID); i == -1 {
if log.V(1) {
log.Infof("error indicates unknown leader %s, expunging descriptor %s", newLeader, desc)
}
ds.rangeCache.EvictCachedRangeDescriptor(args.Header().Key, desc)
}
} else {
newLeader = &proto.Replica{}
}
ds.updateLeaderCache(proto.RaftID(desc.RaftID), *newLeader)
return retry.Reset, err
case util.Retryable:
if tErr.CanRetry() {
return retry.Continue, err
}
}
return retry.Break, err
}
return retry.Break, nil
}
Example 11: sendOne
// sendOne sends a single call via the wrapped sender. If the call is
// part of a transaction, the TxnCoordSender adds the transaction to a
// map of active transactions and begins heartbeating it. Every
// subsequent call for the same transaction updates the lastUpdate
// timestamp to prevent live transactions from being considered
// abandoned and garbage collected. Read/write mutating requests have
// their key or key range added to the transaction's interval tree of
// key ranges for eventual cleanup via resolved write intents.
//
// On success, and if the call is part of a transaction, the affected
// key range is recorded as live intents for eventual cleanup upon
// transaction commit. Upon successful txn commit, initiates cleanup
// of intents.
func (tc *TxnCoordSender) sendOne(call proto.Call) {
var startNS int64
header := call.Args.Header()
// If this call is part of a transaction...
if header.Txn != nil {
// Set the timestamp to the original timestamp for read-only
// commands and to the transaction timestamp for read/write
// commands.
if proto.IsReadOnly(call.Args) {
header.Timestamp = header.Txn.OrigTimestamp
} else {
header.Timestamp = header.Txn.Timestamp
}
// EndTransaction must have its key set to that of the txn.
if _, ok := call.Args.(*proto.EndTransactionRequest); ok {
header.Key = header.Txn.Key
// Remember when EndTransaction started in case we want to
// be linearizable.
startNS = tc.clock.PhysicalNow()
}
}
// Send the command through wrapped sender.
tc.wrapped.Send(context.TODO(), call)
if header.Txn != nil {
// If not already set, copy the request txn.
if call.Reply.Header().Txn == nil {
call.Reply.Header().Txn = gogoproto.Clone(header.Txn).(*proto.Transaction)
}
tc.updateResponseTxn(header, call.Reply.Header())
}
if txn := call.Reply.Header().Txn; txn != nil {
tc.Lock()
txnMeta := tc.txns[string(txn.ID)]
// If this transactional command leaves transactional intents, add the key
// or key range to the intents map. If the transaction metadata doesn't yet
// exist, create it.
if call.Reply.Header().GoError() == nil {
if proto.IsTransactionWrite(call.Args) {
if txnMeta == nil {
txnMeta = &txnMetadata{
txn: *txn,
keys: cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
firstUpdateNanos: tc.clock.PhysicalNow(),
lastUpdateNanos: tc.clock.PhysicalNow(),
timeoutDuration: tc.clientTimeout,
txnEnd: make(chan struct{}),
}
id := string(txn.ID)
tc.txns[id] = txnMeta
tc.heartbeat(id)
}
txnMeta.addKeyRange(header.Key, header.EndKey)
}
// Update our record of this transaction.
if txnMeta != nil {
txnMeta.txn = *txn
txnMeta.setLastUpdate(tc.clock.PhysicalNow())
}
}
tc.Unlock()
}
// Cleanup intents and transaction map if end of transaction.
switch t := call.Reply.Header().GoError().(type) {
case *proto.TransactionStatusError:
// Likely already committed or more obscure errors such as epoch or
// timestamp regressions; consider it dead.
tc.cleanupTxn(t.Txn, nil)
case *proto.TransactionAbortedError:
// If already aborted, cleanup the txn on this TxnCoordSender.
tc.cleanupTxn(t.Txn, nil)
case *proto.OpRequiresTxnError:
// Run a one-off transaction with that single command.
if log.V(1) {
log.Infof("%s: auto-wrapping in txn and re-executing", call.Method())
}
tmpDB, err := client.Open(
fmt.Sprintf("//%s?priority=%d",
call.Args.Header().User, call.Args.Header().GetUserPriority()),
client.SenderOpt(tc))
if err != nil {
log.Warning(err)
return
}
//......... (part of the code omitted here) .........
Example 12: Send
//......... (part of the code omitted here) .........
}
continue
}
break
}
// At this point reply.Header().Error may be non-nil!
curReply, err = ds.sendAttempt(trace, args, desc)
descKey := args.Header().Key
if isReverseScan {
descKey = args.Header().EndKey
}
if err != nil {
trace.Event(fmt.Sprintf("send error: %T", err))
// For an RPC error to occur, we must've been unable to contact any
// replicas. In this case, likely all nodes are down (or not getting back
// to us within a reasonable amount of time).
// We may simply not be trying to talk to the up-to-date replicas, so
// clearing the descriptor here should be a good idea.
// TODO(tschottdorf): If a replica group goes dead, this will cause clients
// to put high read pressure on the first range, so there should be some
// rate limiting here.
ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, isReverseScan)
} else {
err = curReply.Header().GoError()
}
if err == nil {
break
}
if log.V(1) {
log.Warningf("failed to invoke %s: %s", call.Method(), err)
}
// If retryable, allow retry. For range not found or range
// key mismatch errors, we don't backoff on the retry,
// but reset the backoff loop so we can retry immediately.
switch tErr := err.(type) {
case *proto.RangeNotFoundError, *proto.RangeKeyMismatchError:
trace.Event(fmt.Sprintf("reply error: %T", err))
// Range descriptor might be out of date - evict it.
ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, isReverseScan)
// On addressing errors, don't backoff; retry immediately.
r.Reset()
if log.V(1) {
log.Warning(err)
}
continue
case *proto.NotLeaderError:
trace.Event(fmt.Sprintf("reply error: %T", err))
newLeader := tErr.GetLeader()
// Verify that leader is a known replica according to the
// descriptor. If not, we've got a stale replica; evict cache.
// Next, cache the new leader.
if newLeader != nil {
if i, _ := desc.FindReplica(newLeader.StoreID); i == -1 {
if log.V(1) {
log.Infof("error indicates unknown leader %s, expunging descriptor %s", newLeader, desc)
}
ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, isReverseScan)
}
} else {
newLeader = &proto.Replica{}
}
Example 13: Send
// Send sends call to Cockroach via an HTTP post. HTTP response codes
// which are retryable are retried with backoff in a loop using the
// default retry options.
func (s *httpSender) Send(_ context.Context, call proto.Call) {
if err := HTTPPost(s.ctx, call.Args, call.Reply, call.Method()); err != nil {
call.Reply.Header().SetGoError(err)
}
}
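Finally, a hedged end-to-end sketch of driving any of these senders by hand: build a proto.Call, send it, and inspect the reply via call.Method() and the reply header. The GetRequest/GetResponse pair, the RequestHeader literal, and the proto.Key parameter type follow the fields referenced throughout the examples, but the exact field set in this vintage of the proto package is an assumption.
// sendGet is an illustrative helper; sender may be any of the client.Sender
// implementations shown above (httpSender, DistSender, TxnCoordSender, LocalSender).
func sendGet(sender client.Sender, key proto.Key) error {
	call := proto.Call{
		Args:  &proto.GetRequest{RequestHeader: proto.RequestHeader{Key: key}},
		Reply: &proto.GetResponse{},
	}
	sender.Send(context.Background(), call)
	if err := call.Reply.Header().GoError(); err != nil {
		log.Warningf("%s failed: %s", call.Method(), err)
		return err
	}
	return nil
}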