This page collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/util/tracer.FromCtx. If you are wondering what exactly FromCtx does, how to call it, or what real-world uses of it look like, the curated examples below should help.
A total of 15 code examples of FromCtx are shown, ordered by popularity by default.
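Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: a trace is pulled out of the request context with tracer.FromCtx, long-running phases are bracketed with Epoch (which returns a closure that ends the phase), and point-in-time annotations are recorded with Event. Only FromCtx, Epoch and Event are taken from the examples below; the handleRequest function, the package layout, the context import path, and the assumption that FromCtx returns a usable (possibly no-op) trace when the context carries none are illustrative guesses, not part of the API shown on this page.

package example

import (
	"fmt"

	"github.com/cockroachdb/cockroach/util/tracer"
	"golang.org/x/net/context"
)

// handleRequest is a hypothetical handler illustrating the idiom shared by
// the examples below: FromCtx retrieves the request's trace, Epoch brackets
// a named phase, and Event records a point-in-time annotation.
func handleRequest(ctx context.Context, readOnly bool) error {
	// Pull the trace out of the context (assumed here to be safe to use even
	// when the context carries no trace).
	trace := tracer.FromCtx(ctx)
	// Epoch starts a phase and returns a closure that ends it; deferring the
	// returned closure makes the phase span the rest of this function.
	defer trace.Epoch("handling request")()
	if readOnly {
		trace.Event("taking the read-only path")
		return nil
	}
	trace.Event("taking the read-write path")
	return fmt.Errorf("writes are not supported in this sketch")
}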
Example 1: AddCmd

// AddCmd adds a command for execution on this range. The command's
// affected keys are verified to be contained within the range and the
// range's leadership is confirmed. The command is then dispatched
// either along the read-only execution path or the read-write Raft
// command queue.
func (r *Range) AddCmd(ctx context.Context, call proto.Call) error {
	args := call.Args
	// TODO(tschottdorf) Some (internal) requests go here directly, so they
	// won't be traced.
	trace := tracer.FromCtx(ctx)
	// Differentiate between admin, read-only and read-write.
	var reply proto.Response
	var err error
	if proto.IsAdmin(args) {
		defer trace.Epoch("admin path")()
		reply, err = r.addAdminCmd(ctx, args)
	} else if proto.IsReadOnly(args) {
		defer trace.Epoch("read-only path")()
		reply, err = r.addReadOnlyCmd(ctx, args)
	} else if proto.IsWrite(args) {
		defer trace.Epoch("read-write path")()
		reply, err = r.addWriteCmd(ctx, args, nil)
	} else {
		panic(fmt.Sprintf("don't know how to handle command %T", args))
	}
	if reply != nil {
		gogoproto.Merge(call.Reply, reply)
	}
	if err != nil {
		replyHeader := call.Reply.Header()
		if replyHeader.Error != nil {
			panic("the world is on fire")
		}
		replyHeader.SetGoError(err)
	}
	return err
}
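Note the double call in defer trace.Epoch("admin path")(): Epoch starts a named phase and returns a function that closes it, so it is that returned closure which gets deferred. Each of the three dispatch branches therefore appears in the trace as a single span covering the entire admin, read-only, or read-write call.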
Example 2: getLeaseForGossip

// getLeaseForGossip tries to obtain a leader lease. Only one of the replicas
// should gossip; the bool returned indicates whether it's us.
func (r *Range) getLeaseForGossip(ctx context.Context) (bool, error) {
	// If no Gossip available (some tests) or range too fresh, noop.
	if r.rm.Gossip() == nil || !r.isInitialized() {
		return false, util.Errorf("no gossip or range not initialized")
	}
	var hasLease bool
	var err error
	if !r.rm.Stopper().RunTask(func() {
		timestamp := r.rm.Clock().Now()
		// Check for or obtain the lease, if none active.
		err = r.redirectOnOrAcquireLeaderLease(tracer.FromCtx(ctx), timestamp)
		hasLease = err == nil
		if err != nil {
			switch e := err.(type) {
			// NotLeaderError means there is an active lease, leaseRejectedError
			// means we tried to get one but someone beat us to it.
			case *proto.NotLeaderError, *proto.LeaseRejectedError:
				err = nil
			default:
				// Any other error is worth being logged visibly.
				log.Warningc(ctx, "could not acquire lease for range gossip: %s", e)
			}
		}
	}) {
		err = util.Errorf("node is stopping")
	}
	return hasLease, err
}
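The design choice worth noting here: NotLeaderError and LeaseRejectedError are deliberately swallowed, because either one simply means another replica holds (or just won) the lease and should be the one to gossip; only unexpected errors are logged. tracer.FromCtx(ctx) is passed inline to redirectOnOrAcquireLeaderLease so that any lease-acquisition work is attributed to the caller's trace.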
Example 3: SendBatch

// SendBatch implements batch.Sender.
func (ls *LocalSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	trace := tracer.FromCtx(ctx)
	var store *storage.Store
	var err error
	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *proto.Replica
		var rangeID proto.RangeID
		rangeID, repl, err = ls.lookupReplica(ba.Key, ba.EndKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.Method, ba.Method(), // TODO(tschottdorf): Method() always `Batch`.
		log.Key, ba.Key,
		log.RangeID, ba.RangeID)
	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}
	var br *proto.BatchResponse
	if err == nil {
		// For calls that read data within a txn, we can avoid uncertainty
		// related retries in certain situations. If the node is in
		// "CertainNodes", we need not worry about uncertain reads any
		// more. Setting MaxTimestamp=Timestamp for the operation
		// accomplishes that. See proto.Transaction.CertainNodes for details.
		if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
			// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
			trace.Event("read has no clock uncertainty")
			ba.Txn.MaxTimestamp = ba.Txn.Timestamp
		}
		{
			var tmpR proto.Response
			// TODO(tschottdorf): &ba -> ba
			tmpR, err = store.ExecuteCmd(ctx, &ba)
			// TODO(tschottdorf): remove this dance once BatchResponse is returned.
			if tmpR != nil {
				br = tmpR.(*proto.BatchResponse)
				if br.Error != nil {
					panic(proto.ErrorUnexpectedlySet)
				}
			}
		}
	}
	// TODO(tschottdorf): Later error needs to be associated to an index
	// and ideally individual requests don't even have an error in their
	// header. See #1891.
	return br, err
}
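In contrast to Epoch, trace.Event records a point-in-time annotation with no matching "done" call; here it marks the optimization where a transactional read on a node listed in Txn.CertainNodes can set MaxTimestamp = Timestamp and thereby skip uncertainty-related retries entirely.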
Example 4: Send

// Send implements the client.Sender interface. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup through each store's LookupRange method.
func (ls *LocalSender) Send(ctx context.Context, call proto.Call) {
	var err error
	var store *storage.Store
	trace := tracer.FromCtx(ctx)
	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	header := call.Args.Header()
	if header.RaftID == 0 || header.Replica.StoreID == 0 {
		var repl *proto.Replica
		var raftID proto.RaftID
		raftID, repl, err = ls.lookupReplica(header.Key, header.EndKey)
		if err == nil {
			header.RaftID = raftID
			header.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.Method, call.Method(),
		log.Key, header.Key,
		log.RaftID, header.RaftID)
	if err == nil {
		store, err = ls.GetStore(header.Replica.StoreID)
	}
	var reply proto.Response
	if err == nil {
		// For calls that read data within a txn, we can avoid uncertainty
		// related retries in certain situations. If the node is in
		// "CertainNodes", we need not worry about uncertain reads any
		// more. Setting MaxTimestamp=Timestamp for the operation
		// accomplishes that. See proto.Transaction.CertainNodes for details.
		if header.Txn != nil && header.Txn.CertainNodes.Contains(header.Replica.NodeID) {
			// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
			trace.Event("read has no clock uncertainty")
			header.Txn.MaxTimestamp = header.Txn.Timestamp
		}
		reply, err = store.ExecuteCmd(ctx, call.Args)
	}
	if reply != nil {
		gogoproto.Merge(call.Reply, reply)
	}
	if call.Reply.Header().Error != nil {
		panic(proto.ErrorUnexpectedlySet)
	}
	if err != nil {
		call.Reply.Header().SetGoError(err)
	}
}
Example 5: addReadOnlyCmd

// addReadOnlyCmd updates the read timestamp cache and waits for any
// overlapping writes currently processing through Raft ahead of us to
// clear via the read queue.
func (r *Range) addReadOnlyCmd(ctx context.Context, args proto.Request, reply proto.Response) error {
	header := args.Header()
	if err := r.checkCmdHeader(header); err != nil {
		reply.Header().SetGoError(err)
		return err
	}
	// If read-consistency is set to INCONSISTENT, run directly.
	if header.ReadConsistency == proto.INCONSISTENT {
		// But disallow any inconsistent reads within txns.
		if header.Txn != nil {
			reply.Header().SetGoError(util.Error("cannot allow inconsistent reads within a transaction"))
			return reply.Header().GoError()
		}
		if header.Timestamp.Equal(proto.ZeroTimestamp) {
			header.Timestamp = r.rm.Clock().Now()
		}
		intents, err := r.executeCmd(r.rm.Engine(), nil, args, reply)
		if err == nil {
			r.handleSkippedIntents(args, intents)
		}
		return err
	} else if header.ReadConsistency == proto.CONSENSUS {
		reply.Header().SetGoError(util.Error("consensus reads not implemented"))
		return reply.Header().GoError()
	}
	// Add the read to the command queue to gate subsequent
	// overlapping commands until this command completes.
	cmdKey := r.beginCmd(header, true)
	// This replica must have leader lease to process a consistent read.
	if err := r.redirectOnOrAcquireLeaderLease(tracer.FromCtx(ctx), header.Timestamp); err != nil {
		r.endCmd(cmdKey, args, err, true /* readOnly */)
		reply.Header().SetGoError(err)
		return err
	}
	// Execute read-only command.
	intents, err := r.executeCmd(r.rm.Engine(), nil, args, reply)
	// Only update the timestamp cache if the command succeeded.
	r.endCmd(cmdKey, args, err, true /* readOnly */)
	if err == nil {
		r.handleSkippedIntents(args, intents)
	}
	return err
}
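This example shows the three read-consistency paths: INCONSISTENT reads skip both the command queue and the leader lease (and are rejected inside a transaction), CONSENSUS reads are unimplemented, and the default consistent read first enters the command queue and then acquires or confirms the leader lease, again handing tracer.FromCtx(ctx) directly to redirectOnOrAcquireLeaderLease.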
Example 6: AddCmd

// AddCmd adds a command for execution on this range. The command's
// affected keys are verified to be contained within the range and the
// range's leadership is confirmed. The command is then dispatched
// either along the read-only execution path or the read-write Raft
// command queue.
func (r *Range) AddCmd(ctx context.Context, call proto.Call) error {
	args, reply := call.Args, call.Reply
	// TODO(tschottdorf) Some (internal) requests go here directly, so they
	// won't be traced.
	trace := tracer.FromCtx(ctx)
	// Differentiate between admin, read-only and read-write.
	if proto.IsAdmin(args) {
		defer trace.Epoch("admin path")()
		return r.addAdminCmd(ctx, args, reply)
	} else if proto.IsReadOnly(args) {
		defer trace.Epoch("read path")()
		return r.addReadOnlyCmd(ctx, args, reply)
	}
	return r.addWriteCmd(ctx, args, reply, nil)
}
Example 7: Send

// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	trace := tracer.FromCtx(ctx)
	var store *Store
	var pErr *roachpb.Error
	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		rs := keys.Range(ba)
		rangeID, repl, pErr = ls.lookupReplica(rs.Key, rs.EndKey)
		if pErr == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if pErr == nil {
		store, pErr = ls.GetStore(ba.Replica.StoreID)
	}
	var br *roachpb.BatchResponse
	if pErr != nil {
		return nil, pErr
	}
	// For calls that read data within a txn, we can avoid uncertainty
	// related retries in certain situations. If the node is in
	// "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=Timestamp for the operation
	// accomplishes that. See roachpb.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		trace.Event("read has no clock uncertainty")
		ba.Txn.MaxTimestamp = ba.Txn.Timestamp
	}
	br, pErr = store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}
Example 8: processRaftCommand

// processRaftCommand processes a raft command by unpacking the command
// struct to get args and reply and then applying the command to the
// state machine via applyRaftCommand(). The error result is sent on
// the command's done channel, if available.
func (r *Range) processRaftCommand(idKey cmdIDKey, index uint64, raftCmd proto.InternalRaftCommand) error {
	if index == 0 {
		log.Fatalc(r.context(), "processRaftCommand requires a non-zero index")
	}
	r.Lock()
	cmd := r.pendingCmds[idKey]
	delete(r.pendingCmds, idKey)
	r.Unlock()
	args := raftCmd.Cmd.GetValue().(proto.Request)
	var reply proto.Response
	var ctx context.Context
	if cmd != nil {
		// We initiated this command, so use the caller-supplied reply.
		reply = cmd.Reply
		ctx = cmd.ctx
	} else {
		// This command originated elsewhere so we must create a new reply buffer.
		reply = args.CreateReply()
		// TODO(tschottdorf): consider the Trace situation here.
		ctx = r.context()
	}
	execDone := tracer.FromCtx(ctx).Epoch(fmt.Sprintf("applying %s", args.Method()))
	// applyRaftCommand will return "expected" errors, but may also indicate
	// replica corruption (as of now, signaled by a replicaCorruptionError).
	// We feed its return through maybeSetCorrupt to act when that happens.
	err := r.maybeSetCorrupt(
		r.applyRaftCommand(ctx, index, proto.RaftNodeID(raftCmd.OriginNodeID), args, reply),
	)
	execDone()
	if cmd != nil {
		cmd.done <- err
	} else if err != nil && log.V(1) {
		log.Errorc(r.context(), "error executing raft command %s: %s", args.Method(), err)
	}
	return err
}
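Here the trace is only implicitly available: if the command was proposed locally, cmd.ctx carries the original caller's trace, so the "applying <method>" epoch lands inside that request's trace; for commands that originated on another node, r.context() is used instead, and the TODO acknowledges that the tracing story for that case is unresolved.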
Example 9: AddCmd

// AddCmd adds a command for execution on this range. The command's
// affected keys are verified to be contained within the range and the
// range's leadership is confirmed. The command is then dispatched
// either along the read-only execution path or the read-write Raft
// command queue.
func (r *Replica) AddCmd(ctx context.Context, args proto.Request) (proto.Response, error) {
	// TODO(tschottdorf) Some (internal) requests go here directly, so they
	// won't be traced.
	trace := tracer.FromCtx(ctx)
	// Differentiate between admin, read-only and read-write.
	var reply proto.Response
	var err error
	if proto.IsAdmin(args) {
		defer trace.Epoch("admin path")()
		reply, err = r.addAdminCmd(ctx, args)
	} else if proto.IsReadOnly(args) {
		defer trace.Epoch("read-only path")()
		reply, err = r.addReadOnlyCmd(ctx, args)
	} else if proto.IsWrite(args) {
		defer trace.Epoch("read-write path")()
		reply, err = r.addWriteCmd(ctx, args, nil)
	} else {
		panic(fmt.Sprintf("don't know how to handle command %T", args))
	}
	return reply, err
}
Example 10: addAdminCmd

// addAdminCmd executes the command directly. There is no interaction
// with the command queue or the timestamp cache, as admin commands
// are not meant to consistently access or modify the underlying data.
// Admin commands must run on the leader replica.
func (r *Range) addAdminCmd(ctx context.Context, args proto.Request) (proto.Response, error) {
	header := args.Header()
	if err := r.checkCmdHeader(header); err != nil {
		return nil, err
	}
	// Admin commands always require the leader lease.
	if err := r.redirectOnOrAcquireLeaderLease(tracer.FromCtx(ctx), header.Timestamp); err != nil {
		return nil, err
	}
	switch tArgs := args.(type) {
	case *proto.AdminSplitRequest:
		resp, err := r.AdminSplit(tArgs)
		return &resp, err
	case *proto.AdminMergeRequest:
		resp, err := r.AdminMerge(tArgs)
		return &resp, err
	default:
		return nil, util.Error("unrecognized admin command")
	}
}
Example 11: addAdminCmd

// addAdminCmd executes the command directly. There is no interaction
// with the command queue or the timestamp cache, as admin commands
// are not meant to consistently access or modify the underlying data.
// Admin commands must run on the leader replica.
func (r *Range) addAdminCmd(ctx context.Context, args proto.Request, reply proto.Response) error {
	header := args.Header()
	if err := r.checkCmdHeader(header); err != nil {
		reply.Header().SetGoError(err)
		return err
	}
	// Admin commands always require the leader lease.
	if err := r.redirectOnOrAcquireLeaderLease(tracer.FromCtx(ctx), header.Timestamp); err != nil {
		reply.Header().SetGoError(err)
		return err
	}
	switch args.(type) {
	case *proto.AdminSplitRequest:
		r.AdminSplit(args.(*proto.AdminSplitRequest), reply.(*proto.AdminSplitResponse))
	case *proto.AdminMergeRequest:
		r.AdminMerge(args.(*proto.AdminMergeRequest), reply.(*proto.AdminMergeResponse))
	default:
		return util.Error("unrecognized admin command")
	}
	return reply.Header().GoError()
}
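Examples 10 and 11 are the same function at different points in the codebase's evolution: the older variant threads a caller-allocated reply and reports failures via SetGoError, while the newer one returns the response and error directly. Both obtain the leader lease up front by passing tracer.FromCtx(ctx) to redirectOnOrAcquireLeaderLease, since admin commands bypass the command queue and timestamp cache.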
Example 12: sendOne

// sendOne sends a single call via the wrapped sender. If the call is
// part of a transaction, the TxnCoordSender adds the transaction to a
// map of active transactions and begins heartbeating it. Every
// subsequent call for the same transaction updates the lastUpdate
// timestamp to prevent live transactions from being considered
// abandoned and garbage collected. Read/write mutating requests have
// their key or key range added to the transaction's interval tree of
// key ranges for eventual cleanup via resolved write intents.
//
// On success, and if the call is part of a transaction, the affected
// key range is recorded as live intents for eventual cleanup upon
// transaction commit. Upon successful txn commit, initiates cleanup
// of intents.
func (tc *TxnCoordSender) sendOne(ctx context.Context, call proto.Call) {
	var startNS int64
	header := call.Args.Header()
	trace := tracer.FromCtx(ctx)
	var id string // optional transaction ID
	if header.Txn != nil {
		// If this call is part of a transaction...
		id = string(header.Txn.ID)
		// Verify that if this Transaction is not read-only, we have it on
		// file. If not, refuse writes - the client must have issued a write on
		// another coordinator previously.
		if header.Txn.Writing && proto.IsTransactionWrite(call.Args) {
			tc.Lock()
			_, ok := tc.txns[id]
			tc.Unlock()
			if !ok {
				call.Reply.Header().SetGoError(util.Errorf(
					"transaction must not write on multiple coordinators"))
				return
			}
		}
		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if proto.IsReadOnly(call.Args) {
			header.Timestamp = header.Txn.OrigTimestamp
		} else {
			header.Timestamp = header.Txn.Timestamp
		}
		if args, ok := call.Args.(*proto.EndTransactionRequest); ok {
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
			// EndTransaction must have its key set to that of the txn.
			header.Key = header.Txn.Key
			if len(args.Intents) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				call.Reply.Header().SetGoError(util.Errorf(
					"client must not pass intents to EndTransaction"))
				return
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[id]
			if id != "" && metaOK {
				args.Intents = txnMeta.intents()
			}
			tc.Unlock()
			if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				call.Reply.Header().SetGoError(util.Errorf(
					"transaction is already committed or aborted"))
				return
			} else if len(args.Intents) == 0 {
				// If there aren't any intents, then there's factually no
				// transaction to end. Read-only txns have all of their state in
				// the client.
				call.Reply.Header().SetGoError(util.Errorf(
					"cannot commit a read-only transaction"))
				return
			}
		}
	}
	// Send the command through wrapped sender.
	tc.wrapped.Send(ctx, call)
	// For transactional calls, need to track & update the transaction.
	if header.Txn != nil {
		respHeader := call.Reply.Header()
		if respHeader.Txn == nil {
			// When empty, simply use the request's transaction.
			// This is expected: the Range doesn't bother copying unless the
			// object changes.
			respHeader.Txn = gogoproto.Clone(header.Txn).(*proto.Transaction)
		}
		tc.updateResponseTxn(header, respHeader)
//.........the rest of this example has been omitted.........
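In sendOne the trace obtained at the top is never used in the portion shown; it is retrieved before the transaction bookkeeping, presumably so that the omitted remainder of the function can record events and cleanup against the caller's trace. The visible part mainly illustrates where FromCtx sits in the coordinator's send path.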
Example 13: sendChunk

// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true when indicating that the caller should retry but needs to send
// EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
	isReverse := ba.IsReverse()
	trace := tracer.FromCtx(ctx)
	// The minimal key range encompassing all requests contained within.
	// Local addressing has already been resolved.
	// TODO(tschottdorf): consider rudimentary validation of the batch here
	// (for example, non-range requests with EndKey, or empty key ranges).
	rs := keys.Range(ba)
	var br *roachpb.BatchResponse
	// Send the request to one range per iteration.
	for {
		considerIntents := false
		var curReply *roachpb.BatchResponse
		var desc *roachpb.RangeDescriptor
		var needAnother bool
		var pErr *roachpb.Error
		for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
			// Get range descriptor (or, when spanning range, descriptors). Our
			// error handling below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			descDone := trace.Epoch("meta descriptor lookup")
			var evictDesc func()
			desc, needAnother, evictDesc, pErr = ds.getDescriptors(rs, considerIntents, isReverse)
			descDone()
			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if pErr != nil {
				if pErr.Retryable {
					if log.V(1) {
						log.Warning(pErr)
					}
					continue
				}
				break
			}
			if needAnother && br == nil {
				// TODO(tschottdorf): we should have a mechanism for discovering
				// range merges (descriptor staleness will mostly go unnoticed),
				// or we'll be turning single-range queries into multi-range
				// queries for no good reason.
				// If there's no transaction and op spans ranges, possibly
				// re-run as part of a transaction for consistency. The
				// case where we don't need to re-run is if the read
				// consistency is not required.
				if ba.Txn == nil && ba.IsPossibleTransaction() &&
					ba.ReadConsistency != roachpb.INCONSISTENT {
					return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}), false
				}
				// If the request is more than but ends with EndTransaction, we
				// want the caller to come again with the EndTransaction in an
				// extra call.
				if l := len(ba.Requests) - 1; l > 0 && ba.Requests[l].GetInner().Method() == roachpb.EndTransaction {
					return nil, roachpb.NewError(errors.New("cannot send 1PC txn to multiple ranges")), true /* shouldSplitET */
				}
			}
			// It's possible that the returned descriptor misses parts of the
			// keys it's supposed to scan after it's truncated to match the
			// descriptor. Example revscan [a,g), first desc lookup for "g"
			// returns descriptor [c,d) -> [d,g) is never scanned.
			// We evict and retry in such a case.
			if (isReverse && !desc.ContainsKeyRange(desc.StartKey, rs.EndKey)) || (!isReverse && !desc.ContainsKeyRange(rs.Key, desc.EndKey)) {
				evictDesc()
				continue
			}
			curReply, pErr = func() (*roachpb.BatchResponse, *roachpb.Error) {
				// Truncate the request to our current key range.
				intersected, iErr := rs.Intersect(desc)
				if iErr != nil {
					return nil, roachpb.NewError(iErr)
				}
				truncBA, numActive, trErr := truncate(ba, intersected)
				if numActive == 0 && trErr == nil {
					// This shouldn't happen in the wild, but some tests
					// exercise it.
					return nil, roachpb.NewErrorf("truncation resulted in empty batch on [%s,%s): %s",
						rs.Key, rs.EndKey, ba)
				}
				if trErr != nil {
					return nil, roachpb.NewError(trErr)
				}
				return ds.sendSingleRange(trace, truncBA, desc)
			}()
			// If sending succeeded, break this loop.
			if pErr == nil {
				break
			}
//.........the rest of this example has been omitted.........
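Unlike most of the other examples, sendChunk opens and closes the "meta descriptor lookup" epoch manually (descDone := trace.Epoch(...); ... descDone()) rather than with defer, because the lookup sits inside a retry loop and each attempt should be timed separately; the same trace handle is then passed down to sendSingleRange.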
Example 14: updateState

// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error) *roachpb.Error {
	trace := tracer.FromCtx(ctx)
	newTxn := &roachpb.Transaction{}
	newTxn.Update(ba.GetTxn())
	// TODO(tamird): remove this clone. It's currently needed to avoid race conditions.
	pErr = proto.Clone(pErr).(*roachpb.Error)
	err := pErr.GoError()
	// TODO(bdarnell): We're writing to errors here (and where using ErrorWithIndex);
	// since there's no concept of ownership copy-on-write is always preferable.
	switch t := err.(type) {
	case nil:
		newTxn.Update(br.Txn)
		// Move txn timestamp forward to response timestamp if applicable.
		// TODO(tschottdorf): see (*Replica).executeBatch and comments within.
		// Looks like this isn't necessary any more, nor did it prevent a bug
		// referenced in a TODO there.
		newTxn.Timestamp.Forward(br.Timestamp)
	case *roachpb.TransactionStatusError:
		// Likely already committed or more obscure errors such as epoch or
		// timestamp regressions; consider txn dead.
		defer tc.cleanupTxn(trace, t.Txn)
	case *roachpb.OpRequiresTxnError:
		panic("OpRequiresTxnError must not happen at this level")
	case *roachpb.ReadWithinUncertaintyIntervalError:
		// Mark the host as certain. See the protobuf comment for
		// Transaction.CertainNodes for details.
		if t.NodeID == 0 {
			panic("no replica set in header on uncertainty restart")
		}
		newTxn.Update(&t.Txn)
		newTxn.CertainNodes.Add(t.NodeID)
		// If the reader encountered a newer write within the uncertainty
		// interval, move the timestamp forward, just past that write or
		// up to MaxTimestamp, whichever comes first.
		candidateTS := newTxn.MaxTimestamp
		candidateTS.Backward(t.ExistingTimestamp.Add(0, 1))
		newTxn.Timestamp.Forward(candidateTS)
		newTxn.Restart(ba.GetUserPriority(), newTxn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case *roachpb.TransactionAbortedError:
		trace.SetError()
		newTxn.Update(&t.Txn)
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Priority = t.Txn.Priority
		t.Txn = *newTxn
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxn(trace, t.Txn)
	case *roachpb.TransactionPushError:
		newTxn.Update(t.Txn)
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
		newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
		t.Txn = newTxn
	case *roachpb.TransactionRetryError:
		newTxn.Update(&t.Txn)
		newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case roachpb.TransactionRestartError:
		// Assertion: The above cases should exhaust all ErrorDetails which
		// carry a Transaction.
		if pErr.Detail != nil {
			panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
		}
	default:
		trace.SetError()
	}
	return func() *roachpb.Error {
		if len(newTxn.ID) <= 0 {
			return pErr
		}
		id := string(newTxn.ID)
		tc.Lock()
		defer tc.Unlock()
		txnMeta := tc.txns[id]
		// For successful transactional requests, keep the written intents and
		// the updated transaction record to be sent along with the reply.
		// The transaction metadata is created with the first writing operation.
		// A tricky edge case is that of a transaction which "fails" on the
		// first writing request, but actually manages to write some intents
		// (for example, due to being multi-range). In this case, there will
		// be an error, but the transaction will be marked as Writing and the
		// coordinator must track the state, for the client's retry will be
		// performed with a Writing transaction which the coordinator rejects
		// unless it is tracking it (on top of it making sense to track it;
		// after all, it **has** laid down intents and only the coordinator
		// can augment a potential EndTransaction call).
		// consider re-using those.
		if intents := ba.GetIntents(); len(intents) > 0 && (err == nil || newTxn.Writing) {
			if txnMeta == nil {
				if !newTxn.Writing {
					panic("txn with intents marked as non-writing")
				}
//.........the rest of this example has been omitted.........
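updateState uses the trace in two ways: trace.SetError() flags the trace when the batch ends in an aborted transaction or an unrecognized error, and the trace handle is passed to tc.cleanupTxn in the deferred calls, presumably so that intent cleanup for dead transactions is recorded against the same request's trace.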
Example 15: addWriteCmd

// addWriteCmd first adds the keys affected by this command as pending writes
// to the command queue. Next, the timestamp cache is checked to determine if
// any newer accesses to this command's affected keys have been made. If so,
// the command's timestamp is moved forward. Finally, the command is submitted
// to Raft. Upon completion, the write is removed from the read queue and any
// error returned. If a WaitGroup is supplied, it is signaled when the command
// enters Raft or the function returns with a preprocessing error, whichever
// happens earlier.
func (r *Range) addWriteCmd(ctx context.Context, args proto.Request, wg *sync.WaitGroup) (proto.Response, error) {
	signal := func() {
		if wg != nil {
			wg.Done()
			wg = nil
		}
	}
	// This happens more eagerly below, but it's important to guarantee that
	// early returns do not skip this.
	defer signal()
	header := args.Header()
	if err := r.checkCmdHeader(args.Header()); err != nil {
		return nil, err
	}
	trace := tracer.FromCtx(ctx)
	// Add the write to the command queue to gate subsequent overlapping
	// Commands until this command completes. Note that this must be
	// done before getting the max timestamp for the key(s), as
	// timestamp cache is only updated after preceding commands have
	// been run to successful completion.
	qDone := trace.Epoch("command queue")
	cmdKey := r.beginCmd(header, false)
	qDone()
	// This replica must have leader lease to process a write.
	if err := r.redirectOnOrAcquireLeaderLease(trace, header.Timestamp); err != nil {
		r.endCmd(cmdKey, args, err, false /* !readOnly */)
		return nil, err
	}
	// Two important invariants of Cockroach: 1) encountering a more
	// recently written value means transaction restart. 2) values must
	// be written with a greater timestamp than the most recent read to
	// the same key. Check the timestamp cache for reads/writes which
	// are at least as recent as the timestamp of this write. For
	// writes, send WriteTooOldError; for reads, update the write's
	// timestamp. When the write returns, the updated timestamp will
	// inform the final commit timestamp.
	if usesTimestampCache(args) {
		r.Lock()
		rTS, wTS := r.tsCache.GetMax(header.Key, header.EndKey, header.Txn.GetID())
		r.Unlock()
		// Always push the timestamp forward if there's been a read which
		// occurred after our txn timestamp.
		if !rTS.Less(header.Timestamp) {
			header.Timestamp = rTS.Next()
		}
		// If there's a newer write timestamp...
		if !wTS.Less(header.Timestamp) {
			// If we're in a txn, we still go ahead and try the write since
			// we want to avoid restarting the transaction in the event that
			// there isn't an intent or the intent can be pushed by us.
			//
			// If we're not in a txn, it's trivial to just advance our timestamp.
			if header.Txn == nil {
				header.Timestamp = wTS.Next()
			}
		}
	}
	defer trace.Epoch("raft")()
	errChan, pendingCmd := r.proposeRaftCommand(ctx, args)
	signal()
	// First wait for raft to commit or abort the command.
	var err error
	var reply proto.Response
	if err = <-errChan; err == nil {
		// Next if the command was committed, wait for the range to apply it.
		respWithErr := <-pendingCmd.done
		reply, err = respWithErr.reply, respWithErr.err
	} else if err == multiraft.ErrGroupDeleted {
		// This error needs to be converted appropriately so that
		// clients will retry.
		err = proto.NewRangeNotFoundError(r.Desc().RaftID)
	}
	// As for reads, update timestamp cache with the timestamp
	// of this write on success. This ensures a strictly higher
	// timestamp for successive writes to the same key or key range.
	r.endCmd(cmdKey, args, err, false /* !readOnly */)
	return reply, err
}
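The write path is split into two traced phases: a "command queue" epoch covering only the wait to enter the command queue, and a deferred "raft" epoch spanning the rest of the function, i.e. proposal, commit, and application of the command. Between the two sits the timestamp-cache check that enforces the invariants spelled out in the comment: a newer read pushes this write's timestamp forward, while a newer write either advances a non-transactional write's timestamp or is left for intent handling when inside a transaction.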