This page collects typical usage examples of the Go function ToCtx from github.com/cockroachdb/cockroach/util/tracer, answering the usual questions: what exactly does ToCtx do, and how is it used in practice?
13 code examples of ToCtx are shown below, sorted by popularity by default.
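Before the examples, it helps to know the contract they all assume: tracer.ToCtx attaches a *tracer.Trace to a context.Context so that downstream code can recover it with tracer.FromCtx (Example 10 uses both ends of the pair). Below is a minimal, self-contained sketch of that pattern; it is not the cockroach implementation, only the common context-value idiom under illustrative names.

package main

import (
	"context"
	"fmt"
)

// Trace is an illustrative stand-in for *tracer.Trace.
type Trace struct{ name string }

func (t *Trace) Event(msg string) { fmt.Printf("[%s] %s\n", t.name, msg) }

// traceKey is the private context key under which a trace is stored.
type traceKey struct{}

// ToCtx returns a child context carrying trace. Passing nil effectively
// detaches any trace, which is what Example 10 intends before going async.
func ToCtx(ctx context.Context, trace *Trace) context.Context {
	return context.WithValue(ctx, traceKey{}, trace)
}

// FromCtx returns the trace stored in ctx, or nil if there is none.
func FromCtx(ctx context.Context) *Trace {
	if t, ok := ctx.Value(traceKey{}).(*Trace); ok {
		return t
	}
	return nil
}

func main() {
	ctx := ToCtx(context.Background(), &Trace{name: "demo"})
	if t := FromCtx(ctx); t != nil {
		t.Event("request started")
	}
}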
Example 1: Send
// Send implements the client.Sender interface. If the call is part
// of a transaction, the coordinator will initialize the transaction
// if it's not nil but has an empty ID.
func (tc *TxnCoordSender) Send(ctx context.Context, call proto.Call) {
	header := call.Args.Header()
	tc.maybeBeginTxn(header)
	header.CmdID = header.GetOrCreateCmdID(tc.clock.PhysicalNow())
	// This is the earliest point at which the request has a ClientCmdID and/or
	// TxnID (if applicable). Begin a Trace which follows this request.
	trace := tc.tracer.NewTrace(call.Args.Header())
	defer trace.Finalize()
	defer trace.Epoch(fmt.Sprintf("sending %s", call.Method()))()
	defer func() {
		if err := call.Reply.Header().GoError(); err != nil {
			trace.Event(fmt.Sprintf("reply error: %T", err))
		}
	}()
	ctx = tracer.ToCtx(ctx, trace)
	// Process batch specially; otherwise, send via wrapped sender.
	switch args := call.Args.(type) {
	case *proto.BatchRequest:
		trace.Event("batch processing")
		tc.sendBatch(ctx, args, call.Reply.(*proto.BatchResponse))
	default:
		// TODO(tschottdorf): should treat all calls as Batch. After all, that
		// will be almost all calls.
		tc.sendOne(ctx, call)
	}
}
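One subtlety in this example, repeated throughout the rest of the page, is the double call in defer trace.Epoch(...)(): Epoch evidently starts a timed section and returns a closure that ends it, so the Epoch call executes immediately while only the returned closure is deferred. A sketch of the idiom with a hypothetical timedSection helper:

package main

import (
	"fmt"
	"time"
)

// timedSection is a hypothetical helper mirroring the shape of trace.Epoch:
// it starts timing immediately and returns a closure that ends the section
// when called.
func timedSection(name string) func() {
	start := time.Now()
	fmt.Printf("begin %s\n", name)
	return func() {
		fmt.Printf("end %s after %s\n", name, time.Since(start))
	}
}

func doWork() {
	// The timedSection call runs now; only the returned closure is
	// deferred - the same mechanics as `defer trace.Epoch("node")()`.
	defer timedSection("work")()
	time.Sleep(10 * time.Millisecond)
}

func main() { doWork() }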
Example 2: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI proto.Message) (proto.Message, error) {
	ba := argsI.(*roachpb.BatchRequest)
	var br *roachpb.BatchResponse
	f := func() {
		// TODO(tschottdorf) get a hold of the client's ID, add it to the
		// context before dispatching, and create an ID for tracing the request.
		trace := n.ctx.Tracer.NewTrace(tracer.Node, ba)
		defer trace.Finalize()
		defer trace.Epoch("node")()
		ctx := tracer.ToCtx((*Node)(n).context(), trace)
		tStart := time.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(ctx, *ba)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			trace.Event(fmt.Sprintf("error: %T", pErr.GoError()))
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.feed.CallComplete(*ba, time.Now().Sub(tStart), pErr)
		br.Error = pErr
	}
	if !n.stopper.RunTask(f) {
		return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
	}
	return br, nil
}
Example 3: executeCmd
// executeCmd creates a proto.Call struct and sends it via our local sender.
func (n *nodeServer) executeCmd(args proto.Request, reply proto.Response) error {
	// TODO(tschottdorf) get a hold of the client's ID, add it to the
	// context before dispatching, and create an ID for tracing the request.
	header := args.Header()
	header.CmdID = header.GetOrCreateCmdID(n.ctx.Clock.PhysicalNow())
	trace := n.ctx.Tracer.NewTrace(header)
	defer trace.Finalize()
	defer trace.Epoch("node")()
	ctx := tracer.ToCtx((*Node)(n).context(), trace)
	n.lSender.Send(ctx, proto.Call{Args: args, Reply: reply})
	n.feed.CallComplete(args, reply)
	if err := reply.Header().GoError(); err != nil {
		trace.Event(fmt.Sprintf("error: %T", err))
	}
	return nil
}
Example 4: heartbeatLoop
// heartbeatLoop periodically sends a HeartbeatTxn RPC to an extant
// transaction, stopping in the event the transaction is aborted or
// committed after attempting to resolve the intents. When the
// heartbeat stops, the transaction is unregistered from the
// coordinator.
func (tc *TxnCoordSender) heartbeatLoop(id string) {
	var tickChan <-chan time.Time
	{
		ticker := time.NewTicker(tc.heartbeatInterval)
		tickChan = ticker.C
		defer ticker.Stop()
	}
	defer func() {
		tc.Lock()
		tc.unregisterTxnLocked(id)
		tc.Unlock()
	}()
	var closer <-chan struct{}
	var trace *tracer.Trace
	{
		tc.Lock()
		txnMeta := tc.txns[id] // do not leak to outer scope
		closer = txnMeta.txnEnd
		trace = tc.tracer.NewTrace(tracer.Coord, &txnMeta.txn)
		defer trace.Finalize()
		tc.Unlock()
	}
	if closer == nil {
		// Avoid race in which a Txn is cleaned up before the heartbeat
		// goroutine gets a chance to start.
		return
	}
	ctx := tracer.ToCtx(context.Background(), trace)
	// Loop with ticker for periodic heartbeats.
	for {
		select {
		case <-tickChan:
			if !tc.heartbeat(id, trace, ctx) {
				return
			}
		case <-closer:
			// Transaction finished normally.
			return
		case <-tc.stopper.ShouldDrain():
			return
		}
	}
}
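Note that the heartbeat loop builds its context from context.Background() rather than from any request context: the goroutine outlives individual requests, so it must not inherit their cancellation, and it multiplexes a ticker, a completion channel, and the stopper's drain signal. A stripped-down sketch of that select shape, with illustrative names:

package main

import (
	"fmt"
	"time"
)

// heartbeatLoop mirrors the select shape of Example 4: periodic work on a
// ticker, a closer channel signalling normal completion, and a stop channel
// standing in for the stopper's drain signal. All names are illustrative.
func heartbeatLoop(interval time.Duration, closer, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("heartbeat sent")
		case <-closer:
			// Transaction finished normally.
			return
		case <-stop:
			// Server is draining.
			return
		}
	}
}

func main() {
	closer := make(chan struct{})
	stop := make(chan struct{})
	go func() {
		time.Sleep(120 * time.Millisecond)
		close(closer) // simulate the transaction ending
	}()
	heartbeatLoop(50*time.Millisecond, closer, stop)
}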
Example 5: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI proto.Message) (proto.Message, error) {
	ba := argsI.(*roachpb.BatchRequest)
	// TODO(tschottdorf) get a hold of the client's ID, add it to the
	// context before dispatching, and create an ID for tracing the request.
	trace := n.ctx.Tracer.NewTrace(tracer.Node, ba)
	defer trace.Finalize()
	defer trace.Epoch("node")()
	ctx := tracer.ToCtx((*Node)(n).context(), trace)
	br, pErr := n.stores.Send(ctx, *ba)
	if pErr != nil {
		br = &roachpb.BatchResponse{}
		trace.Event(fmt.Sprintf("error: %T", pErr.GoError()))
	}
	if br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
	}
	n.feed.CallComplete(*ba, pErr)
	br.Error = pErr
	return br, nil
}
Example 6: Send
// Send implements the client.Sender interface. If the call is part
// of a transaction, the coordinator will initialize the transaction
// if it's not nil but has an empty ID.
func (tc *TxnCoordSender) Send(ctx context.Context, call proto.Call) {
	header := call.Args.Header()
	tc.maybeBeginTxn(header)
	header.CmdID = header.GetOrCreateCmdID(tc.clock.PhysicalNow())
	// This is the earliest point at which the request has a ClientCmdID and/or
	// TxnID (if applicable). Begin a Trace which follows this request.
	trace := tc.tracer.NewTrace(call.Args.Header())
	defer trace.Finalize()
	defer trace.Epoch(fmt.Sprintf("sending %s", call.Method()))()
	defer func() {
		if err := call.Reply.Header().GoError(); err != nil {
			trace.Event(fmt.Sprintf("reply error: %T", err))
		}
	}()
	ctx = tracer.ToCtx(ctx, trace)
	// Process batch specially; otherwise, send via wrapped sender.
	switch args := call.Args.(type) {
	case *proto.InternalBatchRequest:
		trace.Event("batch processing")
		tc.sendBatch(ctx, args, call.Reply.(*proto.InternalBatchResponse))
	case *proto.BatchRequest:
		// Convert the batch request to internal-batch request.
		internalArgs := &proto.InternalBatchRequest{RequestHeader: args.RequestHeader}
		internalReply := &proto.InternalBatchResponse{}
		for i := range args.Requests {
			internalArgs.Add(args.Requests[i].GetValue().(proto.Request))
		}
		tc.sendBatch(ctx, internalArgs, internalReply)
		reply := call.Reply.(*proto.BatchResponse)
		reply.ResponseHeader = internalReply.ResponseHeader
		// Convert from internal-batch response to batch response.
		for i := range internalReply.Responses {
			reply.Add(internalReply.Responses[i].GetValue().(proto.Response))
		}
	default:
		tc.sendOne(ctx, call)
	}
}
Example 7: executeCmd
// executeCmd creates a proto.Call struct and sends it via our local sender.
func (n *Node) executeCmd(argsI gogoproto.Message) (gogoproto.Message, error) {
	args := argsI.(proto.Request)
	// TODO(tschottdorf) get a hold of the client's ID, add it to the
	// context before dispatching, and create an ID for tracing the request.
	header := args.Header()
	header.CmdID = header.GetOrCreateCmdID(n.ctx.Clock.PhysicalNow())
	trace := n.ctx.Tracer.NewTrace(header)
	defer trace.Finalize()
	defer trace.Epoch("node")()
	ctx := tracer.ToCtx((*Node)(n).context(), trace)
	ba, unwrap := client.MaybeWrap(args)
	br, pErr := n.lSender.Send(ctx, *ba)
	if pErr != nil {
		br = &proto.BatchResponse{}
		trace.Event(fmt.Sprintf("error: %T", pErr.GoError()))
	}
	if br.Error != nil {
		panic(proto.ErrorUnexpectedlySet(n.lSender, br))
	}
	br.Error = pErr
	n.feed.CallComplete(ba, br)
	return unwrap(br), nil
}
Example 8: Send
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if err := tc.maybeBeginTxn(&ba); err != nil {
		return nil, roachpb.NewError(err)
	}
	ba.CmdID = ba.GetOrCreateCmdID(tc.clock.PhysicalNow())
	var startNS int64
	// This is the earliest point at which the request has a ClientCmdID and/or
	// TxnID (if applicable). Begin a Trace which follows this request.
	trace := tc.tracer.NewTrace(tracer.Coord, &ba)
	defer trace.Finalize()
	defer trace.Epoch("sending batch")()
	ctx = tracer.ToCtx(ctx, trace)
	var id string // optional transaction ID
	if ba.Txn != nil {
		// If this request is part of a transaction...
		id = string(ba.Txn.ID)
		// Verify that if this Transaction is not read-only, we have it on
		// file. If not, refuse writes - the client must have issued a write on
		// another coordinator previously.
		if ba.Txn.Writing && ba.IsTransactionWrite() {
			tc.Lock()
			_, ok := tc.txns[id]
			tc.Unlock()
			if !ok {
				return nil, roachpb.NewError(util.Errorf("transaction must not write on multiple coordinators"))
			}
		}
		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if ba.IsReadOnly() {
			ba.Timestamp = ba.Txn.OrigTimestamp
		} else {
			ba.Timestamp = ba.Txn.Timestamp
		}
		if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := rArgs.(*roachpb.EndTransactionRequest)
			if len(et.Key) != 0 {
				return nil, roachpb.NewError(util.Errorf("EndTransaction must not have a Key set"))
			}
			et.Key = ba.Txn.Key
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
			if len(et.Intents) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, roachpb.NewError(util.Errorf("client must not pass intents to EndTransaction"))
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[id]
			if id != "" && metaOK {
				et.Intents = txnMeta.intents()
			}
			tc.Unlock()
			if intents := ba.GetIntents(); len(intents) > 0 {
				// Writes in Batch, so EndTransaction is fine. Should add
				// outstanding intents to EndTransaction, though.
				// TODO(tschottdorf): possible issues when the batch fails,
				// but the intents have been added anyways.
				// TODO(tschottdorf): some of these intents may be covered
				// by others, for example {[a,b), a}. This can lead to
				// some extra requests when those are non-local to the txn
				// record. But it doesn't seem worth optimizing now.
				et.Intents = append(et.Intents, intents...)
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, roachpb.NewError(util.Errorf("transaction is already committed or aborted"))
			}
			if len(et.Intents) == 0 {
				// If there aren't any intents, then there's factually no
				// transaction to end. Read-only txns have all of their state in
				// the client.
				return nil, roachpb.NewError(util.Errorf("cannot commit a read-only transaction"))
			}
			if log.V(1) {
				for _, intent := range et.Intents {
					trace.Event(fmt.Sprintf("intent: [%s,%s)", intent.Key, intent.EndKey))
				}
// ... (the remainder of this example is omitted) ...
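The doc comment above describes the bookkeeping that makes this work: each mutating request registers its key or key range with the coordinator, and the accumulated ranges are attached as intents to the outgoing EndTransaction for the receiving replica to resolve. A simplified sketch of that flow (a plain slice standing in for the interval tree, hypothetical names throughout):

package main

import "fmt"

// span is a simplified key range; cockroach tracks these in an interval
// tree per transaction. All names here are hypothetical.
type span struct {
	key, endKey string
}

// coordinator stands in for the TxnCoordSender's per-transaction metadata.
type coordinator struct {
	intents []span
}

// trackWrite records the key range touched by a mutating request.
func (c *coordinator) trackWrite(key, endKey string) {
	c.intents = append(c.intents, span{key: key, endKey: endKey})
}

// endTxn mimics tagging the outgoing EndTransaction with the accumulated
// intents. As in Example 8, a transaction with no intents is refused,
// since a read-only txn keeps all of its state in the client.
func (c *coordinator) endTxn() ([]span, error) {
	if len(c.intents) == 0 {
		return nil, fmt.Errorf("cannot commit a read-only transaction")
	}
	return c.intents, nil
}

func main() {
	c := &coordinator{}
	c.trackWrite("a", "")  // single-key write
	c.trackWrite("c", "f") // ranged write
	intents, err := c.endTxn()
	fmt.Println(intents, err)
}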
Example 9: heartbeat
// heartbeat periodically sends a HeartbeatTxn RPC to an extant
// transaction, stopping in the event the transaction is aborted or
// committed after attempting to resolve the intents. When the
// heartbeat stops, the transaction is unregistered from the
// coordinator.
func (tc *TxnCoordSender) heartbeat(id string) {
	var tickChan <-chan time.Time
	{
		ticker := time.NewTicker(tc.heartbeatInterval)
		tickChan = ticker.C
		defer ticker.Stop()
	}
	defer tc.unregisterTxn(id)
	var closer <-chan struct{}
	var trace *tracer.Trace
	{
		tc.Lock()
		txnMeta := tc.txns[id] // do not leak to outer scope
		closer = txnMeta.txnEnd
		trace = tc.tracer.NewTrace(&txnMeta.txn)
		tc.Unlock()
	}
	if closer == nil {
		// Avoid race in which a Txn is cleaned up before the heartbeat
		// goroutine gets a chance to start.
		return
	}
	ctx := tracer.ToCtx(context.Background(), trace)
	defer trace.Finalize()
	// Loop with ticker for periodic heartbeats.
	for {
		select {
		case <-tickChan:
			tc.Lock()
			proceed := true
			txnMeta := tc.txns[id]
			// Before we send a heartbeat, determine whether this transaction
			// should be considered abandoned. If so, exit heartbeat.
			if txnMeta.hasClientAbandonedCoord(tc.clock.PhysicalNow()) {
				// TODO(tschottdorf): should we be more proactive here?
				// The client might be continuing the transaction
				// through another coordinator, but in the most likely
				// case it's just gone and the open transaction record
				// could block concurrent operations.
				if log.V(1) {
					log.Infof("transaction %s abandoned; stopping heartbeat",
						txnMeta.txn)
				}
				proceed = false
			}
			// txnMeta.txn is possibly replaced concurrently,
			// so grab a copy before unlocking.
			txn := txnMeta.txn
			tc.Unlock()
			if !proceed {
				return
			}
			request := &proto.HeartbeatTxnRequest{
				RequestHeader: proto.RequestHeader{
					Key:  txn.Key,
					User: security.RootUser,
					Txn:  &txn,
				},
			}
			request.Header().Timestamp = tc.clock.Now()
			reply := &proto.HeartbeatTxnResponse{}
			call := proto.Call{
				Args:  request,
				Reply: reply,
			}
			epochEnds := trace.Epoch("heartbeat")
			tc.wrapped.Send(ctx, call)
			epochEnds()
			// If the transaction is not in pending state, then we can stop
			// the heartbeat. It's either aborted or committed, and we resolve
			// write intents accordingly.
			if reply.GoError() != nil {
				log.Warningf("heartbeat to %s failed: %s", txn, reply.GoError())
			} else if reply.Txn != nil && reply.Txn.Status != proto.PENDING {
				// Signal cleanup. Doesn't do much but stop this goroutine, but
				// let's be future-proof.
				tc.cleanupTxn(trace, *reply.Txn)
				return
			}
		case <-closer:
			// Transaction finished normally.
			return
		}
	}
}
Example 10: resolveIntents
// resolveIntents resolves the given intents. For those which are local to the
// range, we submit directly to the range-local Raft instance; the call returns
// as soon as all resolve commands have been **proposed** (not executed). This
// ensures that if a waiting client retries immediately after conflict
// resolution, it will not hit the same intents again. All non-local intents
// are resolved asynchronously in a batch.
// TODO(tschottdorf): once Txn records have a list of possibly open intents,
// resolveIntents should send an RPC to update the transaction(s) as well (for
// those intents with non-pending Txns).
func (r *Replica) resolveIntents(ctx context.Context, intents []proto.Intent) {
	trace := tracer.FromCtx(ctx)
	// Detach the trace from ctx; we're doing async stuff below and those
	// need new traces. (Without the assignment, the call is a no-op.)
	ctx = tracer.ToCtx(ctx, nil)
	trace.Event("resolving intents [async]")
	var wg sync.WaitGroup
	bArgs := &proto.BatchRequest{}
	bArgs.User = security.RootUser
	for i := range intents {
		intent := intents[i] // avoids a race in `i, intent := range ...`
		var resolveArgs proto.Request
		var local bool // whether this intent lives on this Range
		{
			header := proto.RequestHeader{
				// Use the pushee's timestamp, which might be lower than the
				// pusher's request timestamp. No need to push the intent higher
				// than the pushee's txn!
				Timestamp: intent.Txn.Timestamp,
				Key:       intent.Key,
				EndKey:    intent.EndKey,
				User:      security.RootUser,
				Txn:       &intent.Txn,
			}
			if len(intent.EndKey) == 0 {
				resolveArgs = &proto.ResolveIntentRequest{RequestHeader: header}
				local = r.ContainsKey(intent.Key)
			} else {
				resolveArgs = &proto.ResolveIntentRangeRequest{RequestHeader: header}
				local = r.ContainsKeyRange(intent.Key, intent.EndKey)
			}
		}
		// If the intent isn't (completely) local, we'll need to send an external request.
		// We'll batch them all up and send at the end.
		if !local {
			bArgs.Add(resolveArgs)
			continue
		}
		// If it is local, it goes directly into Raft.
		// TODO(tschottdorf): this may be premature optimization. Consider just
		// treating everything as an external request. This means having to
		// wait for complete execution of the command (whereas now we just wait
		// for proposition) and some more overhead sending things around.
		wg.Add(1)
		action := func() {
			// Trace this under the ID of the intent owner.
			ctx := tracer.ToCtx(ctx, r.rm.Tracer().NewTrace(resolveArgs.Header().Txn))
			if _, err := r.addWriteCmd(ctx, resolveArgs, &wg); err != nil && log.V(1) {
				log.Warningc(ctx, "resolve for key %s failed: %s", intent.Key, err)
			}
		}
		if !r.rm.Stopper().RunAsyncTask(action) {
			// Still run the task. Our caller already has a task and going async
			// here again is merely for performance, but some intents need to
			// be resolved because they might block other tasks. See #1684.
			// Note that handleSkippedIntents has a TODO in case #1684 comes
			// back.
			action()
		}
	}
	// Resolve all of the intents which aren't local to the Range. This is a
	// no-op if all are local.
	b := &client.Batch{}
	b.InternalAddCall(proto.Call{Args: bArgs, Reply: &proto.BatchResponse{}})
	action := func() {
		// TODO(tschottdorf): no tracing here yet. Probably useful at some point,
		// but needs a) the corresponding interface and b) facilities for tracing
		// multiple tracees at the same time (batch full of possibly individual
		// txns).
		if err := r.rm.DB().Run(b); err != nil {
			if log.V(1) {
				log.Infoc(ctx, "%s", err)
			}
		}
	}
	if !r.rm.Stopper().RunAsyncTask(action) {
		// As with local intents, try async to not keep the caller waiting, but
		// when draining just go ahead and do it synchronously. See #1684.
		action()
	}
	// Wait until all the local `ResolveIntent`s have been submitted to raft.
	// No-op if all were external.
	wg.Wait()
}
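This example also shows a recurring pattern around stop.Stopper: try to run work asynchronously, and if the stopper declines (for example while draining), fall back to running it synchronously so that blocking intents still get resolved (see the #1684 references in the comments). A hedged sketch of that control flow with a simplified stand-in Stopper:

package main

import (
	"fmt"
	"sync"
)

// Stopper is a simplified stand-in for cockroach's stop.Stopper.
type Stopper struct {
	mu       sync.Mutex
	draining bool
	wg       sync.WaitGroup
}

// RunAsyncTask runs f in a goroutine and returns true, unless the stopper
// is draining, in which case it declines and returns false.
func (s *Stopper) RunAsyncTask(f func()) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.draining {
		return false
	}
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		f()
	}()
	return true
}

func main() {
	s := &Stopper{draining: true} // pretend we're shutting down
	action := func() { fmt.Println("resolving intent") }
	// The work must not be dropped: if the stopper declines the async
	// task, run the action synchronously, as resolveIntents does.
	if !s.RunAsyncTask(action) {
		action()
	}
	s.wg.Wait()
}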
Example 11: heartbeat
// heartbeat periodically sends an InternalHeartbeatTxn RPC to an
// extant transaction, stopping in the event the transaction is
// aborted or committed or if the TxnCoordSender is closed.
func (tc *TxnCoordSender) heartbeat(id string) {
	tc.stopper.RunWorker(func() {
		ticker := time.NewTicker(tc.heartbeatInterval)
		defer ticker.Stop()
		tc.Lock()
		var closer chan struct{}
		if txnMeta, ok := tc.txns[id]; ok {
			closer = txnMeta.txnEnd
		}
		tc.Unlock()
		if closer == nil {
			return
		}
		// Loop with ticker for periodic heartbeats.
		for {
			select {
			case <-ticker.C:
				tc.Lock()
				var txn proto.Transaction
				_, proceed := tc.txns[id]
				if proceed {
					txnMeta := tc.txns[id] // assign only here for local scope
					// Before we send a heartbeat, determine whether this transaction
					// should be considered abandoned. If so, exit heartbeat.
					if txnMeta.hasClientAbandonedCoord(tc.clock.PhysicalNow()) {
						tc.unregisterTxnLocked(txnMeta)
						// TODO(tschottdorf): should we be more proactive here?
						// The client might be continuing the transaction
						// through another coordinator, but in the most likely
						// case it's just gone and the open transaction record
						// could block concurrent operations.
						if log.V(1) {
							log.Infof("transaction %s abandoned; stopping heartbeat",
								txnMeta.txn)
						}
						proceed = false
					}
					// txnMeta.txn is possibly replaced concurrently,
					// so grab a copy.
					txn = txnMeta.txn
				}
				tc.Unlock()
				if !proceed {
					return
				}
				request := &proto.InternalHeartbeatTxnRequest{
					RequestHeader: proto.RequestHeader{
						Key:  txn.Key,
						User: security.RootUser,
						Txn:  &txn,
					},
				}
				request.Header().Timestamp = tc.clock.Now()
				reply := &proto.InternalHeartbeatTxnResponse{}
				call := proto.Call{
					Args:  request,
					Reply: reply,
				}
				tc.stopper.RunTask(func() {
					// Each heartbeat gets its own Trace.
					trace := tc.tracer.NewTrace(&txn)
					ctx := tracer.ToCtx(context.Background(), trace)
					epochEnds := trace.Epoch("heartbeat")
					tc.wrapped.Send(ctx, call)
					epochEnds()
					// If the transaction is not in pending state, then we can stop
					// the heartbeat. It's either aborted or committed, and we resolve
					// write intents accordingly.
					if reply.GoError() != nil {
						log.Warningf("heartbeat to %s failed: %s", txn, reply.GoError())
					} else if reply.Txn != nil && reply.Txn.Status != proto.PENDING {
						tc.cleanupTxn(trace, *reply.Txn, nil)
						proceed = false
					}
					trace.Finalize()
				})
				if !proceed {
					return
				}
			case <-closer:
				// Transaction finished.
				return
			case <-tc.stopper.ShouldStop():
				// System shutdown.
				return
			}
		}
	})
}
Example 12: close
// close sends resolve intent commands for all key ranges this
// transaction has covered, clears the keys cache and closes the
// metadata heartbeat. Any keys listed in the resolved slice have
// already been resolved and do not receive resolve intent commands.
func (tm *txnMetadata) close(trace *tracer.Trace, txn *proto.Transaction, resolved []proto.Key, sender client.Sender, stopper *stop.Stopper) {
	close(tm.txnEnd) // stop heartbeat
	trace.Event("coordinator stops")
	if tm.keys.Len() > 0 {
		if log.V(2) {
			log.Infof("cleaning up %d intent(s) for transaction %s", tm.keys.Len(), txn)
		}
	}
	// TODO(tschottdorf): Should create a Batch here.
	for _, o := range tm.keys.GetOverlaps(proto.KeyMin, proto.KeyMax) {
		// If the op was range based, end key != start key: resolve a range.
		var call proto.Call
		key := o.Key.Start().(proto.Key)
		endKey := o.Key.End().(proto.Key)
		if !key.Next().Equal(endKey) {
			call.Args = &proto.InternalResolveIntentRangeRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					EndKey:    endKey,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentRangeResponse{}
		} else {
			// Check if the key has already been resolved; skip if yes.
			found := false
			for _, k := range resolved {
				if key.Equal(k) {
					found = true
				}
			}
			if found {
				continue
			}
			call.Args = &proto.InternalResolveIntentRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentResponse{}
		}
		// We don't care about the reply channel; these are best
		// effort. We simply fire and forget, each in its own goroutine.
		ctx := tracer.ToCtx(context.Background(), trace.Fork())
		stopper.RunAsyncTask(func() {
			if log.V(2) {
				log.Infof("cleaning up intent %q for txn %s", call.Args.Header().Key, txn)
			}
			sender.Send(ctx, call)
			if call.Reply.Header().Error != nil {
				log.Warningf("failed to cleanup %q intent: %s", call.Args.Header().Key, call.Reply.Header().GoError())
			}
		})
	}
	tm.keys.Clear()
}
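Here ToCtx is paired with trace.Fork(): each fire-and-forget goroutine gets its own forked trace inside a fresh context.Background(), so its lifetime is decoupled from the caller while its events still attribute to a child of the parent trace. Example 13 below uses the same pairing. A sketch with a hypothetical Fork implementation:

package main

import (
	"context"
	"fmt"
	"sync"
)

// Trace and its Fork method are illustrative stand-ins for *tracer.Trace;
// the real implementation differs.
type Trace struct {
	id   string
	mu   sync.Mutex
	kids int
}

// Fork returns an independent child trace that still attributes to its
// parent, so concurrent goroutines don't interleave events in one trace.
func (t *Trace) Fork() *Trace {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.kids++
	return &Trace{id: fmt.Sprintf("%s/%d", t.id, t.kids)}
}

type traceKey struct{}

func ToCtx(ctx context.Context, t *Trace) context.Context {
	return context.WithValue(ctx, traceKey{}, t)
}

func main() {
	parent := &Trace{id: "txn-cleanup"}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		// A fresh Background context decouples the goroutine's lifetime
		// from the caller; the forked trace keeps events attributable.
		ctx := ToCtx(context.Background(), parent.Fork())
		wg.Add(1)
		go func(ctx context.Context) {
			defer wg.Done()
			t := ctx.Value(traceKey{}).(*Trace)
			fmt.Printf("resolving intent under trace %s\n", t.id)
		}(ctx)
	}
	wg.Wait()
}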
Example 13: resolve
// resolve sends resolve intent commands for all key ranges this transaction
// has covered. Any keys listed in the resolved slice have already been
// resolved and are skipped.
func (tm *txnMetadata) resolve(trace *tracer.Trace, resolved []proto.Key, sender client.Sender) {
	txn := &tm.txn
	if tm.keys.Len() > 0 {
		if log.V(2) {
			log.Infof("cleaning up %d intent(s) for transaction %s", tm.keys.Len(), txn)
		}
	}
	// TODO(tschottdorf): Should create a Batch here. However, we're resolving
	// intents and if those are on meta records, there may be a certain order
	// in which they need to be resolved so that they can get routed to the
	// correct range. Since a batch runs its commands one by one and we don't
	// know the correct order, we prefer to fire them off in parallel.
	var wg sync.WaitGroup
	for _, o := range tm.keys.GetOverlaps(proto.KeyMin, proto.KeyMax) {
		// If the op was range based, end key != start key: resolve a range.
		var call proto.Call
		key := o.Key.Start().(proto.Key)
		endKey := o.Key.End().(proto.Key)
		if !key.Next().Equal(endKey) {
			call.Args = &proto.InternalResolveIntentRangeRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					EndKey:    endKey,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentRangeResponse{}
		} else {
			// Check if the key has already been resolved; skip if yes.
			found := false
			for _, k := range resolved {
				if key.Equal(k) {
					if log.V(2) {
						log.Warningf("skipping previously resolved intent at %q", k)
					}
					found = true
				}
			}
			if found {
				continue
			}
			call.Args = &proto.InternalResolveIntentRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentResponse{}
		}
		ctx := tracer.ToCtx(context.Background(), trace.Fork())
		if log.V(2) {
			log.Infof("cleaning up intent %q for txn %s", call.Args.Header().Key, txn)
		}
		// Each operation gets its own goroutine. We only want to return to
		// the caller after the operations have finished.
		wg.Add(1)
		go func() {
			sender.Send(ctx, call)
			wg.Done()
			if call.Reply.Header().Error != nil {
				log.Warningf("failed to cleanup %q intent: %s", call.Args.Header().Key, call.Reply.Header().GoError())
			}
		}()
	}
	defer trace.Epoch("waiting for intent resolution")()
	wg.Wait()
	tm.keys.Clear()
}