This article collects typical usage examples of the Golang method github.com/cockroachdb/cockroach/pkg/roachpb.BatchRequest.TraceContext. If you have been wondering what BatchRequest.TraceContext does or how to use it, the curated code example below may help. You can also read further about the enclosing type, github.com/cockroachdb/cockroach/pkg/roachpb.BatchRequest.
One code example of the BatchRequest.TraceContext method is shown below. Examples are sorted by popularity by default; upvoting the ones you find useful helps the system recommend better Golang examples.
Example 1: Send
//......... (some of the surrounding code is omitted here) .........
			var distinct bool
			// The request might already be used by an outgoing goroutine, so
			// we can't safely mutate anything in-place (as MergeSpans does).
			et.IntentSpans = append([]roachpb.Span(nil), et.IntentSpans...)
			et.IntentSpans, distinct = roachpb.MergeSpans(et.IntentSpans)
			ba.Header.DistinctSpans = distinct && distinctSpans
			if len(et.IntentSpans) == 0 {
				// If there aren't any intents, then there's factually no
				// transaction to end. Read-only txns have all of their state
				// in the client.
				return roachpb.NewErrorf("cannot commit a read-only transaction")
			}
			if txnMeta != nil {
				txnMeta.keys = et.IntentSpans
			}
			return nil
		}(); pErr != nil {
			return nil, pErr
		}

		if hasET && log.V(1) {
			for _, intent := range et.IntentSpans {
				log.Eventf(ctx, "intent: [%s,%s)", intent.Key, intent.EndKey)
			}
		}
	}

	// Embed the trace metadata into the header for use by RPC recipients. We need
	// to do this after the maybeBeginTxn call above.
	// TODO(tschottdorf): To get rid of the spurious alloc below we need to
	// implement the carrier interface on ba.Header or make Span non-nullable,
	// both of which force all of ba on the Heap. It's already there, so may
	// not be a big deal, but ba should live on the stack. Also not easy to use
	// a buffer pool here since anything that goes into the RPC layer could be
	// used by goroutines we didn't wait for.
	if ba.TraceContext == nil {
		ba.TraceContext = &tracing.SpanContextCarrier{}
	} else {
		// We didn't make this object but are about to mutate it, so we
		// have to take a copy - the original might already have been
		// passed to the RPC layer.
		ba.TraceContext = protoutil.Clone(ba.TraceContext).(*tracing.SpanContextCarrier)
	}
	if err := tracer.Inject(sp.Context(), basictracer.Delegator, ba.TraceContext); err != nil {
		return nil, roachpb.NewError(err)
	}

	// Send the command through wrapped sender, taking appropriate measures
	// on error.
	var br *roachpb.BatchResponse
	{
		var pErr *roachpb.Error
		br, pErr = tc.wrapped.Send(ctx, ba)

		if _, ok := pErr.GetDetail().(*roachpb.OpRequiresTxnError); ok {
			// TODO(tschottdorf): needs to keep the trace.
			br, pErr = tc.resendWithTxn(ba)
		}

		if pErr = tc.updateState(ctx, startNS, ba, br, pErr); pErr != nil {
			log.Eventf(ctx, "error: %s", pErr)
			return nil, pErr
		}
	}

	if br.Txn == nil {
		return br, nil
	}

	if _, ok := ba.GetArg(roachpb.EndTransaction); !ok {
		return br, nil
	}

	// If the --linearizable flag is set, we want to make sure that
	// all the clocks in the system are past the commit timestamp
	// of the transaction. This is guaranteed if either
	// - the commit timestamp is MaxOffset behind startNS
	// - MaxOffset ns were spent in this function
	// when returning to the client. Below we choose the option
	// that involves less waiting, which is likely the first one
	// unless a transaction commits with an odd timestamp.
	if tsNS := br.Txn.Timestamp.WallTime; startNS > tsNS {
		startNS = tsNS
	}
	sleepNS := tc.clock.MaxOffset() -
		time.Duration(tc.clock.PhysicalNow()-startNS)
	if tc.linearizable && sleepNS > 0 {
		defer func() {
			if log.V(1) {
				log.Infof(ctx, "%v: waiting %s on EndTransaction for linearizability", br.Txn.Short(), util.TruncateDuration(sleepNS, time.Millisecond))
			}
			time.Sleep(sleepNS)
		}()
	}
	if br.Txn.Status != roachpb.PENDING {
		tc.Lock()
		tc.cleanupTxnLocked(ctx, *br.Txn)
		tc.Unlock()
	}
	return br, nil
}
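
For context, the step relevant to this page's topic is the tracer.Inject call: the current span's context is serialized into ba.TraceContext so that RPC recipients can reconstruct the trace on their side. Below is a minimal, self-contained sketch of that inject/extract round trip, using plain opentracing-go with its standard TextMap carrier in place of CockroachDB's protobuf-backed tracing.SpanContextCarrier and the basictracer.Delegator format; the operation names and the in-memory recorder are illustrative assumptions, not taken from the example above.

package main

import (
	"fmt"
	"log"

	basictracer "github.com/opentracing/basictracer-go"
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func main() {
	// Illustrative tracer; the Send method above receives its tracer from elsewhere.
	tracer := basictracer.New(basictracer.NewInMemoryRecorder())

	// "Client" side: start a span and inject its context into a carrier that
	// travels with the request (the role ba.TraceContext plays above).
	clientSpan := tracer.StartSpan("txn-coord-send")
	carrier := opentracing.TextMapCarrier{}
	if err := tracer.Inject(clientSpan.Context(), opentracing.TextMap, carrier); err != nil {
		log.Fatal(err)
	}
	clientSpan.Finish()

	// "Server" side: extract the propagated context from the carrier and start
	// a span linked to the client's span as the server half of the RPC.
	wireCtx, err := tracer.Extract(opentracing.TextMap, carrier)
	if err != nil {
		log.Fatal(err)
	}
	serverSpan := tracer.StartSpan("replica-send", ext.RPCServerOption(wireCtx))
	serverSpan.Finish()

	fmt.Printf("propagated %d carrier entries\n", len(carrier))
}

In the method above the same pattern appears, except that the span metadata rides inside the BatchRequest header (via basictracer.Delegator and the SpanContextCarrier proto) rather than in string key/value pairs, which is why the code clones the carrier before mutating it.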