This article collects typical usage examples of the Golang method github.com/cockroachdb/cockroach/roachpb.BatchRequest.SetNewRequest. If you have been wondering what BatchRequest.SetNewRequest does and how it is used in practice, the selected code examples below may help. You can also explore further usage examples of the containing type, github.com/cockroachdb/cockroach/roachpb.BatchRequest.
Nine code examples of the BatchRequest.SetNewRequest method are shown below, sorted by popularity by default.
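Across the examples, the recurring pattern is the same: a wrapping Sender attaches the current transaction proto to the outgoing BatchRequest and calls SetNewRequest() to bump the sequence counter before handing the batch to the wrapped sender, then folds the returned Txn back into local state. The following minimal sketch illustrates that pattern only; loggingSender, its fields, and the local sender interface are hypothetical, while the roachpb calls (ba.Txn assignment, ba.SetNewRequest, Proto.Update) mirror those used in the examples below.

package example

import (
	"github.com/cockroachdb/cockroach/roachpb"
	"golang.org/x/net/context"
)

// sender is a minimal stand-in for the client.Sender shape the examples rely on.
type sender interface {
	Send(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)
}

// loggingSender is a hypothetical wrapper showing where SetNewRequest fits:
// after the transaction proto is attached and before the batch is forwarded,
// so each (re-)send of the batch carries a fresh sequence number.
type loggingSender struct {
	wrapped sender
	proto   roachpb.Transaction
}

func (s *loggingSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	ba.Txn = &s.proto  // attach the current transaction state
	ba.SetNewRequest() // bump the sequence counter for this attempt
	br, pErr := s.wrapped.Send(ctx, ba)
	if pErr == nil {
		s.proto.Update(br.Txn) // fold the updated Txn back into local state
	}
	return br, pErr
}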
Example 1: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	// TODO(tschottdorf): see about using only the top-level *roachpb.Error
	// information for this restart logic (includes adding the Txn).
	err := pErr.GoError()
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if err == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if abrtErr, ok := err.(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
		}
		if abrtTxn := abrtErr.Transaction(); abrtTxn != nil {
			// Acts as a minimum priority on restart.
			ts.Proto.Priority = abrtTxn.Priority
		}
	} else if txnErr, ok := err.(roachpb.TransactionRestartError); ok {
		ts.Proto.Update(txnErr.Transaction())
	}
	return nil, pErr
}
Example 2: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if pErr == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if _, ok := pErr.GoError().(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
		}
		// Acts as a minimum priority on restart.
		if pErr.GetTxn() != nil {
			ts.Proto.Priority = pErr.GetTxn().Priority
		}
	} else if pErr.TransactionRestart != roachpb.TransactionRestart_ABORT {
		ts.Proto.Update(pErr.GetTxn())
	}
	return nil, pErr
}
Example 3: Send
// Send implements Sender.
// TODO(tschottdorf): We actually don't want to chop EndTransaction off for
// single-range requests (but that happens now since EndTransaction has the
// isAlone flag). Whether it is one or not is unknown right now (you can only
// find out after you've sent to the Range/looked up a descriptor that suggests
// that you're multi-range). In those cases, the wrapped sender should return
// an error so that we split and retry once with the chunk which contains
// EndTransaction (i.e. the last one).
func (cs *chunkingSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if len(ba.Requests) < 1 {
		panic("empty batch")
	}
	parts := ba.Split()
	var rplChunks []*roachpb.BatchResponse
	for _, part := range parts {
		ba.Requests = part
		// Increase the sequence counter to account for the fact that while
		// chunking, we're likely sending multiple requests to the same Replica.
		ba.SetNewRequest()
		rpl, err := cs.f(ctx, ba)
		if err != nil {
			return nil, err
		}
		// Propagate transaction from last reply to next request. The final
		// update is taken and put into the response's main header.
		ba.Txn.Update(rpl.Header().Txn)
		rplChunks = append(rplChunks, rpl)
	}
	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
	}
	lastHeader := rplChunks[len(rplChunks)-1].BatchResponse_Header
	reply.Error = lastHeader.Error
	reply.Timestamp = lastHeader.Timestamp
	reply.Txn = ba.Txn
	return reply, nil
}
Example 4: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	if ts.UserPriority > 0 {
		ba.UserPriority = ts.UserPriority
	}
	ctx = opentracing.ContextWithSpan(ctx, ts.Trace)
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	if br != nil {
		for _, encSp := range br.CollectedSpans {
			var newSp basictracer.RawSpan
			if err := tracing.DecodeRawSpan(encSp, &newSp); err != nil {
				return nil, roachpb.NewError(err)
			}
			ts.CollectedSpans = append(ts.CollectedSpans, newSp)
		}
	}
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if pErr == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			TxnMeta: roachpb.TxnMeta{
				Isolation: ts.Proto.Isolation,
			},
			Name: ts.Proto.Name,
		}
		// Acts as a minimum priority on restart.
		if pErr.GetTxn() != nil {
			ts.Proto.Priority = pErr.GetTxn().Priority
		}
	} else if pErr.TransactionRestart != roachpb.TransactionRestart_ABORT {
		ts.Proto.Update(pErr.GetTxn())
	}
	return nil, pErr
}
Example 5: sendSingleRange
// sendSingleRange gathers and rearranges the replicas, and makes an RPC call.
func (ds *DistSender) sendSingleRange(trace opentracing.Span, ba roachpb.BatchRequest, desc *roachpb.RangeDescriptor) (*roachpb.BatchResponse, *roachpb.Error) {
	trace.LogEvent(fmt.Sprintf("sending RPC to [%s, %s)", desc.StartKey, desc.EndKey))
	leader := ds.leaderCache.Lookup(roachpb.RangeID(desc.RangeID))
	// Try to send the call.
	replicas := newReplicaSlice(ds.gossip, desc)
	// Rearrange the replicas so that those replicas with long common
	// prefix of attributes end up first. If there's no prefix, this is a
	// no-op.
	order := ds.optimizeReplicaOrder(replicas)
	// If this request needs to go to a leader and we know who that is, move
	// it to the front.
	if !(ba.IsReadOnly() && ba.ReadConsistency == roachpb.INCONSISTENT) &&
		leader.StoreID > 0 {
		if i := replicas.FindReplica(leader.StoreID); i >= 0 {
			replicas.MoveToFront(i)
			order = orderStable
		}
	}
	// Increase the sequence counter in the per-range loop (not
	// outside) since we might hit the same range twice by
	// accident. For example, we might send multiple requests to
	// the same Replica if (1) the descriptor cache has post-split
	// descriptors that are still write intents and (2) the split
	// has not yet been completed.
	ba.SetNewRequest()
	// TODO(tschottdorf): should serialize the trace here, not higher up.
	br, pErr := ds.sendRPC(trace, desc.RangeID, replicas, order, ba)
	if pErr != nil {
		return nil, pErr
	}
	// Untangle the error from the received response.
	pErr = br.Error
	br.Error = nil // scrub the response error
	return br, pErr
}
Example 6: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true when indicating that the caller should retry but needs to send
// EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
	isReverse := ba.IsReverse()
	// TODO(radu): when contexts are properly plumbed, we should be able to get
	// the tracer from ctx, not from the DistSender.
	ctx, cleanup := tracing.EnsureContext(ctx, tracing.TracerFromCtx(ds.Ctx))
	defer cleanup()
	// The minimal key range encompassing all requests contained within.
	// Local addressing has already been resolved.
	// TODO(tschottdorf): consider rudimentary validation of the batch here
	// (for example, non-range requests with EndKey, or empty key ranges).
	rs, err := keys.Range(ba)
	if err != nil {
		return nil, roachpb.NewError(err), false
	}
	var br *roachpb.BatchResponse
	// Send the request to one range per iteration.
	for {
		// Increase the sequence counter only once before sending RPCs to
		// the ranges involved in this chunk of the batch (as opposed to for
		// each RPC individually). On RPC errors, there's no guarantee that
		// the request hasn't made its way to the target regardless of the
		// error; we'd like the second execution to be caught by the sequence
		// cache if that happens. There is a small chance that we address
		// a range twice in this chunk (stale/suboptimal descriptors due to
		// splits/merges) which leads to a transaction retry.
		// TODO(tschottdorf): it's possible that if we don't evict from the
		// cache we could be in for a busy loop.
		ba.SetNewRequest()
		var curReply *roachpb.BatchResponse
		var desc *roachpb.RangeDescriptor
		var evictToken *evictionToken
		var needAnother bool
		var pErr *roachpb.Error
		var finished bool
		var numAttempts int
		for r := retry.StartWithCtx(ctx, ds.rpcRetryOptions); r.Next(); {
			numAttempts++
			{
				const magicLogCurAttempt = 20
				var seq int32
				if ba.Txn != nil {
					seq = ba.Txn.Sequence
				}
				if numAttempts%magicLogCurAttempt == 0 || seq%magicLogCurAttempt == 0 {
					// Log a message if a request appears to get stuck for a long
					// time or, potentially, forever. See #8975.
					// The local counter captures this loop here; the Sequence number
					// should capture anything higher up (as it needs to be
					// incremented every time this method is called).
					log.Warningf(
						ctx,
						"%d retries for an RPC at sequence %d, last error was: %s, remaining key ranges %s: %s",
						numAttempts, seq, pErr, rs, ba,
					)
				}
			}
			// Get range descriptor (or, when spanning range, descriptors). Our
			// error handling below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			log.Trace(ctx, "meta descriptor lookup")
			var err error
			desc, needAnother, evictToken, err = ds.getDescriptors(ctx, rs, evictToken, isReverse)
			// getDescriptors may fail retryably if, for example, the first
			// range isn't available via Gossip. Assume that all errors at
			// this level are retryable. Non-retryable errors would be for
			// things like malformed requests which we should have checked
			// for before reaching this point.
			if err != nil {
				log.Trace(ctx, "range descriptor lookup failed: "+err.Error())
				if log.V(1) {
					log.Warning(ctx, err)
				}
				pErr = roachpb.NewError(err)
				continue
			}
			if needAnother && br == nil {
				// TODO(tschottdorf): we should have a mechanism for discovering
				// range merges (descriptor staleness will mostly go unnoticed),
				// or we'll be turning single-range queries into multi-range
				// queries for no good reason.
				// If there's no transaction and op spans ranges, possibly
				// re-run as part of a transaction for consistency. The
				// case where we don't need to re-run is if the read
				// consistency is not required.
				if ba.Txn == nil && ba.IsPossibleTransaction() &&
					// ... (rest of the code omitted) ...
Example 7: Send
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if err := tc.maybeBeginTxn(&ba); err != nil {
		return nil, roachpb.NewError(err)
	}
	var startNS int64
	ba.SetNewRequest()
	// This is the earliest point at which the request has an ID (if
	// applicable). Begin a Trace which follows this request.
	trace := tc.tracer.NewTrace(tracer.Coord, &ba)
	defer trace.Finalize()
	defer trace.Epoch("sending batch")()
	ctx = tracer.ToCtx(ctx, trace)
	var id string // optional transaction ID
	if ba.Txn != nil {
		// If this request is part of a transaction...
		id = string(ba.Txn.ID)
		// Verify that if this Transaction is not read-only, we have it on
		// file. If not, refuse writes - the client must have issued a write on
		// another coordinator previously.
		if ba.Txn.Writing && ba.IsTransactionWrite() {
			tc.Lock()
			_, ok := tc.txns[id]
			tc.Unlock()
			if !ok {
				return nil, roachpb.NewError(util.Errorf("transaction must not write on multiple coordinators"))
			}
		}
		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if ba.IsReadOnly() {
			ba.Timestamp = ba.Txn.OrigTimestamp
		} else {
			ba.Timestamp = ba.Txn.Timestamp
		}
		if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := rArgs.(*roachpb.EndTransactionRequest)
			if len(et.Key) != 0 {
				return nil, roachpb.NewError(util.Errorf("EndTransaction must not have a Key set"))
			}
			et.Key = ba.Txn.Key
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
			if len(et.IntentSpans) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, roachpb.NewError(util.Errorf("client must not pass intents to EndTransaction"))
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[id]
			if id != "" && metaOK {
				et.IntentSpans = txnMeta.intentSpans()
			}
			tc.Unlock()
			if intentSpans := ba.GetIntentSpans(); len(intentSpans) > 0 {
				// Writes in Batch, so EndTransaction is fine. Should add
				// outstanding intents to EndTransaction, though.
				// TODO(tschottdorf): possible issues when the batch fails,
				// but the intents have been added anyways.
				// TODO(tschottdorf): some of these intents may be covered
				// by others, for example {[a,b), a}). This can lead to
				// some extra requests when those are non-local to the txn
				// record. But it doesn't seem worth optimizing now.
				et.IntentSpans = append(et.IntentSpans, intentSpans...)
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, roachpb.NewError(util.Errorf("transaction is already committed or aborted"))
			}
			if len(et.IntentSpans) == 0 {
				// If there aren't any intents, then there's factually no
				// transaction to end. Read-only txns have all of their state in
				// the client.
				return nil, roachpb.NewError(util.Errorf("cannot commit a read-only transaction"))
			}
			if log.V(1) {
				for _, intent := range et.IntentSpans {
					trace.Event(fmt.Sprintf("intent: [%s,%s)", intent.Key, intent.EndKey))
				}
				// ... (rest of the code omitted) ...
Example 8: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true when indicating that the caller should retry but needs to send
// EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
	isReverse := ba.IsReverse()
	ctx, cleanup := tracing.EnsureContext(ctx, ds.Tracer)
	defer cleanup()
	// The minimal key range encompassing all requests contained within.
	// Local addressing has already been resolved.
	// TODO(tschottdorf): consider rudimentary validation of the batch here
	// (for example, non-range requests with EndKey, or empty key ranges).
	rs, err := keys.Range(ba)
	if err != nil {
		return nil, roachpb.NewError(err), false
	}
	var br *roachpb.BatchResponse
	// Send the request to one range per iteration.
	for {
		// Increase the sequence counter only once before sending RPCs to
		// the ranges involved in this chunk of the batch (as opposed to for
		// each RPC individually). On RPC errors, there's no guarantee that
		// the request hasn't made its way to the target regardless of the
		// error; we'd like the second execution to be caught by the sequence
		// cache if that happens. There is a small chance that we address
		// a range twice in this chunk (stale/suboptimal descriptors due to
		// splits/merges) which leads to a transaction retry.
		// TODO(tschottdorf): it's possible that if we don't evict from the
		// cache we could be in for a busy loop.
		ba.SetNewRequest()
		var curReply *roachpb.BatchResponse
		var desc *roachpb.RangeDescriptor
		var evictToken evictionToken
		var needAnother bool
		var pErr *roachpb.Error
		var finished bool
		for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
			// Get range descriptor (or, when spanning range, descriptors). Our
			// error handling below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			log.Trace(ctx, "meta descriptor lookup")
			desc, needAnother, evictToken, pErr = ds.getDescriptors(rs, evictToken, isReverse)
			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if pErr != nil {
				log.Trace(ctx, "range descriptor lookup failed: "+pErr.String())
				if pErr.Retryable {
					if log.V(1) {
						log.Warning(pErr)
					}
					continue
				}
				break
			} else {
				log.Trace(ctx, "looked up range descriptor")
			}
			if needAnother && br == nil {
				// TODO(tschottdorf): we should have a mechanism for discovering
				// range merges (descriptor staleness will mostly go unnoticed),
				// or we'll be turning single-range queries into multi-range
				// queries for no good reason.
				// If there's no transaction and op spans ranges, possibly
				// re-run as part of a transaction for consistency. The
				// case where we don't need to re-run is if the read
				// consistency is not required.
				if ba.Txn == nil && ba.IsPossibleTransaction() &&
					ba.ReadConsistency != roachpb.INCONSISTENT {
					return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}), false
				}
				// If the request is more than but ends with EndTransaction, we
				// want the caller to come again with the EndTransaction in an
				// extra call.
				if l := len(ba.Requests) - 1; l > 0 && ba.Requests[l].GetInner().Method() == roachpb.EndTransaction {
					return nil, roachpb.NewError(errors.New("cannot send 1PC txn to multiple ranges")), true /* shouldSplitET */
				}
			}
			// It's possible that the returned descriptor misses parts of the
			// keys it's supposed to scan after it's truncated to match the
			// descriptor. Example revscan [a,g), first desc lookup for "g"
			// returns descriptor [c,d) -> [d,g) is never scanned.
			// We evict and retry in such a case.
			includesFrontOfCurSpan := func(rd *roachpb.RangeDescriptor) bool {
				if isReverse {
					// This approach is needed because rs.EndKey is exclusive.
					return desc.ContainsKeyRange(desc.StartKey, rs.EndKey)
				}
				return desc.ContainsKey(rs.Key)
			}
			if !includesFrontOfCurSpan(desc) {
				if err := evictToken.Evict(); err != nil {
					// ... (rest of the code omitted) ...
Example 9: Send
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Start new or pick up active trace and embed its trace metadata into
	// header for use by RPC recipients. From here on, there's always an active
	// Trace, though its overhead is small unless it's sampled.
	sp, cleanupSp := tracing.SpanFromContext(opTxnCoordSender, tc.tracer, ctx)
	defer cleanupSp()
	// TODO(tschottdorf): To get rid of the spurious alloc below we need to
	// implement the carrier interface on ba.Header or make Span non-nullable,
	// both of which force all of ba on the Heap. It's already there, so may
	// not be a big deal, but ba should live on the stack. Also not easy to use
	// a buffer pool here since anything that goes into the RPC layer could be
	// used by goroutines we didn't wait for.
	if ba.Header.Trace == nil {
		ba.Header.Trace = &tracing.Span{}
	}
	if err := tc.tracer.Inject(sp, basictracer.Delegator, ba.Trace); err != nil {
		return nil, roachpb.NewError(err)
	}
	if err := tc.maybeBeginTxn(&ba); err != nil {
		return nil, roachpb.NewError(err)
	}
	var startNS int64
	ba.SetNewRequest()
	// This is the earliest point at which the request has an ID (if
	// applicable). Begin a Trace which follows this request.
	ctx = opentracing.ContextWithSpan(ctx, sp)
	if ba.Txn != nil {
		// If this request is part of a transaction...
		txnID := *ba.Txn.ID
		// Verify that if this Transaction is not read-only, we have it on
		// file. If not, refuse writes - the client must have issued a write on
		// another coordinator previously.
		if ba.Txn.Writing && ba.IsTransactionWrite() {
			tc.Lock()
			_, ok := tc.txns[txnID]
			tc.Unlock()
			if !ok {
				return nil, roachpb.NewErrorf("transaction must not write on multiple coordinators")
			}
		}
		if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := rArgs.(*roachpb.EndTransactionRequest)
			if len(et.Key) != 0 {
				return nil, roachpb.NewErrorf("EndTransaction must not have a Key set")
			}
			et.Key = ba.Txn.Key
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
			if len(et.IntentSpans) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, roachpb.NewErrorf("client must not pass intents to EndTransaction")
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[txnID]
			{
				// Populate et.IntentSpans, taking into account both existing
				// writes (if any) and new writes in this batch, and taking
				// care to perform proper deduplication.
				var keys interval.RangeGroup
				if metaOK {
					keys = txnMeta.keys
				} else {
					keys = interval.NewRangeTree()
				}
				ba.IntentSpanIterate(func(key, endKey roachpb.Key) {
					addKeyRange(keys, key, endKey)
				})
				et.IntentSpans = collectIntentSpans(keys)
			}
			tc.Unlock()
			if len(et.IntentSpans) > 0 {
				// All good, proceed.
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, roachpb.NewErrorf("transaction is already committed or aborted")
			}
			if len(et.IntentSpans) == 0 {
				// ... (rest of the code omitted) ...