This article collects typical usage examples of the Golang method github.com/cockroachdb/cockroach/roachpb.BatchRequest.IsReverse. If you have been wondering what BatchRequest.IsReverse does, how to use it, and what it looks like in practice, the curated examples below should help. You can also explore further usage examples of the enclosing type, github.com/cockroachdb/cockroach/roachpb.BatchRequest.
Five code examples of the BatchRequest.IsReverse method are shown below, sorted by popularity by default.
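As a quick orientation before the examples: IsReverse reports whether the batch carries reverse-oriented requests (such as ReverseScan), which is what lets DistSender walk range descriptors from the end key backwards instead of forwards. A minimal sketch, assuming the roachpb API of the same vintage as the examples below (the embedded Span layout is an assumption):

var ba roachpb.BatchRequest
// ReverseScanRequest carries the isReverse flag, so the batch as a whole
// reports as reverse. (Span field layout assumed for illustration.)
ba.Add(&roachpb.ReverseScanRequest{
	Span: roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("g")},
})
if ba.IsReverse() {
	// Walk ranges from the end key backwards.
}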
Example 1: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true when it indicates that the caller should retry but needs to
// send EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
	isReverse := ba.IsReverse()
	trace := tracer.FromCtx(ctx)

	// The minimal key range encompassing all requests contained within.
	// Local addressing has already been resolved.
	// TODO(tschottdorf): consider rudimentary validation of the batch here
	// (for example, non-range requests with EndKey, or empty key ranges).
	rs := keys.Range(ba)
	var br *roachpb.BatchResponse

	// Send the request to one range per iteration.
	for {
		considerIntents := false
		var curReply *roachpb.BatchResponse
		var desc *roachpb.RangeDescriptor
		var needAnother bool
		var pErr *roachpb.Error
		for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
			// Get range descriptor (or, when spanning range, descriptors). Our
			// error handling below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			descDone := trace.Epoch("meta descriptor lookup")
			var evictDesc func()
			desc, needAnother, evictDesc, pErr = ds.getDescriptors(rs, considerIntents, isReverse)
			descDone()

			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if pErr != nil {
				if pErr.Retryable {
					if log.V(1) {
						log.Warning(pErr)
					}
					continue
				}
				break
			}

			if needAnother && br == nil {
				// TODO(tschottdorf): we should have a mechanism for discovering
				// range merges (descriptor staleness will mostly go unnoticed),
				// or we'll be turning single-range queries into multi-range
				// queries for no good reason.

				// If there's no transaction and the op spans ranges, possibly
				// re-run as part of a transaction for consistency. The case
				// where we don't need to re-run is if the read consistency is
				// not required.
				if ba.Txn == nil && ba.IsPossibleTransaction() &&
					ba.ReadConsistency != roachpb.INCONSISTENT {
					return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}), false
				}
				// If the request consists of more than an EndTransaction but
				// ends with one, we want the caller to come again with the
				// EndTransaction in a separate call.
				if l := len(ba.Requests) - 1; l > 0 && ba.Requests[l].GetInner().Method() == roachpb.EndTransaction {
					return nil, roachpb.NewError(errors.New("cannot send 1PC txn to multiple ranges")), true /* shouldSplitET */
				}
			}

			// It's possible that the returned descriptor misses parts of the
			// keys it's supposed to scan after it's truncated to match the
			// descriptor. Example: for the revscan [a,g), the first descriptor
			// lookup for "g" may return the descriptor [c,d), in which case
			// [d,g) is never scanned. We evict and retry in such a case.
			if (isReverse && !desc.ContainsKeyRange(desc.StartKey, rs.EndKey)) || (!isReverse && !desc.ContainsKeyRange(rs.Key, desc.EndKey)) {
				evictDesc()
				continue
			}

			curReply, pErr = func() (*roachpb.BatchResponse, *roachpb.Error) {
				// Truncate the request to our current key range.
				intersected, iErr := rs.Intersect(desc)
				if iErr != nil {
					return nil, roachpb.NewError(iErr)
				}
				truncBA, numActive, trErr := truncate(ba, intersected)
				if numActive == 0 && trErr == nil {
					// This shouldn't happen in the wild, but some tests
					// exercise it.
					return nil, roachpb.NewErrorf("truncation resulted in empty batch on [%s,%s): %s",
						rs.Key, rs.EndKey, ba)
				}
				if trErr != nil {
					return nil, roachpb.NewError(trErr)
				}
				return ds.sendSingleRange(trace, truncBA, desc)
			}()

			// If sending succeeded, break this loop.
			if pErr == nil {
				break
			}
			// ... (the rest of the code is omitted here) ...
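The eviction check above is easiest to see with the concrete keys from its comment. A hedged sketch (the keys and the stale descriptor are invented for illustration):

// Reverse scan over [a,g): a stale lookup for the end key "g" may return
// the descriptor [c,d), which does not cover the front of the remaining
// span (for a reverse scan, the front is the end-key side). Truncating to
// it would silently skip [d,g), hence evict-and-retry.
rs := roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("g")}
desc := &roachpb.RangeDescriptor{StartKey: roachpb.RKey("c"), EndKey: roachpb.RKey("d")}
covered := desc.ContainsKeyRange(desc.StartKey, rs.EndKey) // false -> evictDesc(); continue
_ = covered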
Example 2: fillSkippedResponses
// fillSkippedResponses fills in responses for requests that were skipped
// after the batch met its max key limit for range requests, and sets the
// ResumeSpan on range responses so future batch requests can continue.
func fillSkippedResponses(
	ba roachpb.BatchRequest, br *roachpb.BatchResponse, nextSpan roachpb.RSpan,
) {
	// Some requests might have NoopResponses; we must replace them with empty
	// responses of the proper type.
	for i, req := range ba.Requests {
		if _, ok := br.Responses[i].GetInner().(*roachpb.NoopResponse); !ok {
			continue
		}
		var reply roachpb.Response
		switch t := req.GetInner().(type) {
		case *roachpb.ScanRequest:
			reply = &roachpb.ScanResponse{}
		case *roachpb.ReverseScanRequest:
			reply = &roachpb.ReverseScanResponse{}
		case *roachpb.DeleteRangeRequest:
			reply = &roachpb.DeleteRangeResponse{}
		case *roachpb.BeginTransactionRequest, *roachpb.EndTransactionRequest:
			continue
		default:
			panic(fmt.Sprintf("bad type %T", t))
		}
		union := roachpb.ResponseUnion{}
		union.MustSetInner(reply)
		br.Responses[i] = union
	}

	// Set the ResumeSpan for future batch requests.
	isReverse := ba.IsReverse()
	for i, resp := range br.Responses {
		req := ba.Requests[i].GetInner()
		if !roachpb.IsRange(req) {
			continue
		}
		hdr := resp.GetInner().Header()
		origSpan := req.Header()
		if isReverse {
			if hdr.ResumeSpan != nil {
				// The ResumeSpan.Key might be set to the StartKey of a range;
				// correctly set it to the Key of the original request span.
				hdr.ResumeSpan.Key = origSpan.Key
			} else if roachpb.RKey(origSpan.Key).Less(nextSpan.EndKey) {
				// Some keys have yet to be processed.
				hdr.ResumeSpan = &origSpan
				if nextSpan.EndKey.Less(roachpb.RKey(origSpan.EndKey)) {
					// The original span has been partially processed.
					hdr.ResumeSpan.EndKey = nextSpan.EndKey.AsRawKey()
				}
			}
		} else {
			if hdr.ResumeSpan != nil {
				// The ResumeSpan.EndKey might be set to the EndKey of a
				// range; correctly set it to the EndKey of the original
				// request span.
				hdr.ResumeSpan.EndKey = origSpan.EndKey
			} else if nextSpan.Key.Less(roachpb.RKey(origSpan.EndKey)) {
				// Some keys have yet to be processed.
				hdr.ResumeSpan = &origSpan
				if roachpb.RKey(origSpan.Key).Less(nextSpan.Key) {
					// The original span has been partially processed.
					hdr.ResumeSpan.Key = nextSpan.Key.AsRawKey()
				}
			}
		}
		br.Responses[i].GetInner().SetHeader(hdr)
	}
}
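For context, the ResumeSpans filled in above are what let a caller continue a key-limited batch where the previous one stopped. The loop below is a hypothetical client, not code from the repository; the sender variable, the embedded Span layout, and the single-request batch are assumptions:

scan := &roachpb.ScanRequest{Span: roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("z")}}
for {
	var ba roachpb.BatchRequest
	ba.MaxSpanRequestKeys = 100 // at most 100 keys per round trip
	ba.Add(scan)
	br, pErr := sender.Send(ctx, ba)
	if pErr != nil {
		break
	}
	resume := br.Responses[0].GetInner().Header().ResumeSpan
	if resume == nil {
		break // the whole span has been processed
	}
	// For a forward scan the resume point moves Key ahead while EndKey
	// stays at the original end, mirroring the logic above.
	scan.Key, scan.EndKey = resume.Key, resume.EndKey
}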
Example 3: Send
// Send implements the batch.Sender interface. It subdivides the Batch into
// batches admissible for sending (preventing certain illegal mixtures of
// requests), executes each individual part (which may span multiple ranges),
// and recombines the response. When the request spans ranges, it is split up
// and the corresponding ranges are queried serially, in ascending order.
// In particular, the first write in a transaction may not be part of the first
// request sent. This is relevant since the first write is a BeginTransaction
// request, thus opening up a window of time during which there may be intents
// of a transaction, but no entry. Pushing such a transaction will succeed, and
// may lead to the transaction being aborted early.
func (ds *DistSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	tracing.AnnotateTrace()

	// In the event that timestamp isn't set and read consistency isn't
	// required, set the timestamp using the local clock.
	if ba.ReadConsistency == roachpb.INCONSISTENT && ba.Timestamp.Equal(hlc.ZeroTimestamp) {
		ba.Timestamp = ds.clock.Now()
	}

	if ba.Txn != nil {
		// Make a copy here since the code below modifies it in different places.
		// TODO(tschottdorf): be smarter about this - no need to do it for
		// requests that don't get split.
		txnClone := ba.Txn.Clone()
		ba.Txn = &txnClone

		if len(ba.Txn.ObservedTimestamps) == 0 {
			// Ensure the local NodeID is marked as free from clock offset;
			// the transaction's timestamp was taken off the local clock.
			if nDesc := ds.getNodeDescriptor(); nDesc != nil {
				// TODO(tschottdorf): future refactoring should move this to txn
				// creation in TxnCoordSender, which is currently unaware of the
				// NodeID (and wraps *DistSender through client.Sender since it
				// also needs test compatibility with *LocalSender).
				//
				// Taking care below to not modify any memory referenced from
				// our BatchRequest which may be shared with others.
				//
				// We already have a clone of our txn (see above), so we can
				// modify it freely.
				//
				// Zero the existing data. That makes sure that if we had
				// something of size zero but with capacity, we don't re-use the
				// existing space (which others may also use). This is just to
				// satisfy paranoia/OCD and not expected to matter in practice.
				ba.Txn.ResetObservedTimestamps()
				// OrigTimestamp is the HLC timestamp at which the Txn started, so
				// this effectively means no more uncertainty on this node.
				ba.Txn.UpdateObservedTimestamp(nDesc.NodeID, ba.Txn.OrigTimestamp)
			}
		}
	}

	if len(ba.Requests) < 1 {
		panic("empty batch")
	}

	if ba.MaxSpanRequestKeys != 0 {
		// Verify that the batch contains only specific range requests or the
		// Begin/EndTransactionRequest. Verify that a batch with a ReverseScan
		// only contains ReverseScan range requests.
		isReverse := ba.IsReverse()
		for _, req := range ba.Requests {
			inner := req.GetInner()
			switch inner.(type) {
			case *roachpb.ScanRequest, *roachpb.DeleteRangeRequest:
				// Accepted range requests. All other range requests are still
				// not supported.
				// TODO(vivek): don't enumerate all range requests.
				if isReverse {
					return nil, roachpb.NewErrorf("batch with limit contains both forward and reverse scans")
				}
			case *roachpb.BeginTransactionRequest, *roachpb.EndTransactionRequest, *roachpb.ReverseScanRequest:
				continue
			default:
				return nil, roachpb.NewErrorf("batch with limit contains %T request", inner)
			}
		}
	}

	var rplChunks []*roachpb.BatchResponse
	parts := ba.Split(false /* don't split ET */)
	if len(parts) > 1 && ba.MaxSpanRequestKeys != 0 {
		// We already verified above that the batch contains only scan requests
		// of the same type. Such a batch should never need splitting.
		panic("batch with MaxSpanRequestKeys needs splitting")
	}
	for len(parts) > 0 {
		part := parts[0]
		ba.Requests = part
		rpl, pErr, shouldSplitET := ds.sendChunk(ctx, ba)
		if shouldSplitET {
			// If we tried to send a single round-trip EndTransaction but it
			// looks like it's going to hit multiple ranges, split it here and
			// try again.
			if len(parts) != 1 {
				panic("EndTransaction not in last chunk of batch")
				// ... (the rest of the code is omitted here) ...
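To make the MaxSpanRequestKeys validation in this method concrete: a key-limited batch may mix Scan/DeleteRange with Begin/EndTransaction, but never forward and reverse range requests. A hedged sketch of a batch the check rejects (request construction assumed for illustration):

var ba roachpb.BatchRequest
ba.MaxSpanRequestKeys = 10
ba.Add(&roachpb.ScanRequest{Span: roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")}})
ba.Add(&roachpb.ReverseScanRequest{Span: roachpb.Span{Key: roachpb.Key("m"), EndKey: roachpb.Key("z")}})
// The ReverseScan makes ba.IsReverse() true, so the ScanRequest case in the
// switch above fails with "batch with limit contains both forward and
// reverse scans".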
Example 4: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true when it indicates that the caller should retry but needs to
// send EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
	isReverse := ba.IsReverse()

	// TODO(radu): when contexts are properly plumbed, we should be able to get
	// the tracer from ctx, not from the DistSender.
	ctx, cleanup := tracing.EnsureContext(ctx, tracing.TracerFromCtx(ds.Ctx))
	defer cleanup()

	// The minimal key range encompassing all requests contained within.
	// Local addressing has already been resolved.
	// TODO(tschottdorf): consider rudimentary validation of the batch here
	// (for example, non-range requests with EndKey, or empty key ranges).
	rs, err := keys.Range(ba)
	if err != nil {
		return nil, roachpb.NewError(err), false
	}

	var br *roachpb.BatchResponse

	// Send the request to one range per iteration.
	for {
		// Increase the sequence counter only once before sending RPCs to
		// the ranges involved in this chunk of the batch (as opposed to for
		// each RPC individually). On RPC errors, there's no guarantee that
		// the request hasn't made its way to the target regardless of the
		// error; we'd like the second execution to be caught by the sequence
		// cache if that happens. There is a small chance that we address
		// a range twice in this chunk (stale/suboptimal descriptors due to
		// splits/merges) which leads to a transaction retry.
		// TODO(tschottdorf): it's possible that if we don't evict from the
		// cache we could be in for a busy loop.
		ba.SetNewRequest()

		var curReply *roachpb.BatchResponse
		var desc *roachpb.RangeDescriptor
		var evictToken *evictionToken
		var needAnother bool
		var pErr *roachpb.Error
		var finished bool
		var numAttempts int
		for r := retry.StartWithCtx(ctx, ds.rpcRetryOptions); r.Next(); {
			numAttempts++
			{
				const magicLogCurAttempt = 20

				var seq int32
				if ba.Txn != nil {
					seq = ba.Txn.Sequence
				}

				if numAttempts%magicLogCurAttempt == 0 || seq%magicLogCurAttempt == 0 {
					// Log a message if a request appears to get stuck for a long
					// time or, potentially, forever. See #8975.
					// The local counter captures this loop here; the Sequence number
					// should capture anything higher up (as it needs to be
					// incremented every time this method is called).
					log.Warningf(
						ctx,
						"%d retries for an RPC at sequence %d, last error was: %s, remaining key ranges %s: %s",
						numAttempts, seq, pErr, rs, ba,
					)
				}
			}

			// Get range descriptor (or, when spanning range, descriptors). Our
			// error handling below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			log.Trace(ctx, "meta descriptor lookup")
			var err error
			desc, needAnother, evictToken, err = ds.getDescriptors(ctx, rs, evictToken, isReverse)

			// getDescriptors may fail retryably if, for example, the first
			// range isn't available via Gossip. Assume that all errors at
			// this level are retryable. Non-retryable errors would be for
			// things like malformed requests which we should have checked
			// for before reaching this point.
			if err != nil {
				log.Trace(ctx, "range descriptor lookup failed: "+err.Error())
				if log.V(1) {
					log.Warning(ctx, err)
				}
				pErr = roachpb.NewError(err)
				continue
			}

			if needAnother && br == nil {
				// TODO(tschottdorf): we should have a mechanism for discovering
				// range merges (descriptor staleness will mostly go unnoticed),
				// or we'll be turning single-range queries into multi-range
				// queries for no good reason.

				// If there's no transaction and the op spans ranges, possibly
				// re-run as part of a transaction for consistency. The case
				// where we don't need to re-run is if the read consistency is
				// not required.
				if ba.Txn == nil && ba.IsPossibleTransaction() &&
					// ... (the rest of the code is omitted here) ...
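The ba.SetNewRequest() call at the top of the loop is what makes the retry story safe. As a hedged sketch of what it amounts to in this era of the codebase (the body below is an assumption inferred from the surrounding comment, not quoted source):

// Hypothetical equivalent of ba.SetNewRequest(): bump the transaction's
// sequence once per chunk, so that if an RPC error left the request applied
// on the target, a second execution is caught by the sequence cache instead
// of being applied twice.
if ba.Txn != nil {
	ba.Txn.Sequence++
}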
Example 5: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true when it indicates that the caller should retry but needs to
// send EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
	isReverse := ba.IsReverse()

	ctx, cleanup := tracing.EnsureContext(ctx, ds.Tracer)
	defer cleanup()

	// The minimal key range encompassing all requests contained within.
	// Local addressing has already been resolved.
	// TODO(tschottdorf): consider rudimentary validation of the batch here
	// (for example, non-range requests with EndKey, or empty key ranges).
	rs, err := keys.Range(ba)
	if err != nil {
		return nil, roachpb.NewError(err), false
	}

	var br *roachpb.BatchResponse

	// Send the request to one range per iteration.
	for {
		// Increase the sequence counter only once before sending RPCs to
		// the ranges involved in this chunk of the batch (as opposed to for
		// each RPC individually). On RPC errors, there's no guarantee that
		// the request hasn't made its way to the target regardless of the
		// error; we'd like the second execution to be caught by the sequence
		// cache if that happens. There is a small chance that we address
		// a range twice in this chunk (stale/suboptimal descriptors due to
		// splits/merges) which leads to a transaction retry.
		// TODO(tschottdorf): it's possible that if we don't evict from the
		// cache we could be in for a busy loop.
		ba.SetNewRequest()

		var curReply *roachpb.BatchResponse
		var desc *roachpb.RangeDescriptor
		var evictToken evictionToken
		var needAnother bool
		var pErr *roachpb.Error
		var finished bool
		for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
			// Get range descriptor (or, when spanning range, descriptors). Our
			// error handling below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			log.Trace(ctx, "meta descriptor lookup")
			desc, needAnother, evictToken, pErr = ds.getDescriptors(rs, evictToken, isReverse)

			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if pErr != nil {
				log.Trace(ctx, "range descriptor lookup failed: "+pErr.String())
				if pErr.Retryable {
					if log.V(1) {
						log.Warning(pErr)
					}
					continue
				}
				break
			} else {
				log.Trace(ctx, "looked up range descriptor")
			}

			if needAnother && br == nil {
				// TODO(tschottdorf): we should have a mechanism for discovering
				// range merges (descriptor staleness will mostly go unnoticed),
				// or we'll be turning single-range queries into multi-range
				// queries for no good reason.

				// If there's no transaction and the op spans ranges, possibly
				// re-run as part of a transaction for consistency. The case
				// where we don't need to re-run is if the read consistency is
				// not required.
				if ba.Txn == nil && ba.IsPossibleTransaction() &&
					ba.ReadConsistency != roachpb.INCONSISTENT {
					return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}), false
				}
				// If the request consists of more than an EndTransaction but
				// ends with one, we want the caller to come again with the
				// EndTransaction in a separate call.
				if l := len(ba.Requests) - 1; l > 0 && ba.Requests[l].GetInner().Method() == roachpb.EndTransaction {
					return nil, roachpb.NewError(errors.New("cannot send 1PC txn to multiple ranges")), true /* shouldSplitET */
				}
			}

			// It's possible that the returned descriptor misses parts of the
			// keys it's supposed to scan after it's truncated to match the
			// descriptor. Example: for the revscan [a,g), the first descriptor
			// lookup for "g" may return the descriptor [c,d), in which case
			// [d,g) is never scanned. We evict and retry in such a case.
			includesFrontOfCurSpan := func(rd *roachpb.RangeDescriptor) bool {
				if isReverse {
					// This approach is needed because rs.EndKey is exclusive.
					return rd.ContainsKeyRange(rd.StartKey, rs.EndKey)
				}
				return rd.ContainsKey(rs.Key)
			}
			if !includesFrontOfCurSpan(desc) {
				if err := evictToken.Evict(); err != nil {
					// ... (the rest of the code is omitted here) ...