This page collects typical usage examples of the Go type BatchRequest from github.com/cockroachdb/cockroach/roachpb. If you are unsure what BatchRequest is for or how to use it, the curated examples below may help.
The following 15 code examples of BatchRequest are shown, sorted by popularity by default.
Example 1: send
// send runs the specified calls synchronously in a single batch and returns
// any errors. Returns a nil response for empty input (no requests).
func (db *DB) send(maxScanResults int64, reqs ...roachpb.Request) (
	*roachpb.BatchResponse, *roachpb.Error) {
	if len(reqs) == 0 {
		return nil, nil
	}
	ba := roachpb.BatchRequest{}
	ba.Add(reqs...)
	ba.MaxScanResults = maxScanResults
	if db.userPriority != 1 {
		ba.UserPriority = db.userPriority
	}
	tracing.AnnotateTrace()
	br, pErr := db.sender.Send(context.TODO(), ba)
	if pErr != nil {
		if log.V(1) {
			log.Infof("failed batch: %s", pErr)
		}
		return nil, pErr
	}
	return br, nil
}
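For orientation, here is a minimal sketch of the same construction pattern in isolation: build a BatchRequest, Add one or more requests, and hand the batch to any client.Sender. Only API visible in the examples on this page is used; the key literal and the function name are placeholders, not part of the CockroachDB sources, and the import paths assume this historical source tree.

import (
	"github.com/cockroachdb/cockroach/client"
	"github.com/cockroachdb/cockroach/roachpb"
	"golang.org/x/net/context"
)

// runSmallBatch is a hypothetical call site: it assembles a one-request
// batch the same way send() does above and delegates to the given Sender.
func runSmallBatch(ctx context.Context, sender client.Sender) (*roachpb.BatchResponse, *roachpb.Error) {
	get := &roachpb.GetRequest{}
	get.Key = roachpb.Key("example-key") // placeholder key
	ba := roachpb.BatchRequest{}
	ba.Add(get) // Add accepts any number of roachpb.Request values
	return sender.Send(ctx, ba)
}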
Example 2: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	// TODO(tschottdorf): see about using only the top-level *roachpb.Error
	// information for this restart logic (includes adding the Txn).
	err := pErr.GoError()
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to it as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if err == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if abrtErr, ok := err.(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
		}
		if abrtTxn := abrtErr.Transaction(); abrtTxn != nil {
			// Acts as a minimum priority on restart.
			ts.Proto.Priority = abrtTxn.Priority
		}
	} else if txnErr, ok := err.(roachpb.TransactionRestartError); ok {
		ts.Proto.Update(txnErr.Transaction())
	}
	return nil, pErr
}
Example 3: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to it as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if pErr == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if _, ok := pErr.GoError().(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
		}
		// Acts as a minimum priority on restart.
		if pErr.GetTxn() != nil {
			ts.Proto.Priority = pErr.GetTxn().Priority
		}
	} else if pErr.TransactionRestart != roachpb.TransactionRestart_ABORT {
		ts.Proto.Update(pErr.GetTxn())
	}
	return nil, pErr
}
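Both txnSender variants above follow the same decorator pattern: implement the Sender interface, annotate the outgoing BatchRequest, delegate to the wrapped Sender, and post-process the result. A stripped-down sketch of just that skeleton, with hypothetical logging standing in for the transaction bookkeeping:

// loggingSender is illustrative only; it shows the wrapping pattern
// without any of txnSender's transaction state.
type loggingSender struct {
	wrapped client.Sender
}

func (ls *loggingSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	log.Infof("sending batch of %d requests", len(ba.Requests))
	br, pErr := ls.wrapped.Send(ctx, ba) // delegate to the wrapped sender
	if pErr != nil {
		log.Infof("batch failed: %s", pErr)
	}
	return br, pErr
}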
Example 4: sendAndFill
// sendAndFill is a helper which sends the given batch and fills its results,
// returning the appropriate error, which is either from the first failing call
// or an "internal" error.
func sendAndFill(
	send func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error),
	b *Batch,
) error {
	// Errors here will be attached to the results, so we will get them from
	// the call to fillResults in the regular case in which an individual call
	// fails. But send() also returns its own errors, so there's some dancing
	// here to do because we want to run fillResults() so that the individual
	// result gets initialized with an error from the corresponding call.
	var ba roachpb.BatchRequest
	// TODO(tschottdorf): this nonsensical copy is required since (at least at
	// the time of writing) the chunking and masking in DistSender operate on
	// the original data (as attested to by a whole bunch of test failures).
	ba.Requests = append([]roachpb.RequestUnion(nil), b.reqs...)
	ba.Header = b.Header
	b.response, b.pErr = send(ba)
	if b.pErr != nil {
		// Discard errors from fillResults.
		_ = b.fillResults()
		return b.pErr.GoError()
	}
	if err := b.fillResults(); err != nil {
		b.pErr = roachpb.NewError(err)
		return err
	}
	return nil
}
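A sketch of how a caller might wire sendAndFill up; the closure simply adapts a Sender (here hypothetical, as is the function name) to the function type sendAndFill expects:

// runBatch adapts sender.Send into the closure shape taken by sendAndFill.
// The function and its arguments are illustrative, not from the sources.
func runBatch(ctx context.Context, sender client.Sender, b *Batch) error {
	return sendAndFill(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		return sender.Send(ctx, ba)
	}, b)
}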
Example 5: RangeLookup
// RangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency, as additional range
// lookups may be required. Note also that RangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) RangeLookup(key roachpb.RKey, desc *roachpb.RangeDescriptor, considerIntents, useReverseScan bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			// We can interpret the RKey as a Key here since it's a metadata
			// lookup; those are never local.
			Key: key.AsRawKey(),
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: considerIntents,
		Reverse:         useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	trace := ds.Tracer.StartSpan("range lookup")
	defer trace.Finish()
	// TODO(tschottdorf): Ideally we would use the trace of the request which
	// caused this lookup instead of a new one.
	br, err := ds.sendRPC(trace, desc.RangeID, replicas, orderRandom, ba)
	if err != nil {
		return nil, err
	}
	if br.Error != nil {
		return nil, br.Error
	}
	return br.Responses[0].GetInner().(*roachpb.RangeLookupResponse).Ranges, nil
}
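The type assertion on the last line is the general pattern for reading a BatchResponse: each slot in Responses is a union, and GetInner() unwraps it to the concrete response type matching the request at the same index. A defensive variant of that unwrapping, sketched as a hypothetical helper (util.Errorf as used elsewhere on this page):

// firstRangeLookupResponse unwraps slot 0 with the comma-ok assertion so a
// mismatched batch returns an error rather than panicking. Illustrative only.
func firstRangeLookupResponse(br *roachpb.BatchResponse) (*roachpb.RangeLookupResponse, error) {
	if len(br.Responses) == 0 {
		return nil, util.Errorf("empty batch response")
	}
	resp, ok := br.Responses[0].GetInner().(*roachpb.RangeLookupResponse)
	if !ok {
		return nil, util.Errorf("unexpected response type %T", br.Responses[0].GetInner())
	}
	return resp, nil
}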
Example 6: Send
// Send implements the batch.Sender interface. It subdivides
// the Batch into batches admissible for sending (preventing certain
// illegal mixtures of requests), executes each individual part
// (which may span multiple ranges), and recombines the response.
func (ds *DistSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// In the event that the timestamp isn't set and read consistency isn't
	// required, set the timestamp using the local clock.
	// TODO(tschottdorf): right place for this?
	if ba.ReadConsistency == roachpb.INCONSISTENT && ba.Timestamp.Equal(roachpb.ZeroTimestamp) {
		// Make sure that after the call, args hasn't changed.
		defer func(timestamp roachpb.Timestamp) {
			ba.Timestamp = timestamp
		}(ba.Timestamp)
		ba.Timestamp = ds.clock.Now()
	}
	if ba.Txn != nil && len(ba.Txn.CertainNodes.Nodes) == 0 {
		// Ensure the local NodeID is marked as free from clock offset;
		// the transaction's timestamp was taken off the local clock.
		if nDesc := ds.getNodeDescriptor(); nDesc != nil {
			// TODO(tschottdorf): bad style to assume that ba.Txn is ours.
			// No race here, but should have a better way of doing this.
			// TODO(tschottdorf): future refactoring should move this to txn
			// creation in TxnCoordSender, which is currently unaware of the
			// NodeID (and wraps *DistSender through client.Sender since it
			// also needs test compatibility with *LocalSender).
			ba.Txn.CertainNodes.Add(nDesc.NodeID)
		}
	}
	// TODO(tschottdorf): provisional instantiation.
	return newChunkingSender(ds.sendChunk).Send(ctx, ba)
}
Example 7: RangeLookup
// RangeLookup implements the RangeDescriptorDB interface.
// RangeLookup dispatches a RangeLookup request for the given metadata
// key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency, as additional range
// lookups may be required. Note also that RangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) RangeLookup(
	key roachpb.RKey, desc *roachpb.RangeDescriptor, considerIntents, useReverseScan bool,
) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, *roachpb.Error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			// We can interpret the RKey as a Key here since it's a metadata
			// lookup; those are never local.
			Key: key.AsRawKey(),
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: considerIntents,
		Reverse:         useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	replicas.Shuffle()
	// TODO(tschottdorf): Ideally we would use the trace of the request which
	// caused this lookup.
	_ = context.TODO()
	br, err := ds.sendRPC(ds.Ctx, desc.RangeID, replicas, ba)
	if err != nil {
		return nil, nil, roachpb.NewError(err)
	}
	if br.Error != nil {
		return nil, nil, br.Error
	}
	resp := br.Responses[0].GetInner().(*roachpb.RangeLookupResponse)
	return resp.Ranges, resp.PrefetchedRanges, nil
}
Example 8: RangeLookup
// RangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency, as additional range
// lookups may be required. Note also that RangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) RangeLookup(key roachpb.RKey, desc *roachpb.RangeDescriptor, considerIntents, useReverseScan bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		Span: roachpb.Span{
			// We can interpret the RKey as a Key here since it's a metadata
			// lookup; those are never local.
			Key: key.AsRawKey(),
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: considerIntents,
		Reverse:         useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	// TODO(tschottdorf) consider a Trace here, potentially that of the request
	// that had the cache miss and waits for the result.
	br, err := ds.sendRPC(nil /* Trace */, desc.RangeID, replicas, rpc.OrderRandom, ba)
	if err != nil {
		return nil, err
	}
	if br.Error != nil {
		return nil, br.Error
	}
	return br.Responses[0].GetInner().(*roachpb.RangeLookupResponse).Ranges, nil
}
Example 9: rangeLookup
// rangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency, as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) rangeLookup(key roachpb.Key, options lookupOptions,
	desc *roachpb.RangeDescriptor) ([]roachpb.RangeDescriptor, error) {
	ba := roachpb.BatchRequest{}
	ba.ReadConsistency = roachpb.INCONSISTENT
	ba.Add(&roachpb.RangeLookupRequest{
		RequestHeader: roachpb.RequestHeader{
			Key:             key,
			ReadConsistency: roachpb.INCONSISTENT,
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: options.considerIntents,
		Reverse:         options.useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	// TODO(tschottdorf) consider a Trace here, potentially that of the request
	// that had the cache miss and waits for the result.
	br, err := ds.sendRPC(nil /* Trace */, desc.RangeID, replicas, rpc.OrderRandom, ba)
	if err != nil {
		return nil, err
	}
	if err := br.GoError(); err != nil {
		return nil, err
	}
	return br.Responses[0].GetInner().(*roachpb.RangeLookupResponse).Ranges, nil
}
Example 10: Send
// Send forwards the call to the single store. This is a poor man's
// version of kv.TxnCoordSender, but it serves the purposes of
// supporting tests in this package. Transactions are not supported.
// Since kv/ depends on storage/, we can't get access to a
// TxnCoordSender from here.
// TODO(tschottdorf): {kv->storage}.LocalSender
func (db *testSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if et, ok := ba.GetArg(roachpb.EndTransaction); ok {
		return nil, roachpb.NewError(util.Errorf("%s method not supported", et.Method()))
	}
	// Lookup range and direct request.
	key, endKey := keys.Range(ba)
	rng := db.store.LookupReplica(key, endKey)
	if rng == nil {
		return nil, roachpb.NewError(roachpb.NewRangeKeyMismatchError(key, endKey, nil))
	}
	ba.RangeID = rng.Desc().RangeID
	replica := rng.GetReplica()
	if replica == nil {
		return nil, roachpb.NewError(util.Errorf("own replica missing in range"))
	}
	ba.Replica = *replica
	br, pErr := db.store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(db.store, br))
	}
	if pErr != nil {
		return nil, pErr
	}
	return br, nil
}
Example 11: Send
// Send implements Sender.
// TODO(tschottdorf): We actually don't want to chop EndTransaction off for
// single-range requests (but that happens now since EndTransaction has the
// isAlone flag). Whether it is one or not is unknown right now (you can only
// find out after you've sent to the Range/looked up a descriptor that suggests
// that you're multi-range). In those cases, the wrapped sender should return an
// error so that we split and retry once with the chunk which contains
// EndTransaction (i.e. the last one).
func (cs *chunkingSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if len(ba.Requests) < 1 {
		panic("empty batch")
	}
	parts := ba.Split()
	var rplChunks []*roachpb.BatchResponse
	for _, part := range parts {
		ba.Requests = part
		// Increase the sequence counter to account for the fact that while
		// chunking, we're likely sending multiple requests to the same Replica.
		ba.SetNewRequest()
		rpl, err := cs.f(ctx, ba)
		if err != nil {
			return nil, err
		}
		// Propagate transaction from last reply to next request. The final
		// update is taken and put into the response's main header.
		ba.Txn.Update(rpl.Header().Txn)
		rplChunks = append(rplChunks, rpl)
	}
	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
	}
	lastHeader := rplChunks[len(rplChunks)-1].BatchResponse_Header
	reply.Error = lastHeader.Error
	reply.Timestamp = lastHeader.Timestamp
	reply.Txn = ba.Txn
	return reply, nil
}
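To make the chunking concrete: Split() partitions the batch into runs of requests that may legally travel together, and since EndTransaction carries the isAlone flag mentioned in the TODO, it should end up in its own final chunk. A small sketch under that assumption, using the zero-argument Split() signature shown above; the request values are placeholders:

// Illustrative only: observe how a mixed batch is chunked.
var ba roachpb.BatchRequest
ba.Add(&roachpb.PutRequest{})
ba.Add(&roachpb.PutRequest{})
ba.Add(&roachpb.EndTransactionRequest{})
for i, part := range ba.Split() {
	// Each part is a []roachpb.RequestUnion that chunkingSender sends as
	// its own sub-batch; EndTransaction lands in the last part.
	log.Infof("part %d has %d requests", i, len(part))
}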
Example 12: sendAttempt
// sendAttempt gathers and rearranges the replicas, and makes an RPC call.
func (ds *DistSender) sendAttempt(trace *tracer.Trace, ba roachpb.BatchRequest, desc *roachpb.RangeDescriptor) (*roachpb.BatchResponse, *roachpb.Error) {
	defer trace.Epoch("sending RPC")()
	leader := ds.leaderCache.Lookup(roachpb.RangeID(desc.RangeID))
	// Try to send the call.
	replicas := newReplicaSlice(ds.gossip, desc)
	// Rearrange the replicas so that those replicas with long common
	// prefix of attributes end up first. If there's no prefix, this is a
	// no-op.
	order := ds.optimizeReplicaOrder(replicas)
	// If this request needs to go to a leader and we know who that is, move
	// it to the front.
	if !(ba.IsReadOnly() && ba.ReadConsistency == roachpb.INCONSISTENT) &&
		leader.StoreID > 0 {
		if i := replicas.FindReplica(leader.StoreID); i >= 0 {
			replicas.MoveToFront(i)
			order = rpc.OrderStable
		}
	}
	br, err := ds.sendRPC(trace, desc.RangeID, replicas, order, ba)
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	// Untangle the error from the received response.
	pErr := br.Error
	br.Error = nil // scrub the response error
	return br, pErr
}
Example 13: testPut
func testPut() roachpb.BatchRequest {
	var ba roachpb.BatchRequest
	ba.Timestamp = testTS
	put := &roachpb.PutRequest{}
	put.Key = testKey
	ba.Add(put)
	return ba
}
Example 14: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	var store *Store
	var err error
	// If we aren't given a Replica, we have to bend over backwards a
	// little here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		rs := keys.Range(ba)
		rangeID, repl, err = ls.lookupReplica(rs.Key, rs.EndKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	sp, cleanupSp := tracing.SpanFromContext(opStores, store.Tracer(), ctx)
	defer cleanupSp()
	if ba.Txn != nil {
		// For calls that read data within a txn, we keep track of timestamps
		// observed from the various participating nodes' HLC clocks. If we have
		// a timestamp on file for this Node which is smaller than MaxTimestamp,
		// we can lower MaxTimestamp accordingly. If MaxTimestamp drops below
		// OrigTimestamp, we effectively can't see uncertainty restarts any
		// more.
		// Note that it's not an issue if MaxTimestamp propagates back out to
		// the client via a returned Transaction update - when updating a Txn
		// from another, the larger MaxTimestamp wins.
		if maxTS, ok := ba.Txn.GetObservedTimestamp(ba.Replica.NodeID); ok && maxTS.Less(ba.Txn.MaxTimestamp) {
			// Copy-on-write to protect others we might be sharing the Txn with.
			shallowTxn := *ba.Txn
			// The uncertainty window is [OrigTimestamp, maxTS), so if that window
			// is empty, there won't be any uncertainty restarts.
			if !ba.Txn.OrigTimestamp.Less(maxTS) {
				sp.LogEvent("read has no clock uncertainty")
			}
			shallowTxn.MaxTimestamp.Backward(maxTS)
			ba.Txn = &shallowTxn
		}
	}
	br, pErr := store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}
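The shallowTxn dance above is a compact copy-on-write idiom worth noting on its own: ba.Txn may be shared with other batches or retries, so rather than mutating the shared Transaction, the code copies the struct by value, adjusts the copy, and repoints ba.Txn at it. Isolated as a sketch; the helper name is hypothetical:

// lowerMaxTimestamp applies the copy-on-write step from Stores.Send:
// holders of the old *Transaction never observe the change.
func lowerMaxTimestamp(ba *roachpb.BatchRequest, maxTS roachpb.Timestamp) {
	shallowTxn := *ba.Txn                   // value copy of the shared Transaction
	shallowTxn.MaxTimestamp.Backward(maxTS) // mutate only the copy
	ba.Txn = &shallowTxn                    // swap in the private copy
}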
Example 15: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	sp := tracing.SpanFromContext(ctx)
	var store *Store
	var pErr *roachpb.Error
	// If we aren't given a Replica, we have to bend over backwards a
	// little here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		rs := keys.Range(ba)
		rangeID, repl, pErr = ls.lookupReplica(rs.Key, rs.EndKey)
		if pErr == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if pErr == nil {
		store, pErr = ls.GetStore(ba.Replica.StoreID)
	}
	var br *roachpb.BatchResponse
	if pErr != nil {
		return nil, pErr
	}
	// For calls that read data within a txn, we can avoid uncertainty
	// related retries in certain situations. If the node is in
	// "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=OrigTimestamp for the operation
	// accomplishes that. See roachpb.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		sp.LogEvent("read has no clock uncertainty")
		// Copy-on-write to protect others we might be sharing the Txn with.
		shallowTxn := *ba.Txn
		// We set to OrigTimestamp because that works for both SNAPSHOT and
		// SERIALIZABLE: If we used Timestamp instead, we could run into
		// unnecessary retries at SNAPSHOT. For example, a SNAPSHOT txn at
		// OrigTimestamp = 1000.0, Timestamp = 2000.0, MaxTimestamp = 3000.0
		// will always read at 1000, so a MaxTimestamp of 2000 will still let
		// it restart with uncertainty when it finds a value in (1000, 2000).
		shallowTxn.MaxTimestamp = ba.Txn.OrigTimestamp
		ba.Txn = &shallowTxn
	}
	br, pErr = store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}