This article collects typical usage examples of the Golang BatchRequest class from github.com/cockroachdb/cockroach/proto. If you are wondering what the BatchRequest class does and how to use it, the curated class code examples below may help.
The following shows 15 code examples of the BatchRequest class, sorted by popularity by default.
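Before the examples, here is a minimal, self-contained sketch of the pattern they all share: construct a BatchRequest, add one or more requests to it, and hand it to a sender. The import path and types reflect the old github.com/cockroachdb/cockroach/proto package used by these examples and may not compile against current CockroachDB releases; treat it as an illustration of the pattern rather than a definitive API reference.

package main

import (
    "fmt"

    "github.com/cockroachdb/cockroach/proto"
)

func main() {
    // Build an empty batch and mark it for inconsistent reads, as the
    // rangeLookup examples below do.
    ba := proto.BatchRequest{}
    ba.ReadConsistency = proto.INCONSISTENT

    // Add a single PutRequest, mirroring the testPut example below.
    put := &proto.PutRequest{}
    put.Key = proto.Key("example-key")
    ba.Add(put)

    // In the examples below, the populated batch would now be passed to a
    // Sender such as DistSender, LocalSender, or TxnCoordSender.
    fmt.Printf("batch contains %d request(s)\n", len(ba.Requests))
}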
Example 1: rangeLookup
// rangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) rangeLookup(key proto.Key, options lookupOptions,
    desc *proto.RangeDescriptor) ([]proto.RangeDescriptor, error) {
    ba := proto.BatchRequest{}
    ba.ReadConsistency = proto.INCONSISTENT
    ba.Add(&proto.RangeLookupRequest{
        RequestHeader: proto.RequestHeader{
            Key:             key,
            ReadConsistency: proto.INCONSISTENT,
        },
        MaxRanges:       ds.rangeLookupMaxRanges,
        ConsiderIntents: options.considerIntents,
        Reverse:         options.useReverseScan,
    })
    replicas := newReplicaSlice(ds.gossip, desc)
    // TODO(tschottdorf) consider a Trace here, potentially that of the request
    // that had the cache miss and waits for the result.
    reply, err := ds.sendRPC(nil /* Trace */, desc.RangeID, replicas, rpc.OrderRandom, &ba)
    if err != nil {
        return nil, err
    }
    br := reply.(*proto.BatchResponse)
    if err := br.GoError(); err != nil {
        return nil, err
    }
    return br.Responses[0].GetInner().(*proto.RangeLookupResponse).Ranges, nil
}
Example 2: send
// send runs the specified calls synchronously in a single batch and
// returns any errors.
func (db *DB) send(reqs ...proto.Request) (*proto.BatchResponse, *proto.Error) {
    if len(reqs) == 0 {
        return &proto.BatchResponse{}, nil
    }
    if len(reqs) == 1 {
        // We only send BatchRequest. Everything else needs to go into one.
        if ba, ok := reqs[0].(*proto.BatchRequest); ok {
            if ba.UserPriority == nil && db.userPriority != 0 {
                ba.UserPriority = gogoproto.Int32(db.userPriority)
            }
            resetClientCmdID(ba)
            br, pErr := db.sender.Send(context.TODO(), *ba)
            if pErr != nil {
                if log.V(1) {
                    log.Infof("failed %s: %s", ba.Method(), pErr)
                }
                return nil, pErr
            }
            return br, nil
        }
    }
    ba := proto.BatchRequest{}
    ba.Add(reqs...)
    br, pErr := db.send(&ba)
    if pErr != nil {
        return nil, pErr
    }
    return br, nil
}
Example 3: testPut
func testPut() proto.BatchRequest {
    var ba proto.BatchRequest
    ba.Timestamp = testTS
    put := &proto.PutRequest{}
    put.Key = testKey
    ba.Add(put)
    return ba
}
Example 4: SendBatch
// SendBatch implements batch.Sender.
func (ls *LocalSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
    trace := tracer.FromCtx(ctx)
    var store *storage.Store
    var err error
    // If we aren't given a Replica, we do a little bending over
    // backwards here. This case applies exclusively to unit tests.
    if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
        var repl *proto.Replica
        var rangeID proto.RangeID
        rangeID, repl, err = ls.lookupReplica(ba.Key, ba.EndKey)
        if err == nil {
            ba.RangeID = rangeID
            ba.Replica = *repl
        }
    }
    ctx = log.Add(ctx,
        log.Method, ba.Method(), // TODO(tschottdorf): Method() always `Batch`.
        log.Key, ba.Key,
        log.RangeID, ba.RangeID)
    if err == nil {
        store, err = ls.GetStore(ba.Replica.StoreID)
    }
    var br *proto.BatchResponse
    if err == nil {
        // For calls that read data within a txn, we can avoid uncertainty
        // related retries in certain situations. If the node is in
        // "CertainNodes", we need not worry about uncertain reads any
        // more. Setting MaxTimestamp=Timestamp for the operation
        // accomplishes that. See proto.Transaction.CertainNodes for details.
        if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
            // MaxTimestamp = Timestamp corresponds to no clock uncertainty.
            trace.Event("read has no clock uncertainty")
            ba.Txn.MaxTimestamp = ba.Txn.Timestamp
        }
        {
            var tmpR proto.Response
            // TODO(tschottdorf): &ba -> ba
            tmpR, err = store.ExecuteCmd(ctx, &ba)
            // TODO(tschottdorf): remove this dance once BatchResponse is returned.
            if tmpR != nil {
                br = tmpR.(*proto.BatchResponse)
                if br.Error != nil {
                    panic(proto.ErrorUnexpectedlySet)
                }
            }
        }
    }
    // TODO(tschottdorf): Later error needs to be associated to an index
    // and ideally individual requests don't even have an error in their
    // header. See #1891.
    return br, err
}
Example 5: SendBatch
// SendBatch implements the batch.Sender interface. It subdivides
// the Batch into batches admissible for sending (preventing certain
// illegal mixtures of requests), executes each individual part
// (which may span multiple ranges), and recombines the response.
func (ds *DistSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
    // In the event that timestamp isn't set and read consistency isn't
    // required, set the timestamp using the local clock.
    // TODO(tschottdorf): right place for this?
    if ba.ReadConsistency == proto.INCONSISTENT && ba.Timestamp.Equal(proto.ZeroTimestamp) {
        // Make sure that after the call, args hasn't changed.
        defer func(timestamp proto.Timestamp) {
            ba.Timestamp = timestamp
        }(ba.Timestamp)
        ba.Timestamp = ds.clock.Now()
    }
    // TODO(tschottdorf): provisional instantiation.
    return newChunkingSender(ds.sendChunk).SendBatch(ctx, ba)
}
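The deferred function in SendBatch above uses a common Go idiom: the original timestamp is captured as the deferred function's argument at the moment the defer statement runs, and written back when SendBatch returns, so the caller's BatchRequest is left unchanged. A small standalone illustration of that idiom (the names are invented for the sketch):

package main

import "fmt"

// mutateTemporarily overwrites *v for the duration of the call and
// restores the original value on return, analogous to the timestamp
// handling in DistSender.SendBatch above.
func mutateTemporarily(v *int) {
    defer func(orig int) {
        *v = orig // runs last: put the captured value back
    }(*v) // *v is evaluated here, before the mutation below
    *v = 99
    fmt.Println("inside:", *v) // inside: 99
}

func main() {
    x := 7
    mutateTemporarily(&x)
    fmt.Println("after:", x) // after: 7
}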
Example 6: Send
func (ts *txnSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
    // Send call through wrapped sender.
    ba.Txn = &ts.Proto
    br, pErr := ts.wrapped.Send(ctx, ba)
    if br != nil && br.Error != nil {
        panic(proto.ErrorUnexpectedlySet(ts.wrapped, br))
    }
    // TODO(tschottdorf): see about using only the top-level *proto.Error
    // information for this restart logic (includes adding the Txn).
    err := pErr.GoError()
    // Only successful requests can carry an updated Txn in their response
    // header. Any error (e.g. a restart) can have a Txn attached to it as
    // well; those update our local state in the same way for the next attempt.
    // The exception is if our transaction was aborted and needs to restart
    // from scratch, in which case we do just that.
    if err == nil {
        ts.Proto.Update(br.Txn)
        return br, nil
    } else if abrtErr, ok := err.(*proto.TransactionAbortedError); ok {
        // On Abort, reset the transaction so we start anew on restart.
        ts.Proto = proto.Transaction{
            Name:      ts.Proto.Name,
            Isolation: ts.Proto.Isolation,
            // Acts as a minimum priority on restart.
            Priority: abrtErr.Transaction().GetPriority(),
        }
    } else if txnErr, ok := err.(proto.TransactionRestartError); ok {
        ts.Proto.Update(txnErr.Transaction())
    }
    return nil, pErr
}
Example 7: SendBatch
// SendBatch implements Sender.
// TODO(tschottdorf): We actually don't want to chop EndTransaction off for
// single-range requests (but that happens now since EndTransaction has the
// isAlone flag). Whether it is one or not is unknown right now (you can only
// find out after you've sent to the Range/looked up a descriptor that suggests
// that you're multi-range). In those cases, the wrapped sender should return an
// error so that we split and retry once the chunk which contains
// EndTransaction (i.e. the last one).
func (cs *chunkingSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
    if len(ba.Requests) < 1 {
        panic("empty batch")
    }
    // Deterministically create ClientCmdIDs for all parts of the batch if
    // a CmdID is already set (otherwise, leave them empty).
    var nextID func() proto.ClientCmdID
    empty := proto.ClientCmdID{}
    if empty == ba.CmdID {
        nextID = func() proto.ClientCmdID {
            return empty
        }
    } else {
        rng := rand.New(rand.NewSource(ba.CmdID.Random))
        id := ba.CmdID
        nextID = func() proto.ClientCmdID {
            curID := id             // copy
            id.Random = rng.Int63() // adjust for next call
            return curID
        }
    }
    parts := ba.Split()
    var rplChunks []*proto.BatchResponse
    for _, part := range parts {
        ba.Requests = part
        ba.CmdID = nextID()
        rpl, err := cs.f(ctx, ba)
        if err != nil {
            return nil, err
        }
        // Propagate transaction from last reply to next request. The final
        // update is taken and put into the response's main header.
        ba.Txn.Update(rpl.Header().Txn)
        rplChunks = append(rplChunks, rpl)
    }
    reply := rplChunks[0]
    for _, rpl := range rplChunks[1:] {
        reply.Responses = append(reply.Responses, rpl.Responses...)
    }
    reply.ResponseHeader = rplChunks[len(rplChunks)-1].ResponseHeader
    reply.Txn = ba.Txn
    return reply, nil
}
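The nextID closure above derives a fresh ClientCmdID for every chunk from the batch's original CmdID, seeding math/rand with CmdID.Random so that a retry of the same batch reproduces the same sequence of IDs. A small, dependency-free sketch of that derivation (the seed constant and loop bounds are invented for the illustration):

package main

import (
    "fmt"
    "math/rand"
)

func main() {
    const seed = int64(42) // stands in for ba.CmdID.Random
    // Two "attempts" at sending the same batch: because the generator is
    // re-seeded from the same value, both attempts produce identical IDs.
    for attempt := 1; attempt <= 2; attempt++ {
        rng := rand.New(rand.NewSource(seed))
        ids := []int64{seed}
        for i := 0; i < 3; i++ {
            ids = append(ids, rng.Int63())
        }
        fmt.Printf("attempt %d: %v\n", attempt, ids)
    }
}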
Example 8: rangeLookup
// rangeLookup implements the rangeDescriptorDB interface. It looks up
// the descriptors for the given (meta) key.
func (ls *LocalSender) rangeLookup(key proto.Key, options lookupOptions, _ *proto.RangeDescriptor) ([]proto.RangeDescriptor, error) {
    ba := proto.BatchRequest{}
    ba.ReadConsistency = proto.INCONSISTENT
    ba.Add(&proto.RangeLookupRequest{
        RequestHeader: proto.RequestHeader{
            Key:             key,
            ReadConsistency: proto.INCONSISTENT,
        },
        MaxRanges:       1,
        ConsiderIntents: options.considerIntents,
        Reverse:         options.useReverseScan,
    })
    br, pErr := ls.Send(context.Background(), ba)
    if pErr != nil {
        return nil, pErr.GoError()
    }
    return br.Responses[0].GetInner().(*proto.RangeLookupResponse).Ranges, nil
}
Example 9: TestBatchPrevNext
// TestBatchPrevNext tests batch.{Prev,Next}.
func TestBatchPrevNext(t *testing.T) {
    defer leaktest.AfterTest(t)
    span := func(strs ...string) []keys.Span {
        var r []keys.Span
        for i, str := range strs {
            if i%2 == 0 {
                r = append(r, keys.Span{Start: proto.Key(str)})
            } else {
                r[len(r)-1].End = proto.Key(str)
            }
        }
        return r
    }
    max, min := string(proto.KeyMax), string(proto.KeyMin)
    abc := span("a", "", "b", "", "c", "")
    testCases := []struct {
        spans             []keys.Span
        key, expFW, expBW string
    }{
        {spans: span("a", "c", "b", ""), key: "b", expFW: "b", expBW: "b"},
        {spans: span("a", "c", "b", ""), key: "a", expFW: "a", expBW: "a"},
        {spans: span("a", "c", "d", ""), key: "c", expFW: "d", expBW: "c"},
        {spans: span("a", "c\x00", "d", ""), key: "c", expFW: "c", expBW: "c"},
        {spans: abc, key: "b", expFW: "b", expBW: "b"},
        {spans: abc, key: "b\x00", expFW: "c", expBW: "b\x00"},
        {spans: abc, key: "bb", expFW: "c", expBW: "b"},
        {spans: span(), key: "whatevs", expFW: max, expBW: min},
    }
    for i, test := range testCases {
        var ba proto.BatchRequest
        for _, span := range test.spans {
            args := &proto.ScanRequest{}
            args.Key, args.EndKey = span.Start, span.End
            ba.Add(args)
        }
        if next := next(ba, proto.Key(test.key)); !bytes.Equal(next, proto.Key(test.expFW)) {
            t.Errorf("%d: next: expected %q, got %q", i, test.expFW, next)
        }
        if prev := prev(ba, proto.Key(test.key)); !bytes.Equal(prev, proto.Key(test.expBW)) {
            t.Errorf("%d: prev: expected %q, got %q", i, test.expBW, prev)
        }
    }
}
Example 10: maybeBeginTxn
// maybeBeginTxn begins a new transaction if a txn has been specified
// in the request but has a nil ID. The new transaction is initialized
// using the name and isolation in the otherwise uninitialized txn.
// The Priority, if non-zero, is used as a minimum.
func (tc *TxnCoordSender) maybeBeginTxn(ba *proto.BatchRequest) {
    if ba.Txn == nil {
        return
    }
    if len(ba.Requests) == 0 {
        panic("empty batch with txn")
    }
    if len(ba.Txn.ID) == 0 {
        // TODO(tschottdorf): should really choose the first txn write here.
        firstKey := ba.Requests[0].GetInner().Header().Key
        newTxn := proto.NewTransaction(ba.Txn.Name, keys.KeyAddress(firstKey), ba.GetUserPriority(),
            ba.Txn.Isolation, tc.clock.Now(), tc.clock.MaxOffset().Nanoseconds())
        // Use existing priority as a minimum. This is used on transaction
        // aborts to ratchet priority when creating a successor transaction.
        if newTxn.Priority < ba.Txn.Priority {
            newTxn.Priority = ba.Txn.Priority
        }
        ba.Txn = newTxn
    }
}
Example 11: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
    defer leaktest.AfterTest(t)
    stopper := stop.NewStopper()
    manual := hlc.NewManualClock(0)
    clock := hlc.NewClock(manual.UnixNano)
    clock.SetMaxOffset(20)
    ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
        return ba.CreateReply().(*proto.BatchResponse), nil
    }), clock, false, nil, stopper)
    // Stop the stopper manually, prior to trying the transaction. This has the
    // effect of returning a NodeUnavailableError for any attempts at launching
    // a heartbeat goroutine.
    stopper.Stop()
    var ba proto.BatchRequest
    put := &proto.PutRequest{}
    put.Key = proto.Key("test")
    ba.Add(put)
    ba.Add(&proto.EndTransactionRequest{})
    ba.Txn = &proto.Transaction{Name: "test"}
    _, pErr := ts.Send(context.Background(), ba)
    if pErr != nil {
        t.Fatal(pErr)
    }
}
Example 12: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, error) {
    // Run a one-off transaction with that single command.
    if log.V(1) {
        log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
    }
    tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
    br := &proto.BatchResponse{}
    if err := tmpDB.Txn(func(txn *client.Txn) error {
        txn.SetDebugName("auto-wrap", 0)
        b := &client.Batch{}
        for _, arg := range ba.Requests {
            req := arg.GetValue().(proto.Request)
            call := proto.Call{Args: req, Reply: req.CreateReply()}
            b.InternalAddCall(call)
            br.Add(call.Reply)
        }
        return txn.CommitInBatch(b)
    }); err != nil {
        return nil, err
    }
    return br, nil
}
Example 13: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
    // Run a one-off transaction with that single command.
    if log.V(1) {
        log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
    }
    tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
    var br *proto.BatchResponse
    err := tmpDB.Txn(func(txn *client.Txn) error {
        txn.SetDebugName("auto-wrap", 0)
        b := &client.Batch{}
        for _, arg := range ba.Requests {
            req := arg.GetInner()
            b.InternalAddRequest(req)
        }
        var err error
        br, err = txn.CommitInBatchWithResponse(b)
        return err
    })
    if err != nil {
        return nil, proto.NewError(err)
    }
    br.Txn = nil // hide the evidence
    return br, nil
}
Example 14: heartbeat
func (tc *TxnCoordSender) heartbeat(id string, trace *tracer.Trace, ctx context.Context) bool {
    tc.Lock()
    proceed := true
    txnMeta := tc.txns[id]
    // Before we send a heartbeat, determine whether this transaction
    // should be considered abandoned. If so, exit heartbeat.
    if txnMeta.hasClientAbandonedCoord(tc.clock.PhysicalNow()) {
        // TODO(tschottdorf): should we be more proactive here?
        // The client might be continuing the transaction
        // through another coordinator, but in the most likely
        // case it's just gone and the open transaction record
        // could block concurrent operations.
        if log.V(1) {
            log.Infof("transaction %s abandoned; stopping heartbeat",
                txnMeta.txn)
        }
        proceed = false
    }
    // txnMeta.txn is possibly replaced concurrently,
    // so grab a copy before unlocking.
    txn := txnMeta.txn
    tc.Unlock()
    if !proceed {
        return false
    }
    hb := &proto.HeartbeatTxnRequest{}
    hb.Key = txn.Key
    ba := proto.BatchRequest{}
    ba.Timestamp = tc.clock.Now()
    ba.Key = txn.Key
    ba.Txn = &txn
    ba.Add(hb)
    epochEnds := trace.Epoch("heartbeat")
    _, err := tc.wrapped.Send(ctx, ba)
    epochEnds()
    // If the transaction is not in pending state, then we can stop
    // the heartbeat. It's either aborted or committed, and we resolve
    // write intents accordingly.
    if err != nil {
        log.Warningf("heartbeat to %s failed: %s", txn, err)
    }
    // TODO(bdarnell): once we have gotten a heartbeat response with
    // Status != PENDING, future heartbeats are useless. However, we
    // need to continue the heartbeatLoop until the client either
    // commits or abandons the transaction. We could save a little
    // pointless work by restructuring this loop to stop sending
    // heartbeats between the time that the transaction is aborted and
    // the client finds out. Furthermore, we could use this information
    // to send TransactionAbortedErrors to the client so it can restart
    // immediately instead of running until its EndTransaction.
    return true
}
Example 15: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc).
func (ds *DistSender) sendChunk(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
    // TODO(tschottdorf): prepare for removing Key and EndKey from BatchRequest,
    // making sure that anything that relies on them goes bust.
    ba.Key, ba.EndKey = nil, nil
    isReverse := ba.IsReverse()
    trace := tracer.FromCtx(ctx)
    // The minimal key range encompassing all requests contained within.
    // Local addressing has already been resolved.
    // TODO(tschottdorf): consider rudimentary validation of the batch here
    // (for example, non-range requests with EndKey, or empty key ranges).
    from, to := keys.Range(ba)
    var br *proto.BatchResponse
    // Send the request to one range per iteration.
    for {
        options := lookupOptions{
            useReverseScan: isReverse,
        }
        var curReply *proto.BatchResponse
        var desc *proto.RangeDescriptor
        var needAnother bool
        var err error
        for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
            // Get range descriptor (or, when spanning range, descriptors). Our
            // error handling below may clear them on certain errors, so we
            // refresh (likely from the cache) on every retry.
            descDone := trace.Epoch("meta descriptor lookup")
            var evictDesc func()
            desc, needAnother, evictDesc, err = ds.getDescriptors(from, to, options)
            descDone()
            // getDescriptors may fail retryably if the first range isn't
            // available via Gossip.
            if err != nil {
                if rErr, ok := err.(retry.Retryable); ok && rErr.CanRetry() {
                    if log.V(1) {
                        log.Warning(err)
                    }
                    continue
                }
                break
            }
            // If there's no transaction and op spans ranges, possibly
            // re-run as part of a transaction for consistency. The
            // case where we don't need to re-run is if the read
            // consistency is not required.
            if needAnother && ba.Txn == nil && ba.IsRange() &&
                ba.ReadConsistency != proto.INCONSISTENT {
                return nil, &proto.OpRequiresTxnError{}
            }
            // It's possible that the returned descriptor misses parts of the
            // keys it's supposed to scan after it's truncated to match the
            // descriptor. Example revscan [a,g), first desc lookup for "g"
            // returns descriptor [c,d) -> [d,g) is never scanned.
            // We evict and retry in such a case.
            if (isReverse && !desc.ContainsKeyRange(desc.StartKey, to)) || (!isReverse && !desc.ContainsKeyRange(from, desc.EndKey)) {
                evictDesc()
                continue
            }
            curReply, err = func() (*proto.BatchResponse, error) {
                // Truncate the request to our current key range.
                untruncate, numActive, trErr := truncate(&ba, desc, from, to)
                if numActive == 0 {
                    untruncate()
                    // This shouldn't happen in the wild, but some tests
                    // exercise it.
                    return nil, util.Errorf("truncation resulted in empty batch on [%s,%s): %s",
                        from, to, ba)
                }
                defer untruncate()
                if trErr != nil {
                    return nil, trErr
                }
                // TODO(tschottdorf): make key range on batch redundant. The
                // requests within dictate it anyways.
                ba.Key, ba.EndKey = keys.Range(ba)
                reply, err := ds.sendAttempt(trace, ba, desc)
                ba.Key, ba.EndKey = nil, nil
                if err != nil {
                    if log.V(0 /* TODO(tschottdorf): 1 */) {
                        log.Warningf("failed to invoke %s: %s", ba, err)
                    }
                }
                return reply, err
            }()
            // If sending succeeded, break this loop.
            if err == nil {
                break
            }
//......... the remainder of this example is omitted here .........