This page collects typical usage examples of the Go type github.com/cockroachdb/cockroach/proto.BatchResponse. If you are wondering what BatchResponse is, how it is used, or what real code that uses it looks like, the selected examples below may help.
A total of 7 code examples of the BatchResponse type are shown below, sorted by popularity by default.
Example 1: Send
// Send sends a request to Cockroach via RPC. Errors which are retryable are
// retried with backoff in a loop using the default retry options. Other errors
// sending the request are retried indefinitely using the same client command
// ID to avoid reporting failure when in fact the command may have gone through
// and been executed successfully. We retry here to eventually get through with
// the same client command ID and be given the cached response.
func (s *rpcSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	var err error
	var br proto.BatchResponse
	for r := retry.Start(s.retryOpts); r.Next(); {
		select {
		case <-s.client.Healthy():
		default:
			err = fmt.Errorf("failed to send RPC request %s: client is unhealthy", method)
			log.Warning(err)
			continue
		}
		if err = s.client.Call(method, &ba, &br); err != nil {
			br.Reset() // don't trust anyone.
			// Assume all errors sending request are retryable. The actual
			// number of things that could go wrong is vast, but we don't
			// want to miss any which should in theory be retried with the
			// same client command ID. We log the error here as a warning so
			// there's visibility that this is happening. Some of the errors
			// we'll sweep up in this net shouldn't be retried, but we can't
			// really know for sure which.
			log.Warningf("failed to send RPC request %s: %s", method, err)
			continue
		}
		// On successful post, we're done with retry loop.
		break
	}
	if err != nil {
		return nil, proto.NewError(err)
	}
	pErr := br.Error
	br.Error = nil
	return &br, pErr
}
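A minimal sketch of a call site for this sender (not part of the original source; it assumes the same imports as the example and an already initialized *rpcSender, and `method` above is a package-level constant defined outside the excerpt):

// Hypothetical wrapper, for illustration only: Send detaches the server-side
// error from the response, so callers check the returned *proto.Error rather
// than br.Error.
func sendOnce(ctx context.Context, sender *rpcSender, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	br, pErr := sender.Send(ctx, ba)
	if pErr != nil {
		return nil, pErr.GoError()
	}
	return br, nil
}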
Example 2: newTestSender
func newTestSender(pre, post func(proto.BatchRequest) (*proto.BatchResponse, *proto.Error)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())
	return func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		ba.UserPriority = gogoproto.Int32(-1)
		if ba.Txn != nil && len(ba.Txn.ID) == 0 {
			ba.Txn.Key = txnKey
			ba.Txn.ID = txnID
		}
		var br *proto.BatchResponse
		var pErr *proto.Error
		if pre != nil {
			br, pErr = pre(ba)
		} else {
			br = &proto.BatchResponse{}
		}
		if pErr != nil {
			return nil, pErr
		}
		var writing bool
		status := proto.PENDING
		if _, ok := ba.GetArg(proto.Put); ok {
			br.Add(gogoproto.Clone(testPutResp).(proto.Response))
			writing = true
		}
		if args, ok := ba.GetArg(proto.EndTransaction); ok {
			et := args.(*proto.EndTransactionRequest)
			writing = true
			if et.Commit {
				status = proto.COMMITTED
			} else {
				status = proto.ABORTED
			}
		}
		br.Txn = gogoproto.Clone(ba.Txn).(*proto.Transaction)
		if br.Txn != nil && pErr == nil {
			br.Txn.Writing = writing
			br.Txn.Status = status
		}
		if post != nil {
			br, pErr = post(ba)
		}
		return br, pErr
	}
}
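A hedged usage sketch (assumed, not part of the original test file): the pre hook can short-circuit selected requests with an injected error, while everything else falls through to the canned responses built above. It assumes the same imports as the example plus the standard "errors" package.

// Hypothetical test sender that fails every commit attempt.
func newFailingCommitSender() SenderFunc {
	return newTestSender(func(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		if _, ok := ba.GetArg(proto.EndTransaction); ok {
			return nil, proto.NewError(errors.New("injected commit failure"))
		}
		return &proto.BatchResponse{}, nil
	}, nil /* no post hook */)
}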
Example 3: sendBatch
// sendBatch unrolls a batched command and sends each constituent
// command in parallel.
// TODO(tschottdorf): modify sendBatch so that it sends truly parallel requests
// when outside of a Transaction. This can then be used to address the TODO in
// (*TxnCoordSender).resolve().
func (tc *TxnCoordSender) sendBatch(ctx context.Context, batchArgs *proto.BatchRequest, batchReply *proto.BatchResponse) {
	// Prepare the calls by unrolling the batch. If the batchReply is
	// pre-initialized with replies, use those; otherwise create replies
	// as needed.
	// TODO(spencer): send calls in parallel.
	batchReply.Txn = batchArgs.Txn
	for i := range batchArgs.Requests {
		args := batchArgs.Requests[i].GetValue().(proto.Request)
		if err := updateForBatch(args, batchArgs.RequestHeader); err != nil {
			batchReply.Header().SetGoError(err)
			return
		}
		call := proto.Call{Args: args}
		// Create a reply from the method type and add to batch response.
		if i >= len(batchReply.Responses) {
			call.Reply = args.CreateReply()
			batchReply.Add(call.Reply)
		} else {
			call.Reply = batchReply.Responses[i].GetValue().(proto.Response)
		}
		tc.sendOne(ctx, call)
		// Amalgamate transaction updates and propagate first error, if applicable.
		if batchReply.Txn != nil {
			batchReply.Txn.Update(call.Reply.Header().Txn)
		}
		if call.Reply.Header().Error != nil {
			batchReply.Error = call.Reply.Header().Error
			return
		}
	}
}
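Note that the loop keeps batchReply.Responses index-aligned with batchArgs.Requests. A short, assumed sketch of reading the replies back out after sendBatch returns, using the same accessors as the example (illustrative only):

// Hypothetical post-processing of an unrolled batch reply. If any call
// failed, sendBatch has already copied its error into batchReply.Error.
if batchReply.Error == nil {
	for i := range batchReply.Responses {
		reply := batchReply.Responses[i].GetValue().(proto.Response)
		_ = reply // the i-th reply corresponds to batchArgs.Requests[i]
	}
}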
Example 4: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	// Run a one-off transaction with that single command.
	if log.V(1) {
		log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
	}
	tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
	var br *proto.BatchResponse
	err := tmpDB.Txn(func(txn *client.Txn) error {
		txn.SetDebugName("auto-wrap", 0)
		b := &client.Batch{}
		for _, arg := range ba.Requests {
			req := arg.GetInner()
			b.InternalAddRequest(req)
		}
		var err error
		br, err = txn.CommitInBatchWithResponse(b)
		return err
	})
	if err != nil {
		return nil, proto.NewError(err)
	}
	br.Txn = nil // hide the evidence
	return br, nil
}
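This method is the fallback for a batch that turns out to need a transaction; a hedged sketch of a call site (assumed here, but compare the OpRequiresTxnError cases in examples 6 and 7):

// Hypothetical: when a non-transactional send reports that the operation
// requires a transaction, re-run the batch auto-wrapped in one.
if _, ok := pErr.GoError().(*proto.OpRequiresTxnError); ok {
	br, pErr = tc.resendWithTxn(ba)
}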
Example 5: sendBatch
// sendBatch unrolls a batched command and sends each constituent
// command in parallel.
func (tc *TxnCoordSender) sendBatch(batchArgs *proto.BatchRequest, batchReply *proto.BatchResponse) {
	// Prepare the calls by unrolling the batch. If the batchReply is
	// pre-initialized with replies, use those; otherwise create replies
	// as needed.
	// TODO(spencer): send calls in parallel.
	batchReply.Txn = batchArgs.Txn
	for i := range batchArgs.Requests {
		// Initialize args header values where appropriate.
		args := batchArgs.Requests[i].GetValue().(proto.Request)
		method, err := proto.MethodForRequest(args)
		call := &client.Call{Method: method, Args: args}
		if err != nil {
			batchReply.SetGoError(err)
			return
		}
		if args.Header().User == "" {
			args.Header().User = batchArgs.User
		}
		if args.Header().UserPriority == nil {
			args.Header().UserPriority = batchArgs.UserPriority
		}
		args.Header().Txn = batchArgs.Txn
		// Create a reply from the method type and add to batch response.
		if i >= len(batchReply.Responses) {
			if call.Reply, err = proto.CreateReply(method); err != nil {
				batchReply.SetGoError(util.Errorf("unsupported method in batch: %s", method))
				return
			}
			batchReply.Add(call.Reply)
		} else {
			call.Reply = batchReply.Responses[i].GetValue().(proto.Response)
		}
		tc.sendOne(call)
		// Amalgamate transaction updates and propagate first error, if applicable.
		if batchReply.Txn != nil {
			batchReply.Txn.Update(call.Reply.Header().Txn)
		}
		if call.Reply.Header().Error != nil {
			batchReply.Error = call.Reply.Header().Error
			return
		}
	}
}
Example 6: updateState
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(ctx context.Context, ba proto.BatchRequest, br *proto.BatchResponse, pErr *proto.Error) *proto.Error {
	trace := tracer.FromCtx(ctx)
	newTxn := &proto.Transaction{}
	newTxn.Update(ba.GetTxn())
	err := pErr.GoError()
	switch t := err.(type) {
	case nil:
		newTxn.Update(br.GetTxn())
		// Move txn timestamp forward to response timestamp if applicable.
		// TODO(tschottdorf): see (*Replica).executeBatch and comments within.
		// Looks like this isn't necessary any more, nor did it prevent a bug
		// referenced in a TODO there.
		newTxn.Timestamp.Forward(br.Timestamp)
	case *proto.TransactionStatusError:
		// Likely already committed or more obscure errors such as epoch or
		// timestamp regressions; consider txn dead.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.OpRequiresTxnError:
		// TODO(tschottdorf): range-spanning autowrap currently broken.
		panic("TODO(tschottdorf): disabled")
	case *proto.ReadWithinUncertaintyIntervalError:
		// Mark the host as certain. See the protobuf comment for
		// Transaction.CertainNodes for details.
		if t.NodeID == 0 {
			panic("no replica set in header on uncertainty restart")
		}
		newTxn.CertainNodes.Add(t.NodeID)
		// If the reader encountered a newer write within the uncertainty
		// interval, move the timestamp forward, just past that write or
		// up to MaxTimestamp, whichever comes first.
		candidateTS := newTxn.MaxTimestamp
		candidateTS.Backward(t.ExistingTimestamp.Add(0, 1))
		newTxn.Timestamp.Forward(candidateTS)
		newTxn.Restart(ba.GetUserPriority(), newTxn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case *proto.TransactionAbortedError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Priority = t.Txn.Priority
		t.Txn = *newTxn
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.TransactionPushError:
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
		newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
		t.Txn = newTxn
	case *proto.TransactionRetryError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case proto.TransactionRestartError:
		// Assertion: The above cases should exhaust all ErrorDetails which
		// carry a Transaction.
		if pErr.Detail != nil {
			panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
		}
	}
	return func() *proto.Error {
		if len(newTxn.ID) <= 0 {
			return pErr
		}
		id := string(newTxn.ID)
		tc.Lock()
		defer tc.Unlock()
		txnMeta := tc.txns[id]
		// For successful transactional requests, keep the written intents and
		// the updated transaction record to be sent along with the reply.
		// The transaction metadata is created with the first writing operation
		// TODO(tschottdorf): already computed the intents prior to sending,
		// consider re-using those.
		if intents := ba.GetIntents(); len(intents) > 0 && err == nil {
			if txnMeta == nil {
				newTxn.Writing = true
				txnMeta = &txnMetadata{
					txn:              *newTxn,
					keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
					firstUpdateNanos: tc.clock.PhysicalNow(),
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[id] = txnMeta
				// If the transaction is already over, there's no point in
				// launching a one-off coordinator which will shut down right
				// away.
				if _, isEnding := ba.GetArg(proto.EndTransaction); !isEnding {
					trace.Event("coordinator spawns")
					if !tc.stopper.RunAsyncTask(func() {
						tc.heartbeatLoop(id)
					}) {
						// The system is already draining and we can't start the
// ......... remainder of this example omitted .........
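The ReadWithinUncertaintyIntervalError case above advances the transaction timestamp to max(current, min(MaxTimestamp, existing write + one logical tick)). A standalone restatement of that rule as a hypothetical helper (not part of the original), using only the Timestamp methods visible in the example:

// uncertaintyRestartTimestamp mirrors the candidateTS computation above:
// Forward acts as a max, Backward as a min, and Add(0, 1) bumps the logical clock.
func uncertaintyRestartTimestamp(cur, maxTS, existingTS proto.Timestamp) proto.Timestamp {
	candidate := maxTS                       // start from the txn's MaxTimestamp
	candidate.Backward(existingTS.Add(0, 1)) // min(MaxTimestamp, existing + 1)
	cur.Forward(candidate)                   // max(current timestamp, candidate)
	return cur
}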
Example 7: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc).
func (ds *DistSender) sendChunk(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	// TODO(tschottdorf): prepare for removing Key and EndKey from BatchRequest,
	// making sure that anything that relies on them goes bust.
	ba.Key, ba.EndKey = nil, nil
	isReverse := ba.IsReverse()
	trace := tracer.FromCtx(ctx)
	// The minimal key range encompassing all requests contained within.
	// Local addressing has already been resolved.
	// TODO(tschottdorf): consider rudimentary validation of the batch here
	// (for example, non-range requests with EndKey, or empty key ranges).
	from, to := keys.Range(ba)
	var br *proto.BatchResponse
	// Send the request to one range per iteration.
	for {
		options := lookupOptions{
			useReverseScan: isReverse,
		}
		var curReply *proto.BatchResponse
		var desc *proto.RangeDescriptor
		var needAnother bool
		var err error
		for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
			// Get range descriptor (or, when spanning range, descriptors). Our
			// error handling below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			descDone := trace.Epoch("meta descriptor lookup")
			var evictDesc func()
			desc, needAnother, evictDesc, err = ds.getDescriptors(from, to, options)
			descDone()
			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if err != nil {
				if rErr, ok := err.(retry.Retryable); ok && rErr.CanRetry() {
					if log.V(1) {
						log.Warning(err)
					}
					continue
				}
				break
			}
			// If there's no transaction and op spans ranges, possibly
			// re-run as part of a transaction for consistency. The
			// case where we don't need to re-run is if the read
			// consistency is not required.
			if needAnother && ba.Txn == nil && ba.IsRange() &&
				ba.ReadConsistency != proto.INCONSISTENT {
				return nil, &proto.OpRequiresTxnError{}
			}
			// It's possible that the returned descriptor misses parts of the
			// keys it's supposed to scan after it's truncated to match the
			// descriptor. Example revscan [a,g), first desc lookup for "g"
			// returns descriptor [c,d) -> [d,g) is never scanned.
			// We evict and retry in such a case.
			if (isReverse && !desc.ContainsKeyRange(desc.StartKey, to)) || (!isReverse && !desc.ContainsKeyRange(from, desc.EndKey)) {
				evictDesc()
				continue
			}
			curReply, err = func() (*proto.BatchResponse, error) {
				// Truncate the request to our current key range.
				untruncate, numActive, trErr := truncate(&ba, desc, from, to)
				if numActive == 0 {
					untruncate()
					// This shouldn't happen in the wild, but some tests
					// exercise it.
					return nil, util.Errorf("truncation resulted in empty batch on [%s,%s): %s",
						from, to, ba)
				}
				defer untruncate()
				if trErr != nil {
					return nil, trErr
				}
				// TODO(tschottdorf): make key range on batch redundant. The
				// requests within dictate it anyways.
				ba.Key, ba.EndKey = keys.Range(ba)
				reply, err := ds.sendAttempt(trace, ba, desc)
				ba.Key, ba.EndKey = nil, nil
				if err != nil {
					if log.V(0 /* TODO(tschottdorf): 1 */) {
						log.Warningf("failed to invoke %s: %s", ba, err)
					}
				}
				return reply, err
			}()
			// If sending succeeded, break this loop.
			if err == nil {
				break
			}
// ......... remainder of this example omitted .........