This page collects typical usage examples of the Golang Error.GetDetail method from github.com/cockroachdb/cockroach/roachpb. If you are wondering what Error.GetDetail does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage of the containing type, github.com/cockroachdb/cockroach/roachpb.Error.
The following presents 15 code examples of the Error.GetDetail method, sorted by popularity by default.
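Before diving into the examples, a brief orientation: GetDetail returns the concrete error detail wrapped by a *roachpb.Error, and callers typically type-assert or type-switch on that value to decide how to react. The sketch below illustrates that pattern; the handleError helper and its messages are hypothetical (not part of the CockroachDB codebase), it assumes the same fmt and roachpb imports as the surrounding examples, and it only switches over detail types that actually appear in the examples on this page.

// handleError is a hypothetical helper demonstrating the common pattern of
// inspecting the concrete detail carried by a *roachpb.Error.
func handleError(pErr *roachpb.Error) string {
	switch t := pErr.GetDetail().(type) {
	case *roachpb.WriteIntentError:
		// Conflicting write intents; callers usually back off and retry.
		return fmt.Sprintf("write intent conflict on %d intents", len(t.Intents))
	case *roachpb.TransactionAbortedError:
		// The transaction was aborted and must be restarted from scratch.
		return "transaction aborted"
	case *roachpb.TransactionPushError:
		// Pushing a conflicting transaction failed; a restart with backoff follows.
		return fmt.Sprintf("push failed on pushee %s", t.PusheeTxn.ID)
	case nil:
		// No detail attached (several examples below call GetDetail on errors
		// that may be nil and rely on this branch).
		return "no detail"
	default:
		return fmt.Sprintf("other error detail: %T", t)
	}
}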
Example 1: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI proto.Message) (proto.Message, error) {
	ba := argsI.(*roachpb.BatchRequest)
	var br *roachpb.BatchResponse
	f := func() {
		// TODO(tschottdorf) get a hold of the client's ID, add it to the
		// context before dispatching, and create an ID for tracing the request.
		sp := n.ctx.Tracer.StartSpan("node")
		defer sp.Finish()
		ctx, _ := opentracing.ContextWithSpan((*Node)(n).context(), sp)
		tStart := time.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(ctx, *ba)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			sp.LogEvent(fmt.Sprintf("error: %T", pErr.GetDetail()))
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.feed.CallComplete(*ba, time.Now().Sub(tStart), pErr)
		br.Error = pErr
	}
	if !n.stopper.RunTask(f) {
		return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
	}
	return br, nil
}
Example 2: makeResultFromError
// If we hit an error and there is a pending transaction, rollback
// the transaction before returning. The client does not have to
// deal with cleaning up transaction state.
func makeResultFromError(planMaker *planner, pErr *roachpb.Error) Result {
	if planMaker.txn != nil {
		if _, ok := pErr.GetDetail().(*roachpb.SqlTransactionAbortedError); !ok {
			planMaker.txn.Cleanup(pErr)
		}
	}
	return Result{PErr: pErr}
}
Example 3: processWriteIntentError
// processWriteIntentError tries to push the conflicting
// transaction(s) responsible for the given WriteIntentError, and to
// resolve those intents if possible. Returns a new error to be used
// in place of the original.
//
// The returned error may be a copy of the original WriteIntentError,
// with or without the Resolved flag set, which governs the client's
// retry behavior (if the transaction is pushed, the Resolved flag is
// set to tell the client to retry immediately; otherwise it is false
// to cause the client to back off).
func (ir *intentResolver) processWriteIntentError(ctx context.Context,
	wiPErr *roachpb.Error, args roachpb.Request, h roachpb.Header,
	pushType roachpb.PushTxnType) *roachpb.Error {
	wiErr, ok := wiPErr.GetDetail().(*roachpb.WriteIntentError)
	if !ok {
		return roachpb.NewErrorf("not a WriteIntentError: %v", wiPErr)
	}
	if log.V(6) {
		log.Infof(ctx, "resolving write intent %s", wiErr)
	}
	method := args.Method()
	readOnly := roachpb.IsReadOnly(args) // TODO(tschottdorf): pass as param
	resolveIntents, pushErr := ir.maybePushTransactions(ctx, wiErr.Intents, h, pushType, false)
	if resErr := ir.resolveIntents(ctx, resolveIntents,
		false /* !wait */, pushType == roachpb.PUSH_ABORT /* poison */); resErr != nil {
		// When resolving without waiting, errors should not
		// usually be returned here, although there are some cases
		// when they may be (especially when a test cluster is in
		// the process of shutting down).
		log.Warningf(ctx, "asynchronous resolveIntents failed: %s", resErr)
	}
	if pushErr != nil {
		if log.V(1) {
			log.Infof(ctx, "on %s: %s", method, pushErr)
		}
		if _, isExpected := pushErr.GetDetail().(*roachpb.TransactionPushError); !isExpected {
			// If an unexpected error occurred, make sure it bubbles up to the
			// client. Examples are timeouts and logic errors.
			return pushErr
		}
		// For write/write conflicts within a transaction, propagate the
		// push failure, not the original write intent error. The push
		// failure will instruct the client to restart the transaction
		// with a backoff.
		if h.Txn != nil && h.Txn.ID != nil && !readOnly {
			return pushErr
		}
		// For read/write conflicts, and non-transactional write/write
		// conflicts, return the write intent error which engages
		// backoff/retry (with !Resolved). We don't need to restart the
		// txn, only resend the read with a backoff.
		return wiPErr
	}
	// We pushed all transactions, so tell the client everything's
	// resolved and it can retry immediately.
	wiErr.Resolved = true
	return wiPErr // references wiErr
}
Example 4: sendPError
func (c *v3Conn) sendPError(pErr *roachpb.Error) error {
	var errCode string
	if sqlErr, ok := pErr.GetDetail().(*roachpb.ErrorWithPGCode); ok {
		errCode = sqlErr.ErrorCode
	} else {
		errCode = sql.CodeInternalError
	}
	return c.sendError(errCode, pErr.String())
}
Example 5: TestTxnCoordSenderEndTxn
// TestTxnCoordSenderEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestTxnCoordSenderEndTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	// 4 cases: no deadline, past deadline, equal deadline, future deadline.
	for i := 0; i < 4; i++ {
		key := roachpb.Key("key: " + strconv.Itoa(i))
		txn := client.NewTxn(*s.DB)
		// Initialize the transaction
		if pErr := txn.Put(key, []byte("value")); pErr != nil {
			t.Fatal(pErr)
		}
		{
			var pErr *roachpb.Error
			switch i {
			case 0:
				// No deadline.
				pErr = txn.Commit()
			case 1:
				// Past deadline.
				pErr = txn.CommitBy(txn.Proto.Timestamp.Prev())
			case 2:
				// Equal deadline.
				pErr = txn.CommitBy(txn.Proto.Timestamp)
			case 3:
				// Future deadline.
				pErr = txn.CommitBy(txn.Proto.Timestamp.Next())
			}
			switch i {
			case 0:
				// No deadline.
				if pErr != nil {
					t.Error(pErr)
				}
			case 1:
				// Past deadline.
				if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
					t.Errorf("expected TransactionAbortedError but got %T: %s", pErr, pErr)
				}
			case 2:
				// Equal deadline.
				if pErr != nil {
					t.Error(pErr)
				}
			case 3:
				// Future deadline.
				if pErr != nil {
					t.Error(pErr)
				}
			}
		}
		verifyCleanup(key, s.Sender, s.Eng, t)
	}
}
Example 6: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI proto.Message) (proto.Message, error) {
	ba := argsI.(*roachpb.BatchRequest)
	var br *roachpb.BatchResponse
	opName := "node " + strconv.Itoa(int(n.Descriptor.NodeID)) // could save allocs here
	fail := func(err error) {
		br = &roachpb.BatchResponse{}
		br.Error = roachpb.NewError(err)
	}
	f := func() {
		sp, err := tracing.JoinOrNew(n.ctx.Tracer, ba.Trace, opName)
		if err != nil {
			fail(err)
			return
		}
		// If this is a snowball span, it gets special treatment: It skips the
		// regular tracing machinery, and we instead send the collected spans
		// back with the response. This is more expensive, but then again,
		// those are individual requests traced by users, so they can be.
		if sp.BaggageItem(tracing.Snowball) != "" {
			if sp, err = tracing.JoinOrNewSnowball(opName, ba.Trace, func(rawSpan basictracer.RawSpan) {
				encSp, err := tracing.EncodeRawSpan(&rawSpan, nil)
				if err != nil {
					log.Warning(err)
				}
				br.CollectedSpans = append(br.CollectedSpans, encSp)
			}); err != nil {
				fail(err)
				return
			}
		}
		defer sp.Finish()
		ctx := opentracing.ContextWithSpan((*Node)(n).context(), sp)
		tStart := time.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(ctx, *ba)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			sp.LogEvent(fmt.Sprintf("error: %T", pErr.GetDetail()))
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.metrics.callComplete(time.Now().Sub(tStart), pErr)
		br.Error = pErr
	}
	if !n.stopper.RunTask(f) {
		return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
	}
	return br, nil
}
Example 7: shouldCacheError
// Responses with write-intent and not leader errors are retried on
// the server, and so are not recorded in the sequence cache in the
// hopes of retrying to a successful outcome.
func (sc *SequenceCache) shouldCacheError(pErr *roachpb.Error) bool {
	switch pErr.GetDetail().(type) {
	case *roachpb.WriteIntentError, *roachpb.NotLeaderError, *roachpb.RangeKeyMismatchError:
		return false
	}
	return true
}
Example 8: convertBatchError
func convertBatchError(tableDesc *sqlbase.TableDescriptor, b client.Batch, origPErr *roachpb.Error) error {
	if origPErr.Index == nil {
		return origPErr.GoError()
	}
	index := origPErr.Index.Index
	if index >= int32(len(b.Results)) {
		panic(fmt.Sprintf("index %d outside of results: %+v", index, b.Results))
	}
	result := b.Results[index]
	var alloc sqlbase.DatumAlloc
	if _, ok := origPErr.GetDetail().(*roachpb.ConditionFailedError); ok {
		for _, row := range result.Rows {
			indexID, key, err := sqlbase.DecodeIndexKeyPrefix(tableDesc, row.Key)
			if err != nil {
				return err
			}
			index, err := tableDesc.FindIndexByID(indexID)
			if err != nil {
				return err
			}
			valTypes, err := sqlbase.MakeKeyVals(tableDesc, index.ColumnIDs)
			if err != nil {
				return err
			}
			dirs := make([]encoding.Direction, 0, len(index.ColumnIDs))
			for _, dir := range index.ColumnDirections {
				convertedDir, err := dir.ToEncodingDirection()
				if err != nil {
					return err
				}
				dirs = append(dirs, convertedDir)
			}
			vals := make([]parser.Datum, len(valTypes))
			if _, err := sqlbase.DecodeKeyVals(&alloc, valTypes, vals, dirs, key); err != nil {
				return err
			}
			return &errUniquenessConstraintViolation{index: index, vals: vals}
		}
	}
	return origPErr.GoError()
}
Example 9: handlePerReplicaError
// handlePerReplicaError returns true if the given error is likely to
// be unique to the replica that reported it, and retrying on other
// replicas is likely to produce different results. This method should
// be called only once for each error as it may have side effects such
// as updating caches.
func (ds *DistSender) handlePerReplicaError(rangeID roachpb.RangeID, pErr *roachpb.Error) bool {
	switch tErr := pErr.GetDetail().(type) {
	case *roachpb.RangeNotFoundError:
		return true
	case *roachpb.NodeUnavailableError:
		return true
	case *roachpb.NotLeaseHolderError:
		if tErr.LeaseHolder != nil {
			// If the replica we contacted knows the new lease holder, update the cache.
			ds.updateLeaseHolderCache(rangeID, *tErr.LeaseHolder)
			// TODO(bdarnell): Move the new lease holder to the head of the queue
			// for the next retry.
		}
		return true
	}
	return false
}
Example 10: Send
//......... part of the code omitted here .........
			// Populate et.IntentSpans, taking into account both any existing
			// and new writes, and taking care to perform proper deduplication.
			txnMeta := tc.txns[*ba.Txn.ID]
			distinctSpans := true
			if txnMeta != nil {
				et.IntentSpans = txnMeta.keys
				// Defensively set distinctSpans to false if we had any previous
				// requests in this transaction. This effectively limits the distinct
				// spans optimization to 1pc transactions.
				distinctSpans = len(txnMeta.keys) == 0
			}
			ba.IntentSpanIterate(func(key, endKey roachpb.Key) {
				et.IntentSpans = append(et.IntentSpans, roachpb.Span{
					Key: key,
					EndKey: endKey,
				})
			})
			// TODO(peter): Populate DistinctSpans on all batches, not just batches
			// which contain an EndTransactionRequest.
			var distinct bool
			// The request might already be used by an outgoing goroutine, so
			// we can't safely mutate anything in-place (as MergeSpans does).
			et.IntentSpans = append([]roachpb.Span(nil), et.IntentSpans...)
			et.IntentSpans, distinct = roachpb.MergeSpans(et.IntentSpans)
			ba.Header.DistinctSpans = distinct && distinctSpans
			if len(et.IntentSpans) == 0 {
				// If there aren't any intents, then there's factually no
				// transaction to end. Read-only txns have all of their state
				// in the client.
				return roachpb.NewErrorf("cannot commit a read-only transaction")
			}
			if txnMeta != nil {
				txnMeta.keys = et.IntentSpans
			}
			return nil
		}(); pErr != nil {
			return nil, pErr
		}
		if hasET && log.V(1) {
			for _, intent := range et.IntentSpans {
				log.Tracef(ctx, "intent: [%s,%s)", intent.Key, intent.EndKey)
			}
		}
	}
	// Send the command through wrapped sender, taking appropriate measures
	// on error.
	var br *roachpb.BatchResponse
	{
		var pErr *roachpb.Error
		br, pErr = tc.wrapped.Send(ctx, ba)
		if _, ok := pErr.GetDetail().(*roachpb.OpRequiresTxnError); ok {
			// TODO(tschottdorf): needs to keep the trace.
			br, pErr = tc.resendWithTxn(ba)
		}
		if pErr = tc.updateState(startNS, ctx, ba, br, pErr); pErr != nil {
			log.Tracef(ctx, "error: %s", pErr)
			return nil, pErr
		}
	}
	if br.Txn == nil {
		return br, nil
	}
	if _, ok := ba.GetArg(roachpb.EndTransaction); !ok {
		return br, nil
	}
	// If the --linearizable flag is set, we want to make sure that
	// all the clocks in the system are past the commit timestamp
	// of the transaction. This is guaranteed if either
	// - the commit timestamp is MaxOffset behind startNS
	// - MaxOffset ns were spent in this function
	// when returning to the client. Below we choose the option
	// that involves less waiting, which is likely the first one
	// unless a transaction commits with an odd timestamp.
	if tsNS := br.Txn.Timestamp.WallTime; startNS > tsNS {
		startNS = tsNS
	}
	sleepNS := tc.clock.MaxOffset() -
		time.Duration(tc.clock.PhysicalNow()-startNS)
	if tc.linearizable && sleepNS > 0 {
		defer func() {
			if log.V(1) {
				log.Infof(ctx, "%v: waiting %s on EndTransaction for linearizability", br.Txn.ID.Short(), util.TruncateDuration(sleepNS, time.Millisecond))
			}
			time.Sleep(sleepNS)
		}()
	}
	if br.Txn.Status != roachpb.PENDING {
		tc.Lock()
		tc.cleanupTxnLocked(ctx, *br.Txn)
		tc.Unlock()
	}
	return br, nil
}
Example 11: TestPriorityRatchetOnAbortOrPush
// TestPriorityRatchetOnAbortOrPush verifies that the priority of
// a transaction is ratcheted by successive aborts or pushes. In
// particular, we want to ensure ratcheted priorities when the txn
// discovers it's been aborted or pushed through a poisoned sequence
// cache. This happens when a concurrent writer aborts an intent or a
// concurrent reader pushes an intent.
func TestPriorityRatchetOnAbortOrPush(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := createTestDB(t)
	defer s.Stop()
	const pusheePri = 1
	const pusherPri = 10 // pusher will win
	pushByReading := func(key roachpb.Key) {
		if pErr := s.DB.Txn(func(txn *client.Txn) *roachpb.Error {
			txn.InternalSetPriority(pusherPri)
			_, pErr := txn.Get(key)
			return pErr
		}); pErr != nil {
			t.Fatal(pErr)
		}
	}
	abortByWriting := func(key roachpb.Key) {
		if pErr := s.DB.Txn(func(txn *client.Txn) *roachpb.Error {
			txn.InternalSetPriority(pusherPri)
			return txn.Put(key, "foo")
		}); pErr != nil {
			t.Fatal(pErr)
		}
	}
	// Try all combinations of read/write and snapshot/serializable isolation.
	for _, read := range []bool{true, false} {
		for _, iso := range []roachpb.IsolationType{roachpb.SNAPSHOT, roachpb.SERIALIZABLE} {
			var iteration int
			if pErr := s.DB.Txn(func(txn *client.Txn) *roachpb.Error {
				defer func() { iteration++ }()
				key := roachpb.Key(fmt.Sprintf("read=%t, iso=%s", read, iso))
				// Only set our priority on first try.
				if iteration == 0 {
					txn.InternalSetPriority(pusheePri)
				}
				if err := txn.SetIsolation(iso); err != nil {
					t.Fatal(err)
				}
				// Write to lay down an intent (this will send the begin
				// transaction which gets the updated priority).
				if pErr := txn.Put(key, "bar"); pErr != nil {
					return pErr
				}
				if iteration == 1 {
					// Verify our priority has ratcheted to one less than the pusher's priority
					if pri := txn.Proto.Priority; pri != pusherPri-1 {
						t.Fatalf("%s: expected priority on retry to ratchet to %d; got %d", key, pusherPri-1, pri)
					}
					return nil
				}
				// Now simulate a concurrent reader or writer. Our txn will
				// either be pushed or aborted. Then issue a read and verify
				// that if we've been pushed, no error is returned and if we
				// have been aborted, we get an aborted error.
				var pErr *roachpb.Error
				if read {
					pushByReading(key)
					_, pErr = txn.Get(key)
					if pErr != nil {
						t.Fatalf("%s: expected no error; got %s", key, pErr)
					}
				} else {
					abortByWriting(key)
					_, pErr = txn.Get(key)
					if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
						t.Fatalf("%s: expected transaction aborted error; got %s", key, pErr)
					}
				}
				return pErr
			}); pErr != nil {
				t.Fatal(pErr)
			}
		}
	}
}
Example 12: updateState
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(
	startNS int64, ctx context.Context, ba roachpb.BatchRequest,
	br *roachpb.BatchResponse, pErr *roachpb.Error) *roachpb.Error {
	newTxn := &roachpb.Transaction{}
	newTxn.Update(ba.Txn)
	if pErr == nil {
		newTxn.Update(br.Txn)
	} else {
		newTxn.Update(pErr.GetTxn())
	}
	switch t := pErr.GetDetail().(type) {
	case *roachpb.TransactionStatusError:
		// Likely already committed or more obscure errors such as epoch or
		// timestamp regressions; consider txn dead.
		defer tc.cleanupTxn(ctx, *pErr.GetTxn())
	case *roachpb.OpRequiresTxnError:
		panic("OpRequiresTxnError must not happen at this level")
	case *roachpb.ReadWithinUncertaintyIntervalError:
		// If the reader encountered a newer write within the uncertainty
		// interval, we advance the txn's timestamp just past the last observed
		// timestamp from the node.
		restartTS, ok := newTxn.GetObservedTimestamp(pErr.OriginNode)
		if !ok {
			pErr = roachpb.NewError(util.Errorf("no observed timestamp for node %d found on uncertainty restart", pErr.OriginNode))
		} else {
			newTxn.Timestamp.Forward(restartTS)
			newTxn.Restart(ba.UserPriority, newTxn.Priority, newTxn.Timestamp)
		}
	case *roachpb.TransactionAbortedError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(pErr.GetTxn().Timestamp)
		newTxn.Priority = pErr.GetTxn().Priority
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxn(ctx, *newTxn)
	case *roachpb.TransactionPushError:
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp)
		newTxn.Restart(ba.UserPriority, t.PusheeTxn.Priority-1, newTxn.Timestamp)
	case *roachpb.TransactionRetryError:
		// Increase timestamp so on restart, we're ahead of any timestamp
		// cache entries or newer versions which caused the restart.
		newTxn.Restart(ba.UserPriority, pErr.GetTxn().Priority, newTxn.Timestamp)
	case *roachpb.WriteTooOldError:
		newTxn.Restart(ba.UserPriority, newTxn.Priority, t.ActualTimestamp)
	case nil:
		// Nothing to do here, avoid the default case.
	default:
		if pErr.GetTxn() != nil {
			if pErr.CanRetry() {
				panic("Retryable internal error must not happen at this level")
			} else {
				// Do not clean up the transaction here since the client might still
				// want to continue the transaction. For example, a client might
				// continue its transaction after receiving ConditionFailedError, which
				// can come from a unique index violation.
			}
		}
	}
	if pErr != nil && pErr.GetTxn() != nil {
		// Avoid changing existing errors because sometimes they escape into
		// goroutines and then there are races. Fairly sure there isn't one
		// here, but better safe than sorry.
		pErrShallow := *pErr
		pErrShallow.SetTxn(newTxn)
		pErr = &pErrShallow
	}
	if newTxn.ID == nil {
		return pErr
	}
	txnID := *newTxn.ID
	tc.Lock()
	defer tc.Unlock()
	txnMeta := tc.txns[txnID]
	// For successful transactional requests, keep the written intents and
	// the updated transaction record to be sent along with the reply.
	// The transaction metadata is created with the first writing operation.
	// A tricky edge case is that of a transaction which "fails" on the
	// first writing request, but actually manages to write some intents
	// (for example, due to being multi-range). In this case, there will
	// be an error, but the transaction will be marked as Writing and the
	// coordinator must track the state, for the client's retry will be
	// performed with a Writing transaction which the coordinator rejects
	// unless it is tracking it (on top of it making sense to track it;
	// after all, it **has** laid down intents and only the coordinator
	// can augment a potential EndTransaction call). See #3303.
	var intentGroup interval.RangeGroup
	if txnMeta != nil {
		intentGroup = txnMeta.keys
	} else if pErr == nil || newTxn.Writing {
		intentGroup = interval.NewRangeTree()
	}
//......... part of the code omitted here .........
Example 13: Send
//......... part of the code omitted here .........
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, roachpb.NewErrorf("client must not pass intents to EndTransaction")
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[txnID]
			{
				// Populate et.IntentSpans, taking into account both existing
				// writes (if any) and new writes in this batch, and taking
				// care to perform proper deduplication.
				var keys interval.RangeGroup
				if metaOK {
					keys = txnMeta.keys
				} else {
					keys = interval.NewRangeTree()
				}
				ba.IntentSpanIterate(func(key, endKey roachpb.Key) {
					addKeyRange(keys, key, endKey)
				})
				et.IntentSpans = collectIntentSpans(keys)
			}
			tc.Unlock()
			if len(et.IntentSpans) > 0 {
				// All good, proceed.
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, roachpb.NewErrorf("transaction is already committed or aborted")
			}
			if len(et.IntentSpans) == 0 {
				// If there aren't any intents, then there's factually no
				// transaction to end. Read-only txns have all of their state in
				// the client.
				return nil, roachpb.NewErrorf("cannot commit a read-only transaction")
			}
			if log.V(1) {
				for _, intent := range et.IntentSpans {
					log.Trace(ctx, fmt.Sprintf("intent: [%s,%s)", intent.Key, intent.EndKey))
				}
			}
		}
	}
	// Send the command through wrapped sender, taking appropriate measures
	// on error.
	var br *roachpb.BatchResponse
	{
		var pErr *roachpb.Error
		br, pErr = tc.wrapped.Send(ctx, ba)
		if _, ok := pErr.GetDetail().(*roachpb.OpRequiresTxnError); ok {
			// TODO(tschottdorf): needs to keep the trace.
			br, pErr = tc.resendWithTxn(ba)
		}
		if pErr = tc.updateState(startNS, ctx, ba, br, pErr); pErr != nil {
			log.Trace(ctx, fmt.Sprintf("error: %s", pErr))
			return nil, pErr
		}
	}
	if br.Txn == nil {
		return br, nil
	}
	if _, ok := ba.GetArg(roachpb.EndTransaction); !ok {
		return br, nil
	}
	// If the --linearizable flag is set, we want to make sure that
	// all the clocks in the system are past the commit timestamp
	// of the transaction. This is guaranteed if either
	// - the commit timestamp is MaxOffset behind startNS
	// - MaxOffset ns were spent in this function
	// when returning to the client. Below we choose the option
	// that involves less waiting, which is likely the first one
	// unless a transaction commits with an odd timestamp.
	if tsNS := br.Txn.Timestamp.WallTime; startNS > tsNS {
		startNS = tsNS
	}
	sleepNS := tc.clock.MaxOffset() -
		time.Duration(tc.clock.PhysicalNow()-startNS)
	if tc.linearizable && sleepNS > 0 {
		defer func() {
			if log.V(1) {
				log.Infof("%v: waiting %s on EndTransaction for linearizability", br.Txn.ID.Short(), util.TruncateDuration(sleepNS, time.Millisecond))
			}
			time.Sleep(sleepNS)
		}()
	}
	if br.Txn.Status != roachpb.PENDING {
		tc.cleanupTxn(ctx, *br.Txn)
	}
	return br, nil
}
Example 14: TestClientRetryNonTxn
// TestClientRetryNonTxn verifies that non-transactional client will
// succeed despite write/write and read/write conflicts. In the case
// where the non-transactional put can push the txn, we expect the
// transaction's value to be written after all retries are complete.
func TestClientRetryNonTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := server.StartTestServer(t)
	defer s.Stop()
	s.SetRangeRetryOptions(retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff: 5 * time.Millisecond,
		Multiplier: 2,
		MaxRetries: 1,
	})
	testCases := []struct {
		args roachpb.Request
		isolation roachpb.IsolationType
		canPush bool
		expAttempts int
	}{
		// Write/write conflicts.
		{&roachpb.PutRequest{}, roachpb.SNAPSHOT, true, 2},
		{&roachpb.PutRequest{}, roachpb.SERIALIZABLE, true, 2},
		{&roachpb.PutRequest{}, roachpb.SNAPSHOT, false, 1},
		{&roachpb.PutRequest{}, roachpb.SERIALIZABLE, false, 1},
		// Read/write conflicts.
		{&roachpb.GetRequest{}, roachpb.SNAPSHOT, true, 1},
		{&roachpb.GetRequest{}, roachpb.SERIALIZABLE, true, 2},
		{&roachpb.GetRequest{}, roachpb.SNAPSHOT, false, 1},
		{&roachpb.GetRequest{}, roachpb.SERIALIZABLE, false, 1},
	}
	// Lay down a write intent using a txn and attempt to write to same
	// key. Try this twice--once with priorities which will allow the
	// intent to be pushed and once with priorities which will not.
	for i, test := range testCases {
		key := roachpb.Key(fmt.Sprintf("key-%d", i))
		var txnPri int32 = 1
		var clientPri roachpb.UserPriority = 1
		if test.canPush {
			clientPri = 2
		} else {
			txnPri = 2
		}
		db, sender := createTestNotifyClient(s.Stopper(), s.ServingAddr(), -clientPri)
		// doneCall signals when the non-txn read or write has completed.
		doneCall := make(chan struct{})
		count := 0 // keeps track of retries
		pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
			if test.isolation == roachpb.SNAPSHOT {
				if pErr := txn.SetIsolation(roachpb.SNAPSHOT); pErr != nil {
					return pErr
				}
			}
			txn.InternalSetPriority(txnPri)
			count++
			// Lay down the intent.
			if pErr := txn.Put(key, "txn-value"); pErr != nil {
				return pErr
			}
			// The wait group lets us pause txn until after the non-txn method has run once.
			wg := sync.WaitGroup{}
			// On the first true, send the non-txn put or get.
			if count == 1 {
				// We use a "notifying" sender here, which allows us to know exactly when the
				// call has been processed; otherwise, we'd be dependent on timing.
				sender.reset(&wg)
				// We must try the non-txn put or get in a goroutine because
				// it might have to retry and will only succeed immediately in
				// the event we can push.
				go func() {
					var pErr *roachpb.Error
					for i := 0; ; i++ {
						if _, ok := test.args.(*roachpb.GetRequest); ok {
							_, pErr = db.Get(key)
						} else {
							pErr = db.Put(key, "value")
						}
						if _, ok := pErr.GetDetail().(*roachpb.WriteIntentError); !ok {
							break
						}
					}
					close(doneCall)
					if pErr != nil {
						t.Fatalf("%d: expected success on non-txn call to %s; got %s", i, test.args.Method(), pErr)
					}
				}()
				sender.wait()
			}
			return nil
		})
		if pErr != nil {
			t.Fatalf("%d: expected success writing transactionally; got %s", i, pErr)
		}
		// Make sure non-txn put or get has finished.
		<-doneCall
//......... part of the code omitted here .........
Example 15: Batch
// Batch implements the roachpb.KVServer interface.
func (n *Node) Batch(ctx context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) {
	// TODO(marc): this code is duplicated in kv/db.go, which should be fixed.
	// Also, grpc's authentication model (which gives credential access in the
	// request handler) doesn't really fit with the current design of the
	// security package (which assumes that TLS state is only given at connection
	// time) - that should be fixed.
	if peer, ok := peer.FromContext(ctx); ok {
		if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
			certUser, err := security.GetCertificateUser(&tlsInfo.State)
			if err != nil {
				return nil, err
			}
			if certUser != security.NodeUser {
				return nil, util.Errorf("user %s is not allowed", certUser)
			}
		}
	}
	var br *roachpb.BatchResponse
	opName := "node " + strconv.Itoa(int(n.Descriptor.NodeID)) // could save allocs here
	fail := func(err error) {
		br = &roachpb.BatchResponse{}
		br.Error = roachpb.NewError(err)
	}
	f := func() {
		sp, err := tracing.JoinOrNew(n.ctx.Tracer, args.Trace, opName)
		if err != nil {
			fail(err)
			return
		}
		// If this is a snowball span, it gets special treatment: It skips the
		// regular tracing machinery, and we instead send the collected spans
		// back with the response. This is more expensive, but then again,
		// those are individual requests traced by users, so they can be.
		if sp.BaggageItem(tracing.Snowball) != "" {
			sp.LogEvent("delegating to snowball tracing")
			sp.Finish()
			if sp, err = tracing.JoinOrNewSnowball(opName, args.Trace, func(rawSpan basictracer.RawSpan) {
				encSp, err := tracing.EncodeRawSpan(&rawSpan, nil)
				if err != nil {
					log.Warning(err)
				}
				br.CollectedSpans = append(br.CollectedSpans, encSp)
			}); err != nil {
				fail(err)
				return
			}
		}
		defer sp.Finish()
		traceCtx := opentracing.ContextWithSpan(n.context(ctx), sp)
		tStart := timeutil.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(traceCtx, *args)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			log.Trace(traceCtx, fmt.Sprintf("error: %T", pErr.GetDetail()))
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.metrics.callComplete(timeutil.Since(tStart), pErr)
		br.Error = pErr
	}
	if !n.stopper.RunTask(f) {
		return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
	}
	return br, nil
}