This article collects typical usage examples of the Error.GoError method from the Go package github.com/cockroachdb/cockroach/proto. If you have been wondering what exactly Error.GoError does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the type it belongs to, github.com/cockroachdb/cockroach/proto.Error.

The following shows 5 code examples of the Error.GoError method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Go code examples.
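All five examples follow the same basic pattern: the wire-level *proto.Error is unwrapped into an ordinary Go error via GoError(), and the caller then branches on the concrete error type. The minimal sketch below shows only the shape of that pattern; wireError and conditionFailedError are hypothetical stand-ins for illustration, not the real cockroach/proto types.

package main

import (
	"errors"
	"fmt"
)

// wireError is a hypothetical stand-in for proto.Error: a wrapper that
// carries a structured error detail and can unwrap it into an ordinary
// Go error, the way proto.Error.GoError() does.
type wireError struct {
	detail error
}

// GoError mimics proto.Error.GoError: it returns the underlying Go error,
// or nil if the wrapper carries no error.
func (e *wireError) GoError() error {
	if e == nil {
		return nil
	}
	return e.detail
}

// conditionFailedError stands in for a typed detail such as
// proto.ConditionFailedError.
type conditionFailedError struct{ key string }

func (c *conditionFailedError) Error() string {
	return fmt.Sprintf("condition failed for key %q", c.key)
}

func handle(pErr *wireError) error {
	// The pattern used throughout the examples: unwrap once, then
	// branch on the concrete type of the unwrapped error.
	switch t := pErr.GoError().(type) {
	case nil:
		return nil // success
	case *conditionFailedError:
		return fmt.Errorf("uniqueness-style violation on %s", t.key)
	default:
		return errors.New("unhandled error: " + t.Error())
	}
}

func main() {
	fmt.Println(handle(nil))
	fmt.Println(handle(&wireError{detail: &conditionFailedError{key: "a"}}))
}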
Example 1: convertBatchError
// convertBatchError converts a batch-level *proto.Error into a friendlier
// SQL-level error where possible.
func convertBatchError(tableDesc *TableDescriptor, b client.Batch, pErr *proto.Error) error {
	err := pErr.GoError()
	if pErr.Index == nil {
		return err
	}
	index := pErr.Index.Index
	if index >= int32(len(b.Results)) {
		panic(fmt.Sprintf("index %d outside of results: %+v", index, b.Results))
	}
	result := b.Results[index]
	if _, ok := err.(*proto.ConditionFailedError); ok {
		// A ConditionFailedError here means a conditional put found an
		// existing entry for a unique index key; decode that key to report
		// which index and values collided.
		for _, row := range result.Rows {
			indexID, key, err := decodeIndexKeyPrefix(tableDesc, row.Key)
			if err != nil {
				return err
			}
			index, err := tableDesc.FindIndexByID(indexID)
			if err != nil {
				return err
			}
			valTypes, err := makeKeyVals(tableDesc, index.ColumnIDs)
			if err != nil {
				return err
			}
			vals := make([]parser.Datum, len(valTypes))
			if _, err := decodeKeyVals(valTypes, vals, key); err != nil {
				return err
			}
			return errUniquenessConstraintViolation{index: index, vals: vals}
		}
	}
	return err
}
Example 2: updateState
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(ctx context.Context, ba proto.BatchRequest, br *proto.BatchResponse, pErr *proto.Error) *proto.Error {
	trace := tracer.FromCtx(ctx)
	newTxn := &proto.Transaction{}
	newTxn.Update(ba.GetTxn())
	err := pErr.GoError()
	switch t := err.(type) {
	case nil:
		newTxn.Update(br.GetTxn())
		// Move txn timestamp forward to response timestamp if applicable.
		// TODO(tschottdorf): see (*Replica).executeBatch and comments within.
		// Looks like this isn't necessary any more, nor did it prevent a bug
		// referenced in a TODO there.
		newTxn.Timestamp.Forward(br.Timestamp)
	case *proto.TransactionStatusError:
		// Likely already committed or more obscure errors such as epoch or
		// timestamp regressions; consider txn dead.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.OpRequiresTxnError:
		// TODO(tschottdorf): range-spanning autowrap currently broken.
		panic("TODO(tschottdorf): disabled")
	case *proto.ReadWithinUncertaintyIntervalError:
		// Mark the host as certain. See the protobuf comment for
		// Transaction.CertainNodes for details.
		if t.NodeID == 0 {
			panic("no replica set in header on uncertainty restart")
		}
		newTxn.CertainNodes.Add(t.NodeID)
		// If the reader encountered a newer write within the uncertainty
		// interval, move the timestamp forward, just past that write or
		// up to MaxTimestamp, whichever comes first.
		candidateTS := newTxn.MaxTimestamp
		candidateTS.Backward(t.ExistingTimestamp.Add(0, 1))
		newTxn.Timestamp.Forward(candidateTS)
		newTxn.Restart(ba.GetUserPriority(), newTxn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case *proto.TransactionAbortedError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Priority = t.Txn.Priority
		t.Txn = *newTxn
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.TransactionPushError:
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
		newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
		t.Txn = newTxn
	case *proto.TransactionRetryError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case proto.TransactionRestartError:
		// Assertion: The above cases should exhaust all ErrorDetails which
		// carry a Transaction.
		if pErr.Detail != nil {
			panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
		}
	}
	return func() *proto.Error {
		if len(newTxn.ID) <= 0 {
			return pErr
		}
		id := string(newTxn.ID)
		tc.Lock()
		defer tc.Unlock()
		txnMeta := tc.txns[id]
		// For successful transactional requests, keep the written intents and
		// the updated transaction record to be sent along with the reply.
		// The transaction metadata is created with the first writing operation.
		// TODO(tschottdorf): already computed the intents prior to sending,
		// consider re-using those.
		if intents := ba.GetIntents(); len(intents) > 0 && err == nil {
			if txnMeta == nil {
				newTxn.Writing = true
				txnMeta = &txnMetadata{
					txn:              *newTxn,
					keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
					firstUpdateNanos: tc.clock.PhysicalNow(),
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[id] = txnMeta
				// If the transaction is already over, there's no point in
				// launching a one-off coordinator which will shut down right
				// away.
				if _, isEnding := ba.GetArg(proto.EndTransaction); !isEnding {
					trace.Event("coordinator spawns")
					if !tc.stopper.RunAsyncTask(func() {
						tc.heartbeatLoop(id)
					}) {
						// The system is already draining and we can't start the
//......... part of the code omitted here .........
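A note on the ReadWithinUncertaintyIntervalError branch above: Forward replaces a timestamp with the maximum of itself and its argument, and Backward with the minimum, so the restart timestamp works out to min(ExistingTimestamp + one logical tick, MaxTimestamp). The following is a self-contained sketch of that arithmetic using a simplified stand-in timestamp type (not the real proto.Timestamp) and invented values.

package main

import "fmt"

// ts is a simplified stand-in for a hybrid-logical-clock timestamp
// (wall time plus a logical counter); not the real proto.Timestamp.
type ts struct {
	Wall    int64
	Logical int32
}

func (t ts) less(o ts) bool {
	return t.Wall < o.Wall || (t.Wall == o.Wall && t.Logical < o.Logical)
}

// add returns the timestamp advanced by the given deltas, mirroring
// the Add(wallTime, logical) call in the example.
func (t ts) add(wall int64, logical int32) ts {
	return ts{Wall: t.Wall + wall, Logical: t.Logical + logical}
}

// forward moves t up to o if o is larger (take the maximum).
func (t *ts) forward(o ts) {
	if t.less(o) {
		*t = o
	}
}

// backward moves t down to o if o is smaller (take the minimum).
func (t *ts) backward(o ts) {
	if o.less(*t) {
		*t = o
	}
}

func main() {
	// Hypothetical values: the txn read at wall=100, its uncertainty window
	// ends at MaxTimestamp wall=110, and it ran into a write at wall=105.
	txnTimestamp := ts{Wall: 100}
	maxTimestamp := ts{Wall: 110}
	existing := ts{Wall: 105}

	// candidate = min(existing + 1 logical tick, MaxTimestamp)
	candidate := maxTimestamp
	candidate.backward(existing.add(0, 1))

	// The txn timestamp is then pushed forward to the candidate.
	txnTimestamp.forward(candidate)

	fmt.Printf("restart timestamp: %+v\n", txnTimestamp) // {Wall:105 Logical:1}
}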
Example 3: Send
//......... part of the code omitted here .........
			if len(et.Intents) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, proto.NewError(util.Errorf("client must not pass intents to EndTransaction"))
			}
			if len(et.Key) != 0 {
				return nil, proto.NewError(util.Errorf("EndTransaction must not have a Key set"))
			}
			et.Key = ba.Txn.Key
			tc.Lock()
			txnMeta, metaOK := tc.txns[id]
			if id != "" && metaOK {
				et.Intents = txnMeta.intents()
			}
			tc.Unlock()
			if intents := ba.GetIntents(); len(intents) > 0 {
				// Writes in Batch, so EndTransaction is fine. Should add
				// outstanding intents to EndTransaction, though.
				// TODO(tschottdorf): possible issues when the batch fails,
				// but the intents have been added anyways.
				// TODO(tschottdorf): some of these intents may be covered
				// by others, for example {[a,b), a}). This can lead to
				// some extra requests when those are non-local to the txn
				// record. But it doesn't seem worth optimizing now.
				et.Intents = append(et.Intents, intents...)
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, proto.NewError(util.Errorf("transaction is already committed or aborted"))
			}
			if len(et.Intents) == 0 {
				// If there aren't any intents, then there's factually no
				// transaction to end. Read-only txns have all of their state in
				// the client.
				return nil, proto.NewError(util.Errorf("cannot commit a read-only transaction"))
			}
			// TODO(tschottdorf): V(1)
			for _, intent := range et.Intents {
				trace.Event(fmt.Sprintf("intent: [%s,%s)", intent.Key, intent.EndKey))
			}
		}
	}
	// Send the command through wrapped sender, taking appropriate measures
	// on error.
	var br *proto.BatchResponse
	{
		var pErr *proto.Error
		br, pErr = tc.wrapped.Send(ctx, ba)
		if _, ok := pErr.GoError().(*proto.OpRequiresTxnError); ok {
			br, pErr = tc.resendWithTxn(ba)
		}
		if pErr := tc.updateState(ctx, ba, br, pErr); pErr != nil {
			return nil, pErr
		}
	}
	if br.Txn == nil {
		return br, nil
	}
	if _, ok := ba.GetArg(proto.EndTransaction); !ok {
		return br, nil
	}
	// If the --linearizable flag is set, we want to make sure that
	// all the clocks in the system are past the commit timestamp
	// of the transaction. This is guaranteed if either
	// - the commit timestamp is MaxOffset behind startNS
	// - MaxOffset ns were spent in this function
	// when returning to the client. Below we choose the option
	// that involves less waiting, which is likely the first one
	// unless a transaction commits with an odd timestamp.
	if tsNS := br.Txn.Timestamp.WallTime; startNS > tsNS {
		startNS = tsNS
	}
	sleepNS := tc.clock.MaxOffset() -
		time.Duration(tc.clock.PhysicalNow()-startNS)
	if tc.linearizable && sleepNS > 0 {
		defer func() {
			if log.V(1) {
				log.Infof("%v: waiting %s on EndTransaction for linearizability", br.Txn.Short(), util.TruncateDuration(sleepNS, time.Millisecond))
			}
			time.Sleep(sleepNS)
		}()
	}
	if br.Txn.Status != proto.PENDING {
		tc.cleanupTxn(trace, *br.Txn)
	}
	return br, nil
}
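The linearizability wait at the end of Example 3 boils down to sleeping until MaxOffset has elapsed since min(start of the call, commit wall time), i.e. sleep = MaxOffset - (now - startNS) after startNS has possibly been lowered to the commit timestamp. The small standalone sketch below reproduces that computation with invented numbers; it is not wired to any real clock.

package main

import (
	"fmt"
	"time"
)

// sleepForLinearizability mirrors the arithmetic in Example 3: the caller
// must not return until every clock in the cluster is past the commit
// timestamp, which is guaranteed once maxOffset has elapsed since
// min(startNS, commit wall time).
func sleepForLinearizability(startNS, commitWallNS, nowNS int64, maxOffset time.Duration) time.Duration {
	if startNS > commitWallNS {
		startNS = commitWallNS
	}
	sleep := maxOffset - time.Duration(nowNS-startNS)
	if sleep < 0 {
		return 0
	}
	return sleep
}

func main() {
	// Hypothetical values: the call started at t=0ns, the txn committed at
	// wall time 100µs, it is now 150µs, and clocks may be up to 250µs apart.
	start := int64(0)
	commit := int64(100 * time.Microsecond)
	now := int64(150 * time.Microsecond)

	fmt.Println(sleepForLinearizability(start, commit, now, 250*time.Microsecond)) // 100µs
}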
Example 4: sendChunk
//......... part of the code omitted here .........
			if numActive == 0 && trErr == nil {
				untruncate()
				// This shouldn't happen in the wild, but some tests
				// exercise it.
				return nil, proto.NewError(util.Errorf("truncation resulted in empty batch on [%s,%s): %s",
					from, to, ba))
			}
			defer untruncate()
			if trErr != nil {
				return nil, proto.NewError(trErr)
			}
			// TODO(tschottdorf): make key range on batch redundant. The
			// requests within dictate it anyways.
			ba.Key, ba.EndKey = keys.Range(ba)
			reply, err := ds.sendAttempt(trace, ba, desc)
			ba.Key, ba.EndKey = nil, nil
			if err != nil {
				if log.V(1) {
					log.Warningf("failed to invoke %s: %s", ba, err)
				}
			}
			return reply, err
		}()
		// If sending succeeded, break this loop.
		if pErr == nil {
			break
		}
		// Error handling below.
		// If retryable, allow retry. For range not found or range
		// key mismatch errors, we don't backoff on the retry,
		// but reset the backoff loop so we can retry immediately.
		switch tErr := pErr.GoError().(type) {
		case *proto.SendError:
			// For an RPC error to occur, we must've been unable to contact
			// any replicas. In this case, likely all nodes are down (or
			// not getting back to us within a reasonable amount of time).
			// We may simply not be trying to talk to the up-to-date
			// replicas, so clearing the descriptor here should be a good
			// idea.
			// TODO(tschottdorf): If a replica group goes dead, this
			// will cause clients to put high read pressure on the first
			// range, so there should be some rate limiting here.
			evictDesc()
			if tErr.CanRetry() {
				continue
			}
		case *proto.RangeNotFoundError, *proto.RangeKeyMismatchError:
			trace.Event(fmt.Sprintf("reply error: %T", tErr))
			// Range descriptor might be out of date - evict it.
			evictDesc()
			// On addressing errors, don't backoff; retry immediately.
			r.Reset()
			if log.V(1) {
				log.Warning(tErr)
			}
			// On retries, allow [uncommitted] intents on range descriptor
			// lookups to be returned 50% of the time in order to succeed
			// at finding the transaction record pointed to by the intent
			// itself. The 50% probability of returning either the current
			// intent or the previously committed value balances between
			// the two cases where the intent's txn hasn't yet been
			// committed (the previous value is correct), or the intent's
			// txn has been committed (the intent value is correct).
			options.considerIntents = true
Example 5: fillResults
func (b *Batch) fillResults(br *proto.BatchResponse, pErr *proto.Error) error {
	offset := 0
	for i := range b.Results {
		result := &b.Results[i]
		for k := 0; k < result.calls; k++ {
			args := b.reqs[offset+k]
			var reply proto.Response
			if result.Err == nil {
				result.Err = pErr.GoError()
				if result.Err == nil {
					if offset+k < len(br.Responses) {
						reply = br.Responses[offset+k].GetValue().(proto.Response)
					} else if args.Method() != proto.EndTransaction {
						// TODO(tschottdorf): EndTransaction is excepted here
						// because it may be elided (r/o txns). Might prefer to
						// simulate an EndTransaction response instead; this
						// effectively just leaks here.
						panic("not enough responses for calls")
					}
				}
			}
			switch req := args.(type) {
			case *proto.GetRequest:
				row := &result.Rows[k]
				row.Key = []byte(req.Key)
				if result.Err == nil {
					row.Value = reply.(*proto.GetResponse).Value
				}
			case *proto.PutRequest:
				row := &result.Rows[k]
				row.Key = []byte(req.Key)
				if result.Err == nil {
					row.Value = &req.Value
					row.setTimestamp(reply.(*proto.PutResponse).Timestamp)
				}
			case *proto.ConditionalPutRequest:
				row := &result.Rows[k]
				row.Key = []byte(req.Key)
				if result.Err == nil {
					row.Value = &req.Value
					row.setTimestamp(reply.(*proto.ConditionalPutResponse).Timestamp)
				}
			case *proto.IncrementRequest:
				row := &result.Rows[k]
				row.Key = []byte(req.Key)
				if result.Err == nil {
					t := reply.(*proto.IncrementResponse)
					row.Value = &proto.Value{
						Bytes: encoding.EncodeUint64(nil, uint64(t.NewValue)),
						Tag:   proto.ValueType_INT,
					}
					row.setTimestamp(t.Timestamp)
				}
			case *proto.ScanRequest:
				if result.Err == nil {
					t := reply.(*proto.ScanResponse)
					result.Rows = make([]KeyValue, len(t.Rows))
					for j := range t.Rows {
						src := &t.Rows[j]
						dst := &result.Rows[j]
						dst.Key = src.Key
						dst.Value = &src.Value
					}
				}
			case *proto.ReverseScanRequest:
				if result.Err == nil {
					t := reply.(*proto.ReverseScanResponse)
					result.Rows = make([]KeyValue, len(t.Rows))
					for j := range t.Rows {
						src := &t.Rows[j]
						dst := &result.Rows[j]
						dst.Key = src.Key
						dst.Value = &src.Value
					}
				}
			case *proto.DeleteRequest:
				row := &result.Rows[k]
				row.Key = []byte(args.(*proto.DeleteRequest).Key)
			case *proto.DeleteRangeRequest:
			case *proto.EndTransactionRequest:
			case *proto.AdminMergeRequest:
			case *proto.AdminSplitRequest:
			case *proto.HeartbeatTxnRequest:
			case *proto.GCRequest:
			case *proto.PushTxnRequest:
			case *proto.RangeLookupRequest:
			case *proto.ResolveIntentRequest:
			case *proto.ResolveIntentRangeRequest:
			case *proto.MergeRequest:
			case *proto.TruncateLogRequest:
			case *proto.LeaderLeaseRequest:
			case *proto.BatchRequest:
				// Nothing to do for these methods as they do not generate any
				// rows.
			default:
//......... part of the code omitted here .........