This article collects typical usage examples of the Golang function ErrorUnexpectedlySet from github.com/cockroachdb/cockroach/roachpb. If you are wondering what the ErrorUnexpectedlySet function does, how to call it, or what real-world usage looks like, the curated examples below should help.
Fifteen code examples of ErrorUnexpectedlySet are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
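All fifteen examples share the same invariant check: a sender must return its error in the separate *roachpb.Error return value and leave BatchResponse.Error nil, and ErrorUnexpectedlySet builds the panic message when that invariant is violated. The sketch below is a simplified, self-contained reconstruction of that pattern; the errorUnexpectedlySet helper, the BatchResponse and sender stand-in types, and the exact message format are assumptions for illustration and may differ from the real roachpb package.

// A minimal sketch of the ErrorUnexpectedlySet pattern, not the real roachpb code.
package main

import "fmt"

// Error stands in for *roachpb.Error, which travels alongside the response.
type Error struct{ Message string }

// BatchResponse stands in for roachpb.BatchResponse; its Error field must stay nil.
type BatchResponse struct {
	Error *Error
}

// errorUnexpectedlySet mimics what roachpb.ErrorUnexpectedlySet is used for in the
// examples: it names the component (culprit) that illegally set the response error.
func errorUnexpectedlySet(culprit, response interface{}) string {
	return fmt.Sprintf("error is unexpectedly set, culprit is %T:\n%+v", culprit, response)
}

// sender stands in for a store, a Stores map, or a wrapped client.Sender.
type sender struct{}

// Send returns errors out of band and leaves BatchResponse.Error nil.
func (s *sender) Send() (*BatchResponse, *Error) {
	return &BatchResponse{}, nil
}

func main() {
	s := &sender{}
	br, pErr := s.Send()
	// The assertion every example below performs right after calling Send:
	if br != nil && br.Error != nil {
		panic(errorUnexpectedlySet(s, br))
	}
	fmt.Println("response ok, separate error:", pErr)
}

In the real code, the culprit passed to ErrorUnexpectedlySet is the component that produced the response (a Store, the Stores map, or a wrapped Sender), so the panic message points at the offender.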
Example 1: Send
// Send forwards the call to the single store. This is a poor man's
// version of kv.TxnCoordSender, but it serves the purposes of
// supporting tests in this package. Transactions are not supported.
// Since kv/ depends on storage/, we can't get access to a
// TxnCoordSender from here.
// TODO(tschottdorf): {kv->storage}.LocalSender
func (db *testSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	if et, ok := ba.GetArg(roachpb.EndTransaction); ok {
		return nil, roachpb.NewError(util.Errorf("%s method not supported", et.Method()))
	}
	// Lookup range and direct request.
	key, endKey := keys.Range(ba)
	rng := db.store.LookupReplica(key, endKey)
	if rng == nil {
		return nil, roachpb.NewError(roachpb.NewRangeKeyMismatchError(key, endKey, nil))
	}
	ba.RangeID = rng.Desc().RangeID
	replica := rng.GetReplica()
	if replica == nil {
		return nil, roachpb.NewError(util.Errorf("own replica missing in range"))
	}
	ba.Replica = *replica
	br, pErr := db.store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(db.store, br))
	}
	if pErr != nil {
		return nil, pErr
	}
	return br, nil
}
Example 2: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI proto.Message) (proto.Message, error) {
	ba := argsI.(*roachpb.BatchRequest)
	var br *roachpb.BatchResponse
	f := func() {
		// TODO(tschottdorf) get a hold of the client's ID, add it to the
		// context before dispatching, and create an ID for tracing the request.
		sp := n.ctx.Tracer.StartSpan("node")
		defer sp.Finish()
		ctx, _ := opentracing.ContextWithSpan((*Node)(n).context(), sp)
		tStart := time.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(ctx, *ba)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			sp.LogEvent(fmt.Sprintf("error: %T", pErr.GetDetail()))
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.feed.CallComplete(*ba, time.Now().Sub(tStart), pErr)
		br.Error = pErr
	}
	if !n.stopper.RunTask(f) {
		return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
	}
	return br, nil
}
Example 3: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if pErr == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if _, ok := pErr.GoError().(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
		}
		// Acts as a minimum priority on restart.
		if pErr.GetTxn() != nil {
			ts.Proto.Priority = pErr.GetTxn().Priority
		}
	} else if pErr.TransactionRestart != roachpb.TransactionRestart_ABORT {
		ts.Proto.Update(pErr.GetTxn())
	}
	return nil, pErr
}
Example 4: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (s *DBServer) executeCmd(argsI proto.Message) (proto.Message, error) {
	var br *roachpb.BatchResponse
	var err error
	f := func() {
		ba := argsI.(*roachpb.BatchRequest)
		if err = verifyRequest(ba); err != nil {
			return
		}
		var pErr *roachpb.Error
		br, pErr = s.sender.Send(context.TODO(), *ba)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(s.sender, br))
		}
		br.Error = pErr
	}
	if !s.stopper.RunTask(f) {
		err = util.Errorf("node stopped")
	}
	return br, err
}
Example 5: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	// TODO(tschottdorf): see about using only the top-level *roachpb.Error
	// information for this restart logic (includes adding the Txn).
	err := pErr.GoError()
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if err == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if abrtErr, ok := err.(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
		}
		if abrtTxn := abrtErr.Transaction(); abrtTxn != nil {
			// Acts as a minimum priority on restart.
			ts.Proto.Priority = abrtTxn.Priority
		}
	} else if txnErr, ok := err.(roachpb.TransactionRestartError); ok {
		ts.Proto.Update(txnErr.Transaction())
	}
	return nil, pErr
}
Example 6: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	var store *Store
	var err error
	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		rs := keys.Range(ba)
		rangeID, repl, err = ls.lookupReplica(rs.Key, rs.EndKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	sp, cleanupSp := tracing.SpanFromContext(opStores, store.Tracer(), ctx)
	defer cleanupSp()
	if ba.Txn != nil {
		// For calls that read data within a txn, we keep track of timestamps
		// observed from the various participating nodes' HLC clocks. If we have
		// a timestamp on file for this Node which is smaller than MaxTimestamp,
		// we can lower MaxTimestamp accordingly. If MaxTimestamp drops below
		// OrigTimestamp, we effectively can't see uncertainty restarts any
		// more.
		// Note that it's not an issue if MaxTimestamp propagates back out to
		// the client via a returned Transaction update - when updating a Txn
		// from another, the larger MaxTimestamp wins.
		if maxTS, ok := ba.Txn.GetObservedTimestamp(ba.Replica.NodeID); ok && maxTS.Less(ba.Txn.MaxTimestamp) {
			// Copy-on-write to protect others we might be sharing the Txn with.
			shallowTxn := *ba.Txn
			// The uncertainty window is [OrigTimestamp, maxTS), so if that window
			// is empty, there won't be any uncertainty restarts.
			if !ba.Txn.OrigTimestamp.Less(maxTS) {
				sp.LogEvent("read has no clock uncertainty")
			}
			shallowTxn.MaxTimestamp.Backward(maxTS)
			ba.Txn = &shallowTxn
		}
	}
	br, pErr := store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}
Example 7: Batch
// Batch implements the roachpb.KVServer interface.
func (s *DBServer) Batch(
	ctx context.Context, args *roachpb.BatchRequest,
) (br *roachpb.BatchResponse, err error) {
	// TODO(marc,bdarnell): this code is duplicated in server/node.go,
	// which should be fixed.
	defer func() {
		// We always return errors via BatchResponse.Error so structure is
		// preserved; plain errors are presumed to be from the RPC
		// framework and not from cockroach.
		if err != nil {
			if br == nil {
				br = &roachpb.BatchResponse{}
			}
			if br.Error != nil {
				panic(fmt.Sprintf(
					"attempting to return both a plain error (%s) and roachpb.Error (%s)", err, br.Error))
			}
			br.Error = roachpb.NewError(err)
			err = nil
		}
	}()
	// TODO(marc): grpc's authentication model (which gives credential access in
	// the request handler) doesn't really fit with the current design of the
	// security package (which assumes that TLS state is only given at connection
	// time) - that should be fixed.
	if peer, ok := peer.FromContext(ctx); ok {
		if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
			certUser, err := security.GetCertificateUser(&tlsInfo.State)
			if err != nil {
				return nil, err
			}
			if certUser != security.NodeUser {
				return nil, errors.Errorf("user %s is not allowed", certUser)
			}
		}
	}
	if err = verifyRequest(args); err != nil {
		return br, err
	}
	err = s.stopper.RunTask(func() {
		var pErr *roachpb.Error
		// TODO(wiz): This is required to be a different context from the one
		// provided by grpc since it has to last for the entire transaction and not
		// just this one RPC call. See comment for (*TxnCoordSender).heartbeatLoop.
		br, pErr = s.sender.Send(context.TODO(), *args)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(s.sender, br))
		}
		br.Error = pErr
	})
	return br, err
}
Example 8: Send
func (ss *notifyingSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	br, pErr := ss.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ss.wrapped, br))
	}
	if ss.waiter != nil {
		ss.waiter.Done()
	}
	return br, pErr
}
Example 9: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI proto.Message) (proto.Message, error) {
	ba := argsI.(*roachpb.BatchRequest)
	var br *roachpb.BatchResponse
	opName := "node " + strconv.Itoa(int(n.Descriptor.NodeID)) // could save allocs here
	fail := func(err error) {
		br = &roachpb.BatchResponse{}
		br.Error = roachpb.NewError(err)
	}
	f := func() {
		sp, err := tracing.JoinOrNew(n.ctx.Tracer, ba.Trace, opName)
		if err != nil {
			fail(err)
			return
		}
		// If this is a snowball span, it gets special treatment: It skips the
		// regular tracing machinery, and we instead send the collected spans
		// back with the response. This is more expensive, but then again,
		// those are individual requests traced by users, so they can be.
		if sp.BaggageItem(tracing.Snowball) != "" {
			if sp, err = tracing.JoinOrNewSnowball(opName, ba.Trace, func(rawSpan basictracer.RawSpan) {
				encSp, err := tracing.EncodeRawSpan(&rawSpan, nil)
				if err != nil {
					log.Warning(err)
				}
				br.CollectedSpans = append(br.CollectedSpans, encSp)
			}); err != nil {
				fail(err)
				return
			}
		}
		defer sp.Finish()
		ctx := opentracing.ContextWithSpan((*Node)(n).context(), sp)
		tStart := time.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(ctx, *ba)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			sp.LogEvent(fmt.Sprintf("error: %T", pErr.GetDetail()))
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.metrics.callComplete(time.Now().Sub(tStart), pErr)
		br.Error = pErr
	}
	if !n.stopper.RunTask(f) {
		return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
	}
	return br, nil
}
Example 10: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	sp := tracing.SpanFromContext(ctx)
	var store *Store
	var pErr *roachpb.Error
	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		rs := keys.Range(ba)
		rangeID, repl, pErr = ls.lookupReplica(rs.Key, rs.EndKey)
		if pErr == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if pErr == nil {
		store, pErr = ls.GetStore(ba.Replica.StoreID)
	}
	var br *roachpb.BatchResponse
	if pErr != nil {
		return nil, pErr
	}
	// For calls that read data within a txn, we can avoid uncertainty
	// related retries in certain situations. If the node is in
	// "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=OrigTimestamp for the operation
	// accomplishes that. See roachpb.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		sp.LogEvent("read has no clock uncertainty")
		// Copy-on-write to protect others we might be sharing the Txn with.
		shallowTxn := *ba.Txn
		// We set to OrigTimestamp because that works for both SNAPSHOT and
		// SERIALIZABLE: If we used Timestamp instead, we could run into
		// unnecessary retries at SNAPSHOT. For example, a SNAPSHOT txn at
		// OrigTimestamp = 1000.0, Timestamp = 2000.0, MaxTimestamp = 3000.0
		// will always read at 1000, so a MaxTimestamp of 2000 will still let
		// it restart with uncertainty when it finds a value in (1000, 2000).
		shallowTxn.MaxTimestamp = ba.Txn.OrigTimestamp
		ba.Txn = &shallowTxn
	}
	br, pErr = store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}
Example 11: shouldCacheResponse
// shouldCacheResponse returns whether the response should be cached.
// Responses with write-too-old, write-intent and not leader errors
// are retried on the server, and so are not recorded in the response
// cache in the hopes of retrying to a successful outcome.
func (rc *ResponseCache) shouldCacheResponse(replyWithErr roachpb.ResponseWithError) bool {
	if err := replyWithErr.Reply.Header().Error; err != nil {
		panic(roachpb.ErrorUnexpectedlySet(rc, replyWithErr.Reply))
	}
	switch replyWithErr.Err.(type) {
	case *roachpb.WriteTooOldError, *roachpb.WriteIntentError, *roachpb.NotLeaderError:
		return false
	}
	return true
}
Example 12: Send
func (ss *notifyingSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	br, pErr := ss.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ss.wrapped, br))
	}
	select {
	case ss.notify <- struct{}{}:
	default:
	}
	return br, pErr
}
Example 13: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	if ts.UserPriority > 0 {
		ba.UserPriority = ts.UserPriority
	}
	ctx = opentracing.ContextWithSpan(ctx, ts.Trace)
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	if br != nil {
		for _, encSp := range br.CollectedSpans {
			var newSp basictracer.RawSpan
			if err := tracing.DecodeRawSpan(encSp, &newSp); err != nil {
				return nil, roachpb.NewError(err)
			}
			ts.CollectedSpans = append(ts.CollectedSpans, newSp)
		}
	}
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if pErr == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			TxnMeta: roachpb.TxnMeta{
				Isolation: ts.Proto.Isolation,
			},
			Name: ts.Proto.Name,
		}
		// Acts as a minimum priority on restart.
		if pErr.GetTxn() != nil {
			ts.Proto.Priority = pErr.GetTxn().Priority
		}
	} else if pErr.TransactionRestart != roachpb.TransactionRestart_ABORT {
		ts.Proto.Update(pErr.GetTxn())
	}
	return nil, pErr
}
Example 14: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (s *DBServer) executeCmd(argsI proto.Message) (proto.Message, error) {
	ba := argsI.(*roachpb.BatchRequest)
	if err := verifyRequest(ba); err != nil {
		return nil, err
	}
	br, pErr := s.sender.Send(context.TODO(), *ba)
	if pErr != nil {
		br = &roachpb.BatchResponse{}
	}
	if br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(s.sender, br))
	}
	br.Error = pErr
	return br, nil
}
Example 15: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *Stores) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	sp := tracing.SpanFromContext(ctx)
	var store *Store
	var pErr *roachpb.Error
	// If we aren't given a Replica, then a little bending over
	// backwards here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		rs := keys.Range(ba)
		rangeID, repl, pErr = ls.lookupReplica(rs.Key, rs.EndKey)
		if pErr == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if pErr == nil {
		store, pErr = ls.GetStore(ba.Replica.StoreID)
	}
	var br *roachpb.BatchResponse
	if pErr != nil {
		return nil, pErr
	}
	// For calls that read data within a txn, we can avoid uncertainty
	// related retries in certain situations. If the node is in
	// "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=Timestamp for the operation
	// accomplishes that. See roachpb.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		sp.LogEvent("read has no clock uncertainty")
		// Copy-on-write to protect others we might be sharing the Txn with.
		shallowTxn := *ba.Txn
		shallowTxn.MaxTimestamp = ba.Txn.Timestamp
		ba.Txn = &shallowTxn
	}
	br, pErr = store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}