This article collects typical usage examples of the NewError function from the Golang package github.com/cockroachdb/cockroach/proto. If you are wondering what NewError does, how to call it, or what idiomatic usage looks like, the hand-picked examples below should help.
A total of 15 code examples of the NewError function are shown below, ordered by popularity.
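All 15 examples share one pattern: a plain Go error is wrapped into a *proto.Error for transport via proto.NewError(err), and callers later unwrap it again, e.g. via pErr.GoError() as in example 11. As a warm-up, here is a minimal, self-contained sketch of that round trip; the Error type below is a simplified stand-in, not the real generated protobuf struct:

package main

import (
	"errors"
	"fmt"
)

// Error is a simplified stand-in for proto.Error. The real type is generated
// protobuf code carrying extra fields (index, retry information, ...).
type Error struct {
	Message string
	detail  error // the concrete error, when known
}

// NewError mirrors the shape of proto.NewError: wrap a plain Go error.
func NewError(err error) *Error {
	if err == nil {
		return nil
	}
	return &Error{Message: err.Error(), detail: err}
}

// GoError mirrors the pErr.GoError() calls seen in the examples below.
func (e *Error) GoError() error {
	if e == nil {
		return nil
	}
	if e.detail != nil {
		return e.detail
	}
	return errors.New(e.Message)
}

func main() {
	pErr := NewError(errors.New("range not found"))
	fmt.Println(pErr.GoError()) // range not found
	fmt.Println(NewError(nil))  // <nil>
}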
Example 1: send
// send runs the specified calls synchronously in a single batch and
// returns any errors.
func (db *DB) send(calls ...proto.Call) (pErr *proto.Error) {
	if len(calls) == 0 {
		return nil
	}
	if len(calls) == 1 {
		c := calls[0]
		// We only send BatchRequest. Everything else needs to go into one.
		if _, ok := calls[0].Args.(*proto.BatchRequest); ok {
			if c.Args.Header().UserPriority == nil && db.userPriority != 0 {
				c.Args.Header().UserPriority = gogoproto.Int32(db.userPriority)
			}
			resetClientCmdID(c.Args)
			_ = SendCall(db.sender, c)
			pErr = c.Reply.Header().Error
			if pErr != nil {
				if log.V(1) {
					log.Infof("failed %s: %s", c.Method(), pErr)
				}
			} else if c.Post != nil {
				pErr = proto.NewError(c.Post())
			}
			return pErr
		}
	}
	ba, br := &proto.BatchRequest{}, &proto.BatchResponse{}
	for _, call := range calls {
		ba.Add(call.Args)
	}
	pErr = db.send(proto.Call{Args: ba, Reply: br})
	// Recover from protobuf merge panics.
	defer func() {
		if r := recover(); r != nil {
			// Take care to log the merge error and to return it if no error
			// has already been set.
			mergeErr := util.Errorf("unable to merge response: %s", r)
			log.Error(mergeErr)
			if pErr == nil {
				pErr = proto.NewError(mergeErr)
			}
		}
	}()
	// Transfer individual responses from the batch response to the prepared
	// replies.
	for i, reply := range br.Responses {
		c := calls[i]
		gogoproto.Merge(c.Reply, reply.GetInner())
		if c.Post != nil {
			// A Post error only takes effect if no error was set previously.
			if e := c.Post(); e != nil && pErr == nil {
				pErr = proto.NewError(e)
			}
		}
	}
	return
}
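One detail worth calling out in example 1: the deferred function recovers a panic raised by gogoproto.Merge and stores the resulting error in the named return value pErr, so the error is still returned even though the function body has already unwound. The mechanism is plain Go and can be shown in isolation; the merge function below is a made-up stand-in for the protobuf merge:

package main

import "fmt"

// mergeReplies stands in for gogoproto.Merge: it panics on malformed input.
func mergeReplies(dst, src map[string]string) {
	if dst == nil {
		panic("cannot merge into nil map")
	}
	for k, v := range src {
		dst[k] = v
	}
}

// safeMerge demonstrates the pattern from example 1: a deferred func
// recovers the panic and assigns the error to the named return value.
func safeMerge(dst, src map[string]string) (err error) {
	defer func() {
		if r := recover(); r != nil {
			// Only set the error if none has been set already.
			if err == nil {
				err = fmt.Errorf("unable to merge response: %s", r)
			}
		}
	}()
	mergeReplies(dst, src)
	return nil
}

func main() {
	fmt.Println(safeMerge(nil, map[string]string{"a": "b"}))
	// unable to merge response: cannot merge into nil map
}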
Example 2: send
// send runs the specified calls synchronously in a single batch and
// returns any errors. If the transaction is read-only or has already
// been successfully committed or aborted, a potential trailing
// EndTransaction call is silently dropped, allowing the caller to
// always commit or clean up explicitly even when that may not be
// required (or would even be erroneous).
func (txn *Txn) send(calls ...proto.Call) *proto.Error {
	if txn.Proto.Status != proto.PENDING {
		return proto.NewError(util.Errorf("attempting to use %s transaction", txn.Proto.Status))
	}
	lastIndex := len(calls) - 1
	if lastIndex < 0 {
		return nil
	}
	lastReq := calls[lastIndex].Args
	// haveTxnWrite tracks the intention to write. This is in contrast to
	// txn.Proto.Writing, which is set by the coordinator when the first
	// intent has been created, and which lives for the life of the
	// transaction.
	haveTxnWrite := proto.IsTransactionWrite(lastReq)
	for _, call := range calls[:lastIndex] {
		request := call.Args
		if req, ok := request.(*proto.EndTransactionRequest); ok {
			return proto.NewError(util.Errorf("%s sent as non-terminal call", req.Method()))
		}
		if !haveTxnWrite {
			haveTxnWrite = proto.IsTransactionWrite(request)
		}
	}
	endTxnRequest, haveEndTxn := lastReq.(*proto.EndTransactionRequest)
	needEndTxn := txn.Proto.Writing || haveTxnWrite
	elideEndTxn := haveEndTxn && !needEndTxn
	if elideEndTxn {
		calls = calls[:lastIndex]
	}
	pErr := txn.db.send(calls...)
	if elideEndTxn && pErr == nil {
		// The status is normally set on the server and sent back in the
		// response headers, but this transaction was optimized away. The
		// caller may still inspect the transaction struct, so we manually
		// update it here to emulate a true transaction.
		if endTxnRequest.Commit {
			txn.Proto.Status = proto.COMMITTED
		} else {
			txn.Proto.Status = proto.ABORTED
		}
	}
	return pErr
}
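The heart of example 2 is the elision logic: a trailing EndTransaction is silently dropped when the transaction never wrote, and the status is patched up locally afterwards. The shape of that check can be sketched with simplified stand-in types (these names are illustrative only, not the real cockroach API):

package main

import "fmt"

// request and the two concrete types below are illustrative stand-ins for
// the proto request types.
type request interface{ name() string }

type putRequest struct{}

func (putRequest) name() string { return "Put" }

type endTxnRequest struct{ commit bool }

func (endTxnRequest) name() string { return "EndTransaction" }

// elideTrailingEndTxn drops a trailing EndTransaction when the transaction
// has nothing to commit, mirroring the shape of Txn.send in example 2. It
// returns the (possibly trimmed) calls and whether elision happened.
func elideTrailingEndTxn(calls []request, haveTxnWrite bool) ([]request, bool) {
	if len(calls) == 0 {
		return calls, false
	}
	_, haveEndTxn := calls[len(calls)-1].(endTxnRequest)
	if haveEndTxn && !haveTxnWrite {
		return calls[:len(calls)-1], true
	}
	return calls, false
}

func main() {
	readOnly := []request{endTxnRequest{commit: true}}
	trimmed, elided := elideTrailingEndTxn(readOnly, false)
	fmt.Println(len(trimmed), elided) // 0 true

	writing := []request{putRequest{}, endTxnRequest{commit: true}}
	trimmed, elided = elideTrailingEndTxn(writing, true)
	fmt.Println(len(trimmed), elided) // 2 false
}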
Example 3: sendAttempt
// sendAttempt gathers and rearranges the replicas, and makes an RPC call.
func (ds *DistSender) sendAttempt(trace *tracer.Trace, ba proto.BatchRequest, desc *proto.RangeDescriptor) (*proto.BatchResponse, *proto.Error) {
	defer trace.Epoch("sending RPC")()
	leader := ds.leaderCache.Lookup(proto.RangeID(desc.RangeID))
	// Try to send the call.
	replicas := newReplicaSlice(ds.gossip, desc)
	// Rearrange the replicas so that those replicas with long common
	// prefix of attributes end up first. If there's no prefix, this is a
	// no-op.
	order := ds.optimizeReplicaOrder(replicas)
	// If this request needs to go to a leader and we know who that is, move
	// it to the front.
	if !(proto.IsReadOnly(&ba) && ba.ReadConsistency == proto.INCONSISTENT) &&
		leader.StoreID > 0 {
		if i := replicas.FindReplica(leader.StoreID); i >= 0 {
			replicas.MoveToFront(i)
			order = rpc.OrderStable
		}
	}
	// TODO(tschottdorf) &ba -> ba
	resp, err := ds.sendRPC(trace, desc.RangeID, replicas, order, &ba)
	if err != nil {
		return nil, proto.NewError(err)
	}
	// Untangle the error from the received response.
	br := resp.(*proto.BatchResponse)
	pErr := br.Error
	br.Error = nil // scrub the response error
	return br, pErr
}
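The leader-first reordering in example 3 hinges on a MoveToFront helper on the replica slice. A plausible, self-contained version of such a helper over a plain slice (an assumption about its behavior, not the actual replicaSlice code) looks like this:

package main

import "fmt"

// moveToFront moves the element at index i to the front of the slice,
// preserving the relative order of the elements it passes over.
func moveToFront(s []string, i int) {
	if i <= 0 || i >= len(s) {
		return
	}
	front := s[i]
	copy(s[1:i+1], s[0:i]) // shift the prefix right by one
	s[0] = front
}

func main() {
	replicas := []string{"store2", "store5", "store7"}
	moveToFront(replicas, 1) // suppose store5 holds the leader replica
	fmt.Println(replicas)    // [store5 store2 store7]
}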
Example 4: getDescriptors
// getDescriptors looks up the range descriptor to use for a query over the
// key range [from,to), with the given lookupOptions. The range descriptor
// which contains the range in which the request should start its query is
// returned first; the returned bool is true in case the given range reaches
// outside the first descriptor.
// In case either of the descriptors is discovered stale, the returned closure
// should be called; it evicts the cache appropriately.
// Note that `from` and `to` are not necessarily Key and EndKey from a
// RequestHeader; it's assumed that they've been translated to key addresses
// already (via KeyAddress).
func (ds *DistSender) getDescriptors(from, to proto.Key, options lookupOptions) (*proto.RangeDescriptor, bool, func(), *proto.Error) {
	var desc *proto.RangeDescriptor
	var err error
	var descKey proto.Key
	if !options.useReverseScan {
		descKey = from
	} else {
		descKey = to
	}
	desc, err = ds.rangeCache.LookupRangeDescriptor(descKey, options)
	if err != nil {
		return nil, false, nil, proto.NewError(err)
	}
	// needAnother checks whether the query needs the descriptor of a
	// subsequent range; if so, it returns true.
	needAnother := func(desc *proto.RangeDescriptor, isReverse bool) bool {
		if isReverse {
			return from.Less(desc.StartKey)
		}
		return desc.EndKey.Less(to)
	}
	evict := func() {
		ds.rangeCache.EvictCachedRangeDescriptor(descKey, desc, options.useReverseScan)
	}
	return desc, needAnother(desc, options.useReverseScan), evict, nil
}
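The needAnother closure is just an interval check: does the query span [from, to) reach outside the descriptor's [StartKey, EndKey)? With keys modeled as byte slices, the same check reduces to bytes.Compare; a self-contained sketch:

package main

import (
	"bytes"
	"fmt"
)

// bounds is a stand-in for the key bounds of a proto.RangeDescriptor.
type bounds struct {
	startKey, endKey []byte
}

// needAnother reports whether a query over [from, to) reaches outside the
// descriptor, in the direction the scan is running.
func needAnother(desc bounds, from, to []byte, isReverse bool) bool {
	if isReverse {
		return bytes.Compare(from, desc.startKey) < 0 // from < StartKey
	}
	return bytes.Compare(desc.endKey, to) < 0 // EndKey < to
}

func main() {
	desc := bounds{startKey: []byte("b"), endKey: []byte("m")}
	fmt.Println(needAnother(desc, []byte("c"), []byte("z"), false)) // true
	fmt.Println(needAnother(desc, []byte("c"), []byte("k"), false)) // false
	fmt.Println(needAnother(desc, []byte("a"), []byte("k"), true))  // true
}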
Example 5: Send
// Send sends a request to Cockroach via RPC. Errors which are retryable are
// retried with backoff in a loop using the default retry options. Other errors
// sending the request are retried indefinitely using the same client command
// ID to avoid reporting failure when in fact the command may have gone through
// and been executed successfully. We retry here to eventually get through with
// the same client command ID and be given the cached response.
func (s *rpcSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	var err error
	var br proto.BatchResponse
	// method is the RPC service method name, declared elsewhere in this
	// package.
	for r := retry.Start(s.retryOpts); r.Next(); {
		select {
		case <-s.client.Healthy():
		default:
			err = fmt.Errorf("failed to send RPC request %s: client is unhealthy", method)
			log.Warning(err)
			continue
		}
		if err = s.client.Call(method, &ba, &br); err != nil {
			br.Reset() // don't trust anyone.
			// Assume all errors sending request are retryable. The actual
			// number of things that could go wrong is vast, but we don't
			// want to miss any which should in theory be retried with the
			// same client command ID. We log the error here as a warning so
			// there's visibility that this is happening. Some of the errors
			// we'll sweep up in this net shouldn't be retried, but we can't
			// really know for sure which.
			log.Warningf("failed to send RPC request %s: %s", method, err)
			continue
		}
		// On a successful post, we're done with the retry loop.
		break
	}
	if err != nil {
		return nil, proto.NewError(err)
	}
	pErr := br.Error
	br.Error = nil
	return &br, pErr
}
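The crucial property of example 5's loop is that every retry reuses the same client command ID, so a server that already executed the command can answer from its response cache instead of executing twice. A stripped-down skeleton of that loop, using a hand-rolled exponential backoff instead of the retry package (all names here are hypothetical):

package main

import (
	"errors"
	"fmt"
	"time"
)

var attempts int

// sendOnce is a stand-in for the RPC call: it fails twice, then succeeds.
// The command ID parameter is what a real server would use to find a
// cached response for a command it already executed.
func sendOnce(cmdID int64) error {
	attempts++
	if attempts < 3 {
		return errors.New("connection reset")
	}
	return nil
}

// sendWithRetry retries with the *same* command ID on every attempt, so
// a command that actually went through is not executed a second time.
func sendWithRetry(maxAttempts int) error {
	cmdID := time.Now().UnixNano() // chosen once, reused across attempts
	backoff := time.Millisecond
	var err error
	for i := 0; i < maxAttempts; i++ {
		if err = sendOnce(cmdID); err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed: %v; retrying\n", i+1, err)
		time.Sleep(backoff)
		backoff *= 2 // exponential backoff
	}
	return err
}

func main() {
	fmt.Println("result:", sendWithRetry(5)) // result: <nil>
}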
Example 6: Send
// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *LocalSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	trace := tracer.FromCtx(ctx)
	var store *storage.Store
	var err error
	// If we aren't given a Replica, a little bending over backwards is
	// required here. This case applies exclusively to unit tests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *proto.Replica
		var rangeID proto.RangeID
		rangeID, repl, err = ls.lookupReplica(ba.Key, ba.EndKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.Method, ba.Method(), // TODO(tschottdorf): Method() always returns `Batch`.
		log.Key, ba.Key,
		log.RangeID, ba.RangeID)
	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}
	var br *proto.BatchResponse
	if err != nil {
		return nil, proto.NewError(err)
	}
	// For calls that read data within a txn, we can avoid uncertainty
	// related retries in certain situations. If the node is in
	// "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=Timestamp for the operation
	// accomplishes that. See proto.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		trace.Event("read has no clock uncertainty")
		ba.Txn.MaxTimestamp = ba.Txn.Timestamp
	}
	// TODO(tschottdorf): &ba -> ba
	tmpR, pErr := store.ExecuteCmd(ctx, &ba)
	// TODO(tschottdorf): remove this dance once BatchResponse is returned.
	if tmpR != nil {
		br = tmpR.(*proto.BatchResponse)
		if br.Error != nil {
			panic(proto.ErrorUnexpectedlySet(store, br))
		}
	}
	return br, pErr
}
Example 7: TestRunTransactionRetryOnErrors
// TestRunTransactionRetryOnErrors verifies that the transaction
// is retried on the correct errors.
func TestRunTransactionRetryOnErrors(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		err   error
		retry bool // Expect retry?
	}{
		{&proto.ReadWithinUncertaintyIntervalError{}, true},
		{&proto.TransactionAbortedError{}, true},
		{&proto.TransactionPushError{}, true},
		{&proto.TransactionRetryError{}, true},
		{&proto.RangeNotFoundError{}, false},
		{&proto.RangeKeyMismatchError{}, false},
		{&proto.TransactionStatusError{}, false},
	}
	for i, test := range testCases {
		count := 0
		db := newDB(newTestSender(func(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
			if _, ok := ba.GetArg(proto.Put); ok {
				count++
				if count == 1 {
					return nil, proto.NewError(test.err)
				}
			}
			return &proto.BatchResponse{}, nil
		}, nil))
		db.txnRetryOptions.InitialBackoff = 1 * time.Millisecond
		err := db.Txn(func(txn *Txn) error {
			return txn.Put("a", "b")
		})
		if test.retry {
			if count != 2 {
				t.Errorf("%d: expected one retry; got %d", i, count-1)
			}
			if err != nil {
				t.Errorf("%d: expected success on retry; got %s", i, err)
			}
		} else {
			if count != 1 {
				t.Errorf("%d: expected no retries; got %d", i, count)
			}
			if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
				t.Errorf("%d: expected error of type %T; got %T", i, test.err, err)
			}
		}
	}
}
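The test table above encodes which error types count as retryable. In client code such a classification is typically a type switch over the concrete error type; here is a hedged sketch of the idea with stand-in error types (the real decision lives in the retry machinery, not in this exact form):

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for two of the proto error types in the test table.
type transactionRetryError struct{}

func (transactionRetryError) Error() string { return "retry txn" }

type rangeNotFoundError struct{}

func (rangeNotFoundError) Error() string { return "range not found" }

// isRetryable mirrors the expectations encoded in the test table: only
// certain concrete error types cause the transaction to be retried.
func isRetryable(err error) bool {
	switch err.(type) {
	case transactionRetryError:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(isRetryable(transactionRetryError{})) // true
	fmt.Println(isRetryable(rangeNotFoundError{}))    // false
	fmt.Println(isRetryable(errors.New("other")))     // false
}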
Example 8: sendAndFill
// sendAndFill is a helper which sends the given batch and fills its results,
// returning the appropriate error which is either from the first failing call,
// or an "internal" error.
func sendAndFill(send func(...proto.Call) *proto.Error, b *Batch) *proto.Error {
	// Errors here will be attached to the results, so we will get them from
	// the call to fillResults in the regular case in which an individual call
	// fails. But send() also returns its own errors, so there's some dancing
	// here to do because we want to run fillResults() so that the individual
	// result gets initialized with an error from the corresponding call.
	pErr := send(b.calls...)
	if pErr != nil {
		// TODO(tschottdorf): give the error to fillResults or make sure in
		// some other way that fillResults knows it's only called to set the
		// keys.
		_ = b.fillResults()
		return pErr
	}
	return proto.NewError(b.fillResults())
}
Example 9: TestClientCommandID
// TestClientCommandID verifies that client command ID is set
// on call.
func TestClientCommandID(t *testing.T) {
	defer leaktest.AfterTest(t)
	count := 0
	db := NewDB(newTestSender(func(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		count++
		if ba.CmdID.WallTime == 0 {
			return nil, proto.NewError(util.Errorf("expected client command ID to be initialized"))
		}
		return ba.CreateReply().(*proto.BatchResponse), nil
	}, nil))
	if err := db.Put("a", "b"); err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Errorf("expected test sender to be invoked once; got %d", count)
	}
}
Example 10: TestAbortTransactionOnCommitErrors
// TestAbortTransactionOnCommitErrors verifies that non-exec transactions are
// aborted on the correct errors.
func TestAbortTransactionOnCommitErrors(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		err   error
		abort bool
	}{
		{&proto.ReadWithinUncertaintyIntervalError{}, true},
		{&proto.TransactionAbortedError{}, false},
		{&proto.TransactionPushError{}, true},
		{&proto.TransactionRetryError{}, true},
		{&proto.RangeNotFoundError{}, true},
		{&proto.RangeKeyMismatchError{}, true},
		{&proto.TransactionStatusError{}, true},
	}
	for _, test := range testCases {
		var commit, abort bool
		db := NewDB(newTestSender(func(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
			switch t := ba.Requests[0].GetInner().(type) {
			case *proto.EndTransactionRequest:
				if t.Commit {
					commit = true
					return nil, proto.NewError(test.err)
				}
				abort = true
			}
			return &proto.BatchResponse{}, nil
		}, nil))
		txn := NewTxn(*db)
		_ = txn.Put("a", "b")
		_ = txn.Commit()
		if !commit {
			t.Errorf("%T: failed to find commit", test.err)
		}
		if test.abort && !abort {
			t.Errorf("%T: failed to find abort", test.err)
		} else if !test.abort && abort {
			t.Errorf("%T: found unexpected abort", test.err)
		}
	}
}
Example 11: TestTxnResetTxnOnAbort
// TestTxnResetTxnOnAbort verifies transaction is reset on abort.
func TestTxnResetTxnOnAbort(t *testing.T) {
	defer leaktest.AfterTest(t)
	db := newDB(newTestSender(func(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		return nil, proto.NewError(&proto.TransactionAbortedError{
			Txn: *gogoproto.Clone(ba.Txn).(*proto.Transaction),
		})
	}, nil))
	txn := NewTxn(*db)
	_, pErr := txn.db.sender.Send(context.Background(), testPut())
	if _, ok := pErr.GoError().(*proto.TransactionAbortedError); !ok {
		t.Fatalf("expected TransactionAbortedError, got %v", pErr)
	}
	if len(txn.Proto.ID) != 0 {
		t.Errorf("expected txn to be cleared")
	}
}
Example 12: TestTxnRequestTxnTimestamp
// TestTxnRequestTxnTimestamp verifies response txn timestamp is
// always upgraded on successive requests.
func TestTxnRequestTxnTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)
	makeTS := func(walltime int64, logical int32) proto.Timestamp {
		return proto.ZeroTimestamp.Add(walltime, logical)
	}
	testPutReq := gogoproto.Clone(testPutReq).(*proto.PutRequest)
	testCases := []struct {
		expRequestTS, responseTS proto.Timestamp
	}{
		{makeTS(0, 0), makeTS(10, 0)},
		{makeTS(10, 0), makeTS(10, 1)},
		{makeTS(10, 1), makeTS(10, 0)},
		{makeTS(10, 1), makeTS(20, 1)},
		{makeTS(20, 1), makeTS(20, 1)},
		{makeTS(20, 1), makeTS(0, 0)},
		{makeTS(20, 1), makeTS(20, 1)},
	}
	var testIdx int
	db := NewDB(newTestSender(nil, func(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		test := testCases[testIdx]
		if !test.expRequestTS.Equal(ba.Txn.Timestamp) {
			return nil, proto.NewError(util.Errorf("%d: expected ts %s got %s", testIdx, test.expRequestTS, ba.Txn.Timestamp))
		}
		br := &proto.BatchResponse{}
		br.Txn = &proto.Transaction{}
		br.Txn.Update(ba.Txn) // copy
		br.Txn.Timestamp = test.responseTS
		return br, nil
	}))
	txn := NewTxn(*db)
	for testIdx = range testCases {
		if _, err := send(txn.db.sender, testPutReq); err != nil {
			t.Fatal(err)
		}
	}
}
Example 13: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	// Run a one-off transaction with that single command.
	if log.V(1) {
		log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
	}
	tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
	br := &proto.BatchResponse{}
	if err := tmpDB.Txn(func(txn *client.Txn) error {
		txn.SetDebugName("auto-wrap", 0)
		b := &client.Batch{}
		for _, arg := range ba.Requests {
			req := arg.GetInner()
			call := proto.Call{Args: req, Reply: req.CreateReply()}
			b.InternalAddCall(call)
			br.Add(call.Reply)
		}
		return txn.CommitInBatch(b)
	}); err != nil {
		return nil, proto.NewError(err)
	}
	return br, nil
}
Example 14: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	// Run a one-off transaction with that single command.
	if log.V(1) {
		log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
	}
	tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
	var br *proto.BatchResponse
	err := tmpDB.Txn(func(txn *client.Txn) error {
		txn.SetDebugName("auto-wrap", 0)
		b := &client.Batch{}
		for _, arg := range ba.Requests {
			req := arg.GetInner()
			b.InternalAddRequest(req)
		}
		var err error
		br, err = txn.CommitInBatchWithResponse(b)
		return err
	})
	if err != nil {
		return nil, proto.NewError(err)
	}
	br.Txn = nil // hide the evidence
	return br, nil
}
Example 15: TestNodeEventFeed
func TestNodeEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)
	nodeDesc := proto.NodeDescriptor{
		NodeID: proto.NodeID(99),
	}
	// A testCase corresponds to a single node event type. Each case contains
	// a method which publishes a single event to the given NodeEventFeed,
	// and an expected result interface which should match the produced
	// event.
	testCases := []struct {
		publishTo func(status.NodeEventFeed)
		expected  interface{}
	}{
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.StartNode(nodeDesc, 100)
			},
			expected: &status.StartNodeEvent{
				Desc:      nodeDesc,
				StartedAt: 100,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewGet(proto.Key("abc"))), nil)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewPut(proto.Key("abc"), proto.Value{Bytes: []byte("def")})), nil)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Put,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewGet(proto.Key("abc"))), proto.NewError(util.Errorf("error")))
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Batch,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(proto.NewGet(proto.Key("abc"))), &proto.Error{
					Index:   &proto.ErrPosition{Index: 0},
					Message: "boo",
				})
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
	}
	// Compile the expected events into a single slice.
	expectedEvents := make([]interface{}, len(testCases))
	for i := range testCases {
		expectedEvents[i] = testCases[i].expected
	}
	events := make([]interface{}, 0, len(expectedEvents))
	// Run the test cases directly through a feed.
	stopper := stop.NewStopper()
	defer stopper.Stop()
	feed := util.NewFeed(stopper)
	feed.Subscribe(func(event interface{}) {
		events = append(events, event)
	})
	nodefeed := status.NewNodeEventFeed(proto.NodeID(1), feed)
	for _, tc := range testCases {
		tc.publishTo(nodefeed)
	}
	feed.Flush()
	if a, e := events, expectedEvents; !reflect.DeepEqual(a, e) {
		t.Errorf("received incorrect events.\nexpected: %v\nactual: %v", e, a)
	}
}
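Example 15 exercises the publish/subscribe feed that the event types travel through. The real util.Feed is asynchronous and driven by a stopper, which is why the test calls feed.Flush(); the toy below keeps only the subscribe/publish shape, synchronously, to make the data flow visible:

package main

import "fmt"

// feed is a synchronous toy version of util.Feed: subscribers receive every
// published event. Because it is synchronous, no Flush is needed here.
type feed struct {
	subs []func(interface{})
}

func (f *feed) Subscribe(fn func(interface{})) {
	f.subs = append(f.subs, fn)
}

func (f *feed) Publish(event interface{}) {
	for _, fn := range f.subs {
		fn(event)
	}
}

func main() {
	var events []interface{}
	f := &feed{}
	f.Subscribe(func(event interface{}) {
		events = append(events, event)
	})
	f.Publish("start-node")
	f.Publish("call-complete")
	fmt.Println(events) // [start-node call-complete]
}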