

Golang proto.Error Class Code Examples

This article collects typical usage examples of the Error class from github.com/cockroachdb/cockroach/proto. If you are wondering what the Golang Error class is for, how to use it, or what real-world usage looks like, the curated class examples below should help.


Nine code examples of the Error class are shown below, sorted by popularity by default.
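Before diving into the examples, here is a minimal sketch of the round trip between a plain Go error and a proto.Error, using the three entry points that recur throughout the examples below (SetResponseGoError, proto.NewError, and GoError). It assumes the pinned cockroach revision these snippets were taken from:

package main

import (
	"errors"
	"fmt"

	"github.com/cockroachdb/cockroach/proto"
)

func main() {
	// Wrap a plain Go error, as the SQL server examples below do.
	var pErr proto.Error
	pErr.SetResponseGoError(errors.New("boom"))

	// proto.NewError is the constructor form used by TxnCoordSender.Send.
	pErr2 := proto.NewError(errors.New("boom again"))

	// GoError converts back to a plain Go error on the receiving side.
	fmt.Println(pErr.GoError(), pErr2.GoError())
}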

Example 1: convertBatchError

func convertBatchError(tableDesc *TableDescriptor, b client.Batch, pErr *proto.Error) error {
	err := pErr.GoError()
	if pErr.Index == nil {
		return err
	}
	index := pErr.Index.Index
	if index >= int32(len(b.Results)) {
		panic(fmt.Sprintf("index %d outside of results: %+v", index, b.Results))
	}
	result := b.Results[index]
	if _, ok := err.(*proto.ConditionFailedError); ok {
		for _, row := range result.Rows {
			indexID, key, err := decodeIndexKeyPrefix(tableDesc, row.Key)
			if err != nil {
				return err
			}
			index, err := tableDesc.FindIndexByID(indexID)
			if err != nil {
				return err
			}
			valTypes, err := makeKeyVals(tableDesc, index.ColumnIDs)
			if err != nil {
				return err
			}
			vals := make([]parser.Datum, len(valTypes))
			if _, err := decodeKeyVals(valTypes, vals, key); err != nil {
				return err
			}

			return errUniquenessConstraintViolation{index: index, vals: vals}
		}
	}
	return err
}
Author: alunarbeach | Project: cockroach | Lines: 34 | Source: errors.go
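A hypothetical call site for the helper above. The signature of convertBatchError matches the example; the run callback merely stands in for whatever executes the batch and is an assumption for illustration:

func insertRow(tableDesc *TableDescriptor, b client.Batch, run func(client.Batch) *proto.Error) error {
	// Run the batch; on failure, map the low-level proto.Error (for
	// example a ConditionFailedError from a CPut) to the SQL-level
	// uniqueness-violation error constructed above.
	if pErr := run(b); pErr != nil {
		return convertBatchError(tableDesc, b, pErr)
	}
	return nil
}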

Example 2: makeResultFromError

// If we hit an error and there is a pending transaction, roll back
// the transaction before returning. The client does not have to
// deal with cleaning up transaction state.
func makeResultFromError(planMaker *planner, err error) driver.Result {
	if planMaker.txn != nil {
		if err != errTransactionAborted {
			planMaker.txn.Cleanup(err)
		}
		// This transaction will normally get marked aborted as part of
		// Cleanup above, but we do it explicitly here because edge cases
		// exist:
		// (1)
		// BEGIN
		// <some operation which is implemented using CPut, which fails>
		// (2)
		// BEGIN
		// <syntax error>
		// Both cases will not write any intents, and so client.Txn will
		// not actually send an EndTransaction, and rightly so.
		// Unfortunately, we depend on txn.Proto.Status being equivalent to
		// our SQL transaction's status, and in these cases, our SQL
		// transaction is aborted.
		planMaker.txn.Proto.Status = proto.ABORTED
	}
	var errProto proto.Error
	errProto.SetResponseGoError(err)
	return driver.Result{Error: &errProto}
}
Author: kangxinrong | Project: cockroach | Lines: 28 | Source: server.go

Example 3: ServeHTTP

// ServeHTTP serves the SQL API by treating the request URL path
// as the method, the request body as the arguments, and sets the
// response body as the method reply. The request body is unmarshalled
// into arguments based on the Content-Type request header. Protobuf
// and JSON-encoded requests are supported. The response body is
// encoded according to the request's Accept header, or if not
// present, in the same format as the request's incoming Content-Type
// header.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	method := r.URL.Path
	if !strings.HasPrefix(method, driver.Endpoint) {
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}

	// Check TLS settings.
	authenticationHook, err := security.AuthenticationHook(s.context.Insecure, r.TLS)
	if err != nil {
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	method = strings.TrimPrefix(method, driver.Endpoint)
	if method != driver.Execute.String() {
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}

	// Unmarshal the request.
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	var args driver.Request
	if err := util.UnmarshalRequest(r, reqBody, &args, allowedEncodings); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Check request user against client certificate user.
	if err := authenticationHook(&args); err != nil {
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}

	// Send the Request for SQL execution and set the application-level error
	// on the reply.
	reply, err := s.exec(args)
	if err != nil {
		errProto := proto.Error{}
		errProto.SetResponseGoError(err)
		reply.Error = &errProto
	}

	// Marshal the response.
	body, contentType, err := util.MarshalResponse(r, &reply, allowedEncodings)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set(util.ContentTypeHeader, contentType)
	w.Write(body)
}
Author: arypurnomoz | Project: cockroach | Lines: 66 | Source: server.go
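Note that the application-level error travels inside the reply body rather than in the HTTP status code. A hedged sketch of the corresponding client-side check (the driver.Response type name and its Error field are assumed from the driver package used above):

func checkReply(reply *driver.Response) error {
	// Mirror the server side: unwrap the proto.Error carried in the
	// reply back into a plain Go error (field name assumed).
	if reply.Error != nil {
		return reply.Error.GoError()
	}
	return nil
}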

Example 4: makeResultFromError

// If we hit an error and there is a pending transaction, roll back
// the transaction before returning. The client does not have to
// deal with cleaning up transaction state.
func makeResultFromError(planMaker *planner, err error) driver.Result {
	if planMaker.txn != nil {
		if err != errTransactionAborted {
			planMaker.txn.Cleanup(err)
		}
	}
	var errProto proto.Error
	errProto.SetResponseGoError(err)
	return driver.Result{Error: &errProto}
}
Author: earlredwolf | Project: cockroach | Lines: 13 | Source: executor.go

Example 5: rollbackTxnAndReturnResultWithError

// If we hit an error and there is a pending transaction, roll back
// the transaction before returning. The client does not have to
// deal with cleaning up transaction state.
func rollbackTxnAndReturnResultWithError(planMaker *planner, err error) driver.Result {
	if planMaker.txn != nil {
		// What do we do with a rollback error? This is an internally
		// initiated rollback that the client is unaware of. Reporting it
		// will only cause confusion. Not reporting it could leave a transaction
		// pending, but that will get GCed eventually.
		_ = planMaker.txn.Rollback()
	}
	var errProto proto.Error
	errProto.SetResponseGoError(err)
	return driver.Result{Error: &errProto}
}
Author: slavau | Project: cockroach | Lines: 15 | Source: server.go

Example 6: updateState

// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(ctx context.Context, ba proto.BatchRequest, br *proto.BatchResponse, pErr *proto.Error) *proto.Error {
	trace := tracer.FromCtx(ctx)
	newTxn := &proto.Transaction{}
	newTxn.Update(ba.GetTxn())
	err := pErr.GoError()
	switch t := err.(type) {
	case nil:
		newTxn.Update(br.GetTxn())
		// Move txn timestamp forward to response timestamp if applicable.
		// TODO(tschottdorf): see (*Replica).executeBatch and comments within.
		// Looks like this isn't necessary any more, nor did it prevent a bug
		// referenced in a TODO there.
		newTxn.Timestamp.Forward(br.Timestamp)
	case *proto.TransactionStatusError:
		// Likely already committed or more obscure errors such as epoch or
		// timestamp regressions; consider txn dead.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.OpRequiresTxnError:
		// TODO(tschottdorf): range-spanning autowrap currently broken.
		panic("TODO(tschottdorf): disabled")
	case *proto.ReadWithinUncertaintyIntervalError:
		// Mark the host as certain. See the protobuf comment for
		// Transaction.CertainNodes for details.
		if t.NodeID == 0 {
			panic("no replica set in header on uncertainty restart")
		}
		newTxn.CertainNodes.Add(t.NodeID)
		// If the reader encountered a newer write within the uncertainty
		// interval, move the timestamp forward, just past that write or
		// up to MaxTimestamp, whichever comes first.
		candidateTS := newTxn.MaxTimestamp
		candidateTS.Backward(t.ExistingTimestamp.Add(0, 1))
		newTxn.Timestamp.Forward(candidateTS)
		newTxn.Restart(ba.GetUserPriority(), newTxn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case *proto.TransactionAbortedError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Priority = t.Txn.Priority
		t.Txn = *newTxn
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.TransactionPushError:
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
		newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
		t.Txn = newTxn
	case *proto.TransactionRetryError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case proto.TransactionRestartError:
		// Assertion: The above cases should exhaust all ErrorDetails which
		// carry a Transaction.
		if pErr.Detail != nil {
			panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
		}
	}

	return func() *proto.Error {
		if len(newTxn.ID) <= 0 {
			return pErr
		}
		id := string(newTxn.ID)
		tc.Lock()
		defer tc.Unlock()
		txnMeta := tc.txns[id]
		// For successful transactional requests, keep the written intents and
		// the updated transaction record to be sent along with the reply.
		// The transaction metadata is created with the first writing operation.
		// TODO(tschottdorf): already computed the intents prior to sending,
		// consider re-using those.
		if intents := ba.GetIntents(); len(intents) > 0 && err == nil {
			if txnMeta == nil {
				newTxn.Writing = true
				txnMeta = &txnMetadata{
					txn:              *newTxn,
					keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
					firstUpdateNanos: tc.clock.PhysicalNow(),
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[id] = txnMeta
				// If the transaction is already over, there's no point in
				// launching a one-off coordinator which will shut down right
				// away.
				if _, isEnding := ba.GetArg(proto.EndTransaction); !isEnding {
					trace.Event("coordinator spawns")
					if !tc.stopper.RunAsyncTask(func() {
						tc.heartbeatLoop(id)
					}) {
						// The system is already draining and we can't start the
// ... (remainder of this function omitted) ...
Author: freakynit | Project: cockroach | Lines: 101 | Source: txn_coord_sender.go
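The uncertainty-restart arithmetic in the example above is compact enough to misread: Forward moves a timestamp to the maximum of itself and its argument, while Backward moves it to the minimum. A standalone sketch with illustrative values, assuming the proto.Timestamp fields of this revision:

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/proto"
)

func main() {
	maxTS := proto.Timestamp{WallTime: 100}   // plays the role of txn.MaxTimestamp
	existing := proto.Timestamp{WallTime: 42} // the conflicting newer write

	// candidate = min(MaxTimestamp, existing + one logical tick):
	// advance just past the conflicting write, capped at MaxTimestamp.
	candidate := maxTS
	candidate.Backward(existing.Add(0, 1))

	// New txn timestamp = max(current, candidate).
	current := proto.Timestamp{WallTime: 10}
	current.Forward(candidate)
	fmt.Println(current) // wall time 42, logical 1, under these assumptions
}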

Example 7: Send

// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	tc.maybeBeginTxn(&ba)
	ba.CmdID = ba.GetOrCreateCmdID(tc.clock.PhysicalNow())
	var startNS int64

	// This is the earliest point at which the request has a ClientCmdID and/or
	// TxnID (if applicable). Begin a Trace which follows this request.
	trace := tc.tracer.NewTrace(&ba)
	defer trace.Finalize()
	// TODO(tschottdorf): always "Batch"
	defer trace.Epoch(fmt.Sprintf("sending %s", ba.Method()))()
	ctx = tracer.ToCtx(ctx, trace)

	// TODO(tschottdorf): No looping through the batch will be necessary once
	// we've eliminated all the redundancies.
	for _, arg := range ba.Requests {
		trace.Event(fmt.Sprintf("%T", arg.GetValue()))
		if err := updateForBatch(arg.GetInner(), ba.RequestHeader); err != nil {
			return nil, proto.NewError(err)
		}
	}

	var id string // optional transaction ID
	if ba.Txn != nil {
		// If this request is part of a transaction...
		id = string(ba.Txn.ID)
		// Verify that if this Transaction is not read-only, we have it on
		// file. If not, refuse writes - the client must have issued a write on
		// another coordinator previously.
		if ba.Txn.Writing && ba.IsTransactionWrite() {
			tc.Lock()
			_, ok := tc.txns[id]
			tc.Unlock()
			if !ok {
				return nil, proto.NewError(util.Errorf("transaction must not write on multiple coordinators"))
			}
		}

		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if ba.IsReadOnly() {
			ba.Timestamp = ba.Txn.OrigTimestamp
		} else {
			ba.Timestamp = ba.Txn.Timestamp
		}

		if rArgs, ok := ba.GetArg(proto.EndTransaction); ok {
			et := rArgs.(*proto.EndTransactionRequest)
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
			if len(et.Intents) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, proto.NewError(util.Errorf("client must not pass intents to EndTransaction"))
			}
			if len(et.Key) != 0 {
				return nil, proto.NewError(util.Errorf("EndTransaction must not have a Key set"))
			}
			et.Key = ba.Txn.Key

			tc.Lock()
			txnMeta, metaOK := tc.txns[id]
			if id != "" && metaOK {
				et.Intents = txnMeta.intents()
			}
			tc.Unlock()

			if intents := ba.GetIntents(); len(intents) > 0 {
				// Writes in Batch, so EndTransaction is fine. Should add
				// outstanding intents to EndTransaction, though.
				// TODO(tschottdorf): possible issues when the batch fails,
				// but the intents have been added anyways.
				// TODO(tschottdorf): some of these intents may be covered
				// by others, for example {[a,b), a}). This can lead to
				// some extra requests when those are non-local to the txn
				// record. But it doesn't seem worth optimizing now.
				et.Intents = append(et.Intents, intents...)
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, proto.NewError(util.Errorf("transaction is already committed or aborted"))
			}
			if len(et.Intents) == 0 {
// ... (remainder of this function omitted) ...
Author: freakynit | Project: cockroach | Lines: 101 | Source: txn_coord_sender.go

Example 8: sendChunk

// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc).
func (ds *DistSender) sendChunk(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	// TODO(tschottdorf): prepare for removing Key and EndKey from BatchRequest,
	// making sure that anything that relies on them goes bust.
	ba.Key, ba.EndKey = nil, nil

	isReverse := ba.IsReverse()

	trace := tracer.FromCtx(ctx)

	// The minimal key range encompassing all requests contained within.
	// Local addressing has already been resolved.
	// TODO(tschottdorf): consider rudimentary validation of the batch here
	// (for example, non-range requests with EndKey, or empty key ranges).
	from, to := keys.Range(ba)
	var br *proto.BatchResponse
	// Send the request to one range per iteration.
	for {
		options := lookupOptions{
			useReverseScan: isReverse,
		}

		var curReply *proto.BatchResponse
		var desc *proto.RangeDescriptor
		var needAnother bool
		var pErr *proto.Error
		for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
			// Get range descriptor (or, when spanning range, descriptors). Our
			// error handling below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			descDone := trace.Epoch("meta descriptor lookup")
			var evictDesc func()
			desc, needAnother, evictDesc, pErr = ds.getDescriptors(from, to, options)
			descDone()

			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if pErr != nil {
				if pErr.Retryable {
					if log.V(1) {
						log.Warning(pErr)
					}
					continue
				}
				break
			}

			// If there's no transaction and op spans ranges, possibly
			// re-run as part of a transaction for consistency. The
			// case where we don't need to re-run is if the read
			// consistency is not required.
			if needAnother && ba.Txn == nil && ba.IsRange() &&
				ba.ReadConsistency != proto.INCONSISTENT {
				return nil, proto.NewError(&proto.OpRequiresTxnError{})
			}

			// It's possible that the returned descriptor misses parts of the
			// keys it's supposed to scan after it's truncated to match the
			// descriptor. Example revscan [a,g), first desc lookup for "g"
			// returns descriptor [c,d) -> [d,g) is never scanned.
			// We evict and retry in such a case.
			if (isReverse && !desc.ContainsKeyRange(desc.StartKey, to)) || (!isReverse && !desc.ContainsKeyRange(from, desc.EndKey)) {
				evictDesc()
				continue
			}

			curReply, pErr = func() (*proto.BatchResponse, *proto.Error) {
				// Truncate the request to our current key range.
				untruncate, numActive, trErr := truncate(&ba, desc, from, to)
				if numActive == 0 && trErr == nil {
					untruncate()
					// This shouldn't happen in the wild, but some tests
					// exercise it.
					return nil, proto.NewError(util.Errorf("truncation resulted in empty batch on [%s,%s): %s",
						from, to, ba))
				}
				defer untruncate()
				if trErr != nil {
					return nil, proto.NewError(trErr)
				}
				// TODO(tschottdorf): make key range on batch redundant. The
				// requests within dictate it anyways.
				ba.Key, ba.EndKey = keys.Range(ba)
				reply, err := ds.sendAttempt(trace, ba, desc)
				ba.Key, ba.EndKey = nil, nil

				if err != nil {
					if log.V(1) {
						log.Warningf("failed to invoke %s: %s", ba, pErr)
					}
				}
				return reply, err
			}()
			// If sending succeeded, break this loop.
			if pErr == nil {
				break
			}

// ... (remainder of this function omitted) ...
Author: luckywhu | Project: cockroach | Lines: 101 | Source: dist_sender.go
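The Retryable flag on proto.Error is what drives the retry.Start loop above. The same pattern in isolation, as a hedged sketch (the retry.Options parameter and the attempt callback shape are assumptions):

func runWithRetry(opts retry.Options, attempt func() *proto.Error) *proto.Error {
	var pErr *proto.Error
	for r := retry.Start(opts); r.Next(); {
		if pErr = attempt(); pErr == nil {
			return nil // success
		}
		if !pErr.Retryable {
			break // permanent failure; surface it to the caller
		}
	}
	return pErr
}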

Example 9: fillResults

func (b *Batch) fillResults(br *proto.BatchResponse, pErr *proto.Error) error {
	offset := 0
	for i := range b.Results {
		result := &b.Results[i]

		for k := 0; k < result.calls; k++ {
			args := b.reqs[offset+k]

			var reply proto.Response
			if result.Err == nil {
				result.Err = pErr.GoError()
				if result.Err == nil {
					if offset+k < len(br.Responses) {
						reply = br.Responses[offset+k].GetValue().(proto.Response)
					} else if args.Method() != proto.EndTransaction {
						// TODO(tschottdorf): EndTransaction is excepted here
						// because it may be elided (r/o txns). Might prefer to
						// simulate an EndTransaction response instead; this
						// effectively just leaks here.
						panic("not enough responses for calls")
					}
				}
			}

			switch req := args.(type) {
			case *proto.GetRequest:
				row := &result.Rows[k]
				row.Key = []byte(req.Key)
				if result.Err == nil {
					row.Value = reply.(*proto.GetResponse).Value
				}
			case *proto.PutRequest:
				row := &result.Rows[k]
				row.Key = []byte(req.Key)
				if result.Err == nil {
					row.Value = &req.Value
					row.setTimestamp(reply.(*proto.PutResponse).Timestamp)
				}
			case *proto.ConditionalPutRequest:
				row := &result.Rows[k]
				row.Key = []byte(req.Key)
				if result.Err == nil {
					row.Value = &req.Value
					row.setTimestamp(reply.(*proto.ConditionalPutResponse).Timestamp)
				}
			case *proto.IncrementRequest:
				row := &result.Rows[k]
				row.Key = []byte(req.Key)
				if result.Err == nil {
					t := reply.(*proto.IncrementResponse)
					row.Value = &proto.Value{
						Bytes: encoding.EncodeUint64(nil, uint64(t.NewValue)),
						Tag:   proto.ValueType_INT,
					}
					row.setTimestamp(t.Timestamp)
				}
			case *proto.ScanRequest:
				if result.Err == nil {
					t := reply.(*proto.ScanResponse)
					result.Rows = make([]KeyValue, len(t.Rows))
					for j := range t.Rows {
						src := &t.Rows[j]
						dst := &result.Rows[j]
						dst.Key = src.Key
						dst.Value = &src.Value
					}
				}
			case *proto.ReverseScanRequest:
				if result.Err == nil {
					t := reply.(*proto.ReverseScanResponse)
					result.Rows = make([]KeyValue, len(t.Rows))
					for j := range t.Rows {
						src := &t.Rows[j]
						dst := &result.Rows[j]
						dst.Key = src.Key
						dst.Value = &src.Value
					}
				}
			case *proto.DeleteRequest:
				row := &result.Rows[k]
				row.Key = []byte(req.Key)

			case *proto.DeleteRangeRequest:
			case *proto.EndTransactionRequest:
			case *proto.AdminMergeRequest:
			case *proto.AdminSplitRequest:
			case *proto.HeartbeatTxnRequest:
			case *proto.GCRequest:
			case *proto.PushTxnRequest:
			case *proto.RangeLookupRequest:
			case *proto.ResolveIntentRequest:
			case *proto.ResolveIntentRangeRequest:
			case *proto.MergeRequest:
			case *proto.TruncateLogRequest:
			case *proto.LeaderLeaseRequest:
			case *proto.BatchRequest:
				// Nothing to do for these methods as they do not generate any
				// rows.

			default:
// ... (remainder of this function omitted) ...
Author: kumarh1982 | Project: cockroach | Lines: 101 | Source: batch.go
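From the caller's perspective, fillResults is what populates b.Results after a batch runs. A hedged sketch of the consuming side; the Get/Put builders and db.Run are assumed from the client package API of this era:

func readBatch(db *client.DB) error {
	b := &client.Batch{}
	b.Put("a", "1") // results are filled in only after Run
	b.Get("a")
	if err := db.Run(b); err != nil {
		return err
	}
	for _, result := range b.Results {
		if result.Err != nil {
			return result.Err
		}
		for _, row := range result.Rows {
			fmt.Println(string(row.Key), row.Value)
		}
	}
	return nil
}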


Note: The github.com/cockroachdb/cockroach/proto.Error class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by their respective authors; copyright remains with the original authors. Consult each project's License before use or redistribution; do not republish without permission.