This page collects typical usage examples of the Error function from the Go package github.com/cockroachdb/cockroach/pkg/util/log. If you are wondering what log.Error does, how to call it, or what real-world usage looks like, the curated examples below may help.
A total of 15 code examples of the Error function are shown below, ordered by popularity.
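All of the examples share the same call pattern: log.Error takes a context.Context followed by the error (or message values) to record. The snippet below is a minimal, self-contained sketch of that pattern, assuming the import path shown above; the doWork helper is hypothetical and exists only to produce an error.

package main

import (
    "context"
    "errors"

    "github.com/cockroachdb/cockroach/pkg/util/log"
)

// doWork is a hypothetical helper that stands in for any operation
// which may fail.
func doWork() error {
    return errors.New("something went wrong")
}

func main() {
    ctx := context.Background()
    if err := doWork(); err != nil {
        // log.Error records the error against the given context; the
        // examples below typically log and then return or swallow the error.
        log.Error(ctx, err)
    }
}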
Example 1: Node
// Node handles GET requests for a single node's status.
func (s *statusServer) Node(
    ctx context.Context, req *serverpb.NodeRequest,
) (*status.NodeStatus, error) {
    ctx = s.AnnotateCtx(ctx)
    nodeID, _, err := s.parseNodeID(req.NodeId)
    if err != nil {
        return nil, grpc.Errorf(codes.InvalidArgument, err.Error())
    }
    key := keys.NodeStatusKey(nodeID)
    b := &client.Batch{}
    b.Get(key)
    if err := s.db.Run(ctx, b); err != nil {
        log.Error(ctx, err)
        return nil, grpc.Errorf(codes.Internal, err.Error())
    }
    var nodeStatus status.NodeStatus
    if err := b.Results[0].Rows[0].ValueProto(&nodeStatus); err != nil {
        err = errors.Errorf("could not unmarshal NodeStatus from %s: %s", key, err)
        log.Error(ctx, err)
        return nil, grpc.Errorf(codes.Internal, err.Error())
    }
    return &nodeStatus, nil
}
Example 2: Nodes
// Nodes returns all node statuses.
func (s *statusServer) Nodes(
    ctx context.Context, req *serverpb.NodesRequest,
) (*serverpb.NodesResponse, error) {
    ctx = s.AnnotateCtx(ctx)
    startKey := keys.StatusNodePrefix
    endKey := startKey.PrefixEnd()
    b := &client.Batch{}
    b.Scan(startKey, endKey)
    if err := s.db.Run(ctx, b); err != nil {
        log.Error(ctx, err)
        return nil, grpc.Errorf(codes.Internal, err.Error())
    }
    rows := b.Results[0].Rows
    resp := serverpb.NodesResponse{
        Nodes: make([]status.NodeStatus, len(rows)),
    }
    for i, row := range rows {
        if err := row.ValueProto(&resp.Nodes[i]); err != nil {
            log.Error(ctx, err)
            return nil, grpc.Errorf(codes.Internal, err.Error())
        }
    }
    return &resp, nil
}
Example 3: setupFlow
func (ds *ServerImpl) setupFlow(
    ctx context.Context, req *SetupFlowRequest, simpleFlowConsumer RowReceiver,
) (*Flow, error) {
    sp, err := tracing.JoinOrNew(ds.AmbientContext.Tracer, req.TraceContext, "flow")
    if err != nil {
        return nil, err
    }
    ctx = opentracing.ContextWithSpan(ctx, sp)
    txn := ds.setupTxn(ctx, &req.Txn)
    flowCtx := FlowCtx{
        Context: ctx,
        id:      req.Flow.FlowID,
        evalCtx: &ds.evalCtx,
        rpcCtx:  ds.RPCContext,
        txn:     txn,
    }
    f := newFlow(flowCtx, ds.flowRegistry, simpleFlowConsumer)
    if err := f.setupFlow(&req.Flow); err != nil {
        log.Error(ctx, err)
        sp.Finish()
        return nil, err
    }
    return f, nil
}
Example 4: finish
func (c *v3Conn) finish(ctx context.Context) {
    // This is better than always flushing on error.
    if err := c.wr.Flush(); err != nil {
        log.Error(ctx, err)
    }
    _ = c.conn.Close()
}
Example 5: setupFlow
func (ds *ServerImpl) setupFlow(
    ctx context.Context, req *SetupFlowRequest, syncFlowConsumer RowReceiver,
) (*Flow, error) {
    sp, err := tracing.JoinOrNew(ds.AmbientContext.Tracer, req.TraceContext, "flow")
    if err != nil {
        return nil, err
    }
    ctx = opentracing.ContextWithSpan(ctx, sp)
    // TODO(radu): we should sanity check some of these fields (especially
    // txnProto).
    flowCtx := FlowCtx{
        Context:  ctx,
        id:       req.Flow.FlowID,
        evalCtx:  &ds.evalCtx,
        rpcCtx:   ds.RPCContext,
        txnProto: &req.Txn,
        clientDB: ds.DB,
    }
    f := newFlow(flowCtx, ds.flowRegistry, syncFlowConsumer)
    if err := f.setupFlow(&req.Flow); err != nil {
        log.Error(ctx, err)
        sp.Finish()
        return nil, err
    }
    return f, nil
}
Example 6: maybeLogError
func (a *allocSim) maybeLogError(err error) {
    if localcluster.IsUnavailableError(err) {
        return
    }
    log.Error(context.Background(), err)
    atomic.AddUint64(&a.stats.errors, 1)
}
Example 7: maybeLogError
func (z *zeroSum) maybeLogError(err error) {
    if localcluster.IsUnavailableError(err) || strings.Contains(err.Error(), "range is frozen") {
        return
    }
    log.Error(context.Background(), err)
    atomic.AddUint64(&z.stats.errors, 1)
}
Example 8: handleVars
func (s *statusServer) handleVars(w http.ResponseWriter, r *http.Request) {
    w.Header().Set(httputil.ContentTypeHeader, httputil.PlaintextContentType)
    err := s.metricSource.PrintAsText(w)
    if err != nil {
        log.Error(r.Context(), err)
        http.Error(w, err.Error(), http.StatusInternalServerError)
    }
}
Example 9: FlowStream
// FlowStream is part of the DistSQLServer interface.
func (ds *ServerImpl) FlowStream(stream DistSQL_FlowStreamServer) error {
    ctx := ds.AnnotateCtx(context.TODO())
    err := ds.flowStreamInt(stream)
    if err != nil {
        log.Error(ctx, err)
    }
    return err
}
Example 10: SetStorage
// SetStorage provides an instance of the Storage interface
// for reading and writing gossip bootstrap data from persistent
// storage. This should be invoked as early in the lifecycle of a
// gossip instance as possible, but can be called at any time.
func (g *Gossip) SetStorage(storage Storage) error {
    ctx := g.AnnotateCtx(context.TODO())
    // Maintain lock ordering.
    var storedBI BootstrapInfo
    if err := storage.ReadBootstrapInfo(&storedBI); err != nil {
        log.Warningf(ctx, "failed to read gossip bootstrap info: %s", err)
    }
    g.mu.Lock()
    defer g.mu.Unlock()
    g.storage = storage
    // Merge the stored bootstrap info addresses with any we've become
    // aware of through gossip.
    existing := map[string]struct{}{}
    makeKey := func(a util.UnresolvedAddr) string { return fmt.Sprintf("%s,%s", a.Network(), a.String()) }
    for _, addr := range g.bootstrapInfo.Addresses {
        existing[makeKey(addr)] = struct{}{}
    }
    for _, addr := range storedBI.Addresses {
        // If the address is new, and isn't our own address, add it.
        if _, ok := existing[makeKey(addr)]; !ok && addr != g.mu.is.NodeAddr {
            g.maybeAddBootstrapAddress(addr)
        }
    }
    // Persist merged addresses.
    if numAddrs := len(g.bootstrapInfo.Addresses); numAddrs > len(storedBI.Addresses) {
        if err := g.storage.WriteBootstrapInfo(&g.bootstrapInfo); err != nil {
            log.Error(ctx, err)
        }
    }
    // Cycle through all persisted bootstrap hosts and add resolvers for
    // any which haven't already been added.
    newResolverFound := false
    for _, addr := range g.bootstrapInfo.Addresses {
        if !g.maybeAddResolver(addr) {
            continue
        }
        // If we find a new resolver, reset the resolver index so that the
        // next resolver we try is the first of the new resolvers.
        if !newResolverFound {
            newResolverFound = true
            g.resolverIdx = len(g.resolvers) - 1
        }
    }
    // If a new resolver was found, immediately signal bootstrap.
    if newResolverFound {
        if log.V(1) {
            log.Infof(ctx, "found new resolvers from storage; signalling bootstrap")
        }
        g.signalStalledLocked()
    }
    return nil
}
Example 11: DrainQueue
// DrainQueue locks the queue and processes the remaining queued replicas. It
// processes the replicas in the order they're queued in, one at a time.
// Exposed for testing only.
//
// TODO(bdarnell): this method may race with the call to bq.pop() in
// the main loop, in which case it does not guarantee that all
// replicas have been processed by the time it returns. This is most
// noticeable with ForceReplicaGCScanAndProcess, since the replica GC
// queue has many event-driven triggers. This should synchronize
// somehow with processLoop so we wait for anything being handled
// there to finish too. When that's done, the SucceedsSoon at the end
// of TestRemoveRangeWithoutGC (and perhaps others) can be replaced
// with a one-time check.
func (bq *baseQueue) DrainQueue(clock *hlc.Clock) {
    ctx := bq.AnnotateCtx(context.TODO())
    for repl := bq.pop(); repl != nil; repl = bq.pop() {
        annotatedCtx := repl.AnnotateCtx(ctx)
        if err := bq.processReplica(annotatedCtx, repl, clock); err != nil {
            bq.failures.Inc(1)
            log.Error(annotatedCtx, err)
        }
    }
}
Example 12: updateNodeAddress
// updateNodeAddress is a gossip callback which fires with each
// update to the node address. This allows us to compute the
// total size of the gossip network (for determining max peers
// each gossip node is allowed to have), as well as to create
// new resolvers for each encountered host and to write the
// set of gossip node addresses to persistent storage when it
// changes.
func (g *Gossip) updateNodeAddress(_ string, content roachpb.Value) {
    ctx := g.AnnotateCtx(context.TODO())
    var desc roachpb.NodeDescriptor
    if err := content.GetProto(&desc); err != nil {
        log.Error(ctx, err)
        return
    }
    g.mu.Lock()
    defer g.mu.Unlock()
    // Skip if the node has already been seen.
    if _, ok := g.nodeDescs[desc.NodeID]; ok {
        return
    }
    g.nodeDescs[desc.NodeID] = &desc
    // Recompute max peers based on size of network and set the max
    // sizes for incoming and outgoing node sets.
    maxPeers := g.maxPeers(len(g.nodeDescs))
    g.mu.incoming.setMaxSize(maxPeers)
    g.outgoing.setMaxSize(maxPeers)
    // Skip if it's our own address.
    if desc.Address == g.mu.is.NodeAddr {
        return
    }
    // Add this new node address (if it's not already there) to our list
    // of resolvers so we can keep connecting to gossip if the original
    // resolvers go offline.
    g.maybeAddResolver(desc.Address)
    // Add new address (if it's not already there) to bootstrap info and
    // persist if possible.
    if g.storage != nil && g.maybeAddBootstrapAddress(desc.Address) {
        if err := g.storage.WriteBootstrapInfo(&g.bootstrapInfo); err != nil {
            log.Error(ctx, err)
        }
    }
}
Example 13: process
// process() is called on every range for which this node is a lease holder.
func (q *consistencyQueue) process(ctx context.Context, repl *Replica, _ config.SystemConfig) error {
    req := roachpb.CheckConsistencyRequest{}
    if _, pErr := repl.CheckConsistency(ctx, req); pErr != nil {
        log.Error(ctx, pErr.GoError())
    }
    // Update the last processed time for this queue.
    if err := repl.setQueueLastProcessed(ctx, q.name, repl.store.Clock().Now()); err != nil {
        log.ErrEventf(ctx, "failed to update last processed time: %v", err)
    }
    return nil
}
Example 14: Addr
// Addr returns the TCP address to connect to.
func (c *Container) Addr(port nat.Port) *net.TCPAddr {
    containerInfo, err := c.Inspect()
    if err != nil {
        log.Error(context.TODO(), err)
        return nil
    }
    bindings, ok := containerInfo.NetworkSettings.Ports[port]
    if !ok || len(bindings) == 0 {
        return nil
    }
    portNum, err := strconv.Atoi(bindings[0].HostPort)
    if err != nil {
        log.Error(context.TODO(), err)
        return nil
    }
    return &net.TCPAddr{
        IP:   dockerIP(),
        Port: portNum,
    }
}
Example 15: GRPCDial
// GRPCDial calls grpc.Dial with the options appropriate for the context.
func (ctx *Context) GRPCDial(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
    ctx.conns.Lock()
    meta, ok := ctx.conns.cache[target]
    if !ok {
        meta = &connMeta{}
        ctx.conns.cache[target] = meta
    }
    ctx.conns.Unlock()
    meta.Do(func() {
        var dialOpt grpc.DialOption
        if ctx.Insecure {
            dialOpt = grpc.WithInsecure()
        } else {
            tlsConfig, err := ctx.GetClientTLSConfig()
            if err != nil {
                meta.err = err
                return
            }
            dialOpt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
        }
        dialOpts := make([]grpc.DialOption, 0, 2+len(opts))
        dialOpts = append(dialOpts, dialOpt)
        dialOpts = append(dialOpts, grpc.WithBackoffMaxDelay(maxBackoff))
        dialOpts = append(dialOpts, opts...)
        if log.V(1) {
            log.Infof(ctx.masterCtx, "dialing %s", target)
        }
        meta.conn, meta.err = grpc.DialContext(ctx.masterCtx, target, dialOpts...)
        if meta.err == nil {
            if err := ctx.Stopper.RunTask(func() {
                ctx.Stopper.RunWorker(func() {
                    err := ctx.runHeartbeat(meta.conn, target)
                    if err != nil && !grpcutil.IsClosedConnection(err) {
                        log.Error(ctx.masterCtx, err)
                    }
                    ctx.removeConn(target, meta)
                })
            }); err != nil {
                meta.err = err
                // removeConn and ctx's cleanup worker both lock ctx.conns. However,
                // to avoid racing with meta's initialization, the cleanup worker
                // blocks on meta.Do while holding ctx.conns. Invoke removeConn
                // asynchronously to avoid deadlock.
                go ctx.removeConn(target, meta)
            }
        }
    })
    return meta.conn, meta.err
}