This article collects typical usage examples of the Golang function log.WithLogTag from github.com/cockroachdb/cockroach/pkg/util/log. If you are wondering what WithLogTag does, how it is used, or where to find examples of it, the curated code samples below may help.
Eleven code examples of the WithLogTag function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
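Before the examples, a minimal sketch of how the function is typically called may help. Judging from the calls below, the signature appears to be WithLogTag(ctx context.Context, name string, value interface{}) context.Context: it returns a derived context carrying a log tag that annotates every log message emitted with that context. The import paths and the main wiring in this sketch are assumptions for illustration, not taken from the examples.

package main

import (
	"golang.org/x/net/context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

func main() {
	// Derive a context carrying a "Sorter" tag; log calls that receive this
	// context include the tag (for example as a [Sorter] prefix) in their output.
	ctx := log.WithLogTag(context.Background(), "Sorter", nil)

	// Tags accumulate on derived contexts: this context now carries both the
	// "Sorter" tag and an "n" tag with value 3.
	ctx = log.WithLogTag(ctx, "n", 3)

	log.Infof(ctx, "sorted %d rows", 128)
}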
Example 1: newEvaluator
func newEvaluator(
	flowCtx *FlowCtx, spec *EvaluatorSpec, input RowSource, output RowReceiver,
) (*evaluator, error) {
	ev := &evaluator{
		input:  input,
		output: output,
		ctx:    log.WithLogTag(flowCtx.Context, "Evaluator", nil),
		exprs:  make([]exprHelper, len(spec.Exprs)),
		render: make([]parser.TypedExpr, len(spec.Exprs)),
		tuple:  make(parser.DTuple, len(spec.Exprs)),
	}
	for i, expr := range spec.Exprs {
		err := ev.exprs[i].init(expr, spec.Types, flowCtx.evalCtx)
		if err != nil {
			return nil, err
		}
	}
	// Loop over the expressions in our expression set and extract out fully
	// typed expressions; these will later be evaluated for each input row to
	// construct our output row.
	for i := range ev.exprs {
		typedExpr, err := (&ev.exprs[i]).expr.TypeCheck(nil, parser.NoTypePreference)
		if err != nil {
			return nil, err
		}
		ev.render[i] = typedExpr
	}
	return ev, nil
}
Example 2: init
func (jb *joinerBase) init(
	flowCtx *FlowCtx,
	inputs []RowSource,
	output RowReceiver,
	outputCols []uint32,
	jType JoinType,
	leftTypes []*sqlbase.ColumnType,
	rightTypes []*sqlbase.ColumnType,
	expr Expression,
) error {
	jb.inputs = inputs
	jb.output = output
	jb.ctx = log.WithLogTag(flowCtx.Context, "Joiner", nil)
	jb.outputCols = columns(outputCols)
	jb.joinType = joinType(jType)
	jb.emptyLeft = make(sqlbase.EncDatumRow, len(leftTypes))
	for i := range jb.emptyLeft {
		jb.emptyLeft[i].Datum = parser.DNull
	}
	jb.emptyRight = make(sqlbase.EncDatumRow, len(rightTypes))
	for i := range jb.emptyRight {
		jb.emptyRight[i].Datum = parser.DNull
	}
	return jb.filter.init(expr, append(leftTypes, rightTypes...), flowCtx.evalCtx)
}
Example 3: newSorter
func newSorter(flowCtx *FlowCtx, spec *SorterSpec, input RowSource, output RowReceiver) *sorter {
	return &sorter{
		input:    input,
		output:   output,
		ordering: convertToColumnOrdering(spec.OutputOrdering),
		matchLen: spec.OrderingMatchLen,
		limit:    spec.Limit,
		ctx:      log.WithLogTag(flowCtx.Context, "Sorter", nil),
	}
}
Example 4: bootstrap
// bootstrap connects the node to the gossip network. Bootstrapping
// commences in the event there are no connected clients or the
// sentinel gossip info is not available. After a successful bootstrap
// connection, this method will block on the stalled condvar, which
// receives notifications that gossip network connectivity has been
// lost and requires re-bootstrapping.
func (g *Gossip) bootstrap() {
	g.server.stopper.RunWorker(func() {
		ctx := g.AnnotateCtx(context.Background())
		ctx = log.WithLogTag(ctx, "bootstrap", nil)
		var bootstrapTimer timeutil.Timer
		defer bootstrapTimer.Stop()
		for {
			if g.server.stopper.RunTask(func() {
				g.mu.Lock()
				defer g.mu.Unlock()
				haveClients := g.outgoing.len() > 0
				haveSentinel := g.mu.is.getInfo(KeySentinel) != nil
				log.Eventf(ctx, "have clients: %t, have sentinel: %t", haveClients, haveSentinel)
				if !haveClients || !haveSentinel {
					// Try to get another bootstrap address from the resolvers.
					if addr := g.getNextBootstrapAddress(); addr != nil {
						g.startClient(addr, g.NodeID.Get())
					} else {
						bootstrapAddrs := make([]string, 0, len(g.bootstrapping))
						for addr := range g.bootstrapping {
							bootstrapAddrs = append(bootstrapAddrs, addr)
						}
						log.Eventf(ctx, "no next bootstrap address; currently bootstrapping: %v", bootstrapAddrs)
						// We couldn't start a client; signal that we're stalled so that
						// we'll retry.
						g.maybeSignalStatusChangeLocked()
					}
				}
			}) != nil {
				return
			}
			// Pause an interval before next possible bootstrap.
			bootstrapTimer.Reset(g.bootstrapInterval)
			log.Eventf(ctx, "sleeping %s until bootstrap", g.bootstrapInterval)
			select {
			case <-bootstrapTimer.C:
				bootstrapTimer.Read = true
				// break
			case <-g.server.stopper.ShouldStop():
				return
			}
			log.Eventf(ctx, "idling until bootstrap required")
			// Block until we need bootstrapping again.
			select {
			case <-g.stalledCh:
				log.Eventf(ctx, "detected stall; commencing bootstrap")
				// break
			case <-g.server.stopper.ShouldStop():
				return
			}
		}
	})
}
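The structure above — attempt a connection, pause on a timer, then block until a stall signal asks for another bootstrap — is a common retry-then-idle pattern. The stripped-down, self-contained sketch below shows the same loop shape; the channel names and the attempt step are hypothetical stand-ins, not the actual Gossip fields.

package main

import (
	"fmt"
	"time"
)

// bootstrapLoop is a hypothetical, simplified version of the loop above:
// attempt, wait out an interval, then idle until a stall signal requests
// another bootstrap. Closing stopCh ends the loop.
func bootstrapLoop(interval time.Duration, stalledCh, stopCh <-chan struct{}) {
	for {
		fmt.Println("attempting bootstrap")
		// Pause an interval before the next possible bootstrap.
		select {
		case <-time.After(interval):
		case <-stopCh:
			return
		}
		// Block until bootstrapping is needed again.
		select {
		case <-stalledCh:
			fmt.Println("detected stall; bootstrapping again")
		case <-stopCh:
			return
		}
	}
}

func main() {
	stalled := make(chan struct{}, 1)
	stop := make(chan struct{})
	go bootstrapLoop(10*time.Millisecond, stalled, stop)
	stalled <- struct{}{} // simulate lost gossip connectivity
	time.Sleep(50 * time.Millisecond)
	close(stop)
}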
Example 5: maybeRunPeriodicCheck
// If the time is greater than the timestamp stored at `key`, run `f`.
// Before running `f`, the timestamp is updated forward by a small amount via
// a compare-and-swap to ensure at-most-one concurrent execution. After `f`
// executes the timestamp is set to the next execution time.
// Returns how long until `f` should be run next (i.e. when this method should
// be called again).
func (s *Server) maybeRunPeriodicCheck(
	op string, key roachpb.Key, f func(context.Context),
) time.Duration {
	ctx, span := s.AnnotateCtxWithSpan(context.Background(), "op")
	defer span.Finish()
	// Add the op name to the log context.
	ctx = log.WithLogTag(ctx, op, nil)
	resp, err := s.db.Get(ctx, key)
	if err != nil {
		log.Infof(ctx, "error reading time: %s", err)
		return updateCheckRetryFrequency
	}
	// Return early below if either the next check time is in the future or if
	// the atomic compare-and-set of that time failed (which would happen if
	// two nodes tried at the same time).
	if resp.Exists() {
		whenToCheck, pErr := resp.Value.GetTime()
		if pErr != nil {
			log.Warningf(ctx, "error decoding time: %s", pErr)
			return updateCheckRetryFrequency
		} else if delay := whenToCheck.Sub(timeutil.Now()); delay > 0 {
			return delay
		}
		nextRetry := whenToCheck.Add(updateCheckRetryFrequency)
		if err := s.db.CPut(ctx, key, nextRetry, whenToCheck); err != nil {
			if log.V(2) {
				log.Infof(ctx, "could not set next version check time (maybe another node checked?): %s", err)
			}
			return updateCheckRetryFrequency
		}
	} else {
		log.Infof(ctx, "No previous %s time.", op)
		nextRetry := timeutil.Now().Add(updateCheckRetryFrequency)
		// CPut with `nil` prev value to assert that no other node has checked.
		if err := s.db.CPut(ctx, key, nextRetry, nil); err != nil {
			if log.V(2) {
				log.Infof(ctx, "Could not set %s time (maybe another node checked?): %v", op, err)
			}
			return updateCheckRetryFrequency
		}
	}
	f(ctx)
	if err := s.db.Put(ctx, key, timeutil.Now().Add(updateCheckFrequency)); err != nil {
		log.Infof(ctx, "Error updating %s time: %v", op, err)
	}
	return updateCheckFrequency
}
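The at-most-once guarantee described in the comment comes from the conditional put: two nodes may read the same stored time, but only the one whose CPut still sees that old value succeeds, and the loser skips running f. The sketch below illustrates that race with a hypothetical in-memory compare-and-put; it shows the pattern only and is not the KV client's API.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// store is a hypothetical stand-in for the KV layer's conditional put.
type store struct {
	mu  sync.Mutex
	val string
}

// cput replaces the value only if it still equals expected.
func (s *store) cput(newVal, expected string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.val != expected {
		return errors.New("condition failed: value changed")
	}
	s.val = newVal
	return nil
}

func main() {
	s := &store{val: "2016-11-01"}
	// Two "nodes" read the same old value and race to claim the check.
	for _, node := range []string{"n1", "n2"} {
		if err := s.cput("2016-11-02", "2016-11-01"); err != nil {
			fmt.Println(node, "lost the race:", err) // would skip running f
		} else {
			fmt.Println(node, "claimed the check and runs f")
		}
	}
}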
Example 6: newDistinct
func newDistinct(
	flowCtx *FlowCtx, spec *DistinctSpec, input RowSource, output RowReceiver,
) (*distinct, error) {
	d := &distinct{
		input:       input,
		output:      output,
		ctx:         log.WithLogTag(flowCtx.Context, "Evaluator", nil),
		orderedCols: make(map[uint32]struct{}),
	}
	for _, ord := range spec.Ordering.Columns {
		d.orderedCols[ord.ColIdx] = struct{}{}
	}
	return d, nil
}
Example 7: newEvaluator
func newEvaluator(
	flowCtx *FlowCtx, spec *EvaluatorSpec, input RowSource, output RowReceiver,
) (*evaluator, error) {
	ev := &evaluator{
		flowCtx:   flowCtx,
		input:     input,
		output:    output,
		specExprs: spec.Exprs,
		ctx:       log.WithLogTag(flowCtx.Context, "Evaluator", nil),
		exprs:     make([]exprHelper, len(spec.Exprs)),
		exprTypes: make([]sqlbase.ColumnType_Kind, len(spec.Exprs)),
	}
	return ev, nil
}
Example 8: newAggregator
func newAggregator(
	ctx *FlowCtx, spec *AggregatorSpec, input RowSource, output RowReceiver,
) (*aggregator, error) {
	ag := &aggregator{
		input:       input,
		output:      output,
		ctx:         log.WithLogTag(ctx.Context, "Agg", nil),
		rows:        &RowBuffer{},
		buckets:     make(map[string]struct{}),
		inputCols:   make(columns, len(spec.Exprs)),
		outputTypes: make([]*sqlbase.ColumnType, len(spec.Exprs)),
		groupCols:   make(columns, len(spec.GroupCols)),
	}
	inputTypes := make([]*sqlbase.ColumnType, len(spec.Exprs))
	for i, expr := range spec.Exprs {
		ag.inputCols[i] = expr.ColIdx
		inputTypes[i] = spec.Types[expr.ColIdx]
	}
	copy(ag.groupCols, spec.GroupCols)
	// Loop over the select expressions and extract any aggregate functions --
	// non-aggregation functions are replaced with parser.NewIdentAggregate
	// (which just returns the last value added to it for a bucket) to provide
	// grouped-by values for each bucket. ag.funcs is updated to contain all
	// the functions which need to be fed values.
	eh := &exprHelper{types: inputTypes}
	eh.vars = parser.MakeIndexedVarHelper(eh, len(eh.types))
	for i, expr := range spec.Exprs {
		fn, retType, err := ag.extractFunc(expr, eh)
		if err != nil {
			return nil, err
		}
		ag.funcs = append(ag.funcs, fn)
		// If the extracted aggregate is an identity function, it has no fixed
		// return type; the output type is therefore the i-th input type.
		if retType == nil {
			ag.outputTypes[i] = inputTypes[i]
		} else {
			typ := sqlbase.DatumTypeToColumnType(retType)
			ag.outputTypes[i] = &typ
		}
	}
	return ag, nil
}
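The comment above notes that non-aggregated columns get an "identity aggregate" that simply returns the last value added for a bucket, which is how group-by values flow through to the output. A hypothetical illustration of that behavior (not the parser.NewIdentAggregate implementation) might look like this:

package main

import "fmt"

// identAgg is a hypothetical identity aggregate: Add overwrites the current
// value, and Result returns whatever was added last for the bucket.
type identAgg struct {
	val interface{}
}

func (a *identAgg) Add(v interface{})    { a.val = v }
func (a *identAgg) Result() interface{} { return a.val }

func main() {
	// For a group-by column, every row in the bucket carries the same value,
	// so "the last value added" is exactly the grouped-by value.
	agg := &identAgg{}
	for _, v := range []string{"red", "red", "red"} {
		agg.Add(v)
	}
	fmt.Println(agg.Result()) // red
}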
Example 9: NewExecutor
// NewExecutor creates an Executor and registers a callback on the
// system config.
func NewExecutor(
	cfg ExecutorConfig, stopper *stop.Stopper, startupMemMetrics *MemoryMetrics,
) *Executor {
	exec := &Executor{
		cfg:              cfg,
		reCache:          parser.NewRegexpCache(512),
		Latency:          metric.NewLatency(MetaLatency, cfg.MetricsSampleInterval),
		TxnBeginCount:    metric.NewCounter(MetaTxnBegin),
		TxnCommitCount:   metric.NewCounter(MetaTxnCommit),
		TxnAbortCount:    metric.NewCounter(MetaTxnAbort),
		TxnRollbackCount: metric.NewCounter(MetaTxnRollback),
		SelectCount:      metric.NewCounter(MetaSelect),
		UpdateCount:      metric.NewCounter(MetaUpdate),
		InsertCount:      metric.NewCounter(MetaInsert),
		DeleteCount:      metric.NewCounter(MetaDelete),
		DdlCount:         metric.NewCounter(MetaDdl),
		MiscCount:        metric.NewCounter(MetaMisc),
		QueryCount:       metric.NewCounter(MetaQuery),
	}
	exec.systemConfigCond = sync.NewCond(exec.systemConfigMu.RLocker())
	gossipUpdateC := cfg.Gossip.RegisterSystemConfigChannel()
	stopper.RunWorker(func() {
		for {
			select {
			case <-gossipUpdateC:
				sysCfg, _ := cfg.Gossip.GetSystemConfig()
				exec.updateSystemConfig(sysCfg)
			case <-stopper.ShouldStop():
				return
			}
		}
	})
	ctx := log.WithLogTag(context.Background(), "startup", nil)
	startupSession := NewSession(ctx, SessionArgs{}, exec, nil, startupMemMetrics)
	if err := exec.virtualSchemas.init(&startupSession.planner); err != nil {
		log.Fatal(ctx, err)
	}
	startupSession.Finish(exec)
	return exec
}
Example 10: performRangeLookup
// performRangeLookup handles delegating the range lookup to the cache's
// RangeDescriptorDB.
func (rdc *rangeDescriptorCache) performRangeLookup(
	ctx context.Context, key roachpb.RKey, useReverseScan bool,
) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, error) {
	// metadataKey is sent to RangeLookup to find the RangeDescriptor
	// which contains key.
	metadataKey, err := meta(key)
	if err != nil {
		return nil, nil, err
	}
	// desc is the RangeDescriptor for the range which contains metadataKey.
	var desc *roachpb.RangeDescriptor
	switch {
	case bytes.Equal(metadataKey, roachpb.RKeyMin):
		// In this case, the requested key is stored in the cluster's first
		// range. Return the first range, which is always gossiped and not
		// queried from the datastore.
		var err error
		if desc, err = rdc.db.FirstRange(); err != nil {
			return nil, nil, err
		}
		return []roachpb.RangeDescriptor{*desc}, nil, nil
	case bytes.HasPrefix(metadataKey, keys.Meta1Prefix):
		// In this case, desc is the cluster's first range.
		var err error
		if desc, err = rdc.db.FirstRange(); err != nil {
			return nil, nil, err
		}
	default:
		// Look up desc from the cache, which will recursively call into
		// this function if it is not cached.
		var err error
		if desc, _, err = rdc.LookupRangeDescriptor(
			ctx, metadataKey, nil, useReverseScan,
		); err != nil {
			return nil, nil, err
		}
	}
	// Tag inner operations.
	ctx = log.WithLogTag(ctx, "range-lookup", nil)
	descs, prefetched, pErr := rdc.db.RangeLookup(ctx, metadataKey, desc, useReverseScan)
	return descs, prefetched, pErr.GoError()
}
Example 11: startWriteSummaries
// startWriteSummaries begins periodically persisting status summaries for the
// node and its stores.
func (n *Node) startWriteSummaries(frequency time.Duration) {
	ctx := log.WithLogTag(n.AnnotateCtx(context.Background()), "summaries", nil)
	// Immediately record summaries once on server startup.
	n.stopper.RunWorker(func() {
		// Write a status summary immediately; this helps the UI remain
		// responsive when new nodes are added.
		if err := n.writeSummaries(ctx); err != nil {
			log.Warningf(ctx, "error recording initial status summaries: %s", err)
		}
		ticker := time.NewTicker(frequency)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if err := n.writeSummaries(ctx); err != nil {
					log.Warningf(ctx, "error recording status summaries: %s", err)
				}
			case <-n.stopper.ShouldStop():
				return
			}
		}
	})
}
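The worker above follows a common write-once-then-tick shape: do the work immediately so a result is available right after startup, then repeat on a ticker until the stopper fires. Below is a self-contained sketch of that pattern with a hypothetical doWork function and a plain stop channel in place of the node's stopper.

package main

import (
	"fmt"
	"time"
)

func main() {
	stop := make(chan struct{})
	doWork := func() { fmt.Println("writing summaries") }

	go func() {
		// Run once immediately so the first result exists right after startup.
		doWork()
		ticker := time.NewTicker(20 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				doWork()
			case <-stop:
				return
			}
		}
	}()

	time.Sleep(70 * time.Millisecond)
	close(stop)
}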