This article collects typical usage examples of the Golang function V from github.com/cockroachdb/cockroach/pkg/util/log. If you are wondering what the V function does, how to use it, or where to find examples of it in real code, the hand-picked samples below should help.
A total of 15 code examples of the V function are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
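All of the examples below share the same idiom: log.V(level) returns true only when the configured verbosity for the calling file is at least level, so the guarded log.Infof call, including the formatting of its arguments, is skipped entirely when verbosity is lower. The minimal sketch below shows the pattern in isolation; processItem is a hypothetical function used only for illustration, and the import assumes the cockroach source tree of this vintage is available on your GOPATH.

package main

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// processItem demonstrates the verbosity-gated logging pattern used by
// every example below.
func processItem(ctx context.Context, name string) {
	// log.V(2) is a cheap check; when verbosity is below 2, neither the
	// formatting nor the log write happens.
	if log.V(2) {
		log.Infof(ctx, "processing item %s", name)
	}
	// ... the actual work would go here ...
}

func main() {
	processItem(context.Background(), "example")
}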
Example 1: waitAndProcess
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(
	ctx context.Context, start time.Time, clock *hlc.Clock, stopper *stop.Stopper, repl *Replica,
) bool {
	waitInterval := rs.paceInterval(start, timeutil.Now())
	rs.waitTimer.Reset(waitInterval)
	if log.V(6) {
		log.Infof(ctx, "wait timer interval set to %s", waitInterval)
	}
	for {
		select {
		case <-rs.waitTimer.C:
			if log.V(6) {
				log.Infof(ctx, "wait timer fired")
			}
			rs.waitTimer.Read = true
			if repl == nil {
				return false
			}
			if log.V(2) {
				log.Infof(ctx, "replica scanner processing %s", repl)
			}
			for _, q := range rs.queues {
				q.MaybeAdd(repl, clock.Now())
			}
			return false
		case repl := <-rs.removed:
			rs.removeReplica(repl)
		case <-stopper.ShouldStop():
			return true
		}
	}
}
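A note on why waitAndProcess (and every other example on this page) wraps the call in an if log.V(...) guard rather than calling log.Infof unconditionally: without the guard, the message is always emitted at INFO severity and its arguments are always formatted; with the guard, both costs disappear unless verbosity has been raised to the given level at run time (in CockroachDB that is done with a glog-style verbosity flag such as --vmodule; treat the exact flag spelling as an assumption to verify against your cockroach version). A sketch of the two variants:

// Unguarded: always written at INFO severity, and repl is always formatted.
log.Infof(ctx, "replica scanner processing %s", repl)

// Guarded: when verbosity for this file is below 2, the only cost is the
// log.V check itself; nothing is formatted or written.
if log.V(2) {
	log.Infof(ctx, "replica scanner processing %s", repl)
}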
Example 2: runHistoryWithRetry
// runHistoryWithRetry intercepts retry errors. If one is encountered,
// alternate histories are generated which all contain the exact
// history prefix which encountered the error, but which recombine the
// remaining commands with all of the commands from the retrying
// history.
//
// This process continues recursively if there are further retries.
func (hv *historyVerifier) runHistoryWithRetry(
	priorities []int32, isolations []enginepb.IsolationType, cmds []*cmd, db *client.DB, t *testing.T,
) error {
	if err := hv.runHistory(priorities, isolations, cmds, db, t); err != nil {
		if log.V(1) {
			log.Infof(context.Background(), "got an error running history %s: %s", historyString(cmds), err)
		}
		retry, ok := err.(*retryError)
		if !ok {
			return err
		}
		if _, hasRetried := hv.retriedTxns[retry.txnIdx]; hasRetried {
			if log.V(1) {
				log.Infof(context.Background(), "retried txn %d twice; skipping history", retry.txnIdx+1)
			}
			return nil
		}
		hv.retriedTxns[retry.txnIdx] = struct{}{}
		// Randomly subsample 5% of histories for reduced execution time.
		enumHis := sampleHistories(enumerateHistoriesAfterRetry(retry, cmds), 0.05)
		for i, h := range enumHis {
			if log.V(1) {
				log.Infof(context.Background(), "after retry, running alternate history %d of %d", i, len(enumHis))
			}
			if err := hv.runHistoryWithRetry(priorities, isolations, h, db, t); err != nil {
				return err
			}
		}
	}
	return nil
}
Example 3: improve
// improve returns a candidate StoreDescriptor to rebalance a replica to. The
// strategy is to always converge on the mean range count. If that isn't
// possible, we don't return any candidate.
func (rcb rangeCountBalancer) improve(sl StoreList, excluded nodeIDSet) *roachpb.StoreDescriptor {
	// Attempt to select a better candidate from the supplied list.
	sl.stores = selectRandom(rcb.rand, allocatorRandomCount, sl, excluded)
	candidate := rcb.selectBest(sl)
	if candidate == nil {
		if log.V(2) {
			log.Infof(context.TODO(), "not rebalancing: no valid candidate targets: %s",
				formatCandidates(nil, sl.stores))
		}
		return nil
	}
	// Adding a replica to the candidate must make its range count converge on the
	// mean range count.
	rebalanceConvergesOnMean := rebalanceToConvergesOnMean(sl, *candidate)
	if !rebalanceConvergesOnMean {
		if log.V(2) {
			log.Infof(context.TODO(), "not rebalancing: %s wouldn't converge on the mean %.1f",
				formatCandidates(candidate, sl.stores), sl.candidateCount.mean)
		}
		return nil
	}
	if log.V(2) {
		log.Infof(context.TODO(), "rebalancing: mean=%.1f %s",
			sl.candidateCount.mean, formatCandidates(candidate, sl.stores))
	}
	return candidate
}
Example 4: flush
// flush sends the rows accumulated so far in a StreamMessage.
func (m *outbox) flush(last bool, err error) error {
	if !last && m.numRows == 0 {
		return nil
	}
	msg := m.encoder.FormMessage(last, err)
	if log.V(3) {
		log.Infof(m.flowCtx.Context, "flushing outbox")
	}
	var sendErr error
	if m.stream != nil {
		sendErr = m.stream.Send(msg)
	} else {
		sendErr = m.syncFlowStream.Send(msg)
	}
	if sendErr != nil {
		if log.V(1) {
			log.Errorf(m.flowCtx.Context, "outbox flush error: %s", sendErr)
		}
	} else if log.V(3) {
		log.Infof(m.flowCtx.Context, "outbox flushed")
	}
	if sendErr != nil {
		return sendErr
	}
	m.numRows = 0
	return nil
}
Example 5: AddMetricStruct
// AddMetricStruct examines all fields of metricStruct and adds
// all Iterable or Struct objects to the registry.
func (r *Registry) AddMetricStruct(metricStruct interface{}) {
	v := reflect.ValueOf(metricStruct)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	t := v.Type()
	for i := 0; i < v.NumField(); i++ {
		vfield, tfield := v.Field(i), t.Field(i)
		if !vfield.CanInterface() {
			if log.V(2) {
				log.Infof(context.TODO(), "Skipping unexported field %s", tfield.Name)
			}
			continue
		}
		val := vfield.Interface()
		switch typ := val.(type) {
		case Iterable:
			r.AddMetric(typ)
		case Struct:
			r.AddMetricStruct(typ)
		default:
			if log.V(2) {
				log.Infof(context.TODO(), "Skipping non-metric field %s", tfield.Name)
			}
		}
	}
}
Example 6: wrap
// wrap the supplied planNode with the sortNode if sorting is required.
// The first returned value is "true" if the sort node can be squashed
// in the selectTopNode (sorting unneeded).
func (n *sortNode) wrap(plan planNode) (bool, planNode) {
	if n != nil {
		// Check to see if the requested ordering is compatible with the existing
		// ordering.
		existingOrdering := plan.Ordering()
		if log.V(2) {
			log.Infof(n.ctx, "Sort: existing=%+v desired=%+v", existingOrdering, n.ordering)
		}
		match := computeOrderingMatch(n.ordering, existingOrdering, false)
		if match < len(n.ordering) {
			n.plan = plan
			n.needSort = true
			return false, n
		}
		if len(n.columns) < len(plan.Columns()) {
			// No sorting required, but we have to strip off the extra render
			// expressions we added.
			n.plan = plan
			return false, n
		}
		if log.V(2) {
			log.Infof(n.ctx, "Sort: no sorting required")
		}
	}
	return true, plan
}
Example 7: throttle
// throttle informs the store pool that the given remote store declined a
// snapshot or failed to apply one, ensuring that it will not be considered
// for up-replication or rebalancing until after the configured timeout period
// has elapsed. A reason of throttleDeclined indicates that the remote store
// explicitly declined a snapshot.
func (sp *StorePool) throttle(reason throttleReason, toStoreID roachpb.StoreID) {
	sp.mu.Lock()
	defer sp.mu.Unlock()
	detail := sp.getStoreDetailLocked(toStoreID)
	ctx := sp.AnnotateCtx(context.TODO())
	// If a snapshot is declined, be it due to an error or because it was
	// rejected, we mark the store detail as having been declined so it won't
	// be considered as a candidate for new replicas until after the configured
	// timeout period has passed.
	switch reason {
	case throttleDeclined:
		detail.throttledUntil = sp.clock.Now().GoTime().Add(sp.declinedReservationsTimeout)
		if log.V(2) {
			log.Infof(ctx, "snapshot declined, store:%s will be throttled for %s until %s",
				toStoreID, sp.declinedReservationsTimeout, detail.throttledUntil)
		}
	case throttleFailed:
		detail.throttledUntil = sp.clock.Now().GoTime().Add(sp.failedReservationsTimeout)
		if log.V(2) {
			log.Infof(ctx, "snapshot failed, store:%s will be throttled for %s until %s",
				toStoreID, sp.failedReservationsTimeout, detail.throttledUntil)
		}
	}
}
Example 8: deleteRow
// deleteRow adds to the batch the kv operations necessary to delete a table row
// with the given values.
func (rd *rowDeleter) deleteRow(ctx context.Context, b *client.Batch, values []parser.Datum) error {
	if err := rd.fks.checkAll(values); err != nil {
		return err
	}
	primaryIndexKey, secondaryIndexEntries, err := rd.helper.encodeIndexes(rd.fetchColIDtoRowIndex, values)
	if err != nil {
		return err
	}
	for _, secondaryIndexEntry := range secondaryIndexEntries {
		if log.V(2) {
			log.Infof(ctx, "Del %s", secondaryIndexEntry.Key)
		}
		b.Del(secondaryIndexEntry.Key)
	}
	// Delete the row.
	rd.startKey = roachpb.Key(primaryIndexKey)
	rd.endKey = roachpb.Key(encoding.EncodeNotNullDescending(primaryIndexKey))
	if log.V(2) {
		log.Infof(ctx, "DelRange %s - %s", rd.startKey, rd.endKey)
	}
	b.DelRange(&rd.startKey, &rd.endKey, false)
	rd.startKey, rd.endKey = nil, nil
	return nil
}
Example 9: Run
// Run is part of the processor interface.
func (d *distinct) Run(wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}
	ctx, span := tracing.ChildSpan(d.ctx, "distinct")
	defer tracing.FinishSpan(span)
	if log.V(2) {
		log.Infof(ctx, "starting distinct process")
		defer log.Infof(ctx, "exiting distinct")
	}
	var scratch []byte
	for {
		row, err := d.input.NextRow()
		if err != nil || row == nil {
			d.output.Close(err)
			return
		}
		// If we are processing DISTINCT(x, y) and the input stream is ordered
		// by x, we define x to be our group key. Our seen set at any given time
		// is only the set of all rows with the same group key. The encoding of
		// the row is the key we use in our 'seen' set.
		encoding, err := d.encode(scratch, row)
		if err != nil {
			d.output.Close(err)
			return
		}
		// The 'seen' set is reset whenever we find consecutive rows differing on the
		// group key thus avoiding the need to store encodings of all rows.
		matched, err := d.matchLastGroupKey(row)
		if err != nil {
			d.output.Close(err)
			return
		}
		if !matched {
			d.lastGroupKey = row
			d.seen = make(map[string]struct{})
		}
		key := string(encoding)
		if _, ok := d.seen[key]; !ok {
			d.seen[key] = struct{}{}
			if !d.output.PushRow(row) {
				if log.V(2) {
					log.Infof(ctx, "no more rows required")
				}
				d.output.Close(nil)
				return
			}
		}
		scratch = encoding[:0]
	}
}
Example 10: Run
// Run is part of the processor interface.
func (ev *evaluator) Run(wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}
	ctx, span := tracing.ChildSpan(ev.ctx, "evaluator")
	defer tracing.FinishSpan(span)
	if log.V(2) {
		log.Infof(ctx, "starting evaluator process")
		defer log.Infof(ctx, "exiting evaluator")
	}
	first := true
	for {
		row, err := ev.input.NextRow()
		if err != nil || row == nil {
			ev.output.Close(err)
			return
		}
		if first {
			first = false
			types := make([]sqlbase.ColumnType_Kind, len(row))
			for i := range types {
				types[i] = row[i].Type
			}
			for i, expr := range ev.specExprs {
				err := ev.exprs[i].init(expr, types, ev.flowCtx.evalCtx)
				if err != nil {
					ev.output.Close(err)
					return
				}
				ev.exprTypes[i] = sqlbase.DatumTypeToColumnKind(ev.exprs[i].expr.ResolvedType())
			}
		}
		outRow, err := ev.eval(row)
		if err != nil {
			ev.output.Close(err)
			return
		}
		if log.V(3) {
			log.Infof(ctx, "pushing %s\n", outRow)
		}
		// Push the row to the output RowReceiver; stop if they don't need more
		// rows.
		if !ev.output.PushRow(outRow) {
			if log.V(2) {
				log.Infof(ctx, "no more rows required")
			}
			ev.output.Close(nil)
			return
		}
	}
}
Example 11: setup
func (t *parallelTest) setup(spec *parTestSpec) {
	if spec.ClusterSize == 0 {
		spec.ClusterSize = 1
	}
	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Cluster Size: %d", spec.ClusterSize)
	}
	args := base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			Knobs: base.TestingKnobs{
				SQLExecutor: &sql.ExecutorTestingKnobs{
					WaitForGossipUpdate: true,
					CheckStmtStringChange: true,
				},
			},
		},
	}
	t.cluster = serverutils.StartTestCluster(t, spec.ClusterSize, args)
	t.clients = make([][]*gosql.DB, spec.ClusterSize)
	for i := range t.clients {
		t.clients[i] = append(t.clients[i], t.cluster.ServerConn(i))
	}
	r0 := sqlutils.MakeSQLRunner(t, t.clients[0][0])
	if spec.RangeSplitSize != 0 {
		if testing.Verbose() || log.V(1) {
			log.Infof(t.ctx, "Setting range split size: %d", spec.RangeSplitSize)
		}
		zoneCfg := config.DefaultZoneConfig()
		zoneCfg.RangeMaxBytes = int64(spec.RangeSplitSize)
		zoneCfg.RangeMinBytes = zoneCfg.RangeMaxBytes / 2
		buf, err := protoutil.Marshal(&zoneCfg)
		if err != nil {
			t.Fatal(err)
		}
		objID := keys.RootNamespaceID
		r0.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf)
	}
	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Creating database")
	}
	r0.Exec("CREATE DATABASE test")
	for i := range t.clients {
		sqlutils.MakeSQLRunner(t, t.clients[i][0]).Exec("SET DATABASE = test")
	}
	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Test setup done")
	}
}
Example 12: maybeRunPeriodicCheck
// If the current time is greater than the timestamp stored at `key`, run `f`.
// Before running `f`, the timestamp is updated forward by a small amount via
// a compare-and-swap to ensure at-most-one concurrent execution. After `f`
// executes the timestamp is set to the next execution time.
// Returns how long until `f` should be run next (i.e. when this method should
// be called again).
func (s *Server) maybeRunPeriodicCheck(
	op string, key roachpb.Key, f func(context.Context),
) time.Duration {
	ctx, span := s.AnnotateCtxWithSpan(context.Background(), "op")
	defer span.Finish()
	// Add the op name to the log context.
	ctx = log.WithLogTag(ctx, op, nil)
	resp, err := s.db.Get(ctx, key)
	if err != nil {
		log.Infof(ctx, "error reading time: %s", err)
		return updateCheckRetryFrequency
	}
	// We return early below if either the next check time is in the
	// future or the atomic compare-and-swap of that time failed (which
	// would happen if two nodes tried at the same time).
	if resp.Exists() {
		whenToCheck, pErr := resp.Value.GetTime()
		if pErr != nil {
			log.Warningf(ctx, "error decoding time: %s", pErr)
			return updateCheckRetryFrequency
		} else if delay := whenToCheck.Sub(timeutil.Now()); delay > 0 {
			return delay
		}
		nextRetry := whenToCheck.Add(updateCheckRetryFrequency)
		if err := s.db.CPut(ctx, key, nextRetry, whenToCheck); err != nil {
			if log.V(2) {
				log.Infof(ctx, "could not set next version check time (maybe another node checked?): %s", err)
			}
			return updateCheckRetryFrequency
		}
	} else {
		log.Infof(ctx, "No previous %s time.", op)
		nextRetry := timeutil.Now().Add(updateCheckRetryFrequency)
		// CPut with `nil` prev value to assert that no other node has checked.
		if err := s.db.CPut(ctx, key, nextRetry, nil); err != nil {
			if log.V(2) {
				log.Infof(ctx, "Could not set %s time (maybe another node checked?): %v", op, err)
			}
			return updateCheckRetryFrequency
		}
	}
	f(ctx)
	if err := s.db.Put(ctx, key, timeutil.Now().Add(updateCheckFrequency)); err != nil {
		log.Infof(ctx, "Error updating %s time: %v", op, err)
	}
	return updateCheckFrequency
}
Example 13: clearOverlappingCachedRangeDescriptors
// clearOverlappingCachedRangeDescriptors looks up and clears any
// cache entries which overlap the specified descriptor.
func (rdc *rangeDescriptorCache) clearOverlappingCachedRangeDescriptors(
	desc *roachpb.RangeDescriptor,
) error {
	key := desc.EndKey
	metaKey, err := meta(key)
	if err != nil {
		return err
	}
	// Clear out any descriptors which subsume the key which we're going
	// to cache. For example, an existing KeyMin->KeyMax descriptor
	// should be cleared out in favor of a KeyMin->"m" descriptor.
	k, v, ok := rdc.rangeCache.cache.Ceil(rangeCacheKey(metaKey))
	if ok {
		descriptor := v.(*roachpb.RangeDescriptor)
		if descriptor.StartKey.Less(key) && !descriptor.EndKey.Less(key) {
			if log.V(2) {
				log.Infof(rdc.ctx, "clearing overlapping descriptor: key=%s desc=%s", k, descriptor)
			}
			rdc.rangeCache.cache.Del(k.(rangeCacheKey))
		}
	}
	startMeta, err := meta(desc.StartKey)
	if err != nil {
		return err
	}
	endMeta, err := meta(desc.EndKey)
	if err != nil {
		return err
	}
	// Also clear any descriptors which are subsumed by the one we're
	// going to cache. This could happen on a merge (and also happens
	// when there's a lot of concurrency). Iterate from the range meta key
	// after RangeMetaKey(desc.StartKey) to the range meta key for desc.EndKey.
	var keys []rangeCacheKey
	rdc.rangeCache.cache.DoRange(func(k, v interface{}) bool {
		if log.V(2) {
			log.Infof(rdc.ctx, "clearing subsumed descriptor: key=%s desc=%s",
				k, v.(*roachpb.RangeDescriptor))
		}
		keys = append(keys, k.(rangeCacheKey))
		return false
	}, rangeCacheKey(startMeta.Next()), rangeCacheKey(endMeta))
	for _, key := range keys {
		rdc.rangeCache.cache.Del(key)
	}
	return nil
}
Example 14: manage
// manage manages outgoing clients. Periodically, the infostore is
// scanned for infos with hop count exceeding the MaxHops
// threshold. If the number of outgoing clients doesn't exceed
// maxPeers(), a new gossip client is connected to a randomly selected
// peer beyond MaxHops threshold. Otherwise, the least useful peer
// node is cut off to make room for a replacement. Disconnected
// clients are processed via the disconnected channel and taken out of
// the outgoing address set. If there are no longer any outgoing
// connections or the sentinel gossip is unavailable, the bootstrapper
// is notified via the stalled condition variable.
func (g *Gossip) manage() {
	g.server.stopper.RunWorker(func() {
		ctx := g.AnnotateCtx(context.Background())
		cullTicker := time.NewTicker(g.jitteredInterval(g.cullInterval))
		stallTicker := time.NewTicker(g.jitteredInterval(g.stallInterval))
		defer cullTicker.Stop()
		defer stallTicker.Stop()
		for {
			select {
			case <-g.server.stopper.ShouldStop():
				return
			case c := <-g.disconnected:
				g.doDisconnected(c)
			case nodeID := <-g.tighten:
				g.tightenNetwork(nodeID)
			case <-cullTicker.C:
				func() {
					g.mu.Lock()
					if !g.outgoing.hasSpace() {
						leastUsefulID := g.mu.is.leastUseful(g.outgoing)
						if c := g.findClient(func(c *client) bool {
							return c.peerID == leastUsefulID
						}); c != nil {
							if log.V(1) {
								log.Infof(ctx, "closing least useful client %+v to tighten network graph", c)
							}
							log.Eventf(ctx, "culling %s", c.addr)
							c.close()
							// After releasing the lock, block until the client disconnects.
							defer func() {
								g.doDisconnected(<-g.disconnected)
							}()
						} else {
							if log.V(1) {
								g.clientsMu.Lock()
								log.Infof(ctx, "couldn't find least useful client among %+v", g.clientsMu.clients)
								g.clientsMu.Unlock()
							}
						}
					}
					g.mu.Unlock()
				}()
			case <-stallTicker.C:
				g.mu.Lock()
				g.maybeSignalStatusChangeLocked()
				g.mu.Unlock()
			}
		}
	})
}
Example 15: removeConnLocked
func (ctx *Context) removeConnLocked(key string, meta *connMeta) {
	if log.V(1) {
		log.Infof(ctx.masterCtx, "closing %s", key)
	}
	if conn := meta.conn; conn != nil {
		if err := conn.Close(); err != nil && !grpcutil.IsClosedConnection(err) {
			if log.V(1) {
				log.Errorf(ctx.masterCtx, "failed to close client connection: %s", err)
			}
		}
	}
	delete(ctx.conns.cache, key)
}