This article collects typical usage examples of the Golang function log.Eventf from github.com/cockroachdb/cockroach/pkg/util/log. If you are wondering what the Golang Eventf function does, how to call it, or what it looks like in real code, the curated examples below may help.
A total of 15 code examples of the Eventf function are shown below, sorted by popularity by default.
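Before the examples, here is a minimal sketch of the typical calling pattern. The doWork helper and its arguments are invented for illustration; only log.Eventf and log.Event come from the package above, and depending on the CockroachDB version the context import may be golang.org/x/net/context instead of the standard library. Eventf is the Printf-style variant of Event: it formats its arguments and records the resulting message on the tracing span (or event log) carried in the context; if the context carries neither, the call is effectively a no-op.

package main

import (
    "context"

    "github.com/cockroachdb/cockroach/pkg/util/log"
)

// doWork is a hypothetical task that records trace events as it progresses.
// The events are only visible if ctx carries a tracing span or event log.
func doWork(ctx context.Context, items int) {
    // Printf-style formatting; the message is attached to the span in ctx.
    log.Eventf(ctx, "starting work on %d items", items)
    for i := 0; i < items; i++ {
        // ... do the actual work ...
    }
    // Event is the non-formatting variant.
    log.Event(ctx, "work done")
}

func main() {
    // With a plain Background context the events go nowhere; in the examples
    // below the context is usually annotated via AnnotateCtx or a span.
    doWork(context.Background(), 3)
}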
Example 1: maybeSignalStatusChangeLocked
// maybeSignalStatusChangeLocked checks whether gossip should transition its
// internal state from connected to stalled or vice versa.
func (g *Gossip) maybeSignalStatusChangeLocked() {
    ctx := g.AnnotateCtx(context.TODO())
    orphaned := g.outgoing.len()+g.mu.incoming.len() == 0
    stalled := orphaned || g.mu.is.getInfo(KeySentinel) == nil
    if stalled {
        // We employ the stalled boolean to avoid filling logs with warnings.
        if !g.stalled {
            log.Eventf(ctx, "now stalled")
            if orphaned {
                if len(g.resolvers) == 0 {
                    log.Warningf(ctx, "no resolvers found; use --join to specify a connected node")
                } else {
                    log.Warningf(ctx, "no incoming or outgoing connections")
                }
            } else if len(g.resolversTried) == len(g.resolvers) {
                log.Warningf(ctx, "first range unavailable; resolvers exhausted")
            } else {
                log.Warningf(ctx, "first range unavailable; trying remaining resolvers")
            }
        }
        if len(g.resolvers) > 0 {
            g.signalStalledLocked()
        }
    } else {
        if g.stalled {
            log.Eventf(ctx, "connected")
            log.Infof(ctx, "node has connected to cluster via gossip")
            g.signalConnectedLocked()
        }
        g.maybeCleanupBootstrapAddressesLocked()
    }
    g.stalled = stalled
}
Example 2: addInternal
// addInternal adds the replica to the queue with the specified priority. If
// the replica is already queued, it updates the existing priority.
// Expects the queue lock to be held by the caller.
func (bq *baseQueue) addInternal(
    ctx context.Context, desc *roachpb.RangeDescriptor, should bool, priority float64,
) (bool, error) {
    if bq.mu.stopped {
        return false, errQueueStopped
    }
    if bq.mu.disabled {
        log.Event(ctx, "queue disabled")
        return false, errQueueDisabled
    }
    if !desc.IsInitialized() {
        // We checked this above in MaybeAdd(), but we need to check it
        // again for Add().
        return false, errors.New("replica not initialized")
    }
    // If the replica is currently in purgatory, don't re-add it.
    if _, ok := bq.mu.purgatory[desc.RangeID]; ok {
        return false, nil
    }
    item, ok := bq.mu.replicas[desc.RangeID]
    if !should {
        if ok {
            log.Eventf(ctx, "%s: removing from queue", item.value)
            bq.remove(item)
        }
        return false, errReplicaNotAddable
    } else if ok {
        if item.priority != priority {
            log.Eventf(ctx, "%s: updating priority: %0.3f -> %0.3f",
                desc, item.priority, priority)
        }
        // Replica has already been added; update priority.
        bq.mu.priorityQ.update(item, priority)
        return false, nil
    }
    log.VEventf(ctx, 3, "%s: adding: priority=%0.3f", desc, priority)
    item = &replicaItem{value: desc.RangeID, priority: priority}
    bq.add(item)
    // If adding this replica has pushed the queue past its maximum size,
    // remove the lowest priority element.
    if pqLen := bq.mu.priorityQ.Len(); pqLen > bq.maxSize {
        bq.remove(bq.mu.priorityQ[pqLen-1])
    }
    // Signal the processLoop that a replica has been added.
    select {
    case bq.incoming <- struct{}{}:
    default:
        // No need to signal again.
    }
    return true, nil
}
Example 3: bootstrap
// bootstrap connects the node to the gossip network. Bootstrapping
// commences in the event there are no connected clients or the
// sentinel gossip info is not available. After a successful bootstrap
// connection, this method will block on the stalled condvar, which
// receives notifications that gossip network connectivity has been
// lost and requires re-bootstrapping.
func (g *Gossip) bootstrap() {
    g.server.stopper.RunWorker(func() {
        ctx := g.AnnotateCtx(context.Background())
        ctx = log.WithLogTag(ctx, "bootstrap", nil)
        var bootstrapTimer timeutil.Timer
        defer bootstrapTimer.Stop()
        for {
            if g.server.stopper.RunTask(func() {
                g.mu.Lock()
                defer g.mu.Unlock()
                haveClients := g.outgoing.len() > 0
                haveSentinel := g.mu.is.getInfo(KeySentinel) != nil
                log.Eventf(ctx, "have clients: %t, have sentinel: %t", haveClients, haveSentinel)
                if !haveClients || !haveSentinel {
                    // Try to get another bootstrap address from the resolvers.
                    if addr := g.getNextBootstrapAddress(); addr != nil {
                        g.startClient(addr, g.NodeID.Get())
                    } else {
                        bootstrapAddrs := make([]string, 0, len(g.bootstrapping))
                        for addr := range g.bootstrapping {
                            bootstrapAddrs = append(bootstrapAddrs, addr)
                        }
                        log.Eventf(ctx, "no next bootstrap address; currently bootstrapping: %v", bootstrapAddrs)
                        // We couldn't start a client, signal that we're stalled so that
                        // we'll retry.
                        g.maybeSignalStatusChangeLocked()
                    }
                }
            }) != nil {
                return
            }
            // Pause an interval before next possible bootstrap.
            bootstrapTimer.Reset(g.bootstrapInterval)
            log.Eventf(ctx, "sleeping %s until bootstrap", g.bootstrapInterval)
            select {
            case <-bootstrapTimer.C:
                bootstrapTimer.Read = true
                // break
            case <-g.server.stopper.ShouldStop():
                return
            }
            log.Eventf(ctx, "idling until bootstrap required")
            // Block until we need bootstrapping again.
            select {
            case <-g.stalledCh:
                log.Eventf(ctx, "detected stall; commencing bootstrap")
                // break
            case <-g.server.stopper.ShouldStop():
                return
            }
        }
    })
}
Example 4: Seek
// Seek positions the iterator at the specified key.
func (ri *RangeIterator) Seek(ctx context.Context, key roachpb.RKey, scanDir ScanDirection) {
    log.Eventf(ctx, "querying next range at %s", key)
    ri.scanDir = scanDir
    ri.init = true // the iterator is now initialized
    ri.pErr = nil  // clear any prior error
    ri.key = key   // set the key
    // Retry loop for looking up next range in the span. The retry loop
    // deals with retryable range descriptor lookups.
    for r := retry.StartWithCtx(ctx, ri.ds.rpcRetryOptions); r.Next(); {
        log.Event(ctx, "meta descriptor lookup")
        var err error
        ri.desc, ri.token, err = ri.ds.getDescriptor(
            ctx, ri.key, ri.token, ri.scanDir == Descending)
        // getDescriptor may fail retryably if, for example, the first
        // range isn't available via Gossip. Assume that all errors at
        // this level are retryable. Non-retryable errors would be for
        // things like malformed requests which we should have checked
        // for before reaching this point.
        if err != nil {
            log.VEventf(ctx, 1, "range descriptor lookup failed: %s", err)
            continue
        }
        // It's possible that the returned descriptor misses parts of the
        // keys it's supposed to include after it's truncated to match the
        // descriptor. Example revscan [a,g), first desc lookup for "g"
        // returns descriptor [c,d) -> [d,g) is never scanned.
        // We evict and retry in such a case.
        // TODO: this code is subject to removal. See
        // https://groups.google.com/d/msg/cockroach-db/DebjQEgU9r4/_OhMe7atFQAJ
        reverse := ri.scanDir == Descending
        if (reverse && !ri.desc.ContainsExclusiveEndKey(ri.key)) ||
            (!reverse && !ri.desc.ContainsKey(ri.key)) {
            log.Eventf(ctx, "addressing error: %s does not include key %s", ri.desc, ri.key)
            if err := ri.token.Evict(ctx); err != nil {
                ri.pErr = roachpb.NewError(err)
                return
            }
            // On addressing errors, don't backoff; retry immediately.
            r.Reset()
            continue
        }
        return
    }
    // Check for an early exit from the retry loop.
    if pErr := ri.ds.deduceRetryEarlyExitError(ctx); pErr != nil {
        ri.pErr = pErr
    } else {
        ri.pErr = roachpb.NewErrorf("RangeIterator failed to seek to %s", key)
    }
}
Example 5: processReplica
// processReplica processes a single replica. This should not be
// called externally to the queue. bq.mu.Lock must not be held
// while calling this method.
func (bq *baseQueue) processReplica(
    queueCtx context.Context, repl *Replica, clock *hlc.Clock,
) error {
    bq.processMu.Lock()
    defer bq.processMu.Unlock()
    // Load the system config.
    cfg, ok := bq.gossip.GetSystemConfig()
    if !ok {
        log.VEventf(queueCtx, 1, "no system config available, skipping")
        return nil
    }
    if bq.requiresSplit(cfg, repl) {
        // Range needs to be split due to zone configs, but queue does
        // not accept unsplit ranges.
        log.VEventf(queueCtx, 3, "split needed; skipping")
        return nil
    }
    // Putting a span in a context means that events will no longer go to the
    // event log. Use queueCtx for events that are intended for the event log.
    ctx, span := bq.AnnotateCtxWithSpan(queueCtx, bq.name)
    defer span.Finish()
    // Also add the Replica annotations to ctx.
    ctx = repl.AnnotateCtx(ctx)
    ctx, cancel := context.WithTimeout(ctx, bq.processTimeout)
    defer cancel()
    log.Eventf(ctx, "processing replica")
    if err := repl.IsDestroyed(); err != nil {
        log.VEventf(queueCtx, 3, "replica destroyed (%s); skipping", err)
        return nil
    }
    // If the queue requires a replica to have the range lease in
    // order to be processed, check whether this replica has range lease
    // and renew or acquire if necessary.
    if bq.needsLease {
        // Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
        if err := repl.redirectOnOrAcquireLease(ctx); err != nil {
            switch v := err.GetDetail().(type) {
            case *roachpb.NotLeaseHolderError, *roachpb.RangeNotFoundError:
                log.VEventf(queueCtx, 3, "%s; skipping", v)
                return nil
            default:
                return errors.Wrapf(err.GoError(), "%s: could not obtain lease", repl)
            }
        }
        log.Event(ctx, "got range lease")
    }
    log.VEventf(queueCtx, 3, "processing")
    if err := bq.impl.process(ctx, clock.Now(), repl, cfg); err != nil {
        return err
    }
    log.Event(ctx, "done")
    bq.successes.Inc(1)
    return nil
}
Example 6: GetSnapshot
// GetSnapshot wraps Snapshot() but does not require the replica lock
// to be held and it will block instead of returning
// ErrSnapshotTemporarilyUnavailable. The caller is directly responsible for
// calling r.CloseOutSnap.
func (r *Replica) GetSnapshot(ctx context.Context, snapType string) (*OutgoingSnapshot, error) {
    // Use shorter-than-usual backoffs because this rarely succeeds on
    // the first attempt and this method is used a lot in tests.
    // Unsuccessful attempts are cheap, so we can have a low MaxBackoff.
    retryOpts := retry.Options{
        InitialBackoff: 1 * time.Millisecond,
        MaxBackoff:     100 * time.Millisecond,
        Multiplier:     2,
    }
    for retryObj := retry.StartWithCtx(ctx, retryOpts); retryObj.Next(); {
        log.Eventf(ctx, "snapshot retry loop pass %d", retryObj.CurrentAttempt())
        r.mu.Lock()
        doneChan := r.mu.outSnapDone
        r.mu.Unlock()
        <-doneChan
        r.mu.Lock()
        snap, err := r.snapshotWithContext(ctx, snapType)
        if err == nil {
            r.mu.outSnap.claimed = true
        }
        r.mu.Unlock()
        if err == raft.ErrSnapshotTemporarilyUnavailable {
            continue
        } else {
            return snap, err
        }
    }
    return nil, ctx.Err() // the only loop exit condition
}
Example 7: maybeAddBootstrapAddress
// maybeAddBootstrapAddress adds the specified address to the list of
// bootstrap addresses if not already present. Returns whether a new
// bootstrap address was added. The caller must hold the gossip mutex.
func (g *Gossip) maybeAddBootstrapAddress(addr util.UnresolvedAddr) bool {
    if _, ok := g.bootstrapAddrs[addr]; ok {
        return false
    }
    g.bootstrapInfo.Addresses = append(g.bootstrapInfo.Addresses, addr)
    g.bootstrapAddrs[addr] = struct{}{}
    ctx := g.AnnotateCtx(context.TODO())
    log.Eventf(ctx, "add bootstrap %s", addr)
    return true
}
Example 8: manage
// manage manages outgoing clients. Periodically, the infostore is
// scanned for infos with hop count exceeding the MaxHops
// threshold. If the number of outgoing clients doesn't exceed
// maxPeers(), a new gossip client is connected to a randomly selected
// peer beyond MaxHops threshold. Otherwise, the least useful peer
// node is cut off to make room for a replacement. Disconnected
// clients are processed via the disconnected channel and taken out of
// the outgoing address set. If there are no longer any outgoing
// connections or the sentinel gossip is unavailable, the bootstrapper
// is notified via the stalled conditional variable.
func (g *Gossip) manage() {
    g.server.stopper.RunWorker(func() {
        ctx := g.AnnotateCtx(context.Background())
        cullTicker := time.NewTicker(g.jitteredInterval(g.cullInterval))
        stallTicker := time.NewTicker(g.jitteredInterval(g.stallInterval))
        defer cullTicker.Stop()
        defer stallTicker.Stop()
        for {
            select {
            case <-g.server.stopper.ShouldStop():
                return
            case c := <-g.disconnected:
                g.doDisconnected(c)
            case nodeID := <-g.tighten:
                g.tightenNetwork(nodeID)
            case <-cullTicker.C:
                func() {
                    g.mu.Lock()
                    if !g.outgoing.hasSpace() {
                        leastUsefulID := g.mu.is.leastUseful(g.outgoing)
                        if c := g.findClient(func(c *client) bool {
                            return c.peerID == leastUsefulID
                        }); c != nil {
                            if log.V(1) {
                                log.Infof(ctx, "closing least useful client %+v to tighten network graph", c)
                            }
                            log.Eventf(ctx, "culling %s", c.addr)
                            c.close()
                            // After releasing the lock, block until the client disconnects.
                            defer func() {
                                g.doDisconnected(<-g.disconnected)
                            }()
                        } else {
                            if log.V(1) {
                                g.clientsMu.Lock()
                                log.Infof(ctx, "couldn't find least useful client among %+v", g.clientsMu.clients)
                                g.clientsMu.Unlock()
                            }
                        }
                    }
                    g.mu.Unlock()
                }()
            case <-stallTicker.C:
                g.mu.Lock()
                g.maybeSignalStatusChangeLocked()
                g.mu.Unlock()
            }
        }
    })
}
Example 9: maybeAddBootstrapAddress
// maybeAddBootstrapAddress adds the specified address to the list of
// bootstrap addresses if not already present. Returns whether a new
// bootstrap address was added. The caller must hold the gossip mutex.
func (g *Gossip) maybeAddBootstrapAddress(addr util.UnresolvedAddr, nodeID roachpb.NodeID) bool {
    if existingNodeID, ok := g.bootstrapAddrs[addr]; ok {
        if existingNodeID == unknownNodeID || existingNodeID != nodeID {
            g.bootstrapAddrs[addr] = nodeID
        }
        return false
    }
    g.bootstrapInfo.Addresses = append(g.bootstrapInfo.Addresses, addr)
    g.bootstrapAddrs[addr] = nodeID
    ctx := g.AnnotateCtx(context.TODO())
    log.Eventf(ctx, "add bootstrap %s", addr)
    return true
}
Example 10: removeClient
// removeClient removes the specified client. Called when a client
// disconnects.
func (g *Gossip) removeClient(target *client) {
    g.clientsMu.Lock()
    defer g.clientsMu.Unlock()
    for i, candidate := range g.clientsMu.clients {
        if candidate == target {
            ctx := g.AnnotateCtx(context.TODO())
            log.Eventf(ctx, "client %s disconnected", candidate.addr)
            g.clientsMu.clients = append(g.clientsMu.clients[:i], g.clientsMu.clients[i+1:]...)
            delete(g.bootstrapping, candidate.addr.String())
            g.outgoing.removeNode(candidate.peerID)
            break
        }
    }
}
Example 11: startClient
// startClient launches a new client connected to remote address.
// The client is added to the outgoing address set and launched in
// a goroutine.
func (g *Gossip) startClient(addr net.Addr, nodeID roachpb.NodeID) {
    g.clientsMu.Lock()
    defer g.clientsMu.Unlock()
    breaker, ok := g.clientsMu.breakers[addr.String()]
    if !ok {
        breaker = g.rpcContext.NewBreaker()
        g.clientsMu.breakers[addr.String()] = breaker
    }
    ctx := g.AnnotateCtx(context.TODO())
    log.Eventf(ctx, "starting new client to %s", addr)
    c := newClient(g.server.AmbientContext, addr, g.serverMetrics)
    g.clientsMu.clients = append(g.clientsMu.clients, c)
    c.start(g, g.disconnected, g.rpcContext, g.server.stopper, nodeID, breaker)
}
Example 12: tightenNetwork
// tightenNetwork "tightens" the network by starting a new gossip
// client to the most distant node as measured in required gossip hops
// to propagate info from the distant node to this node.
func (g *Gossip) tightenNetwork(distantNodeID roachpb.NodeID) {
    g.mu.Lock()
    defer g.mu.Unlock()
    if g.outgoing.hasSpace() {
        ctx := g.AnnotateCtx(context.TODO())
        if nodeAddr, err := g.getNodeIDAddressLocked(distantNodeID); err != nil {
            log.Errorf(ctx, "unable to get address for node %d: %s", distantNodeID, err)
        } else {
            log.Infof(ctx, "starting client to distant node %d to tighten network graph", distantNodeID)
            log.Eventf(ctx, "tightening network with new client to %s", nodeAddr)
            g.startClient(nodeAddr, g.NodeID.Get())
        }
    }
}
Example 13: maybeAddResolver
// maybeAddResolver creates and adds a resolver for the specified
// address if one does not already exist. Returns whether a new
// resolver was added. The caller must hold the gossip mutex.
func (g *Gossip) maybeAddResolver(addr util.UnresolvedAddr) bool {
    if _, ok := g.resolverAddrs[addr]; ok {
        return false
    }
    ctx := g.AnnotateCtx(context.TODO())
    r, err := resolver.NewResolverFromUnresolvedAddr(addr)
    if err != nil {
        log.Warningf(ctx, "bad address %s: %s", addr, err)
        return false
    }
    g.resolvers = append(g.resolvers, r)
    g.resolverAddrs[addr] = r
    log.Eventf(ctx, "add resolver %s", r)
    return true
}
Example 14: snapshotWithContext
// snapshotWithContext is the main implementation for Snapshot() but it takes
// a context to allow tracing. If this method returns without error, callers
// must eventually call CloseOutSnap to ready this replica for more snapshots.
// r.mu must be held.
func (r *Replica) snapshotWithContext(
    ctx context.Context, snapType string,
) (*OutgoingSnapshot, error) {
    r.mu.AssertHeld()
    rangeID := r.RangeID
    if r.exceedsDoubleSplitSizeLocked() {
        maxBytes := r.mu.maxBytes
        size := r.mu.state.Stats.Total()
        log.Infof(ctx,
            "not generating %s snapshot because replica is too large: %d > 2 * %d",
            snapType, size, maxBytes)
        return &OutgoingSnapshot{}, raft.ErrSnapshotTemporarilyUnavailable
    }
    // See if there is already a snapshot running for this store.
    select {
    case <-r.mu.outSnapDone:
    default:
        log.Event(ctx, "snapshot already running")
        return nil, raft.ErrSnapshotTemporarilyUnavailable
    }
    if !r.store.AcquireRaftSnapshot() {
        log.Event(ctx, "snapshot already running")
        return nil, raft.ErrSnapshotTemporarilyUnavailable
    }
    startKey := r.mu.state.Desc.StartKey
    ctx, sp := r.AnnotateCtxWithSpan(ctx, "snapshot")
    defer sp.Finish()
    snap := r.store.NewSnapshot()
    log.Eventf(ctx, "new engine snapshot for replica %s", r)
    // Delegate to a static function to make sure that we do not depend
    // on any indirect calls to r.store.Engine() (or other in-memory
    // state of the Replica). Everything must come from the snapshot.
    snapData, err := snapshot(ctx, snapType, snap, rangeID, r.store.raftEntryCache, startKey)
    if err != nil {
        log.Errorf(ctx, "error generating snapshot: %s", err)
        return nil, err
    }
    log.Event(ctx, "snapshot generated")
    r.store.metrics.RangeSnapshotsGenerated.Inc(1)
    r.mu.outSnap = snapData
    r.mu.outSnapDone = make(chan struct{})
    return &r.mu.outSnap, nil
}
Example 15: EvictAndReplace
// EvictAndReplace instructs the EvictionToken to evict the RangeDescriptor it was
// created with from the rangeDescriptorCache. It also allows the user to provide
// new RangeDescriptors to insert into the cache, all atomically. When called without
// arguments, EvictAndReplace will behave the same as Evict.
func (et *EvictionToken) EvictAndReplace(
    ctx context.Context, newDescs ...roachpb.RangeDescriptor,
) error {
    var err error
    et.doOnce.Do(func() {
        et.doLocker.Lock()
        defer et.doLocker.Unlock()
        err = et.do()
        if err == nil {
            if len(newDescs) > 0 {
                err = et.doReplace(newDescs...)
                log.Eventf(ctx, "evicting cached range descriptor with %d replacements", len(newDescs))
            } else {
                log.Event(ctx, "evicting cached range descriptor")
            }
        }
    })
    return err
}