This article collects typical usage examples of the Golang Warningf function from the github.com/cockroachdb/cockroach/pkg/util/log package. If you are wondering what Warningf does and how to call it, the curated examples below should help.
The following shows 15 code examples of the Warningf function, sorted by popularity by default.
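As a quick orientation before the examples, here is a minimal sketch of how log.Warningf is typically invoked, inferred from the examples below: it takes a context.Context followed by a printf-style format string and arguments, and emits the message at warning severity. The helper function, message text, and threshold here are illustrative only and are not part of the CockroachDB source.

package main

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// warnIfTooMany is a hypothetical helper showing the basic call shape.
func warnIfTooMany(ctx context.Context, got, limit int) {
	if got > limit {
		// Warningf formats its arguments like fmt.Printf and logs the
		// result at warning severity, annotated with any tags on ctx.
		log.Warningf(ctx, "too many items: got %d, limit %d", got, limit)
	}
}

func main() {
	// When no request-scoped context is available, the examples below
	// commonly pass context.TODO().
	warnIfTooMany(context.TODO(), 12, 10)
}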
Example 1: cutNetwork
func cutNetwork(t *testing.T, c cluster.Cluster, closer <-chan struct{}, partitions ...[]int) {
	defer func() {
		if errs := restoreNetwork(t, c); len(errs) > 0 {
			t.Fatalf("errors restoring the network: %+v", errs)
		}
	}()
	addrs, addrsToNode := mustGetHosts(t, c)
	ipPartitions := make([][]iptables.IP, 0, len(partitions))
	for _, partition := range partitions {
		ipPartition := make([]iptables.IP, 0, len(partition))
		for _, nodeIndex := range partition {
			ipPartition = append(ipPartition, addrs[nodeIndex])
		}
		ipPartitions = append(ipPartitions, ipPartition)
	}
	log.Warningf(context.TODO(), "partitioning: %v (%v)", partitions, ipPartitions)
	for host, cmds := range iptables.Rules(iptables.Bidirectional(ipPartitions...)) {
		for _, cmd := range cmds {
			if err := c.ExecRoot(addrsToNode[host], cmd); err != nil {
				t.Fatal(err)
			}
		}
	}
	<-closer
	log.Warningf(context.TODO(), "resolved all partitions")
}
Example 2: maybeSignalStatusChangeLocked
// maybeSignalStatusChangeLocked checks whether gossip should transition its
// internal state from connected to stalled or vice versa.
func (g *Gossip) maybeSignalStatusChangeLocked() {
	ctx := g.AnnotateCtx(context.TODO())
	orphaned := g.outgoing.len()+g.mu.incoming.len() == 0
	stalled := orphaned || g.mu.is.getInfo(KeySentinel) == nil
	if stalled {
		// We employ the stalled boolean to avoid filling logs with warnings.
		if !g.stalled {
			log.Eventf(ctx, "now stalled")
			if orphaned {
				if len(g.resolvers) == 0 {
					log.Warningf(ctx, "no resolvers found; use --join to specify a connected node")
				} else {
					log.Warningf(ctx, "no incoming or outgoing connections")
				}
			} else if len(g.resolversTried) == len(g.resolvers) {
				log.Warningf(ctx, "first range unavailable; resolvers exhausted")
			} else {
				log.Warningf(ctx, "first range unavailable; trying remaining resolvers")
			}
		}
		if len(g.resolvers) > 0 {
			g.signalStalledLocked()
		}
	} else {
		if g.stalled {
			log.Eventf(ctx, "connected")
			log.Infof(ctx, "node has connected to cluster via gossip")
			g.signalConnectedLocked()
		}
		g.maybeCleanupBootstrapAddressesLocked()
	}
	g.stalled = stalled
}
Example 3: migrate7310And6991
// MIGRATION(tschottdorf): As of #7310, we make sure that a Replica always has
// a complete Raft state on disk. Prior versions may not have that, which
// causes issues due to the fact that we used to synthesize a TruncatedState
// and do so no more. To make up for that, write a missing TruncatedState here.
// That key is in the replicated state, but since during a cluster upgrade, all
// nodes do it, it's fine (and we never CPut on that key, so anything in the
// Raft pipeline will simply overwrite it).
//
// Migration(tschottdorf): See #6991. It's possible that the HardState is
// missing after a snapshot was applied (so there is a TruncatedState). In this
// case, synthesize a HardState (simply setting everything that was in the
// snapshot to committed). Having lost the original HardState can theoretically
// mean that the replica was further ahead or had voted, and so there's no
// guarantee that this will be correct. But it will be correct in the majority
// of cases, and some state *has* to be recovered.
func migrate7310And6991(
	ctx context.Context, batch engine.ReadWriter, desc roachpb.RangeDescriptor,
) error {
	state, err := loadState(ctx, batch, &desc)
	if err != nil {
		// errors.Wrap does not format its message, so no verb is used here.
		return errors.Wrap(err, "could not migrate TruncatedState")
	}
	if (*state.TruncatedState == roachpb.RaftTruncatedState{}) {
		state.TruncatedState.Term = raftInitialLogTerm
		state.TruncatedState.Index = raftInitialLogIndex
		state.RaftAppliedIndex = raftInitialLogIndex
		if _, err := saveState(ctx, batch, state); err != nil {
			return errors.Wrapf(err, "could not migrate TruncatedState to %+v", &state.TruncatedState)
		}
		log.Warningf(ctx, "migration: synthesized TruncatedState for %+v", desc)
	}
	hs, err := loadHardState(ctx, batch, desc.RangeID)
	if err != nil {
		return errors.Wrap(err, "unable to load HardState")
	}
	// Only update the HardState when there is a nontrivial Commit field. We
	// don't have a snapshot here, so we could wind up lowering the commit
	// index (which would error out and fatal us).
	if hs.Commit == 0 {
		log.Warningf(ctx, "migration: synthesized HardState for %+v", desc)
		if err := synthesizeHardState(ctx, batch, state, hs); err != nil {
			return errors.Wrap(err, "could not migrate HardState")
		}
	}
	return nil
}
Example 4: removeLease
// t.mu needs to be locked.
func (t *tableState) removeLease(lease *LeaseState, store LeaseStore) {
	t.active.remove(lease)
	t.tableNameCache.remove(lease)
	// Release to the store asynchronously, without the tableState lock.
	err := t.stopper.RunAsyncTask(context.TODO(), func(ctx context.Context) {
		if err := store.Release(lease); err != nil {
			log.Warningf(ctx, "error releasing lease %q: %s", lease, err)
		}
	})
	if log.V(1) && err != nil {
		log.Warningf(context.TODO(), "error removing lease from store: %s", err)
	}
}
Example 5: Heartbeat
// Heartbeat is called to update a node's expiration timestamp. This
// method does a conditional put on the node liveness record, and if
// successful, stores the updated liveness record in the nodes map.
func (nl *NodeLiveness) Heartbeat(ctx context.Context, liveness *Liveness) error {
	defer func(start time.Time) {
		if dur := timeutil.Now().Sub(start); dur > time.Second {
			log.Warningf(ctx, "slow heartbeat took %0.1fs", dur.Seconds())
		}
	}(timeutil.Now())
	// Allow only one heartbeat at a time.
	select {
	case nl.heartbeatSem <- struct{}{}:
	case <-ctx.Done():
		return ctx.Err()
	}
	defer func() {
		<-nl.heartbeatSem
	}()
	nodeID := nl.gossip.NodeID.Get()
	var newLiveness Liveness
	if liveness == nil {
		newLiveness = Liveness{
			NodeID: nodeID,
			Epoch:  1,
		}
	} else {
		newLiveness = *liveness
	}
	// We need to add the maximum clock offset to the expiration because it's
	// used when determining liveness for a node.
	newLiveness.Expiration = nl.clock.Now().Add(
		(nl.livenessThreshold + nl.clock.MaxOffset()).Nanoseconds(), 0)
	if err := nl.updateLiveness(ctx, &newLiveness, liveness, func(actual Liveness) error {
		// Update liveness to actual value on mismatch.
		nl.mu.Lock()
		nl.mu.self = actual
		nl.mu.Unlock()
		// If the actual liveness is different than expected, but is
		// considered live, treat the heartbeat as a success. This can
		// happen when the periodic heartbeater races with a concurrent
		// lease acquisition.
		if actual.isLive(nl.clock.Now(), nl.clock.MaxOffset()) {
			return errNodeAlreadyLive
		}
		// Otherwise, return error.
		return errSkippedHeartbeat
	}); err != nil {
		if err == errNodeAlreadyLive {
			return nil
		}
		nl.metrics.HeartbeatFailures.Inc(1)
		return err
	}
	log.VEventf(ctx, 1, "heartbeat %+v", newLiveness.Expiration)
	nl.mu.Lock()
	nl.mu.self = newLiveness
	nl.mu.Unlock()
	nl.metrics.HeartbeatSuccesses.Inc(1)
	return nil
}
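The deferred timing check at the top of Heartbeat is a handy pattern on its own: measure how long a call took and warn only when it crosses a threshold. A standalone sketch of that pattern, using the standard library's time package in place of CockroachDB's timeutil and an illustrative one-second threshold, might look like this:

func doExpensiveWork(ctx context.Context) {
	start := time.Now()
	defer func() {
		// Mirror the "slow heartbeat" check above: log a warning only when
		// the elapsed time exceeds the threshold.
		if dur := time.Since(start); dur > time.Second {
			log.Warningf(ctx, "slow operation took %0.1fs", dur.Seconds())
		}
	}()
	// ... the actual work goes here ...
}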
Example 6: recordJoinEvent
// recordJoinEvent begins an asynchronous task which attempts to log a "node
// join" or "node restart" event. This query will retry until it succeeds or the
// server stops.
func (n *Node) recordJoinEvent() {
	if !n.storeCfg.LogRangeEvents {
		return
	}
	logEventType := sql.EventLogNodeRestart
	if n.initialBoot {
		logEventType = sql.EventLogNodeJoin
	}
	n.stopper.RunWorker(func() {
		ctx, span := n.AnnotateCtxWithSpan(context.Background(), "record-join-event")
		defer span.Finish()
		retryOpts := base.DefaultRetryOptions()
		retryOpts.Closer = n.stopper.ShouldStop()
		for r := retry.Start(retryOpts); r.Next(); {
			if err := n.storeCfg.DB.Txn(ctx, func(txn *client.Txn) error {
				return n.eventLogger.InsertEventRecord(txn,
					logEventType,
					int32(n.Descriptor.NodeID),
					int32(n.Descriptor.NodeID),
					struct {
						Descriptor roachpb.NodeDescriptor
						ClusterID  uuid.UUID
						StartedAt  int64
					}{n.Descriptor, n.ClusterID, n.startedAt},
				)
			}); err != nil {
				log.Warningf(ctx, "%s: unable to log %s event: %s", n, logEventType, err)
			} else {
				return
			}
		}
	})
}
Example 7: updateRangeInfo
// updateRangeInfo is called whenever a range is updated by ApplySnapshot
// or is created by range splitting to set up the fields which are
// uninitialized or need updating.
func (r *Replica) updateRangeInfo(desc *roachpb.RangeDescriptor) error {
	// RangeMaxBytes should be updated by looking up Zone Config in two cases:
	// 1. After applying a snapshot, if the zone config was not updated for
	// this key range, then maxBytes of this range will not be updated either.
	// 2. After a new range is created by a split, only copying maxBytes from
	// the original range won't work as the original and new ranges might belong
	// to different zones.
	// Load the system config.
	cfg, ok := r.store.Gossip().GetSystemConfig()
	if !ok {
		// This could be before the system config was ever gossiped,
		// or it expired. Let the gossip callback set the info.
		ctx := r.AnnotateCtx(context.TODO())
		log.Warningf(ctx, "no system config available, cannot determine range MaxBytes")
		return nil
	}
	// Find zone config for this range.
	zone, err := cfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return errors.Errorf("%s: failed to lookup zone config: %s", r, err)
	}
	r.SetMaxBytes(zone.RangeMaxBytes)
	return nil
}
Example 8: handleResponse
// handleResponse handles errors, remote forwarding, and combines delta
// gossip infos from the remote server with this node's infostore.
func (c *client) handleResponse(ctx context.Context, g *Gossip, reply *Response) error {
	g.mu.Lock()
	defer g.mu.Unlock()
	bytesReceived := int64(reply.Size())
	infosReceived := int64(len(reply.Delta))
	c.clientMetrics.BytesReceived.Inc(bytesReceived)
	c.clientMetrics.InfosReceived.Inc(infosReceived)
	c.nodeMetrics.BytesReceived.Inc(bytesReceived)
	c.nodeMetrics.InfosReceived.Inc(infosReceived)
	// Combine remote node's infostore delta with ours.
	if reply.Delta != nil {
		freshCount, err := g.mu.is.combine(reply.Delta, reply.NodeID)
		if err != nil {
			log.Warningf(ctx, "failed to fully combine delta from node %d: %s", reply.NodeID, err)
		}
		if infoCount := len(reply.Delta); infoCount > 0 {
			if log.V(1) {
				log.Infof(ctx, "received %s from node %d (%d fresh)", extractKeys(reply.Delta), reply.NodeID, freshCount)
			}
		}
		g.maybeTightenLocked()
	}
	c.peerID = reply.NodeID
	g.outgoing.addNode(c.peerID)
	c.remoteHighWaterStamps = reply.HighWaterStamps
	// Handle remote forwarding.
	if reply.AlternateAddr != nil {
		if g.hasIncomingLocked(reply.AlternateNodeID) || g.hasOutgoingLocked(reply.AlternateNodeID) {
			return errors.Errorf("received forward from node %d to %d (%s); already have active connection, skipping",
				reply.NodeID, reply.AlternateNodeID, reply.AlternateAddr)
		}
		// We try to resolve the address, but don't actually use the result.
		// The certificates (if any) may only be valid for the unresolved
		// address.
		if _, err := reply.AlternateAddr.Resolve(); err != nil {
			return errors.Errorf("unable to resolve alternate address %s for node %d: %s", reply.AlternateAddr, reply.AlternateNodeID, err)
		}
		c.forwardAddr = reply.AlternateAddr
		return errors.Errorf("received forward from node %d to %d (%s)", reply.NodeID, reply.AlternateNodeID, reply.AlternateAddr)
	}
	// Check whether we're connected at this point.
	g.signalConnectedLocked()
	// Check whether this outgoing client is duplicating work already
	// being done by an incoming client, either because an outgoing
	// matches an incoming or the client is connecting to itself.
	if nodeID := g.NodeID.Get(); nodeID == c.peerID {
		return errors.Errorf("stopping outgoing client to node %d (%s); loopback connection", c.peerID, c.addr)
	} else if g.hasIncomingLocked(c.peerID) && nodeID > c.peerID {
		// To avoid mutual shutdown, we only shutdown our client if our
		// node ID is higher than the peer's.
		return errors.Errorf("stopping outgoing client to node %d (%s); already have incoming", c.peerID, c.addr)
	}
	return nil
}
Example 9: parseOptions
func parseOptions(data []byte) (sql.SessionArgs, error) {
	args := sql.SessionArgs{}
	buf := readBuffer{msg: data}
	for {
		key, err := buf.getString()
		if err != nil {
			return sql.SessionArgs{}, errors.Errorf("error reading option key: %s", err)
		}
		if len(key) == 0 {
			break
		}
		value, err := buf.getString()
		if err != nil {
			return sql.SessionArgs{}, errors.Errorf("error reading option value: %s", err)
		}
		switch key {
		case "database":
			args.Database = value
		case "user":
			args.User = value
		default:
			if log.V(1) {
				log.Warningf(context.TODO(), "unrecognized configuration parameter %q", key)
			}
		}
	}
	return args, nil
}
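Note the log.V(1) guard around the warning: unrecognized parameters are expected during normal operation, so the message is only emitted when verbose logging is enabled. The same verbosity gating appears in Example 4 (removeLease) and Example 8 (handleResponse).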
Example 10: pushTxn
// pushTxn attempts to abort the txn via push. The wait group is signaled on
// completion.
func pushTxn(
	ctx context.Context,
	db *client.DB,
	now hlc.Timestamp,
	txn *roachpb.Transaction,
	typ roachpb.PushTxnType,
) {
	// Attempt to push the transaction which created the intent.
	pushArgs := &roachpb.PushTxnRequest{
		Span: roachpb.Span{
			Key: txn.Key,
		},
		Now:       now,
		PusherTxn: roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Priority: math.MaxInt32}},
		PusheeTxn: txn.TxnMeta,
		PushType:  typ,
	}
	b := &client.Batch{}
	b.AddRawRequest(pushArgs)
	if err := db.Run(ctx, b); err != nil {
		log.Warningf(ctx, "push of txn %s failed: %s", txn, err)
		return
	}
	br := b.RawResponse()
	// Update the supplied txn on successful push.
	*txn = br.Responses[0].GetInner().(*roachpb.PushTxnResponse).PusheeTxn
}
Example 11: deleteIndexMutationsWithReversedColumns
// deleteIndexMutationsWithReversedColumns deletes index mutations with a
// different mutationID than the schema changer and a reference to one of the
// reversed columns.
func (sc *SchemaChanger) deleteIndexMutationsWithReversedColumns(
	desc *sqlbase.TableDescriptor, columns map[string]struct{},
) {
	newMutations := make([]sqlbase.DescriptorMutation, 0, len(desc.Mutations))
	for _, mutation := range desc.Mutations {
		if mutation.MutationID != sc.mutationID {
			if idx := mutation.GetIndex(); idx != nil {
				deleteMutation := false
				for _, name := range idx.ColumnNames {
					if _, ok := columns[name]; ok {
						// Such an index mutation has to be with direction ADD and
						// in the DELETE_ONLY state. Live indexes referencing live
						// columns cannot be deleted and thus never have direction
						// DROP. All mutations with the ADD direction start off in
						// the DELETE_ONLY state.
						if mutation.Direction != sqlbase.DescriptorMutation_ADD ||
							mutation.State != sqlbase.DescriptorMutation_DELETE_ONLY {
							panic(fmt.Sprintf("mutation in bad state: %+v", mutation))
						}
						log.Warningf(context.TODO(), "delete schema change mutation: %+v", mutation)
						deleteMutation = true
						break
					}
				}
				if deleteMutation {
					continue
				}
			}
		}
		newMutations = append(newMutations, mutation)
	}
	// Reset mutations.
	desc.Mutations = newMutations
}
Example 12: removeLeaseIfExpiring
// removeLeaseIfExpiring removes a lease and returns true if it is about to expire.
// The method also resets the transaction deadline.
func (p *planner) removeLeaseIfExpiring(lease *LeaseState) bool {
	if lease == nil || lease.hasSomeLifeLeft(p.leaseMgr.clock) {
		return false
	}
	// Remove the lease from p.leases.
	idx := -1
	for i, l := range p.leases {
		if l == lease {
			idx = i
			break
		}
	}
	if idx == -1 {
		log.Warningf(p.ctx(), "lease (%s) not found", lease)
		return false
	}
	p.leases[idx] = p.leases[len(p.leases)-1]
	p.leases[len(p.leases)-1] = nil
	p.leases = p.leases[:len(p.leases)-1]
	if err := p.leaseMgr.Release(lease); err != nil {
		log.Warning(p.ctx(), err)
	}
	// Reset the deadline so that a new deadline will be set after the lease is acquired.
	p.txn.ResetDeadline()
	for _, l := range p.leases {
		p.txn.UpdateDeadlineMaybe(hlc.Timestamp{WallTime: l.Expiration().UnixNano()})
	}
	return true
}
Example 13: RaftSnapshot
// RaftSnapshot handles incoming streaming snapshot requests.
func (t *RaftTransport) RaftSnapshot(stream MultiRaft_RaftSnapshotServer) error {
	errCh := make(chan error, 1)
	if err := t.rpcContext.Stopper.RunAsyncTask(stream.Context(), func(ctx context.Context) {
		errCh <- func() error {
			req, err := stream.Recv()
			if err != nil {
				return err
			}
			if req.Header == nil {
				return stream.Send(&SnapshotResponse{
					Status:  SnapshotResponse_ERROR,
					Message: "client error: no header in first snapshot request message"})
			}
			rmr := req.Header.RaftMessageRequest
			t.recvMu.Lock()
			handler, ok := t.recvMu.handlers[rmr.ToReplica.StoreID]
			t.recvMu.Unlock()
			if !ok {
				log.Warningf(ctx, "unable to accept Raft message from %+v: no handler registered for %+v",
					rmr.FromReplica, rmr.ToReplica)
				return roachpb.NewStoreNotFoundError(rmr.ToReplica.StoreID)
			}
			return handler.HandleSnapshot(req.Header, stream)
		}()
	}); err != nil {
		return err
	}
	select {
	case <-t.rpcContext.Stopper.ShouldStop():
		return nil
	case err := <-errCh:
		return err
	}
}
Example 14: reportUsage
func (s *Server) reportUsage(ctx context.Context) {
	b := new(bytes.Buffer)
	if err := json.NewEncoder(b).Encode(s.getReportingInfo()); err != nil {
		log.Warning(ctx, err)
		return
	}
	q := reportingURL.Query()
	q.Set("version", build.GetInfo().Tag)
	q.Set("uuid", s.node.ClusterID.String())
	reportingURL.RawQuery = q.Encode()
	res, err := http.Post(reportingURL.String(), "application/json", b)
	if err != nil {
		// Return on any error so we never dereference a nil response below;
		// only log when verbose logging is enabled.
		if log.V(2) {
			// This is probably going to be relatively common in production
			// environments where network access is usually curtailed.
			log.Warning(ctx, "Failed to report node usage metrics: ", err)
		}
		return
	}
	if res.StatusCode != http.StatusOK {
		b, err := ioutil.ReadAll(res.Body)
		log.Warningf(ctx, "Failed to report node usage metrics: status: %s, body: %s, "+
			"error: %v", res.Status, b, err)
	}
}
Example 15: processIntentsAsync
// processIntentsAsync asynchronously processes intents which were
// encountered during another command but did not interfere with the
// execution of that command. This occurs in two cases: inconsistent
// reads and EndTransaction (which queues its own external intents for
// processing via this method). The two cases are handled somewhat
// differently and would be better served by different entry points,
// but combining them simplifies the plumbing necessary in Replica.
func (ir *intentResolver) processIntentsAsync(r *Replica, intents []intentsWithArg) {
	if r.store.TestingKnobs().DisableAsyncIntentResolution {
		return
	}
	now := r.store.Clock().Now()
	ctx := context.TODO()
	stopper := r.store.Stopper()
	for _, item := range intents {
		err := stopper.RunLimitedAsyncTask(
			ctx, ir.sem, false /* wait */, func(ctx context.Context) {
				ir.processIntents(ctx, r, item, now)
			})
		if err != nil {
			if err == stop.ErrThrottled {
				// A limited task was not available. Rather than waiting for one, we
				// reuse the current goroutine.
				ir.processIntents(ctx, r, item, now)
			} else {
				log.Warningf(ctx, "failed to resolve intents: %s", err)
				return
			}
		}
	}
}