This article collects typical usage examples of the Infof function from the Go package github.com/cockroachdb/cockroach/pkg/util/log. If you are wondering what log.Infof does, how to call it, or what idiomatic usage looks like, the curated examples below should help.
Fifteen code examples of the Infof function follow, ordered by popularity by default.
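Before turning to the examples, here is a minimal, self-contained sketch of the basic call pattern. The message, port value, and program structure are illustrative assumptions rather than code taken from the examples below; the point is simply that Infof takes a context.Context as its first argument, followed by a printf-style format string and its arguments.

package main

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

func main() {
	ctx := context.Background()
	// Infof logs at severity INFO. In CockroachDB, contexts are often
	// annotated with log tags (node ID, store ID, ...) that get prefixed
	// to the message; a plain Background context works too.
	log.Infof(ctx, "server ready, listening on port %d", 26257) // illustrative message
}

All fifteen examples below follow this same shape; they differ mainly in where the context comes from (context.Background(), context.TODO(), or a context annotated by the surrounding component) and in whether the call is guarded by a verbosity check.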
Example 1: postFreeze
func postFreeze(
	c cluster.Cluster, freeze bool, timeout time.Duration,
) (serverpb.ClusterFreezeResponse, error) {
	httpClient := cluster.HTTPClient
	httpClient.Timeout = timeout
	var resp serverpb.ClusterFreezeResponse
	log.Infof(context.Background(), "requesting: freeze=%t, timeout=%s", freeze, timeout)
	cb := func(v proto.Message) {
		oldNum := resp.RangesAffected
		resp = *v.(*serverpb.ClusterFreezeResponse)
		if oldNum > resp.RangesAffected {
			resp.RangesAffected = oldNum
		}
		if (resp != serverpb.ClusterFreezeResponse{}) {
			log.Infof(context.Background(), "%+v", &resp)
		}
	}
	err := httputil.StreamJSON(
		httpClient,
		c.URL(0)+"/_admin/v1/cluster/freeze",
		&serverpb.ClusterFreezeRequest{Freeze: freeze},
		&serverpb.ClusterFreezeResponse{},
		cb,
	)
	return resp, err
}
Example 2: testGossipPeeringsInner
func testGossipPeeringsInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	num := c.NumNodes()
	deadline := timeutil.Now().Add(cfg.Duration)
	waitTime := longWaitTime
	if cfg.Duration < waitTime {
		waitTime = shortWaitTime
	}
	for timeutil.Now().Before(deadline) {
		CheckGossip(ctx, t, c, waitTime, HasPeers(num))
		// Restart the first node.
		log.Infof(ctx, "restarting node 0")
		if err := c.Restart(ctx, 0); err != nil {
			t.Fatal(err)
		}
		CheckGossip(ctx, t, c, waitTime, HasPeers(num))
		// Restart another node (if there is one).
		var pickedNode int
		if num > 1 {
			pickedNode = rand.Intn(num-1) + 1
		}
		log.Infof(ctx, "restarting node %d", pickedNode)
		if err := c.Restart(ctx, pickedNode); err != nil {
			t.Fatal(err)
		}
		CheckGossip(ctx, t, c, waitTime, HasPeers(num))
	}
}
Example 3: deleteRow
// deleteRow adds to the batch the kv operations necessary to delete a table row
// with the given values.
func (rd *rowDeleter) deleteRow(ctx context.Context, b *client.Batch, values []parser.Datum) error {
	if err := rd.fks.checkAll(values); err != nil {
		return err
	}
	primaryIndexKey, secondaryIndexEntries, err := rd.helper.encodeIndexes(rd.fetchColIDtoRowIndex, values)
	if err != nil {
		return err
	}
	for _, secondaryIndexEntry := range secondaryIndexEntries {
		if log.V(2) {
			log.Infof(ctx, "Del %s", secondaryIndexEntry.Key)
		}
		b.Del(secondaryIndexEntry.Key)
	}
	// Delete the row.
	rd.startKey = roachpb.Key(primaryIndexKey)
	rd.endKey = roachpb.Key(encoding.EncodeNotNullDescending(primaryIndexKey))
	if log.V(2) {
		log.Infof(ctx, "DelRange %s - %s", rd.startKey, rd.endKey)
	}
	b.DelRange(&rd.startKey, &rd.endKey, false)
	rd.startKey, rd.endKey = nil, nil
	return nil
}
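This example, like most of the ones that follow, wraps its Infof calls in log.V(2). log.V(level) reports whether the configured verbosity is at least that level, so the format arguments are only evaluated when the extra detail has actually been requested; at the default verbosity the branch is skipped entirely. A minimal sketch of the pattern, with an illustrative key variable and message:

if log.V(2) {
	// Not reached at the default verbosity, so formatting the key
	// costs nothing in the common case.
	log.Infof(ctx, "Del %s", key)
}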
Example 4: main
func main() {
	flag.Parse()
	c := localcluster.New(*numNodes)
	defer c.Close()
	log.SetExitFunc(func(code int) {
		c.Close()
		os.Exit(code)
	})
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	a := newAllocSim(c)
	go func() {
		var exitStatus int
		select {
		case s := <-signalCh:
			log.Infof(context.Background(), "signal received: %v", s)
			exitStatus = 1
		case <-time.After(*duration):
			log.Infof(context.Background(), "finished run of: %s", *duration)
		}
		a.finalStatus()
		c.Close()
		os.Exit(exitStatus)
	}()
	c.Start("allocsim", *workers, flag.Args(), []string{})
	c.UpdateZoneConfig(1, 1<<20)
	a.run(*workers)
}
Example 5: improve
// improve returns a candidate StoreDescriptor to rebalance a replica to. The
// strategy is to always converge on the mean range count. If that isn't
// possible, we don't return any candidate.
func (rcb rangeCountBalancer) improve(sl StoreList, excluded nodeIDSet) *roachpb.StoreDescriptor {
	// Attempt to select a better candidate from the supplied list.
	sl.stores = selectRandom(rcb.rand, allocatorRandomCount, sl, excluded)
	candidate := rcb.selectBest(sl)
	if candidate == nil {
		if log.V(2) {
			log.Infof(context.TODO(), "not rebalancing: no valid candidate targets: %s",
				formatCandidates(nil, sl.stores))
		}
		return nil
	}
	// Adding a replica to the candidate must make its range count converge on the
	// mean range count.
	rebalanceConvergesOnMean := rebalanceToConvergesOnMean(sl, *candidate)
	if !rebalanceConvergesOnMean {
		if log.V(2) {
			log.Infof(context.TODO(), "not rebalancing: %s wouldn't converge on the mean %.1f",
				formatCandidates(candidate, sl.stores), sl.candidateCount.mean)
		}
		return nil
	}
	if log.V(2) {
		log.Infof(context.TODO(), "rebalancing: mean=%.1f %s",
			sl.candidateCount.mean, formatCandidates(candidate, sl.stores))
	}
	return candidate
}
Example 6: waitAndProcess
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(
	ctx context.Context, start time.Time, clock *hlc.Clock, stopper *stop.Stopper, repl *Replica,
) bool {
	waitInterval := rs.paceInterval(start, timeutil.Now())
	rs.waitTimer.Reset(waitInterval)
	if log.V(6) {
		log.Infof(ctx, "wait timer interval set to %s", waitInterval)
	}
	for {
		select {
		case <-rs.waitTimer.C:
			if log.V(6) {
				log.Infof(ctx, "wait timer fired")
			}
			rs.waitTimer.Read = true
			if repl == nil {
				return false
			}
			if log.V(2) {
				log.Infof(ctx, "replica scanner processing %s", repl)
			}
			for _, q := range rs.queues {
				q.MaybeAdd(repl, clock.Now())
			}
			return false
		case repl := <-rs.removed:
			rs.removeReplica(repl)
		case <-stopper.ShouldStop():
			return true
		}
	}
}
Example 7: process
// process synchronously invokes admin split for each proposed split key.
func (sq *splitQueue) process(
	ctx context.Context, now hlc.Timestamp, r *Replica, sysCfg config.SystemConfig,
) error {
	// First handle case of splitting due to zone config maps.
	desc := r.Desc()
	splitKeys := sysCfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)
	if len(splitKeys) > 0 {
		log.Infof(ctx, "splitting at keys %v", splitKeys)
		for _, splitKey := range splitKeys {
			if err := sq.db.AdminSplit(ctx, splitKey.AsRawKey()); err != nil {
				return errors.Errorf("unable to split %s at key %q: %s", r, splitKey, err)
			}
		}
		return nil
	}
	// Next handle case of splitting due to size.
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return err
	}
	size := r.GetMVCCStats().Total()
	// FIXME: why is this implementation not the same as the one above?
	if float64(size)/float64(zone.RangeMaxBytes) > 1 {
		log.Infof(ctx, "splitting size=%d max=%d", size, zone.RangeMaxBytes)
		if _, pErr := client.SendWrappedWith(ctx, r, roachpb.Header{
			Timestamp: now,
		}, &roachpb.AdminSplitRequest{
			Span: roachpb.Span{Key: desc.StartKey.AsRawKey()},
		}); pErr != nil {
			return pErr.GoError()
		}
	}
	return nil
}
Example 8: createDescriptorWithID
func (p *planner) createDescriptorWithID(
	idKey roachpb.Key, id sqlbase.ID, descriptor sqlbase.DescriptorProto,
) error {
	descriptor.SetID(id)
	// TODO(pmattis): The error currently returned below is likely going to be
	// difficult to interpret.
	//
	// TODO(pmattis): Need to handle if-not-exists here as well.
	//
	// TODO(pmattis): This is writing the namespace and descriptor table entries,
	// but not going through the normal INSERT logic and not performing a precise
	// mimicry. In particular, we're only writing a single key per table, while
	// perfect mimicry would involve writing a sentinel key for each row as well.
	descKey := sqlbase.MakeDescMetadataKey(descriptor.GetID())
	b := &client.Batch{}
	descID := descriptor.GetID()
	descDesc := sqlbase.WrapDescriptor(descriptor)
	if log.V(2) {
		log.Infof(p.ctx(), "CPut %s -> %d", idKey, descID)
		log.Infof(p.ctx(), "CPut %s -> %s", descKey, descDesc)
	}
	b.CPut(idKey, descID, nil)
	b.CPut(descKey, descDesc, nil)
	p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
		if err := expectDescriptorID(systemConfig, idKey, descID); err != nil {
			return err
		}
		return expectDescriptor(systemConfig, descKey, descDesc)
	})
	return p.txn.Run(b)
}
Example 9: flush
// flush sends the rows accumulated so far in a StreamMessage.
func (m *outbox) flush(last bool, err error) error {
	if !last && m.numRows == 0 {
		return nil
	}
	msg := m.encoder.FormMessage(last, err)
	if log.V(3) {
		log.Infof(m.flowCtx.Context, "flushing outbox")
	}
	var sendErr error
	if m.stream != nil {
		sendErr = m.stream.Send(msg)
	} else {
		sendErr = m.syncFlowStream.Send(msg)
	}
	if sendErr != nil {
		if log.V(1) {
			log.Errorf(m.flowCtx.Context, "outbox flush error: %s", sendErr)
		}
	} else if log.V(3) {
		log.Infof(m.flowCtx.Context, "outbox flushed")
	}
	if sendErr != nil {
		return sendErr
	}
	m.numRows = 0
	return nil
}
Example 10: pullImage
func pullImage(
	ctx context.Context, l *LocalCluster, ref string, options types.ImagePullOptions,
) error {
	// HACK: on CircleCI, docker pulls the image on the first access from an
	// acceptance test even though that image is already present. So we first
	// check to see if our image is present in order to avoid this slowness.
	if hasImage(ctx, l, ref) {
		log.Infof(ctx, "ImagePull %s already exists", ref)
		return nil
	}
	log.Infof(ctx, "ImagePull %s starting", ref)
	defer log.Infof(ctx, "ImagePull %s complete", ref)
	rc, err := l.client.ImagePull(ctx, ref, options)
	if err != nil {
		return err
	}
	defer rc.Close()
	out := os.Stderr
	outFd := out.Fd()
	isTerminal := isatty.IsTerminal(outFd)
	return jsonmessage.DisplayJSONMessagesStream(rc, out, outFd, isTerminal, nil)
}
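A detail worth noting in this example: the arguments of a deferred log.Infof are evaluated when the defer statement executes, not when the surrounding function returns. That is harmless here because ref never changes, but it matters when the logged value is mutated later, as in the illustrative sketch below (the status variable is hypothetical):

status := "starting"
defer log.Infof(ctx, "finished with status=%s", status) // always logs "starting"
defer func() {
	log.Infof(ctx, "finished with status=%s", status) // logs the final value, "done"
}()
status = "done"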
Example 11: initNodeID
// initNodeID updates the internal NodeDescriptor with the given ID. If zero is
// supplied, a new NodeID is allocated with the first invocation. For all other
// values, the supplied ID is stored into the descriptor (unless one has been
// set previously, in which case a fatal error occurs).
//
// Upon setting a new NodeID, the descriptor is gossiped and the NodeID is
// stored into the gossip instance.
func (n *Node) initNodeID(id roachpb.NodeID) {
	ctx := n.AnnotateCtx(context.TODO())
	if id < 0 {
		log.Fatalf(ctx, "NodeID must not be negative")
	}
	if o := n.Descriptor.NodeID; o > 0 {
		if id == 0 {
			return
		}
		log.Fatalf(ctx, "cannot initialize NodeID to %d, already have %d", id, o)
	}
	var err error
	if id == 0 {
		ctxWithSpan, span := n.AnnotateCtxWithSpan(ctx, "alloc-node-id")
		id, err = allocateNodeID(ctxWithSpan, n.storeCfg.DB)
		if err != nil {
			log.Fatal(ctxWithSpan, err)
		}
		log.Infof(ctxWithSpan, "new node allocated ID %d", id)
		if id == 0 {
			log.Fatal(ctxWithSpan, "new node allocated illegal ID 0")
		}
		span.Finish()
		n.storeCfg.Gossip.NodeID.Set(ctx, id)
	} else {
		log.Infof(ctx, "node ID %d initialized", id)
	}
	// Gossip the node descriptor to make this node addressable by node ID.
	n.Descriptor.NodeID = id
	if err = n.storeCfg.Gossip.SetNodeDescriptor(&n.Descriptor); err != nil {
		log.Fatalf(ctx, "couldn't gossip descriptor for node %d: %s", n.Descriptor.NodeID, err)
	}
}
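Besides Infof, this example (and Example 9 above) reaches for the other severities exported by the same package: Errorf shares Infof's shape, while Fatal and Fatalf log and then terminate the process, which is why initNodeID uses them for unrecoverable ID conflicts. A rough sketch of how the severities are typically chosen; reportResult and isRecoverable are hypothetical helpers:

func reportResult(ctx context.Context, err error) {
	if err == nil {
		log.Infof(ctx, "operation succeeded")
		return
	}
	if isRecoverable(err) {
		// Recoverable: record the failure and let the caller retry.
		log.Errorf(ctx, "operation failed: %v", err)
		return
	}
	// Unrecoverable: Fatalf logs the message and then exits the process,
	// much like initNodeID above does for a conflicting node ID.
	log.Fatalf(ctx, "unrecoverable failure: %v", err)
}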
Example 12: runHistoryWithRetry
// runHistoryWithRetry intercepts retry errors. If one is encountered,
// alternate histories are generated which all contain the exact
// history prefix which encountered the error, but which recombine the
// remaining commands with all of the commands from the retrying
// history.
//
// This process continues recursively if there are further retries.
func (hv *historyVerifier) runHistoryWithRetry(
	priorities []int32, isolations []enginepb.IsolationType, cmds []*cmd, db *client.DB, t *testing.T,
) error {
	if err := hv.runHistory(priorities, isolations, cmds, db, t); err != nil {
		if log.V(1) {
			log.Infof(context.Background(), "got an error running history %s: %s", historyString(cmds), err)
		}
		retry, ok := err.(*retryError)
		if !ok {
			return err
		}
		if _, hasRetried := hv.retriedTxns[retry.txnIdx]; hasRetried {
			if log.V(1) {
				log.Infof(context.Background(), "retried txn %d twice; skipping history", retry.txnIdx+1)
			}
			return nil
		}
		hv.retriedTxns[retry.txnIdx] = struct{}{}
		// Randomly subsample 5% of histories for reduced execution time.
		enumHis := sampleHistories(enumerateHistoriesAfterRetry(retry, cmds), 0.05)
		for i, h := range enumHis {
			if log.V(1) {
				log.Infof(context.Background(), "after retry, running alternate history %d of %d", i, len(enumHis))
			}
			if err := hv.runHistoryWithRetry(priorities, isolations, h, db, t); err != nil {
				return err
			}
		}
	}
	return nil
}
Example 13: AddMetricStruct
// AddMetricStruct examines all fields of metricStruct and adds
// all Iterable or metricGroup objects to the registry.
func (r *Registry) AddMetricStruct(metricStruct interface{}) {
	v := reflect.ValueOf(metricStruct)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	t := v.Type()
	for i := 0; i < v.NumField(); i++ {
		vfield, tfield := v.Field(i), t.Field(i)
		if !vfield.CanInterface() {
			if log.V(2) {
				log.Infof(context.TODO(), "Skipping unexported field %s", tfield.Name)
			}
			continue
		}
		val := vfield.Interface()
		switch typ := val.(type) {
		case Iterable:
			r.AddMetric(typ)
		case Struct:
			r.AddMetricStruct(typ)
		default:
			if log.V(2) {
				log.Infof(context.TODO(), "Skipping non-metric field %s", tfield.Name)
			}
		}
	}
}
Example 14: sendGossip
// sendGossip sends the latest gossip to the remote server, based on
// the remote server's notion of other nodes' high water timestamps.
func (c *client) sendGossip(g *Gossip, stream Gossip_GossipClient) error {
	g.mu.Lock()
	if delta := g.mu.is.delta(c.remoteHighWaterStamps); len(delta) > 0 {
		args := Request{
			NodeID:          g.NodeID.Get(),
			Addr:            g.mu.is.NodeAddr,
			Delta:           delta,
			HighWaterStamps: g.mu.is.getHighWaterStamps(),
		}
		bytesSent := int64(args.Size())
		infosSent := int64(len(delta))
		c.clientMetrics.BytesSent.Inc(bytesSent)
		c.clientMetrics.InfosSent.Inc(infosSent)
		c.nodeMetrics.BytesSent.Inc(bytesSent)
		c.nodeMetrics.InfosSent.Inc(infosSent)
		if log.V(1) {
			ctx := c.AnnotateCtx(stream.Context())
			if c.peerID != 0 {
				log.Infof(ctx, "sending %s to node %d (%s)", extractKeys(args.Delta), c.peerID, c.addr)
			} else {
				log.Infof(ctx, "sending %s to %s", extractKeys(args.Delta), c.addr)
			}
		}
		g.mu.Unlock()
		return stream.Send(&args)
	}
	g.mu.Unlock()
	return nil
}
Example 15: wrap
// wrap the supplied planNode with the sortNode if sorting is required.
// The first returned value is "true" if the sort node can be squashed
// in the selectTopNode (sorting unneeded).
func (n *sortNode) wrap(plan planNode) (bool, planNode) {
	if n != nil {
		// Check to see if the requested ordering is compatible with the existing
		// ordering.
		existingOrdering := plan.Ordering()
		if log.V(2) {
			log.Infof(n.ctx, "Sort: existing=%+v desired=%+v", existingOrdering, n.ordering)
		}
		match := computeOrderingMatch(n.ordering, existingOrdering, false)
		if match < len(n.ordering) {
			n.plan = plan
			n.needSort = true
			return false, n
		}
		if len(n.columns) < len(plan.Columns()) {
			// No sorting required, but we have to strip off the extra render
			// expressions we added.
			n.plan = plan
			return false, n
		}
		if log.V(2) {
			log.Infof(n.ctx, "Sort: no sorting required")
		}
	}
	return true, plan
}