This page collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/util/log.Infof. If you have been wondering what Infof does, how to use it, or where to find example code, the hand-picked function examples below may help.
The listing below shows 15 code examples of the Infof function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
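Before the individual examples, here is a minimal sketch of the two logging patterns that recur throughout them: unconditional Printf-style logging via log.Infof/log.Errorf/log.Fatalf, and verbosity-gated logging behind a log.V(n) check. The import path and call signatures match the examples below; the function and its arguments are illustrative only.

package main

import (
    "errors"

    "github.com/cockroachdb/cockroach/util/log"
)

// logPatterns is illustrative only: it demonstrates the logging calls
// used throughout the examples on this page.
func logPatterns(nodeID int, err error) {
    // Printf-style formatting, always emitted at INFO level.
    log.Infof("node %d starting", nodeID)

    // Verbosity-gated logging: the log.V guard skips the formatting work
    // unless the process runs at verbosity level 2 or higher.
    if log.V(2) {
        log.Infof("node %d verbose detail", nodeID)
    }

    if err != nil {
        // Same formatting conventions at ERROR level; log.Fatalf would
        // additionally terminate the process.
        log.Errorf("node %d hit an error: %v", nodeID, err)
    }
}

func main() {
    logPatterns(1, errors.New("example error"))
}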
Example 1: connectGossip
// connectGossip connects to the gossip network and reads the cluster ID. If
// this node is already part of a cluster, the cluster ID is verified
// for a match. If not part of a cluster, the cluster ID is set. The
// node's address is gossiped with the node ID as the gossip key.
func (n *Node) connectGossip() {
    log.Infof("connecting to gossip network to verify cluster ID...")
    <-n.gossip.Connected

    val, err := n.gossip.GetInfo(gossip.KeyClusterID)
    if err != nil || val == nil {
        log.Fatalf("unable to ascertain cluster ID from gossip network: %v", err)
    }
    gossipClusterID := val.(string)

    if n.ClusterID == "" {
        n.ClusterID = gossipClusterID
    } else if n.ClusterID != gossipClusterID {
        log.Fatalf("node %d belongs to cluster %q but is attempting to connect to a gossip network for cluster %q",
            n.Descriptor.NodeID, n.ClusterID, gossipClusterID)
    }
    log.Infof("node connected via gossip and verified as part of cluster %q", gossipClusterID)

    // Gossip the node address keyed by node ID.
    if n.Descriptor.NodeID != 0 {
        nodeIDKey := gossip.MakeNodeIDGossipKey(n.Descriptor.NodeID)
        if err := n.gossip.AddInfo(nodeIDKey, n.Descriptor.Address, ttlNodeIDGossip); err != nil {
            log.Errorf("couldn't gossip address for node %d: %v", n.Descriptor.NodeID, err)
        }
    }
}
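As a companion to this example, the following sketch isolates the gossip read/write pair it uses: GetInfo to fetch a value by key (returning an interface{} that must be type-asserted, as with the cluster ID above) and AddInfo to publish a value with a TTL. This is a hedged sketch, not library documentation: the key and value are hypothetical, and the time.Duration TTL is an assumption modeled on the ttlNodeIDGossip argument above.

// shareAndRead is an illustrative sketch assuming a connected
// *gossip.Gossip instance g. The key, the value, and the TTL are
// hypothetical.
func shareAndRead(g *gossip.Gossip) error {
    // Publish a value under a key; peers see it until the TTL expires.
    // (time.Minute as the TTL is an assumption, mirroring ttlNodeIDGossip.)
    if err := g.AddInfo("example-key", "example-value", time.Minute); err != nil {
        return err
    }
    // Read it back; GetInfo returns an interface{} plus an error.
    val, err := g.GetInfo("example-key")
    if err != nil {
        return err
    }
    log.Infof("read gossiped value %q", val.(string))
    return nil
}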
Example 2: process
// process synchronously invokes admin split for each proposed split key.
func (sq *splitQueue) process(now roachpb.Timestamp, rng *Replica,
    sysCfg *config.SystemConfig) error {
    // First handle case of splitting due to zone config maps.
    desc := rng.Desc()
    splitKeys := sysCfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)
    if len(splitKeys) > 0 {
        log.Infof("splitting %s at keys %v", rng, splitKeys)
        for _, splitKey := range splitKeys {
            if err := sq.db.AdminSplit(splitKey.AsRawKey()); err != nil {
                return util.Errorf("unable to split %s at key %q: %s", rng, splitKey, err)
            }
        }
        return nil
    }

    // Next handle case of splitting due to size.
    zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
    if err != nil {
        return err
    }
    // FIXME: why is this implementation not the same as the one above?
    if float64(rng.stats.GetSize())/float64(zone.RangeMaxBytes) > 1 {
        log.Infof("splitting %s size=%d max=%d", rng, rng.stats.GetSize(), zone.RangeMaxBytes)
        if _, pErr := client.SendWrapped(rng, rng.context(), &roachpb.AdminSplitRequest{
            Span: roachpb.Span{Key: desc.StartKey.AsRawKey()},
        }); pErr != nil {
            return pErr.GoError()
        }
    }
    return nil
}
Example 3: wrap
// wrap the supplied planNode with the sortNode if sorting is required.
func (n *sortNode) wrap(plan planNode) planNode {
    if n != nil {
        // Check to see if the requested ordering is compatible with the existing
        // ordering.
        existingOrdering := plan.Ordering()
        if log.V(2) {
            log.Infof("Sort: existing=%d desired=%d", existingOrdering, n.ordering)
        }
        match := computeOrderingMatch(n.ordering, existingOrdering, false)
        if match < len(n.ordering) {
            n.plan = plan
            n.needSort = true
            return n
        }
        if len(n.columns) < len(plan.Columns()) {
            // No sorting required, but we have to strip off the extra render
            // expressions we added.
            n.plan = plan
            return n
        }
    }
    if log.V(2) {
        log.Infof("Sort: no sorting required")
    }
    return plan
}
Example 4: start
// start dials the remote addr and commences gossip once connected. Upon exit,
// the client is sent on the disconnected channel. This method starts client
// processing in a goroutine and returns immediately.
func (c *client) start(g *Gossip, disconnected chan *client, ctx *rpc.Context, stopper *stop.Stopper) {
    stopper.RunWorker(func() {
        defer func() {
            disconnected <- c
        }()

        // Note: avoid using `grpc.WithBlock` here. This code is already
        // asynchronous from the caller's perspective, so the only effect of
        // `WithBlock` here is blocking shutdown - at the time of this writing,
        // that ends up making `kv` tests take twice as long.
        conn, err := ctx.GRPCDial(c.addr.String())
        if err != nil {
            log.Errorf("failed to dial: %v", err)
            return
        }

        // Start gossiping.
        if err := c.gossip(g, NewGossipClient(conn), stopper); err != nil {
            if !grpcutil.IsClosedConnection(err) {
                g.mu.Lock()
                peerID := c.peerID
                g.mu.Unlock()
                if peerID != 0 {
                    log.Infof("closing client to node %d (%s): %s", peerID, c.addr, err)
                } else {
                    log.Infof("closing client to %s: %s", c.addr, err)
                }
            }
        }
    })
}
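Example 8 below shows what appears to be an earlier revision of this same function, in which the dial still passes grpc.WithBlock; the comment above explains why that option was dropped.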
Example 5: bootstrapStores
// bootstrapStores bootstraps uninitialized stores once the cluster
// and node IDs have been established for this node. Store IDs are
// allocated via a sequence id generator stored at a system key per
// node.
func (n *Node) bootstrapStores(bootstraps *list.List, stopper *stop.Stopper) {
    log.Infof("bootstrapping %d store(s)", bootstraps.Len())
    if n.ClusterID == "" {
        panic("ClusterID missing during store bootstrap of auxiliary store")
    }

    // Bootstrap all waiting stores by allocating a new store id for
    // each and invoking store.Bootstrap() to persist.
    inc := int64(bootstraps.Len())
    firstID, err := allocateStoreIDs(n.Descriptor.NodeID, inc, n.ctx.DB)
    if err != nil {
        log.Fatal(err)
    }
    sIdent := roachpb.StoreIdent{
        ClusterID: n.ClusterID,
        NodeID:    n.Descriptor.NodeID,
        StoreID:   firstID,
    }
    for e := bootstraps.Front(); e != nil; e = e.Next() {
        s := e.Value.(*storage.Store)
        if err := s.Bootstrap(sIdent, stopper); err != nil {
            log.Fatal(err)
        }
        if err := s.Start(stopper); err != nil {
            log.Fatal(err)
        }
        n.stores.AddStore(s)
        sIdent.StoreID++
        log.Infof("bootstrapped store %s", s)
        // Done regularly in Node.startGossip, but this cuts down the time
        // until this store is used for range allocations.
        s.GossipStore()
    }
}
Example 6: runExterminate
// runExterminate destroys the data held in the specified stores.
func runExterminate(cmd *cobra.Command, args []string) {
    err := Context.Init("exterminate")
    if err != nil {
        log.Errorf("failed to initialize context: %s", err)
        return
    }

    // First attempt to shut down the server. Note that an error of EOF just
    // means the HTTP server shut down before the request to quit returned.
    if err := server.SendQuit(Context); err != nil {
        log.Infof("shutdown node %s: %s", Context.Addr, err)
    } else {
        log.Infof("shutdown node in anticipation of data extermination")
    }

    // Exterminate all data held in the specified stores.
    for _, e := range Context.Engines {
        if rocksdb, ok := e.(*engine.RocksDB); ok {
            log.Infof("exterminating data from store %s", e)
            if err := rocksdb.Destroy(); err != nil {
                log.Fatalf("unable to destroy store %s: %s", e, err)
            }
        }
    }
    log.Infof("exterminated all data from stores %s", Context.Engines)
}
Example 7: waitAndProcess
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(start time.Time, clock *hlc.Clock, stopper *stop.Stopper,
    repl *Replica) bool {
    waitInterval := rs.paceInterval(start, timeutil.Now())
    rs.waitTimer.Reset(waitInterval)
    if log.V(6) {
        log.Infof("Wait time interval set to %s", waitInterval)
    }
    for {
        select {
        case <-rs.waitTimer.C:
            rs.waitTimer.Read = true
            if repl == nil {
                return false
            }
            return !stopper.RunTask(func() {
                // Try adding replica to all queues.
                for _, q := range rs.queues {
                    q.MaybeAdd(repl, clock.Now())
                }
            })
        case repl := <-rs.removed:
            // Remove replica from all queues as applicable.
            for _, q := range rs.queues {
                q.MaybeRemove(repl)
            }
            if log.V(6) {
                log.Infof("removed replica %s", repl)
            }
        case <-stopper.ShouldStop():
            return true
        }
    }
}
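The timer-driven select loop above is a general Go shape rather than anything CockroachDB-specific. As a hedged, stdlib-only sketch (work, removed, and stop are hypothetical stand-ins for the scanner's queues and channels):

// pollLoop is an illustrative stdlib analogue of the wait/removed/stop
// select loop in waitAndProcess. It is not the scanner's actual code.
func pollLoop(interval time.Duration, work func(), removed <-chan string, stop <-chan struct{}) {
    timer := time.NewTimer(interval)
    defer timer.Stop()
    for {
        select {
        case <-timer.C:
            work()
            // Re-arm the timer; safe here because it has just fired.
            timer.Reset(interval)
        case name := <-removed:
            log.Infof("removed %s", name)
        case <-stop:
            return
        }
    }
}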
Example 8: start
// start dials the remote addr and commences gossip once connected. Upon exit,
// the client is sent on the disconnected channel. This method starts client
// processing in a goroutine and returns immediately.
func (c *client) start(g *Gossip, disconnected chan *client, ctx *rpc.Context, stopper *stop.Stopper) {
    stopper.RunWorker(func() {
        defer func() {
            disconnected <- c
        }()
        conn, err := ctx.GRPCDial(c.addr.String(), grpc.WithBlock())
        if err != nil {
            log.Errorf("failed to dial: %v", err)
            return
        }
        // Start gossiping.
        if err := c.gossip(g, NewGossipClient(conn), stopper); err != nil {
            if !grpcutil.IsClosedConnection(err) {
                g.mu.Lock()
                peerID := c.peerID
                g.mu.Unlock()
                if peerID != 0 {
                    log.Infof("closing client to node %d (%s): %s", peerID, c.addr, err)
                } else {
                    log.Infof("closing client to %s: %s", c.addr, err)
                }
            }
        }
    })
}
Example 9: runExterminate
// runExterminate destroys the data held in the specified stores.
func runExterminate(cmd *cobra.Command, args []string) {
    if err := context.InitStores(); err != nil {
        log.Errorf("failed to initialize context: %s", err)
        return
    }

    // First attempt to shut down the server. Note that an error of EOF just
    // means the HTTP server shut down before the request to quit returned.
    admin := client.NewAdminClient(&context.Context, context.Addr, client.Quit)
    body, err := admin.Get()
    if err != nil {
        log.Infof("shutdown node %s: %s", context.Addr, err)
    } else {
        log.Infof("shutdown node in anticipation of data extermination: %s", body)
    }

    // Exterminate all data held in the specified stores.
    for _, e := range context.Engines {
        if rocksdb, ok := e.(*engine.RocksDB); ok {
            log.Infof("exterminating data from store %s", e)
            if err := rocksdb.Destroy(); err != nil {
                log.Errorf("unable to destroy store %s: %s", e, err)
                osExit(1)
            }
        }
    }
    log.Infof("exterminated all data from stores %s", context.Engines)
}
Example 10: createHealthCheck
// createHealthCheck creates the cockroach health check if it does not exist.
// Returns its resource link.
func (g *Google) createHealthCheck() (string, error) {
    if check, err := g.getHealthCheck(); err == nil {
        log.Infof("found HealthCheck %s: %s", healthCheckName, check.SelfLink)
        return check.SelfLink, nil
    }
    op, err := g.computeService.HttpHealthChecks.Insert(g.project,
        &compute.HttpHealthCheck{
            Name:               healthCheckName,
            Port:               g.context.Port,
            RequestPath:        healthCheckPath,
            CheckIntervalSec:   2,
            TimeoutSec:         1,
            HealthyThreshold:   2,
            UnhealthyThreshold: 2,
        }).Do()
    if err != nil {
        return "", err
    }
    if err = g.waitForOperation(op); err != nil {
        return "", err
    }
    log.Infof("created HealthCheck %s: %s", healthCheckName, op.TargetLink)
    return op.TargetLink, nil
}
Example 11: createFirewallRule
// createFirewallRule creates the cockroach firewall if it does not exist.
// It returns its resource link.
func (g *Google) createFirewallRule() (string, error) {
    if rule, err := g.getFirewallRule(); err == nil {
        log.Infof("found FirewallRule %s: %s", firewallRuleName, rule.SelfLink)
        return rule.SelfLink, nil
    }
    op, err := g.computeService.Firewalls.Insert(g.project,
        &compute.Firewall{
            Name: firewallRuleName,
            Allowed: []*compute.FirewallAllowed{
                {
                    IPProtocol: cockroachProtocol,
                    Ports: []string{
                        fmt.Sprintf("%d", g.context.Port),
                    },
                },
            },
            SourceRanges: []string{
                allIPAddresses,
            },
        }).Do()
    if err != nil {
        return "", err
    }
    if err = g.waitForOperation(op); err != nil {
        return "", err
    }
    log.Infof("created FirewallRule %s: %s", firewallRuleName, op.TargetLink)
    return op.TargetLink, nil
}
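Examples 10 and 11 share the same idempotent get-or-create shape: look the resource up, return its link if it exists, otherwise insert it and wait for the operation to complete. A generic sketch of that shape, with hypothetical callbacks standing in for the GCE API calls:

// getOrCreate is an illustrative reduction of createHealthCheck and
// createFirewallRule. The get, create, and wait callbacks are
// hypothetical stand-ins for the compute API calls.
func getOrCreate(get, create func() (string, error), wait func() error) (string, error) {
    if link, err := get(); err == nil {
        log.Infof("found existing resource: %s", link)
        return link, nil
    }
    link, err := create()
    if err != nil {
        return "", err
    }
    if err := wait(); err != nil {
        return "", err
    }
    log.Infof("created resource: %s", link)
    return link, nil
}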
Example 12: get
// get performs an HTTPS GET to the specified path for a specific node.
func get(t *testing.T, base, rel string) []byte {
    // TODO(bram) #2059: Remove retry logic.
    url := fmt.Sprintf("%s/%s", base, rel)
    for r := retry.Start(retryOptions); r.Next(); {
        resp, err := cluster.HTTPClient.Get(url)
        if err != nil {
            log.Infof("could not GET %s - %s", url, err)
            continue
        }
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            log.Infof("could not read body for %s - %s", url, err)
            continue
        }
        if resp.StatusCode != http.StatusOK {
            log.Infof("could not GET %s - statuscode: %d - body: %s", url, resp.StatusCode, body)
            continue
        }
        if log.V(1) {
            log.Infof("OK response from %s", url)
        }
        return body
    }
    t.Fatalf("There was an error retrieving %s", url)
    return []byte("")
}
Example 13: initStores
// initStores initializes the Stores map from ID to Store. Stores are
// added to the local sender if already bootstrapped. A bootstrapped
// Store has a valid ident with cluster, node and Store IDs set. If
// the Store doesn't yet have a valid ident, it's added to the
// bootstraps list for initialization once the cluster and node IDs
// have been determined.
func (n *Node) initStores(engines []engine.Engine, stopper *stop.Stopper) error {
    bootstraps := list.New()

    if len(engines) == 0 {
        return util.Errorf("no engines")
    }
    for _, e := range engines {
        s := storage.NewStore(n.ctx, e, &n.Descriptor)
        // Initialize each store in turn, handling un-bootstrapped errors by
        // adding the store to the bootstraps list.
        if err := s.Start(stopper); err != nil {
            if _, ok := err.(*storage.NotBootstrappedError); ok {
                log.Infof("store %s not bootstrapped", s)
                bootstraps.PushBack(s)
                continue
            }
            return util.Errorf("failed to start store: %s", err)
        }
        if s.Ident.ClusterID == "" || s.Ident.NodeID == 0 {
            return util.Errorf("unidentified store: %s", s)
        }
        capacity, err := s.Capacity()
        if err != nil {
            return util.Errorf("could not query store capacity: %s", err)
        }
        log.Infof("initialized store %s: %+v", s, capacity)
        n.stores.AddStore(s)
    }

    // Verify all initialized stores agree on cluster and node IDs.
    if err := n.validateStores(); err != nil {
        return err
    }

    // Set the stores map as the gossip persistent storage, so that
    // gossip can bootstrap using the most recently persisted set of
    // node addresses.
    if err := n.ctx.Gossip.SetStorage(n.stores); err != nil {
        return fmt.Errorf("failed to initialize the gossip interface: %s", err)
    }

    // Connect gossip before starting bootstrap. For new nodes, connecting
    // to the gossip network is necessary to get the cluster ID.
    n.connectGossip()

    // If no NodeID has been assigned yet, allocate a new node ID by
    // supplying 0 to initNodeID.
    if n.Descriptor.NodeID == 0 {
        n.initNodeID(0)
    }

    // Bootstrap any uninitialized stores asynchronously.
    if bootstraps.Len() > 0 {
        stopper.RunAsyncTask(func() {
            n.bootstrapStores(bootstraps, stopper)
        })
    }
    return nil
}
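The un-bootstrapped branch above hinges on recognizing one concrete error type via a type assertion. A minimal self-contained sketch of that pattern, using a hypothetical error type rather than the storage package's:

// NotReadyError is a hypothetical error type, standing in for
// *storage.NotBootstrappedError in the pattern above.
type NotReadyError struct{ Name string }

func (e *NotReadyError) Error() string { return e.Name + " is not ready" }

// handle reacts to one specific error kind and treats everything else
// as fatal, mirroring the shape of the initStores loop.
func handle(err error) {
    if nr, ok := err.(*NotReadyError); ok {
        log.Infof("deferring %s for later bootstrap", nr.Name)
        return
    }
    log.Fatalf("failed to start: %v", err)
}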
Example 14: checkRangeReplication
func checkRangeReplication(t *testing.T, cluster *localcluster.Cluster, d time.Duration) {
    // Always talk to node 0.
    client, dbStopper := makeDBClient(t, cluster, 0)
    defer dbStopper.Stop()

    wantedReplicas := 3
    if len(cluster.Nodes) < 3 {
        wantedReplicas = len(cluster.Nodes)
    }

    log.Infof("waiting for first range to have %d replicas", wantedReplicas)

    util.SucceedsWithin(t, d, func() error {
        select {
        case <-stopper:
            t.Fatalf("interrupted")
            return nil
        case <-time.After(1 * time.Second):
        }

        foundReplicas, err := countRangeReplicas(client)
        if err != nil {
            return err
        }
        log.Infof("found %d replicas", foundReplicas)
        if foundReplicas >= wantedReplicas {
            return nil
        }
        return fmt.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
    })
}
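util.SucceedsWithin retries the supplied closure until it returns nil or the duration elapses. A hedged, stdlib-only analogue (the one-second polling interval is chosen arbitrarily):

// succeedsWithin is an illustrative stand-in for util.SucceedsWithin:
// it retries fn until success or until d has elapsed, returning the
// last error on timeout.
func succeedsWithin(d time.Duration, fn func() error) error {
    deadline := time.Now().Add(d)
    for {
        err := fn()
        if err == nil {
            return nil
        }
        if time.Now().After(deadline) {
            return err
        }
        time.Sleep(time.Second)
    }
}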
Example 15: GetClientTLSConfig
// GetClientTLSConfig returns the context client TLS config, initializing it if needed.
// If Insecure is true, return a nil config, otherwise load a config based
// on the Certs directory. If Certs is empty, use a very permissive config.
// TODO(marc): empty Certs dir should fail when client certificates are required.
func (ctx *Context) GetClientTLSConfig() (*tls.Config, error) {
    // Early out.
    if ctx.Insecure {
        return nil, nil
    }

    ctx.tlsConfigMu.Lock()
    defer ctx.tlsConfigMu.Unlock()

    if ctx.clientTLSConfig != nil {
        return ctx.clientTLSConfig, nil
    }

    if ctx.Certs != "" {
        if log.V(1) {
            log.Infof("setting up TLS from certificates directory: %s", ctx.Certs)
        }
        cfg, err := security.LoadClientTLSConfig(ctx.Certs, ctx.User)
        if err != nil {
            return nil, util.Errorf("error setting up client TLS config: %s", err)
        }
        ctx.clientTLSConfig = cfg
    } else {
        if log.V(1) {
            log.Infof("no certificates directory specified: using insecure TLS")
        }
        ctx.clientTLSConfig = security.LoadInsecureClientTLSConfig()
    }

    return ctx.clientTLSConfig, nil
}
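The mutex-plus-nil-check above is Go's classic lazy initialization; when the value never needs recomputing after a failure, sync.Once expresses the same idea more compactly. A sketch with hypothetical names (note one behavioral difference: sync.Once also caches a failure, whereas GetClientTLSConfig above will retry on the next call):

// lazyConfig illustrates the lazy-initialization shape of
// GetClientTLSConfig using sync.Once. The build callback is a
// hypothetical stand-in for security.LoadClientTLSConfig.
type lazyConfig struct {
    once sync.Once
    cfg  *tls.Config
    err  error
}

func (l *lazyConfig) get(build func() (*tls.Config, error)) (*tls.Config, error) {
    l.once.Do(func() {
        l.cfg, l.err = build()
    })
    return l.cfg, l.err
}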