This article collects typical usage examples of the Golang Info function from github.com/cockroachdb/cockroach/pkg/util/log. If you are wondering what the Info function does, how to call it, or what its usage looks like in practice, the curated code examples below may help.
The 15 code examples of the Info function shown below are sorted by popularity by default.
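Before the individual examples, here is a minimal, self-contained sketch of the call patterns that recur below: log.Info for plain messages, log.Infof for formatted messages, and log.V(n) for verbosity gating. The surrounding main function, the port number, and the message strings are illustrative assumptions, not code taken from the examples themselves.

package main

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

func main() {
	ctx := context.Background()

	// log.Info logs its arguments at severity INFO, tagged with the context.
	log.Info(ctx, "server is starting")

	// log.Infof formats its arguments like fmt.Printf before logging.
	log.Infof(ctx, "listening on port %d", 26257)

	// log.V(n) reports whether the verbosity level is at least n, so chatty
	// diagnostic messages can be skipped in normal operation.
	if log.V(2) {
		log.Info(ctx, "verbose diagnostics enabled")
	}
}

All three forms take a context.Context as their first argument so that the log output carries whatever tags are attached to that context.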
Example 1: Run
func (at *allocatorTest) Run(ctx context.Context, t *testing.T) {
at.f = MakeFarmer(t, at.Prefix, stopper)
if at.CockroachDiskSizeGB != 0 {
at.f.AddVars["cockroach_disk_size"] = strconv.Itoa(at.CockroachDiskSizeGB)
}
log.Infof(ctx, "creating cluster with %d node(s)", at.StartNodes)
if err := at.f.Resize(at.StartNodes); err != nil {
t.Fatal(err)
}
CheckGossip(ctx, t, at.f, longWaitTime, HasPeers(at.StartNodes))
at.f.Assert(ctx, t)
log.Info(ctx, "initial cluster is up")
// We must stop the cluster because a) `nodectl` pokes at the data directory
// and, more importantly, b) we don't want the cluster above and the cluster
// below to ever talk to each other (see #7224).
log.Info(ctx, "stopping cluster")
for i := 0; i < at.f.NumNodes(); i++ {
if err := at.f.Kill(ctx, i); err != nil {
t.Fatalf("error stopping node %d: %s", i, err)
}
}
log.Info(ctx, "downloading archived stores from Google Cloud Storage in parallel")
errors := make(chan error, at.f.NumNodes())
for i := 0; i < at.f.NumNodes(); i++ {
go func(nodeNum int) {
errors <- at.f.Exec(nodeNum, "./nodectl download "+at.StoreURL)
}(i)
}
for i := 0; i < at.f.NumNodes(); i++ {
if err := <-errors; err != nil {
t.Fatalf("error downloading store %d: %s", i, err)
}
}
log.Info(ctx, "restarting cluster with archived store(s)")
for i := 0; i < at.f.NumNodes(); i++ {
if err := at.f.Restart(ctx, i); err != nil {
t.Fatalf("error restarting node %d: %s", i, err)
}
}
at.f.Assert(ctx, t)
log.Infof(ctx, "resizing cluster to %d nodes", at.EndNodes)
if err := at.f.Resize(at.EndNodes); err != nil {
t.Fatal(err)
}
CheckGossip(ctx, t, at.f, longWaitTime, HasPeers(at.EndNodes))
at.f.Assert(ctx, t)
log.Info(ctx, "waiting for rebalance to finish")
if err := at.WaitForRebalance(ctx, t); err != nil {
t.Fatal(err)
}
at.f.Assert(ctx, t)
}
Example 2: Stop
// Stop signals all live workers to stop and then waits for each to
// confirm it has stopped.
func (s *Stopper) Stop() {
defer s.Recover()
defer unregister(s)
log.Info(context.TODO(), "stop has been called, stopping or quiescing all running tasks")
// Don't bother doing stuff cleanly if we're panicking; that would likely
// block. Instead, best effort only. This cleans up the stack traces,
// avoids stalls, and helps some tests in `./cli` finish cleanly (where
// panics happen on purpose).
if r := recover(); r != nil {
go s.Quiesce()
close(s.stopper)
close(s.stopped)
s.mu.Lock()
for _, c := range s.mu.closers {
go c.Close()
}
s.mu.Unlock()
panic(r)
}
s.Quiesce()
close(s.stopper)
s.stop.Wait()
s.mu.Lock()
defer s.mu.Unlock()
for _, c := range s.mu.closers {
c.Close()
}
close(s.stopped)
}
Example 3: process
func (rq *replicateQueue) process(
ctx context.Context, now hlc.Timestamp, repl *Replica, sysCfg config.SystemConfig,
) error {
retryOpts := retry.Options{
InitialBackoff: 50 * time.Millisecond,
MaxBackoff: 1 * time.Second,
Multiplier: 2,
MaxRetries: 5,
}
// Use a retry loop in order to back off in the case of preemptive
// snapshot errors, which usually signal that a rebalancing
// reservation could not be made with the selected target.
for r := retry.StartWithCtx(ctx, retryOpts); r.Next(); {
if err := rq.processOneChange(ctx, now, repl, sysCfg); err != nil {
if IsPreemptiveSnapshotError(err) {
// If ChangeReplicas failed because the preemptive snapshot failed, we
// log the error and retry after the backoff. The most likely causes of
// the preemptive snapshot failing are a declined reservation or the
// remote node being unavailable. In either case we don't want to wait
// another scanner cycle before reconsidering the range.
log.Info(ctx, err)
continue
}
return err
}
// Enqueue this replica again to see if there are more changes to be made.
rq.MaybeAdd(repl, rq.clock.Now())
return nil
}
return errors.Errorf("failed to replicate %s after %d retries", repl, retryOpts.MaxRetries)
}
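The retry loop in Example 3 pairs exponential backoff with log.Info called directly on an error value. The following is a minimal, compilable distillation of that pattern under stated assumptions: doWork, isRetryable, and retryWithLogging are hypothetical stand-ins for processOneChange, IsPreemptiveSnapshotError, and the queue's process method, while retry.Options, retry.StartWithCtx, and errors.Errorf are the same helpers used above.

package example

import (
	"context"
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/retry"
	"github.com/pkg/errors"
)

// doWork and isRetryable are hypothetical placeholders for the real
// processOneChange and IsPreemptiveSnapshotError used in Example 3.
func doWork(ctx context.Context) error { return nil }
func isRetryable(err error) bool       { return false }

func retryWithLogging(ctx context.Context) error {
	opts := retry.Options{
		InitialBackoff: 50 * time.Millisecond,
		MaxBackoff:     1 * time.Second,
		Multiplier:     2,
		MaxRetries:     5,
	}
	for r := retry.StartWithCtx(ctx, opts); r.Next(); {
		if err := doWork(ctx); err != nil {
			if isRetryable(err) {
				// log.Info accepts arbitrary values, so the error can be
				// logged directly before backing off and retrying.
				log.Info(ctx, err)
				continue
			}
			return err
		}
		return nil
	}
	return errors.Errorf("work did not succeed after %d retries", opts.MaxRetries)
}

Example 10 below applies the same idea without the loop: the retryable case logs the error and returns nil so that the caller's queue reconsiders the range later.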
Example 4: Close
func (c *sqlConn) Close() {
if c.conn != nil {
err := c.conn.Close()
if err != nil && err != driver.ErrBadConn {
log.Info(context.TODO(), err)
}
c.conn = nil
}
}
Example 5: deleteAllRows
// deleteAllRows runs the kv operations necessary to delete all sql rows in the
// table passed at construction. This may require a scan.
//
// resume is the resume-span which should be used for the table deletion when
// the table deletion is chunked. The first call to this method should use a
// zero resume-span. After a chunk is deleted a new resume-span is returned.
//
// limit is a limit on either the number of keys or table-rows (for
// interleaved tables) deleted in the operation.
func (td *tableDeleter) deleteAllRows(
ctx context.Context, resume roachpb.Span, limit int64,
) (roachpb.Span, error) {
if td.rd.helper.tableDesc.IsInterleaved() {
if log.V(2) {
log.Info(ctx, "delete forced to scan: table is interleaved")
}
return td.deleteAllRowsScan(ctx, resume, limit)
}
return td.deleteAllRowsFast(ctx, resume, limit)
}
Example 6: deleteIndex
// deleteIndex runs the kv operations necessary to delete all kv entries in the
// given index. This may require a scan.
//
// resume is the resume-span which should be used for the index deletion
// when the index deletion is chunked. The first call to this method should
// use a zero resume-span. After a chunk of the index is deleted a new resume-
// span is returned.
//
// limit is a limit on the number of index entries deleted in the operation.
func (td *tableDeleter) deleteIndex(
ctx context.Context, idx *sqlbase.IndexDescriptor, resume roachpb.Span, limit int64,
) (roachpb.Span, error) {
if len(idx.Interleave.Ancestors) > 0 || len(idx.InterleavedBy) > 0 {
if log.V(2) {
log.Info(ctx, "delete forced to scan: table is interleaved")
}
return td.deleteIndexScan(ctx, idx, resume, limit)
}
return td.deleteIndexFast(ctx, idx, resume, limit)
}
Example 7: fastPathAvailable
// fastPathAvailable returns true if the fastDelete optimization can be used.
func (td *tableDeleter) fastPathAvailable(ctx context.Context) bool {
if len(td.rd.helper.indexes) != 0 {
if log.V(2) {
log.Infof(ctx, "delete forced to scan: values required to update %d secondary indexes", len(td.rd.helper.indexes))
}
return false
}
if td.rd.helper.tableDesc.IsInterleaved() {
if log.V(2) {
log.Info(ctx, "delete forced to scan: table is interleaved")
}
return false
}
if len(td.rd.helper.tableDesc.PrimaryIndex.ReferencedBy) > 0 {
if log.V(2) {
log.Info(ctx, "delete forced to scan: table is referenced by foreign keys")
}
return false
}
return true
}
Example 8: getCompletedMigrations
func (m *Manager) getCompletedMigrations(ctx context.Context) (map[string]struct{}, error) {
if log.V(1) {
log.Info(ctx, "trying to get the list of completed migrations")
}
keyvals, err := m.db.Scan(ctx, keys.MigrationPrefix, keys.MigrationKeyMax, 0 /* maxRows */)
if err != nil {
return nil, errors.Wrapf(err, "failed to get list of completed migrations")
}
completedMigrations := make(map[string]struct{})
for _, keyval := range keyvals {
completedMigrations[string(keyval.Key)] = struct{}{}
}
return completedMigrations, nil
}
Example 9: addHistory
// addHistory persists a line of input to the readline history
// file.
func (c *cliState) addHistory(line string) {
if !isInteractive {
return
}
// ins.SaveHistory will push the command into memory and try to
// persist it to disk (if ins's config.HistoryFile is set). err can
// be non-nil only if an I/O error occurred while trying to persist.
if err := c.ins.SaveHistory(line); err != nil {
log.Warningf(context.TODO(), "cannot save command-line history: %s", err)
log.Info(context.TODO(), "command-line history will not be saved in this session")
cfg := c.ins.Config.Clone()
cfg.HistoryFile = ""
c.ins.SetConfig(cfg)
}
}
Example 10: addReplica
func (rq *replicateQueue) addReplica(
ctx context.Context,
repl *Replica,
repDesc roachpb.ReplicaDescriptor,
desc *roachpb.RangeDescriptor,
) error {
err := repl.ChangeReplicas(ctx, roachpb.ADD_REPLICA, repDesc, desc)
if IsPreemptiveSnapshotError(err) {
// If the ChangeReplicas failed because the preemptive snapshot failed, we
// log the error but then return success indicating we should retry the
// operation. The most likely causes of the preemptive snapshot failing are
// a declined reservation or the remote node being unavailable. In either
// case we don't want to wait another scanner cycle before reconsidering
// the range.
log.Info(ctx, err)
return nil
}
return err
}
Example 11: doStart
func (c *cliState) doStart(nextState cliStateEnum) cliStateEnum {
// Common initialization.
c.syntax = parser.Traditional
c.querySyntax = true
c.partialLines = []string{}
if isInteractive {
c.fullPrompt, c.continuePrompt = preparePrompts(c.conn.url)
// We only enable history management when the terminal is actually
// interactive. This saves on memory when e.g. piping a large SQL
// script through the command-line client.
userAcct, err := user.Current()
if err != nil {
if log.V(2) {
log.Warningf(context.TODO(), "cannot retrieve user information: %s", err)
log.Info(context.TODO(), "cannot load or save the command-line history")
}
} else {
histFile := filepath.Join(userAcct.HomeDir, cmdHistFile)
cfg := c.ins.Config.Clone()
cfg.HistoryFile = histFile
cfg.HistorySearchFold = true
c.ins.SetConfig(cfg)
}
// The user only gets to see the info screen in interactive sessions.
fmt.Print(infoMessage)
c.checkSyntax = true
c.normalizeHistory = true
c.errExit = false
} else {
// When running non-interactive, by default we want errors to stop
// further processing and all syntax checking to be done
// server-side.
c.errExit = true
c.checkSyntax = false
}
return nextState
}
Example 12: EnsureMigrations
// EnsureMigrations should be run during node startup to ensure that all
// required migrations have been run (and to run all those that are
// definitely safe to run).
func (m *Manager) EnsureMigrations(ctx context.Context) error {
// First, check whether there are any migrations that need to be run.
completedMigrations, err := m.getCompletedMigrations(ctx)
if err != nil {
return err
}
allMigrationsCompleted := true
for _, migration := range backwardCompatibleMigrations {
key := migrationKey(migration)
if _, ok := completedMigrations[string(key)]; !ok {
allMigrationsCompleted = false
}
}
if allMigrationsCompleted {
return nil
}
// If there are any, grab the migration lease to ensure that only one
// node is ever doing migrations at a time.
// Note that we shouldn't ever let client.LeaseNotAvailableErrors cause us
// to stop trying, because if we return an error the server will be shut down,
// and this server being down may prevent the leaseholder from finishing.
var lease *client.Lease
if log.V(1) {
log.Info(ctx, "trying to acquire lease")
}
for r := retry.StartWithCtx(ctx, base.DefaultRetryOptions()); r.Next(); {
lease, err = m.leaseManager.AcquireLease(ctx, keys.MigrationLease)
if err == nil {
break
}
log.Errorf(ctx, "failed attempt to acquire migration lease: %s", err)
}
if err != nil {
return errors.Wrapf(err, "failed to acquire lease for running necessary migrations")
}
// Ensure that we hold the lease throughout the migration process and release
// it when we're done.
done := make(chan interface{}, 1)
defer func() {
done <- nil
if log.V(1) {
log.Info(ctx, "trying to release the lease")
}
if err := m.leaseManager.ReleaseLease(ctx, lease); err != nil {
log.Errorf(ctx, "failed to release migration lease: %s", err)
}
}()
if err := m.stopper.RunAsyncTask(ctx, func(ctx context.Context) {
select {
case <-done:
return
case <-time.After(leaseRefreshInterval):
if err := m.leaseManager.ExtendLease(ctx, lease); err != nil {
log.Warningf(ctx, "unable to extend ownership of expiration lease: %s", err)
}
if m.leaseManager.TimeRemaining(lease) < leaseRefreshInterval {
// Note that we may be able to do better than this by influencing the
// deadline of migrations' transactions based on the lease expiration
// time, but simply kill the process for now for the sake of simplicity.
log.Fatal(ctx, "not enough time left on migration lease, terminating for safety")
}
}
}); err != nil {
return err
}
// Re-get the list of migrations in case any of them were completed between
// our initial check and our grabbing of the lease.
completedMigrations, err = m.getCompletedMigrations(ctx)
if err != nil {
return err
}
startTime := timeutil.Now().String()
r := runner{
db: m.db,
sqlExecutor: m.sqlExecutor,
}
for _, migration := range backwardCompatibleMigrations {
key := migrationKey(migration)
if _, ok := completedMigrations[string(key)]; ok {
continue
}
if log.V(1) {
log.Infof(ctx, "running migration %q", migration.name)
}
if err := migration.workFn(ctx, r); err != nil {
return errors.Wrapf(err, "failed to run migration %q", migration.name)
}
if log.V(1) {
log.Infof(ctx, "trying to persist record of completing migration %s", migration.name)
}
if err := m.db.Put(ctx, key, startTime); err != nil {
//......... portions of this code omitted .........
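Example 12 is truncated, but the shape it demonstrates, acquire a lease, keep extending it from a background task while the work runs, and release it when done, can be reduced to the hedged sketch below. The helper name withKeepAlive and the extend, release, and work callbacks are hypothetical; only the log calls and the done-channel/timer structure mirror the example above.

package example

import (
	"context"
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// withKeepAlive is a hypothetical distillation of the keep-alive pattern in
// Example 12: run work while a background goroutine periodically extends the
// underlying lease, then release the lease once the work finishes.
func withKeepAlive(
	ctx context.Context,
	refreshEvery time.Duration,
	extend, release func(context.Context) error,
	work func(context.Context) error,
) error {
	done := make(chan struct{})
	defer func() {
		close(done) // stop the refresher goroutine
		if log.V(1) {
			log.Info(ctx, "trying to release the lease")
		}
		if err := release(ctx); err != nil {
			log.Errorf(ctx, "failed to release lease: %s", err)
		}
	}()
	go func() {
		for {
			select {
			case <-done:
				return
			case <-time.After(refreshEvery):
				if err := extend(ctx); err != nil {
					log.Warningf(ctx, "unable to extend lease: %s", err)
				}
			}
		}
	}()
	if log.V(1) {
		log.Info(ctx, "holding lease while running work")
	}
	return work(ctx)
}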
Example 13: Start
//......... portions of this code omitted .........
return errors.Wrap(err, "failed to create engines")
}
s.stopper.AddCloser(&s.engines)
// We might have to sleep a bit to protect against this node producing non-
// monotonic timestamps. Before restarting, its clock might have been driven
// by other nodes' fast clocks, but when we restarted, we lost all this
// information. For example, a client might have written a value at a
// timestamp that's in the future of the restarted node's clock, and if we
// don't do something, the same client's read would not return the written
// value. So, we wait up to MaxOffset; we couldn't have served timestamps more
// than MaxOffset in the future (assuming that MaxOffset was not changed, see
// #9733).
//
// As an optimization for tests, we don't sleep if all the stores are brand
// new. In this case, the node will not serve anything anyway until it
// synchronizes with other nodes.
{
anyStoreBootstrapped := false
for _, e := range s.engines {
if _, err := storage.ReadStoreIdent(ctx, e); err != nil {
// NotBootstrappedError is expected.
if _, ok := err.(*storage.NotBootstrappedError); !ok {
return err
}
} else {
anyStoreBootstrapped = true
break
}
}
if anyStoreBootstrapped {
sleepDuration := s.clock.MaxOffset() - timeutil.Since(startTime)
if sleepDuration > 0 {
log.Infof(ctx, "sleeping for %s to guarantee HLC monotonicity", sleepDuration)
time.Sleep(sleepDuration)
}
}
}
// Now that we have a monotonic HLC wrt previous incarnations of the process,
// init all the replicas.
err = s.node.start(
ctx,
unresolvedAdvertAddr,
s.engines,
s.cfg.NodeAttributes,
s.cfg.Locality,
)
if err != nil {
return err
}
log.Event(ctx, "started node")
// We can now add the node registry.
s.recorder.AddNode(s.registry, s.node.Descriptor, s.node.startedAt)
// Begin recording runtime statistics.
s.startSampleEnvironment(s.cfg.MetricsSampleInterval)
// Begin recording time series data collected by the status monitor.
s.tsDB.PollSource(
s.cfg.AmbientCtx, s.recorder, s.cfg.MetricsSampleInterval, ts.Resolution10s, s.stopper,
)
// Begin recording status summaries.
s.node.startWriteSummaries(s.cfg.MetricsSampleInterval)
Example 14: RefreshLeases
// RefreshLeases starts a goroutine that refreshes the lease manager
// leases for tables received in the latest system configuration via gossip.
func (m *LeaseManager) RefreshLeases(s *stop.Stopper, db *client.DB, gossip *gossip.Gossip) {
s.RunWorker(func() {
descKeyPrefix := keys.MakeTablePrefix(uint32(sqlbase.DescriptorTable.ID))
gossipUpdateC := gossip.RegisterSystemConfigChannel()
for {
select {
case <-gossipUpdateC:
cfg, _ := gossip.GetSystemConfig()
if m.testingKnobs.GossipUpdateEvent != nil {
m.testingKnobs.GossipUpdateEvent(cfg)
}
// Read all tables and their versions
if log.V(2) {
log.Info(context.TODO(), "received a new config; will refresh leases")
}
// Loop through the configuration to find all the tables.
for _, kv := range cfg.Values {
if !bytes.HasPrefix(kv.Key, descKeyPrefix) {
continue
}
// Attempt to unmarshal config into a table/database descriptor.
var descriptor sqlbase.Descriptor
if err := kv.Value.GetProto(&descriptor); err != nil {
log.Warningf(context.TODO(), "%s: unable to unmarshal descriptor %v", kv.Key, kv.Value)
continue
}
switch union := descriptor.Union.(type) {
case *sqlbase.Descriptor_Table:
table := union.Table
table.MaybeUpgradeFormatVersion()
if err := table.ValidateTable(); err != nil {
log.Errorf(context.TODO(), "%s: received invalid table descriptor: %v", kv.Key, table)
continue
}
if log.V(2) {
log.Infof(context.TODO(), "%s: refreshing lease table: %d (%s), version: %d, deleted: %t",
kv.Key, table.ID, table.Name, table.Version, table.Dropped())
}
// Try to refresh the table lease to one >= this version.
if t := m.findTableState(table.ID, false /* create */); t != nil {
if err := t.purgeOldLeases(
db, table.Dropped(), table.Version, m.LeaseStore); err != nil {
log.Warningf(context.TODO(), "error purging leases for table %d(%s): %s",
table.ID, table.Name, err)
}
}
case *sqlbase.Descriptor_Database:
// Ignore.
}
}
if m.testingKnobs.TestingLeasesRefreshedEvent != nil {
m.testingKnobs.TestingLeasesRefreshedEvent(cfg)
}
case <-s.ShouldStop():
return
}
}
})
}
Example 15: Start
// Start starts a goroutine that runs outstanding schema changes
// for tables received in the latest system configuration via gossip.
func (s *SchemaChangeManager) Start(stopper *stop.Stopper) {
stopper.RunWorker(func() {
descKeyPrefix := keys.MakeTablePrefix(uint32(sqlbase.DescriptorTable.ID))
gossipUpdateC := s.gossip.RegisterSystemConfigChannel()
timer := &time.Timer{}
delay := 360 * time.Second
if s.testingKnobs.AsyncExecQuickly {
delay = 20 * time.Millisecond
}
for {
select {
case <-gossipUpdateC:
cfg, _ := s.gossip.GetSystemConfig()
// Read all tables and their versions
if log.V(2) {
log.Info(context.TODO(), "received a new config")
}
schemaChanger := SchemaChanger{
nodeID: s.leaseMgr.nodeID.Get(),
db: s.db,
leaseMgr: s.leaseMgr,
testingKnobs: s.testingKnobs,
}
// Keep track of existing schema changers.
oldSchemaChangers := make(map[sqlbase.ID]struct{}, len(s.schemaChangers))
for k := range s.schemaChangers {
oldSchemaChangers[k] = struct{}{}
}
execAfter := timeutil.Now().Add(delay)
// Loop through the configuration to find all the tables.
for _, kv := range cfg.Values {
if !bytes.HasPrefix(kv.Key, descKeyPrefix) {
continue
}
// Attempt to unmarshal config into a table/database descriptor.
var descriptor sqlbase.Descriptor
if err := kv.Value.GetProto(&descriptor); err != nil {
log.Warningf(context.TODO(), "%s: unable to unmarshal descriptor %v", kv.Key, kv.Value)
continue
}
switch union := descriptor.Union.(type) {
case *sqlbase.Descriptor_Table:
table := union.Table
table.MaybeUpgradeFormatVersion()
if err := table.ValidateTable(); err != nil {
log.Errorf(context.TODO(), "%s: received invalid table descriptor: %v", kv.Key, table)
continue
}
// Keep track of outstanding schema changes.
// If all schema change commands always set UpVersion, why
// check for the presence of mutations?
// A schema change execution might fail soon after
// unsetting UpVersion, and we still want to process
// outstanding mutations. The same applies to a table marked for deletion.
if table.UpVersion || table.Dropped() || table.Adding() ||
table.Renamed() || len(table.Mutations) > 0 {
if log.V(2) {
log.Infof(context.TODO(), "%s: queue up pending schema change; table: %d, version: %d",
kv.Key, table.ID, table.Version)
}
// Only track the first schema change. We depend on
// gossip to renotify us when a schema change has been
// completed.
schemaChanger.tableID = table.ID
if len(table.Mutations) == 0 {
schemaChanger.mutationID = sqlbase.InvalidMutationID
} else {
schemaChanger.mutationID = table.Mutations[0].MutationID
}
schemaChanger.execAfter = execAfter
// Keep track of this schema change.
// Remove from oldSchemaChangers map.
delete(oldSchemaChangers, table.ID)
if sc, ok := s.schemaChangers[table.ID]; ok {
if sc.mutationID == schemaChanger.mutationID {
// Ignore duplicate.
continue
}
}
s.schemaChangers[table.ID] = schemaChanger
}
case *sqlbase.Descriptor_Database:
// Ignore.
}
}
// Delete old schema changers.
for k := range oldSchemaChangers {
delete(s.schemaChangers, k)
}
timer = s.newTimer()
case <-timer.C:
if s.testingKnobs.AsyncExecNotification != nil &&
s.testingKnobs.AsyncExecNotification() != nil {
timer = s.newTimer()
//......... portions of this code omitted .........