This article collects typical usage examples of the Golang Infof function from github.com/ngaut/log. If you are wondering what Golang Infof does, how to call it, or what it looks like in real code, the curated samples here may help.
The following shows 15 code examples of the Infof function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
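Before the project excerpts, here is a minimal sketch of a direct Infof call. It is not taken from any of the examples below; the package import path comes from the title, and the printf-style signature (a format string followed by variadic values) is assumed from how the examples use it.

package main

import (
    "github.com/ngaut/log"
)

func main() {
    // Infof renders its arguments printf-style (like fmt.Sprintf) and
    // writes the resulting message at the info level.
    addr := "127.0.0.1:2379"
    retries := 3
    log.Infof("connected to %s after %d retries", addr, retries)
}

All 15 examples below follow this same pattern, usually with a component tag such as [pd], [ddl], or [gc worker] prefixed to the format string.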
Example 1: watchLeader
func (c *client) watchLeader(leaderPath string, revision int64) {
    defer c.wg.Done()
WATCH:
    for {
        log.Infof("[pd] start watch pd leader on path %v, revision %v", leaderPath, revision)
        rch := c.etcdClient.Watch(context.Background(), leaderPath, clientv3.WithRev(revision))
        select {
        case resp := <-rch:
            if resp.Canceled {
                log.Warn("[pd] leader watcher canceled")
                continue WATCH
            }
            leaderAddr, rev, err := getLeader(c.etcdClient, leaderPath)
            if err != nil {
                log.Warn(err)
                continue WATCH
            }
            log.Infof("[pd] found new pd-server leader addr: %v", leaderAddr)
            c.workerMutex.Lock()
            c.worker.stop(errors.New("[pd] leader change"))
            c.worker = newRPCWorker(leaderAddr, c.clusterID)
            c.workerMutex.Unlock()
            revision = rev
        case <-c.quit:
            return
        }
    }
}
Example 2: run
func (h *Handler) run() error {
    log.Infof("open listen address '%s' and start service", h.l.Addr())
    for {
        if nc, err := h.l.Accept(); err != nil {
            return errors.Trace(err)
        } else {
            h.counters.clientsAccepted.Add(1)
            go func() {
                h.counters.clients.Add(1)
                defer h.counters.clients.Sub(1)
                c := newConn(nc, h, h.config.ConnTimeout)
                log.Infof("new connection: %s", c)
                if err := c.serve(h); err != nil {
                    if errors.Cause(err) == io.EOF {
                        log.Infof("connection lost: %s [io.EOF]", c)
                    } else {
                        log.Warningf("connection lost: %s, err = %s", c, err)
                    }
                } else {
                    log.Infof("connection exit: %s", c)
                }
            }()
        }
    }
    return nil
}
Example 3: Reset
func (s *Store) Reset() error {
    if err := s.acquire(); err != nil {
        return err
    }
    defer s.release()
    log.Infof("store is reseting...")
    for i := s.splist.Len(); i != 0; i-- {
        v := s.splist.Remove(s.splist.Front()).(*StoreSnapshot)
        v.Close()
    }
    for i := s.itlist.Len(); i != 0; i-- {
        v := s.itlist.Remove(s.itlist.Front()).(*storeIterator)
        v.Close()
    }
    if err := s.db.Clear(); err != nil {
        s.db.Close()
        s.db = nil
        log.Errorf("store reset failed - %s", err)
        return err
    } else {
        s.serial++
        log.Infof("store is reset")
        return nil
    }
}
Example 4: needFullReSync
// needFullReSync returns false and the slave's sync offset when a partial
// resync is possible; otherwise it returns true and a full resync is needed.
func (h *Handler) needFullReSync(c *conn, args [][]byte) (bool, int64) {
    masterRunID := args[0]
    if !bytes.EqualFold(masterRunID, h.runID) {
        if !bytes.Equal(masterRunID, []byte{'?'}) {
            log.Infof("Partial resynchronization not accepted, runid mismatch, server is %s, but client is %s", h.runID, masterRunID)
        } else {
            log.Infof("Full resync requested by slave.")
        }
        return true, 0
    }
    syncOffset, err := strconv.ParseInt(string(args[1]), 10, 64)
    if err != nil {
        log.Errorf("PSYNC parse sync offset err, try full resync - %s", err)
        return true, 0
    }
    r := &h.repl
    h.repl.RLock()
    defer h.repl.RUnlock()
    if r.backlogBuf == nil || syncOffset < r.backlogOffset ||
        syncOffset > (r.backlogOffset+int64(r.backlogBuf.Len())) {
        log.Infof("unable to partial resync with the slave for lack of backlog, slave offset %d", syncOffset)
        if syncOffset > r.masterOffset {
            log.Infof("slave tried to PSYNC with an offset %d larger than master offset %d", syncOffset, r.masterOffset)
        }
        return true, 0
    }
    return false, syncOffset
}
Example 5: run
func (c *Customer) run() error {
    ticker := time.NewTicker(1 * time.Second)
    cnt := 0
    defer func() {
        ticker.Stop()
        wg.Done()
        log.Infof("Customer_%s QUIT succ", c.id)
    }()
    for {
        select {
        case n := <-c.recvCh:
            c.balance += n
        case <-ticker.C:
            cnt += 1
            if cnt > maxActionCnt {
                return nil
            }
            log.Infof("[Customer_%s] round %d", c.id, cnt)
            err := c.randomDo()
            if err != nil {
                log.Errorf("ERRORRRRR!!!!")
                return errors.Trace(err)
            }
        }
    }
}
Example 6: start
func (w *GCWorker) start() {
    log.Infof("[gc worker] %s start.", w.uuid)
    ticker := time.NewTicker(gcWorkerTickInterval)
    for {
        select {
        case <-ticker.C:
            isLeader, err := w.checkLeader()
            if err != nil {
                log.Warnf("[gc worker] check leader err: %v", err)
                break
            }
            if isLeader {
                err = w.leaderTick()
                if err != nil {
                    log.Warnf("[gc worker] leader tick err: %v", err)
                }
            }
        case err := <-w.done:
            w.gcIsRunning = false
            w.lastFinish = time.Now()
            if err != nil {
                log.Errorf("[gc worker] runGCJob error: %v", err)
                break
            }
        case <-w.quit:
            log.Infof("[gc worker] (%s) quit.", w.uuid)
            return
        }
    }
}
Example 7: loadInfoSchema
// loadInfoSchema loads the infoschema at startTS into the handle. usedSchemaVersion is the
// infoschema version currently in use; if it matches the schema version at startTS, there
// is no need to reload.
func (do *Domain) loadInfoSchema(handle *infoschema.Handle, usedSchemaVersion int64, startTS uint64) error {
    snapshot, err := do.store.GetSnapshot(kv.NewVersion(startTS))
    if err != nil {
        return errors.Trace(err)
    }
    m := meta.NewSnapshotMeta(snapshot)
    latestSchemaVersion, err := m.GetSchemaVersion()
    if err != nil {
        return errors.Trace(err)
    }
    if usedSchemaVersion != 0 && usedSchemaVersion == latestSchemaVersion {
        log.Debugf("[ddl] schema version is still %d, no need reload", usedSchemaVersion)
        return nil
    }
    ok, err := do.tryLoadSchemaDiffs(m, usedSchemaVersion, latestSchemaVersion)
    if err != nil {
        // We can fall back to a full load, so there is no need to return the error.
        log.Errorf("[ddl] failed to load schema diff %v", err)
    }
    if ok {
        log.Infof("[ddl] diff load InfoSchema from version %d to %d", usedSchemaVersion, latestSchemaVersion)
        return nil
    }
    schemas, err := do.getAllSchemasWithTablesFromMeta(m)
    if err != nil {
        return errors.Trace(err)
    }
    newISBuilder, err := infoschema.NewBuilder(handle).InitWithDBInfos(schemas, latestSchemaVersion)
    if err != nil {
        return errors.Trace(err)
    }
    log.Infof("[ddl] full load InfoSchema from version %d to %d", usedSchemaVersion, latestSchemaVersion)
    return newISBuilder.Build()
}
Example 8: CheckConfigUpdate
func (c *Conf) CheckConfigUpdate() {
    if c.proxyConfig.Global.ConfAutoload == 1 {
        for {
            time.Sleep(time.Minute)
            log.Infof("CheckConfigUpdate checking")
            fileinfo, err := os.Stat(c.path)
            if err != nil {
                log.Errorf("CheckConfigUpdate error %s", err.Error())
                continue
            }
            // The config file has been modified since the last load.
            if c.lastModifiedTime.Before(fileinfo.ModTime()) {
                log.Infof("CheckConfigUpdate config change and load new config")
                defaultProxyConfig := getDefaultProxyConfig()
                err = c.parseConfigFile(defaultProxyConfig)
                if err != nil {
                    log.Errorf("CheckConfigUpdate error %s", err.Error())
                    continue
                }
                c.lastModifiedTime = fileinfo.ModTime()
                // Other goroutines read proxyConfig, so guard the swap with the mutex.
                c.mu.Lock()
                c.proxyConfig = defaultProxyConfig
                c.mu.Unlock()
                log.Infof("CheckConfigUpdate new config load success")
            }
        }
    }
}
Example 9: doDDLJob
func (d *ddl) doDDLJob(ctx context.Context, job *model.Job) error {
    // For every DDL, we must commit the current transaction.
    if err := ctx.CommitTxn(); err != nil {
        return errors.Trace(err)
    }
    // Get a global job ID and put the DDL job in the queue.
    err := d.addDDLJob(ctx, job)
    if err != nil {
        return errors.Trace(err)
    }
    // Notify the worker that a new job has been pushed, then wait for the job to finish.
    asyncNotify(d.ddlJobCh)
    log.Infof("[ddl] start DDL job %s", job)
    var historyJob *model.Job
    jobID := job.ID
    // From start to end, a job's state goes none -> delete only -> write only -> reorganization -> public.
    // Every state change waits up to 2 * lease, so the ticker here checks at 10 * lease.
    ticker := time.NewTicker(chooseLeaseTime(10*d.lease, 10*time.Second))
    startTime := time.Now()
    jobsGauge.WithLabelValues(JobType(ddlJobFlag).String(), job.Type.String()).Inc()
    defer func() {
        ticker.Stop()
        jobsGauge.WithLabelValues(JobType(ddlJobFlag).String(), job.Type.String()).Dec()
        retLabel := handleJobSucc
        if err != nil {
            retLabel = handleJobFailed
        }
        handleJobHistogram.WithLabelValues(JobType(ddlJobFlag).String(), job.Type.String(),
            retLabel).Observe(time.Since(startTime).Seconds())
    }()
    for {
        select {
        case <-d.ddlJobDoneCh:
        case <-ticker.C:
        }
        historyJob, err = d.getHistoryDDLJob(jobID)
        if err != nil {
            log.Errorf("[ddl] get history DDL job err %v, check again", err)
            continue
        } else if historyJob == nil {
            log.Warnf("[ddl] DDL job %d is not in history, maybe not run", jobID)
            continue
        }
        // If a job is a history job, its state must be JobDone or JobCancel.
        if historyJob.State == model.JobDone {
            log.Infof("[ddl] DDL job %d is finished", jobID)
            return nil
        }
        return errors.Trace(historyJob.Error)
    }
}
Example 10: execute
// execute executes the two-phase commit protocol.
func (c *twoPhaseCommitter) execute() error {
    ctx := context.Background()
    defer func() {
        // Always clean up all written keys if the txn does not commit.
        c.mu.RLock()
        writtenKeys := c.mu.writtenKeys
        committed := c.mu.committed
        c.mu.RUnlock()
        if !committed {
            go func() {
                err := c.cleanupKeys(NewBackoffer(cleanupMaxBackoff, ctx), writtenKeys)
                if err != nil {
                    log.Infof("2PC cleanup err: %v, tid: %d", err, c.startTS)
                } else {
                    log.Infof("2PC clean up done, tid: %d", c.startTS)
                }
            }()
        }
    }()
    binlogChan := c.prewriteBinlog()
    err := c.prewriteKeys(NewBackoffer(prewriteMaxBackoff, ctx), c.keys)
    if binlogChan != nil {
        binlogErr := <-binlogChan
        if binlogErr != nil {
            return errors.Trace(binlogErr)
        }
    }
    if err != nil {
        log.Warnf("2PC failed on prewrite: %v, tid: %d", err, c.startTS)
        return errors.Trace(err)
    }
    commitTS, err := c.store.getTimestampWithRetry(NewBackoffer(tsoMaxBackoff, ctx))
    if err != nil {
        log.Warnf("2PC get commitTS failed: %v, tid: %d", err, c.startTS)
        return errors.Trace(err)
    }
    c.commitTS = commitTS
    if c.store.oracle.IsExpired(c.startTS, maxTxnTimeUse) {
        err = errors.Errorf("txn takes too much time, start: %d, commit: %d", c.startTS, c.commitTS)
        return errors.Annotate(err, txnRetryableMark)
    }
    err = c.commitKeys(NewBackoffer(commitMaxBackoff, ctx), c.keys)
    if err != nil {
        if !c.mu.committed {
            log.Warnf("2PC failed on commit: %v, tid: %d", err, c.startTS)
            return errors.Trace(err)
        }
        log.Warnf("2PC succeed with error: %v, tid: %d", err, c.startTS)
    }
    return nil
}
Example 11: DoGC
// DoGC sends the GC command to KV. It is exported for testing purposes.
func (w *GCWorker) DoGC(safePoint uint64) error {
    gcWorkerCounter.WithLabelValues("do_gc").Inc()
    req := &kvrpcpb.Request{
        Type: kvrpcpb.MessageType_CmdGC,
        CmdGcReq: &kvrpcpb.CmdGCRequest{
            SafePoint: safePoint,
        },
    }
    bo := NewBackoffer(gcMaxBackoff, goctx.Background())
    log.Infof("[gc worker] %s start gc, safePoint: %v.", w.uuid, safePoint)
    startTime := time.Now()
    regions := 0
    var key []byte
    for {
        select {
        case <-w.quit:
            return errors.New("[gc worker] gc job canceled")
        default:
        }
        loc, err := w.store.regionCache.LocateKey(bo, key)
        if err != nil {
            return errors.Trace(err)
        }
        resp, err := w.store.SendKVReq(bo, req, loc.Region, readTimeoutLong)
        if err != nil {
            return errors.Trace(err)
        }
        if regionErr := resp.GetRegionError(); regionErr != nil {
            err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
            if err != nil {
                return errors.Trace(err)
            }
            continue
        }
        gcResp := resp.GetCmdGcResp()
        if gcResp == nil {
            return errors.Trace(errBodyMissing)
        }
        if gcResp.GetError() != nil {
            return errors.Errorf("unexpected gc error: %s", gcResp.GetError())
        }
        regions++
        key = loc.EndKey
        if len(key) == 0 {
            break
        }
    }
    log.Infof("[gc worker] %s finish gc, safePoint: %v, regions: %v, cost time: %s", w.uuid, safePoint, regions, time.Since(startTime))
    gcHistogram.WithLabelValues("do_gc").Observe(time.Since(startTime).Seconds())
    return nil
}
Example 12: runDDLJob
// runDDLJob runs a DDL job.
func (d *ddl) runDDLJob(t *meta.Meta, job *model.Job) {
    log.Infof("[ddl] run DDL job %s", job)
    if job.IsFinished() {
        return
    }
    if job.State != model.JobRollback {
        job.State = model.JobRunning
    }
    var err error
    switch job.Type {
    case model.ActionCreateSchema:
        err = d.onCreateSchema(t, job)
    case model.ActionDropSchema:
        err = d.onDropSchema(t, job)
    case model.ActionCreateTable:
        err = d.onCreateTable(t, job)
    case model.ActionDropTable:
        err = d.onDropTable(t, job)
    case model.ActionAddColumn:
        err = d.onAddColumn(t, job)
    case model.ActionDropColumn:
        err = d.onDropColumn(t, job)
    case model.ActionModifyColumn:
        err = d.onModifyColumn(t, job)
    case model.ActionAddIndex:
        err = d.onCreateIndex(t, job)
    case model.ActionDropIndex:
        err = d.onDropIndex(t, job)
    case model.ActionAddForeignKey:
        err = d.onCreateForeignKey(t, job)
    case model.ActionDropForeignKey:
        err = d.onDropForeignKey(t, job)
    case model.ActionTruncateTable:
        err = d.onTruncateTable(t, job)
    default:
        // Invalid job, cancel it.
        job.State = model.JobCancelled
        err = errInvalidDDLJob.Gen("invalid ddl job %v", job)
    }
    // Save the error in the job, so that other components can see that an error happened.
    if err != nil {
        // If the job is not cancelled, we should log this error.
        if job.State != model.JobCancelled {
            log.Errorf("[ddl] run ddl job err %v", errors.ErrorStack(err))
        } else {
            log.Infof("[ddl] the job is normal to cancel because %v", errors.ErrorStack(err))
        }
        job.Error = toTError(err)
        job.ErrorCount++
    }
}
Example 13: logger
func logger(runmode string) macaron.Handler {
    return func(ctx *macaron.Context) {
        if runmode == "dev" {
            log.Debug("")
            log.Debug("----------------------------------------------------------------------------------")
        }
        log.Infof("[%s] [%s]", ctx.Req.Method, ctx.Req.RequestURI)
        log.Infof("[Header] %v", ctx.Req.Header)
    }
}
Example 14: doGC
func (w *GCWorker) doGC(safePoint uint64) error {
    req := &kvrpcpb.Request{
        Type: kvrpcpb.MessageType_CmdGC,
        CmdGcReq: &kvrpcpb.CmdGCRequest{
            SafePoint: safePoint,
        },
    }
    bo := NewBackoffer(gcMaxBackoff)
    log.Infof("[gc worker] %s start gc, safePoint: %v.", w.uuid, safePoint)
    startTime := time.Now()
    regions := 0
    var key []byte
    for {
        select {
        case <-w.quit:
            return errors.New("[gc worker] gc job canceled")
        default:
        }
        region, err := w.store.regionCache.GetRegion(bo, key)
        if err != nil {
            return errors.Trace(err)
        }
        resp, err := w.store.SendKVReq(bo, req, region.VerID())
        if err != nil {
            return errors.Trace(err)
        }
        if regionErr := resp.GetRegionError(); regionErr != nil {
            err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
            if err != nil {
                return errors.Trace(err)
            }
            continue
        }
        gcResp := resp.GetCmdGcResp()
        if gcResp == nil {
            return errors.Trace(errBodyMissing)
        }
        if gcResp.GetError() != nil {
            return errors.Errorf("unexpected gc error: %s", gcResp.GetError())
        }
        regions++
        key = region.EndKey()
        if len(key) == 0 {
            break
        }
    }
    log.Infof("[gc worker] %s finish gc, safePoint: %v, regions: %v, cost time: %s", w.uuid, safePoint, regions, time.Now().Sub(startTime))
    return nil
}
Example 15: handleTopoEvent
func (s *Server) handleTopoEvent() {
    for {
        select {
        case r := <-s.reqCh:
            if s.slots[r.slotIdx].slotInfo.State.Status == models.SLOT_STATUS_PRE_MIGRATE {
                s.bufferedReq.PushBack(r)
                continue
            }
            for e := s.bufferedReq.Front(); e != nil; {
                next := e.Next()
                if s.dispatch(e.Value.(*PipelineRequest)) {
                    s.bufferedReq.Remove(e)
                }
                e = next
            }
            if !s.dispatch(r) {
                log.Fatalf("should never happend, %+v, %+v", r, s.slots[r.slotIdx].slotInfo)
            }
        case e := <-s.evtbus:
            switch e.(type) {
            case *killEvent:
                s.handleMarkOffline()
                e.(*killEvent).done <- nil
            default:
                if s.top.IsSessionExpiredEvent(e) {
                    log.Fatalf("session expired: %+v", e)
                }
                evtPath := GetEventPath(e)
                log.Infof("got event %s, %v, lastActionSeq %d", s.pi.ID, e, s.lastActionSeq)
                if strings.Index(evtPath, models.GetActionResponsePath(s.conf.ProductName)) == 0 {
                    seq, err := strconv.Atoi(path.Base(evtPath))
                    if err != nil {
                        log.Warning(err)
                    } else {
                        if seq < s.lastActionSeq {
                            log.Infof("ignore, lastActionSeq %d, seq %d", s.lastActionSeq, seq)
                            continue
                        }
                    }
                }
                s.processAction(e)
            }
        }
    }
}