This page collects typical usage examples of the Info function from the Golang package github.com/ngaut/logging. If you are unsure what Info does or how to call it, the curated code samples below should help.
15 code examples of the Info function are shown, sorted by popularity by default.
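For orientation, here is a minimal, self-contained sketch of how the package is typically wired up. It uses only calls that appear in the examples below (SetLevelByString, SetOutputByName, Info, Infof, Warning); the file path and messages are made up for illustration, so treat this as a sketch rather than a definitive API reference.

package main

import (
    log "github.com/ngaut/logging"
)

func main() {
    // Choose a log level by name; the examples below use "info".
    log.SetLevelByString("info")

    // Optionally redirect log output to a named file, as example 1 does
    // for its -L flag. The path here is hypothetical.
    log.SetOutputByName("/tmp/demo.log")

    // Info concatenates its arguments like fmt.Sprint.
    log.Info("server running on ", ":19000")

    // Infof formats like fmt.Sprintf; Warning logs at a higher severity.
    log.Infof("loaded %d slots", 1024)
    log.Warning("exit")
}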
Example 1: main
func main() {
    fmt.Print(banner)
    log.SetLevelByString("info")
    args, err := docopt.Parse(usage, nil, true, "codis proxy v0.1", true)
    if err != nil {
        log.Error(err)
    }
    // set config file
    if args["-c"] != nil {
        configFile = args["-c"].(string)
    }
    // set output log file
    if args["-L"] != nil {
        log.SetOutputByName(args["-L"].(string))
    }
    // set log level
    if args["--log-level"] != nil {
        log.SetLevelByString(args["--log-level"].(string))
    }
    // set cpu
    if args["--cpu"] != nil {
        cpus, err = strconv.Atoi(args["--cpu"].(string))
        if err != nil {
            log.Fatal(err)
        }
    }
    // set addr
    if args["--addr"] != nil {
        addr = args["--addr"].(string)
    }
    // set http addr
    if args["--http-addr"] != nil {
        httpAddr = args["--http-addr"].(string)
    }
    wd, _ := os.Getwd()
    log.Info("wd:", wd)
    log.CrashLog(wd + ".dump")
    router.CheckUlimit(1024)
    runtime.GOMAXPROCS(cpus)
    http.HandleFunc("/setloglevel", handleSetLogLevel)
    go http.ListenAndServe(httpAddr, nil)
    log.Info("running on ", addr)
    conf, err := router.LoadConf(configFile)
    if err != nil {
        log.Fatal(err)
    }
    s := router.NewServer(addr, httpAddr, conf)
    s.Run()
    log.Warning("exit")
}
Example 2: CheckTimeout
func (agent *Agent) CheckTimeout() {
    log.Info("checktimeout loop for every 5 sec")
    agent.Lock.Lock()
    defer agent.Lock.Unlock()
    for _, task := range agent.Running {
        // only check running tasks
        if task.Status != StatusRunning {
            continue
        }
        // kill cron job tasks that have timed out
        log.Info("check timeout for task:", task.TaskId, task.Job.Name)
        if task.IsTimeout() {
            if task.Job.OnTimeout() == TriggerKill {
                agent.KillTask(task)
            } else {
                log.Warning("timeout but we just ignore this :", task.TaskId)
            }
            ts := &TaskStatus{
                TaskPtr:  task,
                Command:  nil,
                Status:   StatusTimeout,
                CreateAt: time.Now().Unix(),
                Err:      fmt.Errorf("run task: %s jobname: %s timeout for %dsec", task.TaskId, task.Job.Name, time.Now().Unix()-task.ExecAt),
            }
            agent.JobStatusChan <- ts
        }
    }
}
Example 3: Run
func (d *Dispatcher) Run() {
    go d.slotsReloadLoop()
    for {
        select {
        case req, ok := <-d.reqCh:
            // dispatch req
            if !ok {
                log.Info("exit dispatch loop")
                return
            }
            var server string
            if req.readOnly {
                server = d.slotTable.ReadServer(req.slot)
            } else {
                server = d.slotTable.WriteServer(req.slot)
            }
            taskRunner, ok := d.taskRunners[server]
            if !ok {
                log.Info("create task runner", server)
                taskRunner = NewTaskRunner(server, d.connPool)
                d.taskRunners[server] = taskRunner
            }
            taskRunner.in <- req
        case info := <-d.slotInfoChan:
            d.handleSlotInfoChanged(info)
        }
    }
}
Example 4: Init
func (ps *ProxyServer) Init() {
    log.Info("Proxy Server Init ....")
    l, err := net.Listen("tcp4", "0.0.0.0:"+ps.Conf.Port)
    if err != nil {
        log.Fatalf("Proxy Server listen on port %s failed", ps.Conf.Port)
    }
    log.Info("Proxy Server Listen on port ", ps.Conf.Port)
    ps.Listen = l
}
Example 5: Rebalance
// experimental simple auto rebalance :)
func Rebalance(zkConn zkhelper.Conn, delay int) error {
    targetQuota, err := getQuotaMap(zkConn)
    if err != nil {
        return errors.Trace(err)
    }
    livingNodes, err := getLivingNodeInfos(zkConn)
    if err != nil {
        return errors.Trace(err)
    }
    log.Info("start rebalance")
    for _, node := range livingNodes {
        for len(node.CurSlots) > targetQuota[node.GroupId] {
            for _, dest := range livingNodes {
                if dest.GroupId != node.GroupId && len(dest.CurSlots) < targetQuota[dest.GroupId] {
                    slot := node.CurSlots[len(node.CurSlots)-1]
                    // create a migration task
                    t := NewMigrateTask(MigrateTaskInfo{
                        Delay:      delay,
                        FromSlot:   slot,
                        ToSlot:     slot,
                        NewGroupId: dest.GroupId,
                        Status:     MIGRATE_TASK_MIGRATING,
                        CreateAt:   strconv.FormatInt(time.Now().Unix(), 10),
                    })
                    u, err := uuid.NewV4()
                    if err != nil {
                        return errors.Trace(err)
                    }
                    t.Id = u.String()
                    if ok, err := preMigrateCheck(t); ok {
                        // do migrate
                        err := t.run()
                        if err != nil {
                            log.Warning(err)
                            return errors.Trace(err)
                        }
                    } else {
                        log.Warning(err)
                        return errors.Trace(err)
                    }
                    node.CurSlots = node.CurSlots[0 : len(node.CurSlots)-1]
                    dest.CurSlots = append(dest.CurSlots, slot)
                }
            }
        }
    }
    log.Info("rebalance finish")
    return nil
}
Example 6: Run
func (c *Conn) Run() {
    defer func() {
        r := recover()
        if err, ok := r.(error); ok {
            const size = 4096
            buf := make([]byte, size)
            buf = buf[:runtime.Stack(buf, false)]
            log.Errorf("lastCmd %s, %v, %s", c.lastCmd, err, buf)
        }
        c.Close()
    }()
    for {
        c.alloc.Reset()
        data, err := c.readPacket()
        if err != nil {
            if err.Error() != io.EOF.Error() {
                log.Info(err)
            }
            return
        }
        if err := c.dispatch(data); err != nil {
            log.Errorf("dispatch error %s, %s", errors.ErrorStack(err), c)
            if err != mysql.ErrBadConn { //todo: fix this
                c.writeError(err)
            }
        }
        c.pkg.Sequence = 0
    }
}
Example 7: CheckReady
func (agent *Agent) CheckReady() {
    agent.Lock.Lock()
    defer agent.Lock.Unlock()
    for id, job := range agent.Jobs {
        if _, ok := agent.Ready[id]; ok {
            log.Warning("cron job already in ready queue: ", id, job.Name)
            continue
        }
        if !job.NeedSchedule() || !job.IsValid() {
            continue
        }
        now := time.Now().Unix()
        task := &Task{
            JobId:  job.Id,
            TaskId: fmt.Sprintf("%d-%d", now, job.Id),
            Job:    job,
            Status: StatusReady,
            ExecAt: 0,
        }
        log.Info("add job to ready task queue: ", job.Id, job.Name)
        agent.Ready[job.Id] = task
    }
}
Example 8: GerritHandler
func GerritHandler(w http.ResponseWriter, r *http.Request) {
    // read and parse the request body.
    body, err := ioutil.ReadAll(r.Body)
    if err != nil {
        w.WriteHeader(http.StatusBadRequest)
        w.Write([]byte(err.Error()))
        return
    }
    js, err := simplejson.NewJson(body)
    if err != nil {
        w.WriteHeader(http.StatusBadRequest)
        w.Write([]byte(err.Error()))
        return
    }
    project, err := js.Get("refUpdate").Get("project").String()
    if err != nil {
        w.WriteHeader(http.StatusBadRequest)
        w.Write([]byte(err.Error()))
        return
    }
    branch, err := js.Get("refUpdate").Get("refName").String()
    if err != nil {
        w.WriteHeader(http.StatusBadRequest)
        w.Write([]byte(err.Error()))
        return
    }
    logging.Info(fmt.Sprintf("project:%s, branch:%s, timeout:%s",
        project, branch, strconv.Itoa(config.DefaultTimeout)))
    // build the image.
    go buildMain(project, branch, config.DefaultTimeout)
}
Example 9: ForceRemoveDeadFence
func ForceRemoveDeadFence(zkConn zkhelper.Conn, productName string) error {
    proxies, err := ProxyList(zkConn, productName, func(p *ProxyInfo) bool {
        return p.State == PROXY_STATE_ONLINE
    })
    if err != nil {
        return errors.Trace(err)
    }
    fenceProxies, err := GetFenceProxyMap(zkConn, productName)
    if err != nil {
        return errors.Trace(err)
    }
    // remove online proxies' fences
    for _, proxy := range proxies {
        delete(fenceProxies, proxy.Addr)
    }
    // delete dead fences in ZooKeeper
    path := GetProxyFencePath(productName)
    for remainFence := range fenceProxies {
        fencePath := filepath.Join(path, remainFence)
        log.Info("removing fence: ", fencePath)
        if err := zkhelper.DeleteRecursive(zkConn, fencePath, -1); err != nil {
            return errors.Trace(err)
        }
    }
    return nil
}
Example 10: main
func main() {
    flag.Parse()
    log.Info("flag parse: ", *db, *port)
    runtime.GOMAXPROCS(runtime.NumCPU() * 2)
    LogVerbose(*verbose)
    cfg := &agent.AgentConf{
        DBtype:   *dbtype,
        MySQLdb:  *db,
        HttpPort: *port,
        WorkDir:  *work_dir,
        QuitTime: *quit_time,
    }
    agent := agent.NewAgent(cfg)
    quit := agent.QuitChan
    go agent.Run()
    // handle the quit signal; we should exit only after all tasks have finished
    sc := make(chan os.Signal, 1)
    signal.Notify(sc,
        os.Kill,
        os.Interrupt,
        syscall.SIGHUP,
        syscall.SIGINT,
        syscall.SIGTERM,
        syscall.SIGQUIT)
    <-sc
    log.Warning("main receive quit signal...")
    close(quit)
    agent.Clean()
}
Example 11: AddColumn
func (ta *Table) AddColumn(name string, columnType string, collation string, defval mysql.Value, extra string) {
    index := len(ta.Columns)
    name = strings.ToLower(name)
    ta.Columns = append(ta.Columns, TableColumn{Name: name})
    columnType = strings.ToLower(columnType)
    endPos := strings.Index(columnType, "(") // handle something like: int(11)
    if endPos > 0 {
        ta.Columns[index].SqlType = str2mysqlType(strings.TrimSpace(columnType[:endPos]))
    } else {
        ta.Columns[index].SqlType = str2mysqlType(strings.TrimSpace(columnType))
    }
    ta.Columns[index].Collation = collation
    if strings.Contains(columnType, "unsigned") {
        ta.Columns[index].IsUnsigned = true
    }
    log.Info(name, ta.Columns[index].SqlType, columnType)
    if extra == "auto_increment" {
        ta.Columns[index].IsAuto = true
        // Ignore default value, if any
        return
    }
    if defval == nil {
        return
    }
    ta.Columns[index].Default = defval
}
Example 12: genLogFile
func (t *Task) genLogFile() {
    defer func() {
        if e := recover(); e != nil {
            log.Warning("genLogFile fatal:", e)
        }
    }()
    d := time.Now().Format("20060102")
    filename := fmt.Sprintf("%s/DCMS-%s/%d-%s-%s.log",
        t.Job.Dcms.Conf.WorkDir,
        d,
        t.Job.Id,
        t.Job.Name,
        t.TaskId)
    log.Info("generate logfile :", filename)
    logdir := fmt.Sprintf("%s/DCMS-%s", t.Job.Dcms.Conf.WorkDir, d)
    if err := os.MkdirAll(logdir, os.ModePerm); err != nil {
        log.Warningf("in run exec goroutine, mkdir workdir %s failed", t.Job.Dcms.Conf.WorkDir)
    }
    if f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.ModePerm); err != nil {
        log.Warning("in genLogFile os.OpenFile create failed: ", err)
        t.logfile = nil
        t.LogFilename = ""
    } else {
        t.logfile = f
        t.LogFilename = filename
    }
}
Example 13: releaseDashboardNode
func releaseDashboardNode() {
    zkPath := fmt.Sprintf("/zk/codis/db_%s/dashboard", globalEnv.ProductName())
    if exists, _, _ := safeZkConn.Exists(zkPath); exists {
        log.Info("removing dashboard node")
        safeZkConn.Delete(zkPath, 0)
    }
}
Example 14: handleConn
func (s *Server) handleConn(c net.Conn) {
    log.Info("new connection", c.RemoteAddr())
    s.counter.Add("connections", 1)
    client := &session{
        Conn:     c,
        r:        bufio.NewReader(c),
        CreateAt: time.Now(),
    }
    var err error
    defer func() {
        if err != nil { //todo: fix this ugly error check
            if GetOriginError(err.(*errors.Err)).Error() != io.EOF.Error() {
                log.Warningf("close connection %v, %+v, %v", c.RemoteAddr(), client, errors.ErrorStack(err))
            } else {
                log.Infof("close connection %v, %+v", c.RemoteAddr(), client)
            }
        } else {
            log.Infof("close connection %v, %+v", c.RemoteAddr(), client)
        }
        c.Close()
        s.counter.Add("connections", -1)
    }()
    for {
        err = s.redisTunnel(client)
        if err != nil {
            return
        }
        client.Ops++
    }
}
Example 15: waitOnline
func (s *Server) waitOnline() {
    s.mu.Lock()
    defer s.mu.Unlock()
    for {
        pi, err := s.top.GetProxyInfo(s.pi.Id)
        if err != nil {
            log.Fatal(errors.ErrorStack(err))
        }
        if pi.State == models.PROXY_STATE_MARK_OFFLINE {
            s.handleMarkOffline()
        }
        if pi.State == models.PROXY_STATE_ONLINE {
            s.pi.State = pi.State
            println("good, we are on line", s.pi.Id)
            log.Info("we are online", s.pi.Id)
            _, err := s.top.WatchNode(path.Join(models.GetProxyPath(s.top.ProductName), s.pi.Id), s.evtbus)
            if err != nil {
                log.Fatal(errors.ErrorStack(err))
            }
            return
        }
        println("wait to be online ", s.pi.Id)
        log.Warning(s.pi.Id, "wait to be online")
        time.Sleep(3 * time.Second)
    }
}