This page collects typical usage examples of the Infof function from the Golang package github.com/ngaut/logging. If you have been wondering what the Golang Infof function is for, how to call it, or what real-world uses look like, the hand-picked code examples below may help.
A total of 15 code examples of the Infof function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
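Before the collected examples, here is a minimal, self-contained sketch of a typical Infof call. It is only an illustration: the log import alias and the "info" level name passed to SetLevelByString are assumptions inferred from how the package is used in the examples below, not guaranteed by the package documentation.

package main

import (
	log "github.com/ngaut/logging" // assumed alias; the examples below all refer to the package as "log"
)

func main() {
	// assumed level name; Example 15 below sets the level from a config string
	log.SetLevelByString("info")

	// Infof behaves like fmt.Printf: a format string followed by its arguments
	addr := "127.0.0.1:19000"
	log.Infof("proxy listens on %s", addr)

	// the %+v verb dumps a whole struct, a pattern used heavily in the examples below
	cfg := struct{ Addr string }{Addr: addr}
	log.Infof("%+v", cfg)
}

Most of the examples that follow use the %+v and %#v verbs to log entire structs, which is the dominant Infof pattern in these codebases.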
Example 1: handleConn
func (s *Server) handleConn(c net.Conn) {
	log.Info("new connection", c.RemoteAddr())

	s.counter.Add("connections", 1)
	client := &session{
		Conn:     c,
		r:        bufio.NewReader(c),
		CreateAt: time.Now(),
	}

	var err error
	defer func() {
		if err != nil { //todo: fix this ugly error check
			if GetOriginError(err.(*errors.Err)).Error() != io.EOF.Error() {
				log.Warningf("close connection %v, %+v, %v", c.RemoteAddr(), client, errors.ErrorStack(err))
			} else {
				log.Infof("close connection %v, %+v", c.RemoteAddr(), client)
			}
		} else {
			log.Infof("close connection %v, %+v", c.RemoteAddr(), client)
		}

		c.Close()
		s.counter.Add("connections", -1)
	}()

	for {
		err = s.redisTunnel(client)
		if err != nil {
			return
		}
		client.Ops++
	}
}
Example 2: Run
func (p *Proxy) Run() {
	tcpAddr, err := net.ResolveTCPAddr("tcp", p.addr)
	if err != nil {
		log.Fatal(err)
	}

	listener, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		log.Fatal(err)
	} else {
		log.Infof("proxy listens on %s", p.addr)
	}
	defer listener.Close()

	go p.dispatcher.Run()

	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			log.Error(err)
			continue
		}
		log.Infof("accept client: %s", conn.RemoteAddr())
		go p.handleConnection(conn)
	}
}
Example 3: slotsReloadLoop
// wait for the slot reload chan and reload the cluster topology
// at most once every slotReloadInterval;
// it also reloads the topology at a relatively long periodic interval
func (d *Dispatcher) slotsReloadLoop() {
	periodicReloadInterval := 60 * time.Second
	for {
		select {
		case <-time.After(d.slotReloadInterval):
			select {
			case _, ok := <-d.slotReloadChan:
				if !ok {
					log.Infof("exit reload slot table loop")
					return
				}
				log.Infof("request reload triggered")
				if slotInfos, err := d.reloadTopology(); err != nil {
					log.Errorf("reload slot table failed")
				} else {
					d.slotInfoChan <- slotInfos
				}
			case <-time.After(periodicReloadInterval):
				log.Infof("periodic reload triggered")
				if slotInfos, err := d.reloadTopology(); err != nil {
					log.Errorf("reload slot table failed")
				} else {
					d.slotInfoChan <- slotInfos
				}
			}
		}
	}
}
Example 4: loadSchemaInfo
func (s *Server) loadSchemaInfo() error {
	if err := s.parseShards(); err != nil {
		return errors.Trace(err)
	}

	if err := s.parseSchemas(); err != nil {
		return errors.Trace(err)
	}

	for _, v := range s.cfg.Schemas {
		rc := v.RouterConifg
		var overrides []tabletserver.SchemaOverride
		for _, tr := range rc.TableRule {
			or := tabletserver.SchemaOverride{Name: tr.Table}
			pks := strings.Split(tr.ShardingKey, ",")
			for _, pk := range pks {
				or.PKColumns = append(or.PKColumns, strings.TrimSpace(pk))
			}
			log.Infof("table rule:%+v", tr)

			or.Cache = &tabletserver.OverrideCacheDesc{Type: tr.RowCacheType, Prefix: or.Name, Table: or.Name}
			overrides = append(overrides, or)
		}

		//fix hard code node
		sc := s.cfg.Shards[0]
		si := tabletserver.NewSchemaInfo(s.cfg.RowCacheConf, s.cfg.Shards[0].Master, sc.User, sc.Password, v.DB, overrides)
		log.Infof("%+v", si)
		s.autoSchamas[v.DB] = si
	}
	return nil
}
Example 5: initRowCache
func (ti *TableInfo) initRowCache(tableType string, createTime sqltypes.Value, comment string, cachePool *CachePool) {
	if cachePool.IsClosed() {
		return
	}

	if strings.Contains(comment, "vtocc_nocache") {
		log.Infof("%s commented as vtocc_nocache. Will not be cached.", ti.Name)
		return
	}

	if tableType == "VIEW" {
		log.Infof("%s is a view. Will not be cached.", ti.Name)
		return
	}

	if ti.PKColumns == nil {
		log.Infof("Table %s has no primary key. Will not be cached.", ti.Name)
		return
	}

	for _, col := range ti.PKColumns {
		if ti.Columns[col].SqlType == mysql.MYSQL_TYPE_NO_CACHE {
			log.Infof("Table %s pk has unsupported column types. Will not be cached.", ti.Name)
			return
		}
	}

	ti.CacheType = schema.CACHE_RW
	ti.Cache = NewRowCache(ti, cachePool)
}
Example 6: NewServer
func NewServer(addr string, debugVarAddr string, conf *Conf) *Server {
	log.Infof("%+v", conf)
	s := &Server{
		evtbus:            make(chan interface{}, 100),
		top:               topo.NewTopo(conf.productName, conf.zkAddr, conf.f),
		net_timeout:       conf.net_timeout,
		counter:           stats.NewCounters("router"),
		lastActionSeq:     -1,
		startAt:           time.Now(),
		addr:              addr,
		concurrentLimiter: tokenlimiter.NewTokenLimiter(100),
		moper:             NewMultiOperator(addr),
		pools:             cachepool.NewCachePool(),
	}
	s.broker = conf.broker
	slot_num = conf.slot_num

	s.mu.Lock()
	s.pi.Id = conf.proxyId
	s.pi.State = models.PROXY_STATE_OFFLINE
	hname, err := os.Hostname()
	if err != nil {
		log.Fatal("get host name failed", err)
	}
	s.pi.Addr = hname + ":" + strings.Split(addr, ":")[1]
	s.pi.DebugVarAddr = hname + ":" + strings.Split(debugVarAddr, ":")[1]
	log.Infof("proxy_info:%+v", s.pi)
	s.mu.Unlock()
	//todo:fill more field

	stats.Publish("evtbus", stats.StringFunc(func() string {
		return strconv.Itoa(len(s.evtbus))
	}))
	stats.Publish("startAt", stats.StringFunc(func() string {
		return s.startAt.String()
	}))

	s.RegisterAndWait()

	_, err = s.top.WatchChildren(models.GetWatchActionPath(conf.productName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	s.FillSlots()

	//start event handler
	go s.handleTopoEvent()

	log.Info("proxy start ok")

	return s
}
Example 7: LoadConf
func LoadConf(configFile string) (*Conf, error) {
	srvConf := &Conf{}
	conf, err := utils.InitConfigFromFile(configFile)
	if err != nil {
		log.Fatal(err)
	}

	srvConf.productName, _ = conf.ReadString("product", "test")
	if len(srvConf.productName) == 0 {
		log.Fatalf("invalid config: product entry is missing in %s", configFile)
	}

	srvConf.zkAddr, _ = conf.ReadString("zk", "")
	if len(srvConf.zkAddr) == 0 {
		log.Fatalf("invalid config: need zk entry is missing in %s", configFile)
	}
	srvConf.zkAddr = strings.TrimSpace(srvConf.zkAddr)

	srvConf.proxyId, _ = conf.ReadString("proxy_id", "")
	if len(srvConf.proxyId) == 0 {
		log.Fatalf("invalid config: need proxy_id entry is missing in %s", configFile)
	}

	srvConf.netTimeout, _ = conf.ReadInt("net_timeout", 5)
	srvConf.proto, _ = conf.ReadString("proto", "tcp")
	srvConf.provider, _ = conf.ReadString("coordinator", "zookeeper")
	log.Infof("%+v", srvConf)

	return srvConf, nil
}
Example 8: GetConn
func (c *Cluster) GetConn(key []byte, slave bool) (Conn, error) {
	id := c.topo.GetNodeID(key, slave)
	log.Infof("GetConn %s for key: %s", id, string(key))
	pool, ok := c.pools[id]
	if !ok {
		// opt should always exist here, but check it to be safe
		opt := c.opts[id]
		if opt == nil {
			n := c.topo.GetNode(id)
			if n == nil {
				return nil, fmt.Errorf("Cluster GetConn ID %s not exists ", id)
			}
			opt = &Options{
				Network:      "tcp",
				Addr:         fmt.Sprintf("%s:%d", n.host, n.port),
				Dialer:       RedisConnDialer(n.host, n.port, n.id, c.pc),
				DialTimeout:  c.pc.dialTimeout,
				ReadTimeout:  c.pc.readTimeout,
				WriteTimeout: c.pc.writeTimeout,
				PoolSize:     c.pc.poolSize,
				IdleTimeout:  c.pc.idleTimeout,
			}
			c.opts[id] = opt
		}
		pool = NewConnPool(opt)
		c.pools[id] = pool
	}
	return pool.Get()
}
Example 9: handleCrashedServer
func handleCrashedServer(s *models.Server) error {
	switch s.Type {
	case models.SERVER_TYPE_MASTER:
		//get slave and do promote
		slave, err := getSlave(s)
		if err != nil {
			log.Warning(errors.ErrorStack(err))
			return err
		}

		log.Infof("try promote %+v", slave)
		err = callHttp(nil, genUrl(*apiServer, "/api/server_group/", slave.GroupId, "/promote"), "POST", slave)
		if err != nil {
			log.Errorf("do promote %v failed %v", slave, errors.ErrorStack(err))
			return err
		}
		refreshSlave(s) // refresh slave info
	case models.SERVER_TYPE_SLAVE:
		log.Errorf("slave is down: %+v", s)
	case models.SERVER_TYPE_OFFLINE:
		//no need to handle it
	default:
		log.Fatalf("unknown type %+v", s)
	}
	return nil
}
Example 10: NewSchemaInfo
func NewSchemaInfo(rowCacheConf RowCacheConfig, dbAddr string, user, pwd, dbName string, overrides []SchemaOverride) *SchemaInfo {
	si := &SchemaInfo{
		queries:   cache.NewLRUCache(128 * 1024 * 1024),
		tables:    make(map[string]*TableInfo),
		cachePool: NewCachePool(dbName, rowCacheConf, 3*time.Second, 3*time.Second),
	}

	var err error
	si.connPool, err = mysql.Open(dbAddr, user, pwd, dbName)
	if err != nil { //todo: return error
		log.Fatal(err)
	}

	si.overrides = overrides
	si.connPool.SetMaxIdleConnNum(100)
	log.Infof("%+v", si.overrides)

	si.cachePool.Open()

	for _, or := range si.overrides {
		si.CreateOrUpdateTable(or.Name)
	}
	si.override()

	return si
}
Example 11: Close
func (s *Session) Close() {
	log.Infof("close session %p", s)
	if !s.closed {
		s.closed = true
		s.Conn.Close()
	}
}
Example 12: override
func (si *SchemaInfo) override() {
	for _, override := range si.overrides {
		table, ok := si.tables[override.Name]
		if !ok {
			log.Warningf("Table not found for override: %v, %v", override, si.tables)
			continue
		}

		if override.PKColumns != nil {
			log.Infof("SetPK Table name %s, pk %v", override.Name, override.PKColumns)
			if err := table.SetPK(override.PKColumns); err != nil {
				log.Errorf("%s: %v", errors.ErrorStack(err), override)
				continue
			}
		}

		if si.cachePool.IsClosed() || override.Cache == nil {
			log.Infof("%+v", override)
			continue
		}

		switch override.Cache.Type {
		case "RW":
			table.CacheType = schema.CACHE_RW
			table.Cache = NewRowCache(table, si.cachePool)
		case "W":
			table.CacheType = schema.CACHE_W
			if len(override.Cache.Table) == 0 {
				log.Warningf("Incomplete cache specs: %v", override)
				continue
			}

			totable, ok := si.tables[override.Cache.Table]
			if !ok {
				log.Warningf("Table not found: %v", override)
				continue
			}

			if totable.Cache == nil {
				log.Warningf("Table has no cache: %v", override)
				continue
			}

			table.Cache = totable.Cache
		default:
			log.Warningf("Ignoring cache override: %+v", override)
		}
	}
}
Example 13: handleTopoEvent
// TODO Lius: main event handler waiting for prepared struct (already do read)
func (s *Server) handleTopoEvent() {
	// do Listen in Server.Run by outer main invoke
	for {
		select {
		case r := <-s.reqCh: // Lius: send to backend
			if s.slots[r.slotIdx].slotInfo.State.Status == models.SLOT_STATUS_PRE_MIGRATE {
				s.bufferedReq.PushBack(r)
				continue
			}

			// Lius: buffered requests because of migrating keys or other strategy
			for e := s.bufferedReq.Front(); e != nil; {
				next := e.Next()
				s.dispatch(e.Value.(*PipelineRequest))
				s.bufferedReq.Remove(e)
				e = next
			}

			// Lius: send current request
			s.dispatch(r)
		case e := <-s.evtbus:
			switch e.(type) {
			case *killEvent:
				s.handleMarkOffline()
				e.(*killEvent).done <- nil
			default:
				evtPath := GetEventPath(e)
				log.Infof("got event %s, %v, lastActionSeq %d", s.pi.Id, e, s.lastActionSeq)
				if strings.Index(evtPath, models.GetActionResponsePath(s.conf.productName)) == 0 {
					seq, err := strconv.Atoi(path.Base(evtPath))
					if err != nil {
						log.Warning(err)
					} else {
						if seq < s.lastActionSeq {
							log.Info("ignore", seq)
							continue
						}
					}
				}

				log.Infof("got event %s, %v, lastActionSeq %d", s.pi.Id, e, s.lastActionSeq)
				s.processAction(e)
			}
		}
	}
}
Example 14: getActionObject
func (s *Server) getActionObject(seq int, target interface{}) {
	act := &models.Action{Target: target}
	log.Infof("%+v", act)

	err := s.top.GetActionWithSeqObject(int64(seq), act)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
}
Example 15: main
func main() {
	autoflags.Define(&config)
	flag.Parse()
	log.SetLevelByString(config.LogLevel)
	// to avoid pprof being optimized by gofmt
	log.Debug(pprof.Handler("profile"))
	if len(config.LogFile) != 0 {
		log.SetOutputByName(config.LogFile)
		log.SetRotateByDay()
	}
	if config.LogEveryN <= 0 {
		proxy.LogEveryN = 1
	} else {
		proxy.LogEveryN = config.LogEveryN
	}
	log.Infof("%#v", config)

	sigChan := make(chan os.Signal)
	signal.Notify(sigChan, os.Interrupt, os.Kill)

	log.Infof("pid %d", os.Getpid())
	if len(config.DebugAddr) != 0 {
		http.HandleFunc("/setloglevel", handleSetLogLevel)
		go func() {
			log.Fatal(http.ListenAndServe(config.DebugAddr, nil))
		}()
		log.Infof("debug service listens on %s", config.DebugAddr)
	}

	// shuffle startup nodes
	startupNodes := strings.Split(config.StartupNodes, ",")
	indexes := rand.Perm(len(startupNodes))
	for i, startupNode := range startupNodes {
		startupNodes[i] = startupNodes[indexes[i]]
		startupNodes[indexes[i]] = startupNode
	}

	connPool := proxy.NewConnPool(config.BackendIdleConnections, config.ConnectTimeout, config.ReadPrefer != proxy.READ_PREFER_MASTER)
	dispatcher := proxy.NewDispatcher(startupNodes, config.SlotsReloadInterval, connPool, config.ReadPrefer)
	if err := dispatcher.InitSlotTable(); err != nil {
		log.Fatal(err)
	}
	proxy := proxy.NewProxy(config.Addr, dispatcher, connPool)
	go proxy.Run()

	sig := <-sigChan
	log.Infof("terminated by %#v", sig)
	proxy.Exit()
}