本文整理汇总了Golang中github.com/ActiveState/log.Infof函数的典型用法代码示例。如果您正苦于以下问题:Golang Infof函数的具体用法?Golang Infof怎么用?Golang Infof使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了Infof函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: Tail
// Tail begins tailing the files for this instance.
//
// One goroutine is spawned per discovered log file (docker streams and
// plain files use different tailers), plus a watchdog goroutine that
// broadcasts shutdown once the instance's container exits.
func (instance *Instance) Tail(tracker storage.Tracker) {
	log.Infof("Tailing %v logs for %v -- %+v",
		instance.Type, instance.Identifier(), instance)
	// stopCh is only ever closed (never sent to): closing broadcasts
	// shutdown to every tailer goroutine at once.
	stopCh := make(chan bool)
	instance.pubch = pubchannel.New("event.timeline", stopCh)
	logfiles := instance.getLogFiles()
	log.Infof("Determined log files: %+v", logfiles)
	shortDockerId := instance.getShortDockerId()
	// Register with the offset tracker so tail positions persist across restarts.
	tracker.RegisterInstance(shortDockerId)
	for name, filename := range logfiles {
		// call tailStream for standard docker logs and tailFile for user custom logs
		if instance.DockerStreams && (name == STDOUT || name == STDERR) {
			go instance.tailStream(name, filename, stopCh, tracker)
		} else {
			go instance.tailFile(name, filename, stopCh, tracker)
		}
	}
	go func() {
		// Block until the container exits, then stop all tailers and
		// drop this instance's cached offsets.
		docker.DockerListener.BlockUntilContainerStops(instance.DockerId)
		log.Infof("Container for %v exited", instance.Identifier())
		close(stopCh)
		tracker.Remove(shortDockerId)
	}()
}
示例2: main
// main wires up and starts the apptail agent: load config, restore
// persisted tailer state, subscribe to NATS for new app instances and
// tail each one, then monitor cloud events.
func main() {
	flag.Parse()
	go common.RegisterTailCleanup()
	apptail.LoadConfig()
	// Restore persisted tail offsets so a restart resumes where it left off.
	fstorage := storage.NewFileStorage(*stateFile_path)
	tracker := storage.NewTracker(fstorage, *debug)
	tracker.LoadTailers()
	interval := time.Duration(int64(apptail.GetConfig().PersistPositionIntervalSeconds))
	go tracker.StartSubmissionTimer(interval * time.Second)
	major, minor, patch := gozmq.Version()
	log.Infof("Starting apptail (zeromq %d.%d.%d)", major, minor, patch)
	log.Infof("Config: %+v\n", apptail.GetConfig())
	uid := getUID()
	natsclient := server.NewNatsClient(3)
	// mux guards started_instances; n counts newinstance events seen.
	mux := &sync.Mutex{}
	n := 0
	started_instances := StartedInstance{}
	natsclient.Subscribe("logyard."+uid+".newinstance", func(instance *apptail.Instance) {
		n++
		// Only start one tailer per docker container, even if the
		// newinstance event is delivered more than once.
		if started_instances.checkInstanceAndUpdate(n, instance.DockerId, mux) {
			go func() {
				instance.Tail(tracker)
				started_instances.delete(instance.DockerId, mux)
			}()
		}
	})
	natsclient.Publish("logyard."+uid+".start", []byte("{}"))
	log.Infof("Waiting for app instances ...")
	go docker.DockerListener.Listen()
	// clean up the cache after restart
	docker.DockerListener.TrackerCleanUp(tracker)
	server.MarkRunning("apptail")
	// NOTE(review): presumably blocks forever, keeping the process alive — confirm.
	apptail_event.MonitorCloudEvents()
}
示例3: readFromTail
// readFromTail pumps lines from the tailer t to the publisher until the
// tailer's line channel closes or stopCh is closed, persisting the file
// offset after every line. Any terminal tail error is logged and sent as
// a timeline event.
func (instance *Instance) readFromTail(t *tail.Tail, pub *zmqpubsub.Publisher, name string, stopCh chan bool, filename string, tracker storage.Tracker) {
	var err error
FORLOOP:
	for {
		select {
		case line, ok := <-t.Lines:
			if !ok {
				err = t.Wait()
				break FORLOOP
			}
			// Use a distinct tellErr: the original `:=` shadowed the outer
			// err, which was confusing and fragile (vet/shadow pitfall).
			currentOffset, tellErr := t.Tell()
			if tellErr != nil {
				log.Error(tellErr.Error())
			}
			tracker.Update(instance.getShortDockerId(), filename, currentOffset)
			instance.publishLine(pub, name, line)
		case <-stopCh:
			err = t.Stop()
			break FORLOOP
		}
	}
	if err != nil {
		log.Warn(err)
		instance.SendTimelineEvent("WARN -- Error tailing file (%s); %s", name, err)
	}
	log.Infof("Completed tailing %v log for %v", name, instance.Identifier())
}
示例4: Stop
// Stop shuts down this app log drain: it logs the reason, removes the
// drain registration (logging any failure), and kills the server.
func (d *AppLogDrain) Stop(reason error) {
	log.Infof("Stopping drain %s for reason: %v", d.Id(), reason)
	err := d.removeDrain()
	if err != nil {
		log.Errorf("Failed to remove drain %v: %v", d.Id(), err)
	}
	d.srv.Kill(reason)
}
示例5: Drain
// Drain continuously copies bytes from the wrapped reader into the
// internal buffer, waking waiters on the `available` condition variable
// whenever new data lands. It returns once the read or buffered write
// fails (EOF and closed-pipe count as normal termination on the read side).
func (r *ReadSeekCloseWrapper) Drain() {
	var err, err2 error
	buf := make([]byte, 0x10000) // 64 KiB scratch buffer reused each iteration
	for {
		var count int
		count, err = r.reader.Read(buf)
		if count > 0 {
			_, err2 = r.buffer.Write(buf[:count])
		}
		// EOF / ErrClosedPipe are expected stream endings; only log others.
		if err != nil && err != io.EOF && err != io.ErrClosedPipe {
			log.Errorf("Error reading stream %v: %v", r.reader, err)
		}
		if err2 != nil {
			log.Errorf("Error writing buffer: %v: %v", r.buffer, err2)
		}
		// Any read or write error (including EOF) terminates the pump.
		if err != nil || err2 != nil {
			break
		}
		// Signal consumers blocked on r.available that data is ready.
		if r.buffer.Len() > 0 {
			r.available.L.Lock()
			r.available.Broadcast()
			r.available.L.Unlock()
		}
	}
	log.Infof("Read complete (error %v/%v)", err, err2)
}
示例6: BlockUntilContainerStops
// BlockUntilContainerStops blocks the caller until the container with
// the given docker ID exits (signalled by closing the registered wait
// channel).
func (l *dockerListener) BlockUntilContainerStops(id string) {
	var total int
	ch := make(chan bool)
	// BUG FIX: validate *before* truncating. The original sliced first,
	// which panics on IDs shorter than ID_LENGTH and made the subsequent
	// length check unreachable (post-slice length is always ID_LENGTH).
	if len(id) < ID_LENGTH {
		common.Fatal("Invalid docker ID length: %v", len(id))
	}
	id = id[:ID_LENGTH]
	// Register a wait channel under the mutex.
	func() {
		l.mux.Lock()
		defer l.mux.Unlock()
		if _, ok := l.waiters[id]; ok {
			log.Warn("already added")
		} else {
			l.waiters[id] = ch
		}
		total = len(l.waiters)
	}()
	runtime.Gosched()
	// Wait for the listener to close ch when the container exits.
	log.Infof("Waiting for container %v to exit (total waiters: %d)", id, total)
	<-ch
}
示例7: Wait
// Wait sleeps for a progressively longer delay between retries and
// reports whether the caller should retry. It returns false once the
// total retry duration exceeds the configured retryLimit (when set).
// Stats reset on the first retry and after RESET_AFTER of silence.
func (retry *ProgressiveRetryer) Wait(msg string) bool {
	var delay time.Duration
	// how long is the retry happening? (time.Since is the idiomatic form
	// of time.Now().Sub — staticcheck S1012)
	retryDuration := time.Since(retry.firstRetry)
	// how long since the last retry?
	silenceDuration := time.Since(retry.lastRetry)
	if retry.firstRetry.IsZero() {
		// first retry; just do it without waiting.
		retry.reset()
		delay = 0
	} else if silenceDuration > RESET_AFTER {
		// reset retry stats if Wait was not called in the last 20
		// minutes (implying sufficiently successful period).
		retry.reset()
		delay = 0
	} else if retry.hasRetryLimit() && retryDuration > retry.retryLimit {
		// respect retryLimit
		log.Errorf("%s -- giving up after retrying for %v.", msg, retry.retryLimit)
		retry.reset()
		return false
	} else {
		// Escalate the delay based on how long we've been retrying.
		switch {
		case retryDuration < time.Minute:
			// once every 5 seconds for 1 minute
			delay = 5 * time.Second
		case retryDuration < (1+5)*time.Minute:
			// once every 30 seconds for next 5 minutes
			delay = 30 * time.Second
		case retryDuration < (1+5+10)*time.Minute:
			// once every 1 minute for next 10 minutes
			delay = time.Minute
		default:
			// once every 5 minutes therein
			delay = 5 * time.Minute
		}
	}
	// Log the retry action
	if delay == 0 {
		log.Warnf("%s -- retrying now.", msg)
	} else {
		if retry.hasRetryLimit() {
			// If there is a retry limit -- which are the tmp. and
			// appdrain. drains -- this drain is to be considered
			// unimportant for the sys admins. So we do not generate
			// a WARN, thus putting it in cloud events.
			log.Infof("%s -- retrying after %v (max %v).", msg, delay, retry.retryLimit)
		} else {
			log.Warnf("%s -- retrying after %v.", msg, delay)
		}
	}
	time.Sleep(delay)
	retry.lastRetry = time.Now()
	return true
}
示例8: Wait
// Wait allows up to three retries, logging each attempt. It returns
// false once the retry budget is exhausted.
func (retry *ThriceRetryer) Wait(msg string) bool {
	if retry.count >= 3 {
		return false
	}
	retry.count += 1
	log.Infof("retry #%d -- %v.", retry.count, msg)
	return true
}
示例9: NewCron
// NewCron builds a Cron that runs the given command with args on the
// given cron schedule. In-flight executions are tracked on the Cron's
// WaitGroup so callers can wait for them to finish.
func NewCron(schedule string, command string, args []string) *Cron {
	log.Infof("Running per schedule: %v", schedule)
	c := &Cron{cron.New(), &sync.WaitGroup{}}
	job := func() {
		c.wg.Add(1)
		defer c.wg.Done()
		log.Infof("Executing: %v %v", command, strings.Join(args, " "))
		if err := execute(command, args); err != nil {
			log.Warnf("Failed: %v", err)
			return
		}
		log.Info("Succeeded")
	}
	c.AddFunc(schedule, job)
	return c
}
示例10: Remove
// Remove drops key from the cached instances and persists the change,
// aborting the process if the commit fails.
func (t *tracker) Remove(key string) {
	if t.debug {
		log.Infof("Removing the following key %s from cached instances", key)
	}
	// Mutate the shared map under the lock; commit happens after release,
	// exactly as before.
	func() {
		t.mux.Lock()
		defer t.mux.Unlock()
		delete(t.Cached.Instances, key)
	}()
	if err := t.Commit(); err != nil {
		log.Fatal(err)
	}
}
示例11: ServeHTTP
// ServeHTTP upgrades the request to a websocket and hands the stream to
// the wrapped handler, closing the socket when it returns. Upgrade
// failures are logged and reported to the client as HTTP 500.
func (h *webSocketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		var errString string
		// BUG FIX: the branches were inverted — a websocket.HandshakeError
		// was being reported as "Unknown websocket error" and vice versa.
		if _, ok := err.(websocket.HandshakeError); ok {
			errString = fmt.Sprintf("Handshake error: %v", err)
		} else {
			errString = fmt.Sprintf("Unknown websocket error: %v", err)
		}
		log.Info(errString)
		http.Error(w, errString, 500)
		return
	}
	log.Infof("wsutil.ServeWS start - %v", getWsConnId(r, ws))
	defer log.Infof("wsutil.ServeWS finish - %v", getWsConnId(r, ws))
	h.handler.ServeWS(w, r, &WebSocketStream{ws})
	ws.Close()
}
示例12: dumpState
// dumpState logs every cached container's per-file tail offsets.
// It is a no-op unless debug mode is enabled.
func (t *tracker) dumpState(ops string) {
	if !t.debug {
		return
	}
	// BUG FIX: the comment promised mutex protection but no lock was
	// taken; iterating t.Cached.Instances raced with Remove/other
	// writers that mutate it under t.mux.
	// NOTE(review): assumes no caller invokes dumpState while already
	// holding t.mux — confirm against call sites.
	t.mux.Lock()
	defer t.mux.Unlock()
	for k, v := range t.Cached.Instances {
		message := fmt.Sprintf("[%s] ContainerId: %s", ops, k)
		for fname, buffer := range v {
			log.Infof(message+" File: %s --> TailOffset: %d", fname, buffer)
		}
	}
}
示例13: NewStackatoParser
// NewStackatoParser constructs a Parser from per-process event parser
// specs, converting each EventParserSpec into an EventParser and
// building the resulting parser before returning it.
func NewStackatoParser(spec map[string]map[string]EventParserSpec) Parser {
	groups := map[string]EventParserGroup{}
	for process, events := range spec {
		group, ok := groups[process]
		if !ok {
			group = map[string]*EventParser{}
			groups[process] = group
		}
		for eventName, evt := range events {
			log.Infof("Loading parse spec %s/%s", process, eventName)
			group[eventName] = evt.ToEventParser()
		}
	}
	parser := NewParser(groups)
	parser.Build()
	return parser
}
示例14: RemoveOrphanedDrains
// RemoveOrphanedDrains removes all drains created by applog_endpoint.
func RemoveOrphanedDrains() {
	// Note that this is tricky to do when horizontally scaling
	// applog_endpoint. Could be solved easily by using nodeID or ip
	// addr in the drain name.
	logyardConfig := logyard.GetConfig()
	// Idiomatic key-only range (was `for name, _ := range`, flagged by gofmt/vet).
	for name := range logyardConfig.Drains {
		if !strings.HasPrefix(name, DRAIN_PREFIX) {
			continue
		}
		log.Infof("Removing orphaned drain %v", name)
		if err := logyard.DeleteDrain(name); err != nil {
			log.Warnf("Failed to delete drain %v -- %v",
				name, err)
		}
	}
}
示例15: addDrain
// addDrain adds a logyard drain for the apptail.{appGUID} stream
// pointing to ourself (port)
func (d *AppLogDrain) addDrain() error {
	uri := fmt.Sprintf("%s://%v:%v",
		LINESERVER_PROTO,
		server.LocalIPMust(),
		d.port)
	filter := fmt.Sprintf("apptail.%s", d.appGUID)
	// Build the drain URI that filters on this app's stream only.
	drainURI, err := drain.ConstructDrainURI(
		d.drainName, uri, []string{filter}, nil)
	if err != nil {
		return err
	}
	err = logyard.AddDrain(d.drainName, drainURI)
	if err != nil {
		return err
	}
	log.Infof("Added drain %v => %v", d.drainName, drainURI)
	return nil
}