本文整理匯總了Golang中github.com/funkygao/log4go.Trace函數的典型用法代碼示例。如果您正苦於以下問題:Golang Trace函數的具體用法?Golang Trace怎麽用?Golang Trace使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了Trace函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: housekeeping
// housekeeping periodically purges expired segments and checkpoints the
// read cursor, running until the queue's quit channel is closed.
func (q *queue) housekeeping() {
	defer func() {
		log.Trace("queue[%s] housekeeping done", q.ident())
		q.wg.Done()
	}()

	log.Trace("queue[%s] start housekeeping...", q.ident())

	purger := time.NewTicker(q.purgeInterval)
	defer purger.Stop()
	chkpnt := time.NewTicker(time.Second)
	defer chkpnt.Stop()

	for {
		select {
		case <-q.quit:
			return

		case <-purger.C:
			if err := q.Purge(); err != nil {
				log.Error("queue[%s] purge: %s", q.ident(), err)
			}

		case <-chkpnt.C:
			if err := q.cursor.dump(); err != nil {
				log.Error("queue[%s] cursor checkpoint: %s", q.ident(), err)
			}
		}
	}
}
示例2: RunForever
// RunForever registers this controller with the orchestrator, starts the
// default manager, the web server and both dispatchers, then blocks until
// either dispatcher quits, at which point the manager is stopped.
func (this *controller) RunForever() (err error) {
	log.Info("controller[%s] starting", this.Id())

	if err = this.orchestrator.RegisterActor(this.Id(), this.Bytes()); err != nil {
		return err
	}
	defer this.orchestrator.ResignActor(this.Id())

	if err = manager.Default.Start(); err != nil {
		return
	}
	log.Trace("manager[%s] started", manager.Default.Name())

	go this.runWebServer()

	// each dispatcher signals its own quit channel when it terminates
	jobQuit, hookQuit := make(chan struct{}), make(chan struct{})
	go this.dispatchJobQueues(jobQuit)
	go this.dispatchWebhooks(hookQuit)

	select {
	case <-jobQuit:
		log.Warn("dispatchJobQueues quit")
	case <-hookQuit:
		log.Warn("dispatchWebhooks quit")
	}

	manager.Default.Stop()
	log.Trace("manager[%s] stopped", manager.Default.Name())
	return
}
示例3: watchDeadPartitions
// watchDeadPartitions polls the manager every 2 minutes for dead
// partitions, marks them on the store, and un-marks topics that were dead
// in the previous round but are no longer reported dead.
func (this *pubStore) watchDeadPartitions() {
	tick := time.NewTicker(time.Minute * 2) // TODO
	defer tick.Stop()

	// topics that were marked dead in the previous round
	deadLastRound := make(map[string]struct{})
	for {
		select {
		case <-this.shutdownCh:
			return

		case <-tick.C:
			dead := manager.Default.DeadPartitions()
			for topic, partitions := range dead {
				this.markPartitionsDead(topic, partitions)
				deadLastRound[topic] = struct{}{}
			}

			for topic := range deadLastRound {
				if _, stillDead := dead[topic]; stillDead {
					continue
				}

				// this topic was marked dead last round, but this round it comes alive
				log.Trace("%s come alive again", topic)
				this.markPartitionsDead(topic, nil)
				delete(deadLastRound, topic)
			}
		}
	}
}
示例4: Start
// Start creates the sub manager and launches a goroutine that reaps the
// client of every closed connection until shutdown is signalled.
func (this *subStore) Start() (err error) {
	this.subManager = newSubManager()

	this.wg.Add(1)
	go func() {
		defer this.wg.Done()

		for {
			select {
			case closedAddr := <-this.closedConnCh:
				// kill the client asynchronously so the loop keeps draining
				this.wg.Add(1)
				go func(addr string) {
					defer this.wg.Done()
					this.subManager.killClient(addr)
				}(closedAddr)

			case <-this.shutdownCh:
				log.Trace("sub store[%s] stopped", this.Name())
				return
			}
		}
	}()

	return
}
示例5: Start
// Start validates the local kafka installation, warms up one pub pool per
// known cluster, then watches meta refresh events until shutdown.
func (this *pubStore) Start() (err error) {
	if ctx.KafkaHome() == "" {
		return fmt.Errorf("empty kafka_home in ~/.gafka.cf")
	}
	if !gio.DirExists(ctx.KafkaHome()) {
		return fmt.Errorf("kafka not installed in %s, run 'gk deploy -kfkonly'", ctx.KafkaHome())
	}

	// warmup: create pools according the current kafka topology
	for _, clusterName := range meta.Default.ClusterNames() {
		this.pubPools[clusterName] = newPubPool(this, clusterName,
			meta.Default.BrokerList(clusterName), this.pubPoolsCapcity)
	}

	this.wg.Add(1)
	go func() {
		defer this.wg.Done()

		for {
			select {
			case <-this.shutdownCh:
				log.Trace("pub store[%s] stopped", this.Name())
				return

			case <-meta.Default.RefreshEvent():
				this.doRefresh()
			}
		}
	}()

	return
}
示例6: Attr
// Attr reports the file's attributes. For an opened file the size is the
// buffered content length; otherwise it is derived from the span between
// the partition's oldest and newest kafka offsets.
func (f *File) Attr(ctx context.Context, o *fuse.Attr) error {
	f.RLock()
	defer f.RUnlock()

	*o = f.attr

	// calculate size
	if f.opened {
		o.Size = uint64(len(f.content))
	} else {
		if err := f.dir.reconnectKafkaIfNecessary(); err != nil {
			return err
		}

		newest, err := f.dir.GetOffset(f.topic, f.partitionId, sarama.OffsetNewest)
		if err != nil {
			log.Error(err)
			return err
		}
		oldest, err := f.dir.GetOffset(f.topic, f.partitionId, sarama.OffsetOldest)
		if err != nil {
			log.Error(err)
			return err
		}
		o.Size = uint64(newest - oldest)
	}

	log.Trace("File Attr, topic=%s, partitionId=%d, size=%d", f.topic, f.partitionId, o.Size)
	return nil
}
示例7: Close
// Close stops the queue for reading and writing: it signals quit, waits
// for the pump and housekeeping goroutines, closes every segment and
// finally checkpoints and releases the cursor.
func (q *queue) Close() error {
	close(q.quit)
	// wait for pump and housekeeping finish
	q.wg.Wait()

	q.mu.Lock()
	defer q.mu.Unlock()

	for _, seg := range q.segments {
		if err := seg.Close(); err != nil {
			return err
		}
	}
	q.head, q.tail, q.segments = nil, nil, nil

	log.Trace("queue[%s] dumping cursor", q.ident())
	if err := q.cursor.dump(); err != nil {
		return err
	}
	q.cursor = nil
	return nil
}
示例8: Warmup
// Warmup pre-establishes a mongodb session to every server returned by
// the selector, retrying the full server list up to 3 times, then logs
// how long the warmup took.
func (this *Client) Warmup() {
	begin := time.Now()

	var (
		sess *mgo.Session
		err  error
	)
	for attempt := 0; attempt < 3; attempt++ {
		for _, server := range this.selector.ServerList() {
			if sess, err = this.getConn(server.Uri()); err != nil {
				log.Error("Warmup %v fail: %s", server.Uri(), err)
				break
			}
			this.putFreeConn(server.Uri(), sess)
		}
		if err == nil {
			break
		}
	}

	if err != nil {
		log.Error("Mongodb failed to warmup within %s: %s",
			time.Since(begin), err)
		return
	}
	log.Trace("Mongodb warmup within %s: %+v",
		time.Since(begin), this.freeconns)
}
示例9: do
// do emits a single log line for a servant call: a Warn when the call
// exceeded the configured slow threshold, otherwise a Trace when
// profiling is switched on. A nil receiver is a no-op.
func (this *profiler) do(callName string, ctx *rpc.Context, format string,
	args ...interface{}) {
	if this == nil {
		return
	}

	elapsed := time.Since(this.t1)
	slow := elapsed > config.Engine.Servants.CallSlowThreshold
	if !slow && !this.on {
		// neither slow nor profiling: nothing to log
		return
	}

	body := fmt.Sprintf(format, args...)
	switch {
	case slow:
		svtStats.incCallSlow()
		header := fmt.Sprintf("SLOW=%s/%s Q=%s ",
			elapsed, time.Since(this.t0), callName)
		log.Warn(header + this.truncatedStr(body))

	case this.on:
		header := fmt.Sprintf("T=%s/%s Q=%s ",
			elapsed, time.Since(this.t0), callName)
		log.Trace(header + this.truncatedStr(body))
	}
}
示例10: NewMessage
// NewMessage is the supported way to obtain a new Message. This makes
// use of a "slab allocator" which greatly reduces the load on the
// garbage collector.
// NewMessage is the supported way to obtain a new Message. This makes
// use of a "slab allocator" which greatly reduces the load on the
// garbage collector.
func NewMessage(size int) *Message {
	// locate the smallest slab class that can hold size bytes
	var ch chan *Message
	for _, class := range messagePool { // TODO binary search
		if size <= class.maxSize {
			ch, size = class.ch, class.maxSize
			break
		}
	}

	// non-blocking take from the pool; fall through when empty
	select {
	case msg := <-ch:
		msg.Body = msg.bodyBuf
		return msg
	default:
	}

	// message pool empty:
	// too busy or size greater than largest slab class
	log.Trace("allocating message memory pool: %dB", size)
	msg := &Message{slabSize: size}
	msg.bodyBuf = make([]byte, 0, msg.slabSize)
	msg.Body = msg.bodyBuf
	return msg
}
示例11: Close
// Close must be called before Recycle
func (this *syncProducerClient) Close() {
log.Trace("cluster[%s] closing kafka sync client: %d", this.cluster, this.id)
// will close the producer and the kafka tcp conn
this.SyncProducer.Close()
this.closed = true
}
示例12: ReadAll
// ReadAll returns the file's buffered content under the read lock.
func (f *File) ReadAll(ctx context.Context) ([]byte, error) {
	f.RLock()
	defer f.RUnlock()

	log.Trace("File ReadAll, topic=%s, partitionId=%d", f.topic, f.partitionId)
	return f.content, nil
}
示例13: Release
// Release marks the file as no longer opened, signals the close channel,
// resets the content buffer and shuts down the kafka consumer.
func (f *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
	log.Trace("File Release, req=%#v, topic=%s, partitionId=%d", req,
		f.topic, f.partitionId)

	f.opened = false
	close(f.closeCh)

	// drop the old content; keep a fresh 16KB-capacity buffer ready
	f.content = make([]byte, 0, 16<<10)

	return f.consumer.Close()
}
示例14: waitExit
// waitExit gracefully shuts down the sub server once the exit channel
// fires: it stops accepting new connections on both listeners, nudges
// idle connections to unwind via short read deadlines, waits a bounded
// time for connected clients, then flushes metrics and signals done.
func (this *subServer) waitExit(exit <-chan struct{}) {
	// block until shutdown is requested
	<-exit

	if this.httpServer != nil {
		// HTTP response will have "Connection: close"
		this.httpServer.SetKeepAlivesEnabled(false)

		// avoid new connections
		if err := this.httpListener.Close(); err != nil {
			log.Error(err.Error())
		}
		log.Trace("%s on %s listener closed", this.name, this.httpServer.Addr)
	}

	if this.httpsServer != nil {
		// HTTP response will have "Connection: close"
		this.httpsServer.SetKeepAlivesEnabled(false)

		// avoid new connections
		if err := this.httpsListener.Close(); err != nil {
			log.Error(err.Error())
		}
		log.Trace("%s on %s listener closed", this.name, this.httpsServer.Addr)
	}

	// give idle connections a 100ms read deadline so they terminate soon
	this.idleConnsLock.Lock()
	t := time.Now().Add(time.Millisecond * 100)
	for c := range this.idleConns {
		c.SetReadDeadline(t)
	}
	this.idleConnsLock.Unlock()

	// bounded wait for all connected clients to finish; warn on timeout
	if this.idleConnsWg.WaitTimeout(Options.SubTimeout) {
		log.Warn("%s waiting for all connected client close timeout: %s",
			this.name, Options.SubTimeout)
	}

	this.subMetrics.Flush()
	this.timer.Stop()

	// release the gateway waitgroup and announce that shutdown completed
	this.gw.wg.Done()
	close(this.closed)
}
示例15: Warmup
// Warmup warms up every memcache client in the pool and logs the total
// time spent together with the resulting free-connection map.
func (this *ClientPool) Warmup() {
	begin := time.Now()
	for _, c := range this.clients {
		c.Warmup()
	}
	log.Trace("Memcache pool warmup within %s: %+v",
		time.Since(begin), this.FreeConnMap())
}