This article collects typical usage examples of the Warn function from the Golang package github.com/funkygao/log4go. If you are wondering what the Golang Warn function does, how to call it, or what real-world uses look like, the curated code examples below should help.
The following shows 15 code examples of the Warn function, roughly ordered by popularity.
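For orientation before the examples, here is a minimal, self-contained sketch of calling Warn directly. It is an illustration only: it assumes the package-level logger writes to log4go's default console destination and that, as in upstream log4go, a package-level Close flushes buffered output. The printf-style signature (a format string followed by arguments) is the same one used in every example below.

package main

import (
    log "github.com/funkygao/log4go"
)

func main() {
    topic := "foo.bar"
    // Warn takes a printf-style format string plus variadic arguments,
    // exactly as it is used throughout the examples on this page.
    log.Warn("illegal topic: %s", topic)

    // Assumption: log4go writers may buffer asynchronously, so flush
    // the default logger before a short-lived program exits.
    log.Close()
}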
Example 1: createJobHandler
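// createJobHandler validates the topic name, throttles by client IP, authenticates the caller as an app admin, then creates the job queue in both the job store and ZooKeeper before replying 201 Created.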
// @rest POST /v1/jobs/:appid/:topic/:ver
func (this *manServer) createJobHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
    topic := params.ByName(UrlParamTopic)
    if !manager.Default.ValidateTopicName(topic) {
        log.Warn("illegal topic: %s", topic)
        writeBadRequest(w, "illegal topic")
        return
    }
    realIp := getHttpRemoteIp(r)
    if !this.throttleAddTopic.Pour(realIp, 1) {
        writeQuotaExceeded(w)
        return
    }
    hisAppid := params.ByName(UrlParamAppid)
    appid := r.Header.Get(HttpHeaderAppid)
    pubkey := r.Header.Get(HttpHeaderPubkey)
    ver := params.ByName(UrlParamVersion)
    if !manager.Default.AuthAdmin(appid, pubkey) {
        log.Warn("suspicious create job %s(%s) {appid:%s pubkey:%s topic:%s ver:%s}",
            r.RemoteAddr, realIp, appid, pubkey, topic, ver)
        writeAuthFailure(w, manager.ErrAuthenticationFail)
        return
    }
    cluster, found := manager.Default.LookupCluster(hisAppid)
    if !found {
        log.Error("create job %s(%s) {appid:%s topic:%s ver:%s} invalid appid",
            r.RemoteAddr, realIp, hisAppid, topic, ver)
        writeBadRequest(w, "invalid appid")
        return
    }
    log.Info("create job[%s] %s(%s) {appid:%s topic:%s ver:%s}",
        appid, r.RemoteAddr, realIp, hisAppid, topic, ver)
    rawTopic := manager.Default.KafkaTopic(hisAppid, topic, ver)
    if err := job.Default.CreateJobQueue(Options.AssignJobShardId, hisAppid, rawTopic); err != nil {
        log.Error("create job[%s] %s(%s) {shard:%d appid:%s topic:%s ver:%s} %v",
            appid, r.RemoteAddr, realIp, Options.AssignJobShardId, hisAppid, topic, ver, err)
        writeServerError(w, err.Error())
        return
    }
    if err := this.gw.zkzone.CreateJobQueue(rawTopic, cluster); err != nil {
        log.Error("app[%s] %s(%s) create job: {shard:%d appid:%s topic:%s ver:%s} %v",
            appid, r.RemoteAddr, realIp, Options.AssignJobShardId, hisAppid, topic, ver, err)
        writeServerError(w, err.Error())
        return
    }
    w.WriteHeader(http.StatusCreated)
    w.Write(ResponseOk)
}
Example 2: RunForever
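// RunForever registers this controller with the orchestrator, starts the manager and the embedded web server, launches the job-queue and webhook dispatchers, and blocks until either dispatcher quits.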
func (this *controller) RunForever() (err error) {
    log.Info("controller[%s] starting", this.Id())
    if err = this.orchestrator.RegisterActor(this.Id(), this.Bytes()); err != nil {
        return err
    }
    defer this.orchestrator.ResignActor(this.Id())
    if err = manager.Default.Start(); err != nil {
        return
    }
    log.Trace("manager[%s] started", manager.Default.Name())
    go this.runWebServer()
    jobDispatchQuit := make(chan struct{})
    go this.dispatchJobQueues(jobDispatchQuit)
    webhookDispatchQuit := make(chan struct{})
    go this.dispatchWebhooks(webhookDispatchQuit)
    select {
    case <-jobDispatchQuit:
        log.Warn("dispatchJobQueues quit")
    case <-webhookDispatchQuit:
        log.Warn("dispatchWebhooks quit")
    }
    manager.Default.Stop()
    log.Trace("manager[%s] stopped", manager.Default.Name())
    return
}
Example 3: frequentOffsetCommit
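// frequentOffsetCommit counts online consumers whose consecutive offset commits arrive less than 10 seconds apart, optionally warning about each offender.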
func (this *WatchConsumers) frequentOffsetCommit() (n int64) {
    const frequentThreshold = time.Second * 10
    this.Zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
        for group, consumers := range zkcluster.ConsumersByGroup("") {
            for _, c := range consumers {
                if !c.Online {
                    continue
                }
                if c.ConsumerZnode == nil {
                    log.Warn("cluster[%s] group[%s] topic[%s/%s] unrecognized consumer", zkcluster.Name(), group, c.Topic, c.PartitionId)
                    continue
                }
                gtp := structs.GroupTopicPartition{Group: group, Topic: c.Topic, PartitionID: c.PartitionId}
                if t, present := this.offsetMtimeMap[gtp]; present {
                    if interval := c.Mtime.Time().Sub(t); interval < frequentThreshold {
                        if this.logFrequentConsumer {
                            log.Warn("cluster[%s] group[%s] topic[%s/%s] too frequent offset commit: %s", zkcluster.Name(), group, c.Topic, c.PartitionId, interval)
                        }
                        n++
                    }
                }
                this.offsetMtimeMap[gtp] = c.Mtime.Time()
            }
        }
    })
    return
}
Example 4: Exec
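// Exec runs a write statement through the circuit breaker: system-level errors trip the breaker, while successful calls feed it a success.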
func (this *mysql) Exec(query string, args ...interface{}) (affectedRows int64,
    lastInsertId int64, err error) {
    if this.db == nil {
        return 0, 0, ErrNotOpen
    }
    if this.breaker.Open() {
        return 0, 0, ErrCircuitOpen
    }
    var result sql.Result
    result, err = this.db.Exec(query, args...)
    if err != nil {
        if this.isSystemError(err) {
            log.Warn("mysql exec breaks: %s", err.Error())
            this.breaker.Fail()
        }
        return 0, 0, err
    }
    affectedRows, err = result.RowsAffected()
    if err != nil {
        if this.isSystemError(err) {
            log.Warn("mysql exec2 breaks: %s", err.Error())
            this.breaker.Fail()
        }
    } else {
        this.breaker.Succeed()
    }
    lastInsertId, _ = result.LastInsertId()
    return
}
Example 5: watchTopicPartitionsChange
// watchTopicPartitionsChange watches partition changes on a topic.
func (cg *ConsumerGroup) watchTopicPartitionsChange(topic string, stopper <-chan struct{},
    topicPartitionsChanged chan<- string, outstanding *sync.WaitGroup) {
    defer outstanding.Done()
    _, ch, err := cg.kazoo.Topic(topic).WatchPartitions()
    if err != nil {
        if err == zk.ErrNoNode {
            err = ErrInvalidTopic
        }
        log.Error("[%s/%s] topic[%s] watch partitions: %s", cg.group.Name, cg.shortID(), topic, err)
        cg.emitError(err, topic, -1)
        return
    }
    var (
        backoff    = time.Duration(5)
        maxRetries = 3
    )
    select {
    case <-cg.stopper:
        return
    case <-stopper:
        return
    case <-ch:
        // when partitions scale up, the zk node might not be completely ready, so await readiness
        //
        // even if the zk node is ready, the kafka broker might not be:
        //   kafka server: Request was for a topic or partition that does not exist on this broker
        // so we blindly wait: should be enough for most cases
        // in rare cases that is still not enough: imagine partitions going 1->1000, which takes long
        // ok, just return that err to the client to retry
        time.Sleep(time.Second * backoff)
        for retries := 0; retries < maxRetries; retries++ {
            // retrieve brokers/topics/{topic}/partitions/{partition}/state and find the leader broker id
            // the new partitions state znode might not be ready yet
            if partitions, err := cg.kazoo.Topic(topic).Partitions(); err == nil {
                if _, err = retrievePartitionLeaders(partitions); err == nil {
                    log.Debug("[%s/%s] topic[%s] partitions change complete", cg.group.Name, cg.shortID(), topic)
                    break
                } else {
                    log.Warn("[%s/%s] topic[%s] partitions change retry#%d waiting: %v", cg.group.Name, cg.shortID(), topic, retries, err)
                    backoff-- // don't worry if negative
                    time.Sleep(time.Second * backoff)
                }
            } else {
                log.Warn("[%s/%s] topic[%s] partitions change retry#%d waiting: %v", cg.group.Name, cg.shortID(), topic, retries, err)
                backoff--
                time.Sleep(time.Second * backoff)
            }
        }
        // safe to trigger rebalance
        select {
        case topicPartitionsChanged <- topic:
        default:
        }
    }
}
Example 6: subLags
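// subLags walks every consumer group in the known clusters, clears freshly started or actively committing consumers from suspicion, and returns how many consumers are confirmed to be lagging.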
func (this *WatchSub) subLags() (lags int) {
    now := time.Now()
    // find sub lags
    for _, zkcluster := range this.zkclusters {
        for group, consumers := range zkcluster.ConsumersByGroup("") {
            for _, c := range consumers {
                if !c.Online {
                    continue
                }
                if c.ConsumerZnode == nil {
                    log.Warn("cluster[%s] group[%s] topic[%s/%s] unrecognized consumer", zkcluster.Name(), group, c.Topic, c.PartitionId)
                    continue
                }
                if time.Since(c.ConsumerZnode.Uptime()) < time.Minute*2 {
                    log.Info("cluster[%s] group[%s] just started, topic[%s/%s]", zkcluster.Name(), group, c.Topic, c.PartitionId)
                    this.unsuspect(group, c.Topic, c.PartitionId)
                    continue
                }
                // offset commit every 1m, sublag runs every 1m, so the gap might be 2m
                // TODO lag too much, even if it's still alive, emit alarm
                elapsed := time.Since(c.Mtime.Time())
                if c.Lag == 0 || elapsed < time.Minute*3 {
                    this.unsuspect(group, c.Topic, c.PartitionId)
                    continue
                }
                // it might be lagging, but need confirm with last round
                if !this.isSuspect(group, c.Topic, c.PartitionId) {
                    // suspect it, next round if it is still lagging, put on trial
                    log.Warn("cluster[%s] group[%s] suspected topic[%s/%s] %d - %d = %d, offset commit elapsed: %s",
                        zkcluster.Name(), group, c.Topic, c.PartitionId, c.ProducerOffset, c.ConsumerOffset, c.Lag, elapsed.String())
                    this.suspect(group, c.Topic, c.PartitionId, c.ProducerOffset, c.ConsumerOffset, now)
                    continue
                }
                if this.isCriminal(group, c.Topic, c.PartitionId, c.ProducerOffset, c.ConsumerOffset, now) {
                    // bingo! consumer is lagging and seems to be DEAD
                    log.Error("cluster[%s] group[%s] confirmed topic[%s/%s] %d - %d = %d, offset commit elapsed: %s",
                        zkcluster.Name(), group, c.Topic, c.PartitionId, c.ProducerOffset, c.ConsumerOffset, c.Lag, elapsed.String())
                    lags++
                } else {
                    log.Warn("cluster[%s] group[%s] lagging but still alive topic[%s/%s] %d - %d = %d, offset commit elapsed: %s",
                        zkcluster.Name(), group, c.Topic, c.PartitionId, c.ProducerOffset, c.ConsumerOffset, c.Lag, elapsed.String())
                }
            }
        }
    }
    return
}
Example 7: get
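// get takes a resource from the pool, optionally blocking when the pool is drained; resources idle past their timeout are closed and replaced via the factory.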
func (this *ResourcePool) get(wait bool) (resource Resource, err error) {
    if this == nil || this.IsClosed() {
        return nil, CLOSED_ERR
    }
    var (
        wrapper   resourceWrapper
        stillOpen bool
    )
    select {
    case wrapper, stillOpen = <-this.resourcePool:
        if !stillOpen {
            return nil, CLOSED_ERR
        }
        this.waitCount.Set(0) // reset
        if wrapper.resource != nil {
            this.diagnosticTracker.BorrowResource(wrapper.resource)
        }
    default:
        if !wait {
            return nil, nil
        }
        this.waitCount.Add(1)
        log.Warn("ResourcePool[%s] busy, pending:%d waited:%s",
            this.name, this.WaitCount(), this.waitTime.Get())
        t1 := time.Now()
        wrapper = <-this.resourcePool
        this.waitTime.Add(time.Now().Sub(t1))
    }
    // Close the aged idle resource
    timeout := this.idleTimeout.Get()
    if wrapper.resource != nil && timeout > 0 &&
        wrapper.timeUsed.Add(timeout).Sub(time.Now()) < 0 {
        this.diagnosticTracker.ReturnResource(wrapper.resource)
        log.Warn("ResourcePool[%s] resource:%d idle too long: closed", this.name,
            wrapper.resource.Id())
        wrapper.resource.Close()
        wrapper.resource = nil
    }
    if wrapper.resource == nil {
        wrapper.resource, err = this.factory()
        if err != nil {
            this.resourcePool <- resourceWrapper{}
        } else {
            this.diagnosticTracker.BorrowResource(wrapper.resource)
        }
    }
    return wrapper.resource, err
}
Example 8: main
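// main connects to the zone's ZooKeeper, watches registered instances through the registry, and reloads the proxy's backend list whenever the instances change, tolerating ZooKeeper session jitter along the way.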
func (this *Start) main() {
    ctx.LoadFromHome()
    this.zkzone = zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
    zkConnEvt, ok := this.zkzone.SessionEvents()
    if !ok {
        panic("someone stealing my events")
    }
    registry.Default = zkr.New(this.zkzone)
    log.Info("ehaproxy[%s] starting...", gafka.BuildId)
    go this.runMonitorServer(this.httpAddr)
    zkConnected := false
    for {
        instances, instancesChange, err := registry.Default.WatchInstances()
        if err != nil {
            log.Error("zone[%s] %s", this.zkzone.Name(), err)
            time.Sleep(time.Second)
            continue
        }
        if zkConnected {
            if len(instances) > 0 {
                this.reload(instances)
            } else {
                // resilience to zk problem by local cache
                log.Warn("backend all shutdown? skip this change")
                time.Sleep(time.Second)
                continue
            }
        }
        select {
        case <-this.quitCh:
            return
        case evt := <-zkConnEvt:
            if evt.State == zklib.StateHasSession && !zkConnected {
                log.Info("zk connected")
                zkConnected = true
            } else if zkConnected {
                log.Warn("zk jitter: %+v", evt)
            }
        case <-instancesChange:
            log.Info("instances changed!!")
        }
    }
}
Example 9: processConsumerOffsetsMessage
// consume topic: __consumer_offsets and process the message to get offsets of consumers
func (this *ZkCluster) processConsumerOffsetsMessage(msg *sarama.ConsumerMessage) {
    var keyver, valver uint16
    var partition uint32
    var offset, timestamp uint64
    buf := bytes.NewBuffer(msg.Key)
    err := binary.Read(buf, binary.BigEndian, &keyver)
    if (err != nil) || ((keyver != 0) && (keyver != 1)) {
        log.Warn("Failed to decode %s:%v offset %v: keyver", msg.Topic, msg.Partition, msg.Offset)
        return
    }
    group, err := readString(buf)
    if err != nil {
        log.Warn("Failed to decode %s:%v offset %v: group", msg.Topic, msg.Partition, msg.Offset)
        return
    }
    topic, err := readString(buf)
    if err != nil {
        log.Warn("Failed to decode %s:%v offset %v: topic", msg.Topic, msg.Partition, msg.Offset)
        return
    }
    err = binary.Read(buf, binary.BigEndian, &partition)
    if err != nil {
        log.Warn("Failed to decode %s:%v offset %v: partition", msg.Topic, msg.Partition, msg.Offset)
        return
    }
    buf = bytes.NewBuffer(msg.Value)
    err = binary.Read(buf, binary.BigEndian, &valver)
    if (err != nil) || ((valver != 0) && (valver != 1)) {
        log.Warn("Failed to decode %s:%v offset %v: valver", msg.Topic, msg.Partition, msg.Offset)
        return
    }
    err = binary.Read(buf, binary.BigEndian, &offset)
    if err != nil {
        log.Warn("Failed to decode %s:%v offset %v: offset", msg.Topic, msg.Partition, msg.Offset)
        return
    }
    _, err = readString(buf)
    if err != nil {
        log.Warn("Failed to decode %s:%v offset %v: metadata", msg.Topic, msg.Partition, msg.Offset)
        return
    }
    err = binary.Read(buf, binary.BigEndian, &timestamp)
    if err != nil {
        log.Warn("Failed to decode %s:%v offset %v: timestamp", msg.Topic, msg.Partition, msg.Offset)
        return
    }
    partitionOffset := &PartitionOffset{
        Cluster:   this.Name(),
        Topic:     topic,
        Partition: int32(partition),
        Group:     group,
        Timestamp: int64(timestamp),
        Offset:    int64(offset),
    }
    log.Debug("%+v", partitionOffset)
    return
}
Example 10: Query
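// Query runs a read query through the circuit breaker, preferring a cached prepared statement when the statement store is enabled.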
func (this *mysql) Query(query string, args ...interface{}) (rows *sql.Rows,
    err error) {
    if this.db == nil {
        return nil, ErrNotOpen
    }
    if this.breaker.Open() {
        return nil, ErrCircuitOpen
    }
    var stmt *sql.Stmt = nil
    if this.stmtsStore != nil {
        if stmtc, present := this.stmtsStore.Get(query); present {
            stmt = stmtc.(*sql.Stmt)
        } else {
            // FIXME thundering herd
            stmt, err = this.db.Prepare(query)
            if err != nil {
                if this.isSystemError(err) {
                    log.Warn("mysql prepare breaks: %s", err.Error())
                    this.breaker.Fail()
                }
                return nil, err
            }
            this.mutex.Lock()
            this.stmtsStore.Set(query, stmt)
            this.mutex.Unlock()
            log.Debug("[%s] stmt[%s] open", this.dsn, query)
        }
    }
    // Under the hood, db.Query() actually prepares, executes, and closes
    // a prepared statement. That's three round-trips to the database.
    if stmt != nil {
        rows, err = stmt.Query(args...)
    } else {
        rows, err = this.db.Query(query, args...)
    }
    if err != nil {
        if this.isSystemError(err) {
            log.Warn("mysql query breaks: %s", err.Error())
            this.breaker.Fail()
        }
    } else {
        this.breaker.Succeed()
    }
    return
}
Example 11: dumpMaintainConfigPhp
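// dumpMaintainConfigPhp renders the maintain template with the "name:value" pairs in info and writes the result to the configured target file.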
func dumpMaintainConfigPhp(info []string) {
    if config.maintainTargetFile == "" ||
        config.maintainTemplateFile == "" ||
        maintainTemplateContents == "" {
        log.Warn("Invalid maintain conf, disabled")
        return
    }
    templateData := make(map[string]string)
    for _, s := range info {
        // s is like "kingdom_1:30"
        parts := strings.SplitN(s, ":", 2)
        templateData[parts[0]] = parts[1]
    }
    t := template.Must(template.New("maintain").Parse(maintainTemplateContents))
    wr := new(bytes.Buffer)
    t.Execute(wr, templateData)
    err := ioutil.WriteFile(config.maintainTargetFile, wr.Bytes(), 0644)
    if err != nil {
        log.Error("dump[%s]: %s", config.maintainTargetFile, err.Error())
    } else {
        log.Info("dumped[%s]: %+v", config.maintainTargetFile, templateData)
    }
}
Example 12: RefreshBrokerList
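// RefreshBrokerList ignores an empty broker list and, when the list actually changes, records it and rebuilds the kafka connection pools.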
// TODO from live meta or zk?
func (this *pubPool) RefreshBrokerList(brokerList []string) {
    if len(brokerList) == 0 {
        if len(this.brokerList) > 0 {
            log.Warn("%s meta store found empty broker list, refresh refused", this.cluster)
        }
        return
    }
    setOld, setNew := set.NewSet(), set.NewSet()
    for _, b := range this.brokerList {
        setOld.Add(b)
    }
    for _, b := range brokerList {
        setNew.Add(b)
    }
    if !setOld.Equal(setNew) {
        log.Info("%s broker list from %+v to %+v", this.cluster, this.brokerList, brokerList)
        // rebuild the kafka conn pool
        this.brokerList = brokerList
        this.Close()
        this.buildPools()
    }
}
Example 13: TopicPartitions
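// TopicPartitions returns the partition ids of a topic, serving from a local cache and falling back to the cluster on a miss, with a double-checked lock to avoid duplicate lookups.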
func (this *zkMetaStore) TopicPartitions(cluster, topic string) []int32 {
    ct := structs.ClusterTopic{Cluster: cluster, Topic: topic}
    this.pmapLock.RLock()
    if partitionIDs, present := this.partitionsMap[ct]; present {
        this.pmapLock.RUnlock()
        return partitionIDs
    }
    this.pmapLock.RUnlock()
    this.pmapLock.Lock()
    defer this.pmapLock.Unlock()
    // double check
    if partitionIDs, present := this.partitionsMap[ct]; present {
        return partitionIDs
    }
    // cache miss
    this.mu.RLock()
    c, ok := this.clusters[cluster]
    this.mu.RUnlock()
    if !ok {
        log.Warn("invalid cluster: %s", cluster)
        return nil
    }
    partitionIDs := c.Partitions(topic)
    // set cache
    this.partitionsMap[ct] = partitionIDs
    return partitionIDs
}
Example 14: dumpFaeConfigPhp
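// dumpFaeConfigPhp splits each "host:port" entry in servers, renders the fae template with the collected hosts and ports, and writes the result to the configured target file.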
func dumpFaeConfigPhp(servers []string) {
    if config.faeTargetFile == "" ||
        config.faeTemplateFile == "" ||
        faeTemplateContents == "" {
        log.Warn("Invalid fae conf, disabled")
        return
    }
    type templateVar struct {
        Servers []string
        Ports   []string
    }
    templateData := templateVar{Servers: make([]string, 0), Ports: make([]string, 0)}
    for _, s := range servers {
        // s is like "12.3.11.2:9001"
        parts := strings.SplitN(s, ":", 2)
        templateData.Servers = append(templateData.Servers, parts[0])
        templateData.Ports = append(templateData.Ports, parts[1])
    }
    t := template.Must(template.New("fae").Parse(faeTemplateContents))
    wr := new(bytes.Buffer)
    t.Execute(wr, templateData)
    err := ioutil.WriteFile(config.faeTargetFile, wr.Bytes(), 0644)
    if err != nil {
        log.Error("dump[%s]: %s", config.faeTargetFile, err.Error())
    } else {
        log.Info("dumped[%s]: %+v", config.faeTargetFile, templateData)
    }
}
Example 15: do
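// do reports a single call: slow calls (above CallSlowThreshold) are logged as warnings, and ordinary calls are traced only when profiling is switched on.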
func (this *profiler) do(callName string, ctx *rpc.Context, format string,
    args ...interface{}) {
    if this == nil {
        return
    }
    elapsed := time.Since(this.t1)
    slow := elapsed > config.Engine.Servants.CallSlowThreshold
    if !(slow || this.on) {
        return
    }
    body := fmt.Sprintf(format, args...)
    if slow {
        svtStats.incCallSlow()
        header := fmt.Sprintf("SLOW=%s/%s Q=%s ",
            elapsed, time.Since(this.t0), callName)
        log.Warn(header + this.truncatedStr(body))
    } else if this.on {
        header := fmt.Sprintf("T=%s/%s Q=%s ",
            elapsed, time.Since(this.t0), callName)
        log.Trace(header + this.truncatedStr(body))
    }
}