This article collects typical usage examples of the Golang Debugf function from github.com/cihub/seelog. If you are wondering what Golang's Debugf function does, how to use it, or what it looks like in practice, the curated code examples below may help.
15 code examples of the Debugf function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
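As a quick orientation before the examples, here is a minimal, self-contained sketch (not taken from the examples below; the message and arguments are illustrative): seelog exposes package-level logging functions, and Debugf formats its arguments like fmt.Printf at debug level.

package main

import (
	log "github.com/cihub/seelog"
)

func main() {
	// Flush buffered log output before the program exits.
	defer log.Flush()

	// Debugf logs a printf-style formatted message at debug level.
	log.Debugf("connected to %s as user %q", "localhost:5432", "demo")
}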
Example 1: TestSTreeMod
func TestSTreeMod(t *testing.T) {
	defer log.Flush()
	Convey("Test clone\n", t, func() {
		s, err := NewSTreeJson(strings.NewReader(`{"key1": "val1", "key.2": 1234, "key3": {"key4": true, "key5": -12.34}}`))
		So(err, ShouldBeNil)
		c, err := s.clone()
		So(err, ShouldBeNil)
		// Mutate the original; the clone must keep the original values.
		s["key1"] = "valMod"
		s3, err := s.STreeVal(".key3")
		So(err, ShouldBeNil)
		s3["key4"] = false
		log.Debugf("Test clone - s: %v", s)
		log.Debugf("Test clone - c: %v", c)
		v1, err := c.StrVal(".key1")
		So(err, ShouldBeNil)
		So(v1, ShouldEqual, "val1")
		v2, err := c.BoolVal(".key3.key4")
		So(err, ShouldBeNil)
		So(v2, ShouldBeTrue)
	})
}
Example 2: Rescind
// Rescind should be called to indicate you no longer wish to be the leader
func (rl *regionLeader) Rescind() {
	rl.cleanup.Do(func() {
		log.Debugf("[Sync:RegionLeader] Cleaning up leadership of '%v'...", rl.lockNode)
		close(rl.rescinded)
		// keep trying to delete the ZK node (to release leadership) until we're sure it doesn't exist
		for {
			err := zookeeper.Delete(rl.lockNode, -1)
			if err == nil || err == gozk.ErrNoNode {
				log.Debugf("[Sync:RegionLeader] Have deleted leadership node '%v'", rl.lockNode)
				inst.Counter(1.0, "sync.regionleader.rescinded")
				break
			}
			log.Warnf("[Sync:RegionLeader] Failed to cleanup/rescind leadership (will retry): %v", err)
			time.Sleep(cleanupDelay)
		}
		// Unregister region leader
		mu.Lock()
		for i := 0; i < len(rls); i++ {
			if rls[i] == rl {
				rls = append(rls[:i], rls[i+1:]...)
				break
			}
		}
		mu.Unlock()
	})
}
Example 3: getOffsetsForPartition
func (stormClient *StormClient) getOffsetsForPartition(consumerGroup string, partition int, partitionPath string) {
	zkNodeStat := &zk.Stat{}
	stateStr, zkNodeStat, err := stormClient.conn.Get(partitionPath)
	switch {
	case err == nil:
		offset, topic, errConversion := parseStormSpoutStateJson(string(stateStr))
		if (stormClient.app.Storage.topicBlacklist != nil) && stormClient.app.Storage.topicBlacklist.MatchString(topic) {
			log.Debugf("Skip checking Storm offsets for topic %s from group %s in cluster %s as topic has been blacklisted", topic, consumerGroup, stormClient.cluster)
			return
		}
		switch {
		case errConversion == nil:
			log.Debugf("About to sync Storm offset: [%s,%s,%v]::[%v,%v]\n", consumerGroup, topic, partition, offset, zkNodeStat.Mtime)
			partitionOffset := &PartitionOffset{
				Cluster:   stormClient.cluster,
				Topic:     topic,
				Partition: int32(partition),
				Group:     consumerGroup,
				Timestamp: int64(zkNodeStat.Mtime), // note: this is millis
				Offset:    int64(offset),
			}
			timeoutSendOffset(stormClient.app.Storage.offsetChannel, partitionOffset, 1)
		default:
			log.Errorf("Something is very wrong! Cannot parse state json for partition %v of consumer group %s in ZK path %s: %s. Error: %v",
				partition, consumerGroup, partitionPath, stateStr, errConversion)
		}
	default:
		log.Warnf("Failed to read data for partition %v of consumer group %s in ZK path %s. Error: %v", partition, consumerGroup, partitionPath, err)
	}
}
Example 4: validateClientScheme
func validateClientScheme(pBuffer []byte, scheme int) (result bool, schem int, challenge []byte, digest []byte) {
	digest_offset := -1
	challenge_offset := -1
	// Assumes scheme is 0 or 1; any other value leaves the offsets at -1,
	// and the slicing below would panic.
	if scheme == 0 {
		digest_offset = getDigestOffset0(pBuffer)
		challenge_offset = getDHOffset0(pBuffer)
	} else if scheme == 1 {
		digest_offset = getDigestOffset1(pBuffer)
		challenge_offset = getDHOffset1(pBuffer)
	}
	p1 := pBuffer[:digest_offset]
	digest = pBuffer[digest_offset : digest_offset+32]
	p2 := pBuffer[digest_offset+32:]
	buf := new(bytes.Buffer)
	buf.Write(p1)
	buf.Write(p2)
	p := buf.Bytes()
	log.Debugf("Scheme: {%v} client digest offset: {%v}", scheme, digest_offset)
	tempHash, _ := HMACsha256(p, GENUINE_FP_KEY[:30])
	log.Debugf("Temp: {%0X}", tempHash)
	log.Debugf("Dig : {%0X}", digest)
	result = bytes.Equal(digest, tempHash)
	challenge = pBuffer[challenge_offset : challenge_offset+128]
	schem = scheme
	return
}
Example 5: read_file
func (d *DocWalker) read_file(path string, info os.FileInfo, err error) error {
	if info.Mode().IsRegular() {
		file := filepath.Base(path)
		log.Debugf("Trying file %s", file)
		matched, err := regexp.MatchString(d.filepattern, file)
		log.Debugf("File match: %v, error: %v", matched, err)
		if matched && err == nil {
			fr := new(filereader.TrecFileReader)
			fr.Init(path)
			go func() {
				for doc := range fr.ReadAll() {
					d.output <- doc
				}
				d.workers <- fr.Path()
			}()
			d.worker_count++
			/*log.Errorf("Now have %d workers", d.worker_count)*/
		}
	}
	return nil
}
Example 6: HandlePublish
// FIXME: support qos = 2
func HandlePublish(mqtt *Mqtt, conn *net.Conn, client **ClientRep) {
	if *client == nil {
		panic("client_resp is nil, that means we don't have ClientRep for this client sending PUBLISH")
	}
	client_id := (*client).Mqtt.ClientId
	client_rep := *client
	client_rep.UpdateLastTime()
	topic := mqtt.TopicName
	payload := string(mqtt.Data)
	qos := mqtt.FixedHeader.QosLevel
	retain := mqtt.FixedHeader.Retain
	message_id := mqtt.MessageId
	timestamp := time.Now().Unix()
	log.Debugf("Handling PUBLISH, client_id: %s, topic:(%s), payload:(%s), qos=%d, retain=%t, message_id=%d",
		client_id, topic, payload, qos, retain, message_id)
	// Create new MQTT message
	mqtt_msg := CreateMqttMessage(topic, payload, client_id, qos, message_id, timestamp, retain)
	msg_internal_id := mqtt_msg.InternalId
	log.Debugf("Created new MQTT message, internal id:(%s)", msg_internal_id)
	PublishMessage(mqtt_msg)
	// Send PUBACK if QOS is 1
	if qos == 1 {
		SendPuback(message_id, conn, client_rep.WriteLock)
		log.Debugf("PUBACK sent to client(%s)", client_id)
	}
}
Example 7: RetryDeliver
func RetryDeliver(sleep uint64, dest_client_id string, qos uint8, msg *MqttMessage) {
	defer func() {
		if r := recover(); r != nil {
			log.Debugf("got panic, will print stack")
			debug.PrintStack()
			panic(r)
		}
	}()
	if sleep > 3600*4 {
		log.Debugf("too long retry delay(%d), abort retry deliver", sleep)
		return
	}
	time.Sleep(time.Duration(sleep) * time.Second)
	if G_redis_client.IsFlyingMessagePendingAck(dest_client_id, msg.MessageId) {
		DeliverMessage(dest_client_id, qos, msg)
		log.Debugf("Retried delivering message %s:%d, will sleep %d seconds before next attempt",
			dest_client_id, msg.MessageId, sleep*2)
		RetryDeliver(sleep*2, dest_client_id, qos, msg)
	} else {
		log.Debugf("message (%s:%d) is not pending ACK, stop retry delivering",
			dest_client_id, msg.MessageId)
	}
}
Example 8: Tokens
func (tz *BadXMLTokenizer) Tokens() <-chan *Token {
	token_channel := make(chan *Token)
	log.Debugf("Created channel %v as part of Tokens(), with"+
		" Scanner = %v", token_channel, tz)
	go func(ret chan *Token, tz *BadXMLTokenizer) {
		for {
			log.Tracef("Scanner calling Next()")
			tok, err := tz.Next()
			log.Tracef("scanner.Next() returned %s, %v", tok, err)
			switch err {
			case nil:
				log.Debugf("Pushing %s into token channel %v",
					tok, ret)
				ret <- tok
			case io.EOF:
				log.Debugf("received EOF, closing channel")
				close(ret)
				log.Debugf("Closed.")
				log.Flush()
				return
			}
		}
	}(token_channel, tz)
	return token_channel
}
Example 9: handleAction
func (this *Orchestrator) handleAction(action Action) {
	var err error
	ocSideOnly := false
	ocSide, ocSideOk := action.(OrchestratorSideAction)
	action.SetTriggeredTime(time.Now())
	log.Debugf("action %s is executable on the orchestrator side: %t", action, ocSideOk)
	if ocSideOk {
		ocSideOnly = ocSide.OrchestratorSideOnly()
		log.Debugf("action %s is executable on only the orchestrator side: %t", action, ocSideOnly)
		err = ocSide.ExecuteOnOrchestrator()
		if err != nil {
			log.Errorf("ignoring an error that occurred in ExecuteOnOrchestrator: %s", err)
		}
	}
	if !ocSideOnly {
		// pass to the inspector handler.
		entity := GetTransitionEntity(action.EntityID())
		if entity == nil {
			err = fmt.Errorf("could not find entity %s for %s", action.EntityID(), action)
			log.Errorf("ignoring an error: %s", err)
		} else {
			log.Debugf("Main[%s]->Handler: sending an action %s", entity.ID, action)
			entity.ActionFromMain <- action
			log.Debugf("Main[%s]->Handler: sent an action %s", entity.ID, action)
		}
	}
	// make sequence for tracing
	if this.collectTrace {
		this.actionSequence = append(this.actionSequence, action)
	}
}
Example 10: Boot
func Boot(client *docker.Client, opt *docker.CreateContainerOptions,
	exitCh chan error) (*docker.Container, error) {
	log.Debugf("Creating container for image %s", opt.Config.Image)
	container, err := client.CreateContainer(*opt)
	if err != nil {
		return container, err
	}
	log.Debugf("Starting container %s", container.ID)
	go func() {
		exitCh <- dockerpty.Start(client, container, opt.HostConfig)
	}()
	trial := 0
	for {
		container, err = client.InspectContainer(container.ID)
		if err != nil {
			return container, err
		}
		if container.State.StartedAt.Unix() > 0 {
			break
		}
		if trial > 30 {
			return container, fmt.Errorf("container %s seems not started. state=%#v", container.ID, container.State)
		}
		trial++
		time.Sleep(time.Duration(trial*100) * time.Millisecond)
	}
	log.Debugf("container state=%#v", container.State)
	return container, nil
}
Example 11: getBytesWithTTL
// getBytesWithTTL - get the path, and cache in the session;
// return from cache if found and the TTL isn't expired, otherwise fetch it
// and store it in the cache
func (to *Session) getBytesWithTTL(path string, ttl int64) ([]byte, error) {
	var body []byte
	var err error
	getFresh := false
	if cacheEntry, ok := to.Cache[path]; ok {
		if cacheEntry.Entered > time.Now().Unix()-ttl {
			seelog.Debugf("Cache HIT for %s%s", to.URL, path)
			body = cacheEntry.Bytes
		} else {
			seelog.Debugf("Cache HIT but EXPIRED for %s%s", to.URL, path)
			getFresh = true
		}
	} else {
		to.Cache = make(map[string]CacheEntry)
		seelog.Debugf("Cache MISS for %s%s", to.URL, path)
		getFresh = true
	}
	if getFresh {
		body, err = to.getBytes(path)
		if err != nil {
			return nil, err
		}
		newEntry := CacheEntry{
			Entered: time.Now().Unix(),
			Bytes:   body,
		}
		to.Cache[path] = newEntry
	}
	return body, nil
}
Example 12: listenForMetrics
func (m *MetricsManager) listenForMetrics() {
	if m.statsdEnabled {
		defer m.statsBuffer.Close()
	}
	var metric *Metric
	for {
		metric = <-m.metricsChannel
		log.Debugf("Received metric: %s - %v", metric.Name, metric.Value)
		if m.statsdEnabled {
			log.Debugf("Logging metrics")
			switch metric.Type {
			case "counter":
				m.statsBuffer.Incr(metric.Name, metric.Value)
			case "guage": // sic: matches the misspelled type string emitted by producers
				m.statsBuffer.Gauge(metric.Name, metric.Value)
			case "timing":
				m.statsBuffer.Timing(metric.Name, metric.Value)
			default:
				log.Errorf("Unknown metric type received: %s", metric.Type)
			}
		}
		stringToPublish := fmt.Sprintf("%s:%d", metric.Name, metric.Value)
		messageHeaders := make(map[string]string)
		messageBody := []byte(stringToPublish)
		metricMessage := message.NewMessage(&messageHeaders, &messageBody)
		m.queueManager.Publish(metricsQueueName, metricMessage)
	}
}
Example 13: Connect
func (r *RabbitConnection) Connect(connected chan bool) {
	for {
		log.Debug("[Rabbit] Attempting to connect…")
		if err := r.tryToConnect(); err != nil {
			sleepFor := time.Second
			log.Debugf("[Rabbit] Failed to connect, sleeping %s…", sleepFor.String())
			time.Sleep(sleepFor)
			continue
		}
		connected <- true
		r.connected = true
		notifyClose := make(chan *amqp.Error)
		r.Connection.NotifyClose(notifyClose)
		// Block until we get disconnected, or shut down
		select {
		case err := <-notifyClose:
			r.connected = false
			log.Debugf("[Rabbit] AMQP connection closed (notifyClose): %s", err.Error())
			return
		case <-r.closeChan:
			// Shut down connection
			log.Debug("[Rabbit] Closing AMQP connection (closeChan closed)…")
			if err := r.Connection.Close(); err != nil {
				log.Errorf("Failed to close AMQP connection: %v", err)
			}
			r.connected = false
			return
		}
	}
}
Example 14: collect
func (container *StatsContainer) collect() {
	dockerID := container.containerMetadata.DockerID
	for {
		select {
		case <-container.ctx.Done():
			seelog.Debugf("Stopping stats collection for container %s", dockerID)
			return
		default:
			seelog.Debugf("Collecting stats for container %s", dockerID)
			dockerStats, err := container.client.Stats(dockerID, container.ctx)
			if err != nil {
				seelog.Warnf("Error retrieving stats for container %s: %v", dockerID, err)
				continue
			}
			for rawStat := range dockerStats {
				stat, err := dockerStatsToContainerStats(rawStat)
				if err == nil {
					container.statsQueue.Add(stat)
				} else {
					seelog.Warnf("Error converting stats for container %s: %v", dockerID, err)
				}
			}
			seelog.Debugf("Disconnected from docker stats for container %s", dockerID)
		}
	}
}
Example 15: pushMessage
func pushMessage(appId string, app *RegApp, rawMsg *storage.RawMessage, header *Header, body []byte) bool {
	log.Infof("msgid %d: before push to (device %s) (regid %s)", rawMsg.MsgId, app.DevId, app.RegId)
	// If the message carries a send id, deliver only to regapps whose SendIds contain it.
	if rawMsg.SendId != "" {
		found := false
		for _, sendid := range app.SendIds {
			if sendid == rawMsg.SendId {
				found = true
				break
			}
		}
		if !found {
			log.Debugf("msgid %d: check sendid (%s) failed", rawMsg.MsgId, rawMsg.SendId)
			return false
		}
	}
	x := DevicesMap.Get(app.DevId)
	if x == nil {
		log.Debugf("msgid %d: device %s offline", rawMsg.MsgId, app.DevId)
		return false
	}
	client := x.(*Client)
	client.SendMessage2(header, body)
	log.Infof("msgid %d: after push to (device %s) (regid %s)", rawMsg.MsgId, app.DevId, app.RegId)
	storage.Instance.MsgStatsSend(rawMsg.MsgId)
	storage.Instance.AppStatsSend(rawMsg.AppId)
	return true
}