This article collects typical usage examples of the Warnf function from the Golang package github.com/prometheus/log. If you are wondering what Warnf does and how it is used in practice, the curated code examples below should help.
The following 15 code examples demonstrate the Warnf function, ordered by popularity.
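Before diving into the examples, here is a minimal self-contained sketch of a typical Warnf call. Warnf follows fmt.Printf-style formatting verbs; the address and retry count below are made-up values for illustration:

package main

import (
	"github.com/prometheus/log"
)

func main() {
	// Hypothetical values, just to show the Printf-style formatting.
	addr := "10.0.0.1:9090"
	retries := 3
	log.Warnf("connection to %s failed, retrying (%d attempts left)", addr, retries)
}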
Example 1: Append
// Append implements Storage.
func (s *memorySeriesStorage) Append(sample *model.Sample) {
	for ln, lv := range sample.Metric {
		if len(lv) == 0 {
			delete(sample.Metric, ln)
		}
	}
	if s.getNumChunksToPersist() >= s.maxChunksToPersist {
		log.Warnf(
			"%d chunks waiting for persistence, sample ingestion suspended.",
			s.getNumChunksToPersist(),
		)
		for s.getNumChunksToPersist() >= s.maxChunksToPersist {
			time.Sleep(time.Second)
		}
		log.Warn("Sample ingestion resumed.")
	}
	rawFP := sample.Metric.FastFingerprint()
	s.fpLocker.Lock(rawFP)
	fp, err := s.mapper.mapFP(rawFP, sample.Metric)
	if err != nil {
		log.Errorf("Error while mapping fingerprint %v: %v", rawFP, err)
		s.persistence.setDirty(true)
	}
	if fp != rawFP {
		// Switch locks.
		s.fpLocker.Unlock(rawFP)
		s.fpLocker.Lock(fp)
	}
	series := s.getOrCreateSeries(fp, sample.Metric)
	if sample.Timestamp <= series.lastTime {
		// Don't log and track equal timestamps, as they are a common occurrence
		// when using client-side timestamps (e.g. Pushgateway or federation).
		// It would be even better to also compare the sample values here, but
		// we don't have efficient access to a series's last value.
		if sample.Timestamp != series.lastTime {
			log.Warnf("Ignoring sample with out-of-order timestamp for fingerprint %v (%v): %v is not after %v", fp, series.metric, sample.Timestamp, series.lastTime)
			s.outOfOrderSamplesCount.Inc()
		}
		s.fpLocker.Unlock(fp)
		return
	}
	completedChunksCount := series.add(&model.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	s.ingestedSamplesCount.Inc()
	s.incNumChunksToPersist(completedChunksCount)
}
Example 2: lookupAll
func lookupAll(name string, qtype uint16) (*dns.Msg, error) {
	conf, err := dns.ClientConfigFromFile(resolvConf)
	if err != nil {
		return nil, fmt.Errorf("could not load resolv.conf: %s", err)
	}
	client := &dns.Client{}
	response := &dns.Msg{}
	for _, server := range conf.Servers {
		servAddr := net.JoinHostPort(server, conf.Port)
		for _, suffix := range conf.Search {
			response, err = lookup(name, qtype, client, servAddr, suffix, false)
			if err != nil {
				log.Warnf("resolving %s.%s failed: %s", name, suffix, err)
				continue
			}
			if len(response.Answer) > 0 {
				return response, nil
			}
		}
		response, err = lookup(name, qtype, client, servAddr, "", false)
		if err == nil {
			return response, nil
		}
	}
	return response, fmt.Errorf("could not resolve %s: No server responded", name)
}
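For context, a caller passes a query-type constant from the github.com/miekg/dns package used above. A hedged usage sketch (the service name is hypothetical; lookupAll is the function from this example):

// Query SRV records for a hypothetical service name.
msg, err := lookupAll("_prometheus._tcp.example.org", dns.TypeSRV)
if err != nil {
	log.Warnf("DNS-SD lookup failed: %s", err)
	return
}
for _, rr := range msg.Answer {
	log.Infof("answer: %s", rr)
}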
Example 3: Append
// Append implements Storage.
func (s *memorySeriesStorage) Append(sample *clientmodel.Sample) {
	if s.getNumChunksToPersist() >= s.maxChunksToPersist {
		log.Warnf(
			"%d chunks waiting for persistence, sample ingestion suspended.",
			s.getNumChunksToPersist(),
		)
		for s.getNumChunksToPersist() >= s.maxChunksToPersist {
			time.Sleep(time.Second)
		}
		log.Warn("Sample ingestion resumed.")
	}
	rawFP := sample.Metric.FastFingerprint()
	s.fpLocker.Lock(rawFP)
	fp, err := s.mapper.mapFP(rawFP, sample.Metric)
	if err != nil {
		log.Errorf("Error while mapping fingerprint %v: %v", rawFP, err)
		s.persistence.setDirty(true)
	}
	if fp != rawFP {
		// Switch locks.
		s.fpLocker.Unlock(rawFP)
		s.fpLocker.Lock(fp)
	}
	series := s.getOrCreateSeries(fp, sample.Metric)
	completedChunksCount := series.add(&metric.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	s.ingestedSamplesCount.Inc()
	s.incNumChunksToPersist(completedChunksCount)
}
Example 4: refresh
func (dd *DNSDiscovery) refresh(name string, ch chan<- *config.TargetGroup) error {
	response, err := lookupSRV(name)
	dnsSDLookupsCount.Inc()
	if err != nil {
		dnsSDLookupFailuresCount.Inc()
		return err
	}
	tg := &config.TargetGroup{}
	for _, record := range response.Answer {
		addr, ok := record.(*dns.SRV)
		if !ok {
			log.Warnf("%q is not a valid SRV record", record)
			continue
		}
		// Remove the final dot from rooted DNS names to make them look more usual.
		addr.Target = strings.TrimRight(addr.Target, ".")
		target := clientmodel.LabelValue(fmt.Sprintf("%s:%d", addr.Target, addr.Port))
		tg.Targets = append(tg.Targets, clientmodel.LabelSet{
			clientmodel.AddressLabel: target,
			DNSNameLabel:             clientmodel.LabelValue(name),
		})
	}
	tg.Source = dnsSourcePrefix + ":" + name
	ch <- tg
	return nil
}
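A side note on the trailing-dot handling above: strings.TrimRight removes every trailing cutset character, not just one, which for a well-formed rooted name is equivalent to trimming the single root dot. A small standalone demonstration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	rooted := "web.example.org."
	// TrimRight strips all trailing cutset characters; TrimSuffix strips
	// exactly one occurrence. For a single root dot both give the same result.
	fmt.Println(strings.TrimRight(rooted, "."))  // web.example.org
	fmt.Println(strings.TrimSuffix(rooted, ".")) // web.example.org
}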
Example 5: Store
// Store sends a batch of samples to OpenTSDB via its HTTP API.
func (c *Client) Store(samples clientmodel.Samples) error {
	reqs := make([]StoreSamplesRequest, 0, len(samples))
	for _, s := range samples {
		v := float64(s.Value)
		if math.IsNaN(v) || math.IsInf(v, 0) {
			log.Warnf("cannot send value %f to OpenTSDB, skipping sample %#v", v, s)
			continue
		}
		metric := TagValue(s.Metric[clientmodel.MetricNameLabel])
		reqs = append(reqs, StoreSamplesRequest{
			Metric:    metric,
			Timestamp: s.Timestamp.Unix(),
			Value:     v,
			Tags:      tagsFromMetric(s.Metric),
		})
	}
	u, err := url.Parse(c.url)
	if err != nil {
		return err
	}
	u.Path = putEndpoint
	buf, err := json.Marshal(reqs)
	if err != nil {
		return err
	}
	resp, err := c.httpClient.Post(
		u.String(),
		contentTypeJSON,
		bytes.NewBuffer(buf),
	)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// API returns status code 204 for successful writes.
	// http://opentsdb.net/docs/build/html/api_http/put.html
	if resp.StatusCode == http.StatusNoContent {
		return nil
	}
	// API returns status code 400 on error, encoding error details in the
	// response content in JSON.
	buf, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	var r map[string]int
	if err := json.Unmarshal(buf, &r); err != nil {
		return err
	}
	return fmt.Errorf("failed to write %d samples to OpenTSDB, %d succeeded", r["failed"], r["success"])
}
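The marshaled request body is a JSON array of datapoints for OpenTSDB's /api/put endpoint. Below is a reduced sketch of what that payload looks like, assuming conventional json struct tags; the original example does not show the tags, so the field names here are an assumption based on the OpenTSDB HTTP API documentation:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for the example's StoreSamplesRequest type.
type storeSamplesRequest struct {
	Metric    string            `json:"metric"`
	Timestamp int64             `json:"timestamp"`
	Value     float64           `json:"value"`
	Tags      map[string]string `json:"tags"`
}

func main() {
	reqs := []storeSamplesRequest{{
		Metric:    "http_requests_total",
		Timestamp: 1438365600,
		Value:     42,
		Tags:      map[string]string{"job": "api", "instance": "host1"},
	}}
	buf, err := json.Marshal(reqs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
	// [{"metric":"http_requests_total","timestamp":1438365600,"value":42,"tags":{"instance":"host1","job":"api"}}]
}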
Example 6: Append
// Append implements Storage.
func (s *memorySeriesStorage) Append(sample *clientmodel.Sample) {
	for ln, lv := range sample.Metric {
		if len(lv) == 0 {
			delete(sample.Metric, ln)
		}
	}
	if s.getNumChunksToPersist() >= s.maxChunksToPersist {
		log.Warnf(
			"%d chunks waiting for persistence, sample ingestion suspended.",
			s.getNumChunksToPersist(),
		)
		for s.getNumChunksToPersist() >= s.maxChunksToPersist {
			time.Sleep(time.Second)
		}
		log.Warn("Sample ingestion resumed.")
	}
	rawFP := sample.Metric.FastFingerprint()
	s.fpLocker.Lock(rawFP)
	fp, err := s.mapper.mapFP(rawFP, sample.Metric)
	if err != nil {
		log.Errorf("Error while mapping fingerprint %v: %v", rawFP, err)
		s.persistence.setDirty(true)
	}
	if fp != rawFP {
		// Switch locks.
		s.fpLocker.Unlock(rawFP)
		s.fpLocker.Lock(fp)
	}
	series := s.getOrCreateSeries(fp, sample.Metric)
	if sample.Timestamp <= series.lastTime {
		s.fpLocker.Unlock(fp)
		log.Warnf("Ignoring sample with out-of-order timestamp for fingerprint %v (%v): %v is not after %v", fp, series.metric, sample.Timestamp, series.lastTime)
		s.outOfOrderSamplesCount.Inc()
		return
	}
	completedChunksCount := series.add(&metric.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	s.ingestedSamplesCount.Inc()
	s.incNumChunksToPersist(completedChunksCount)
}
Example 7: queueAlertNotifications
func (m *Manager) queueAlertNotifications(rule *AlertingRule, timestamp clientmodel.Timestamp) {
	activeAlerts := rule.ActiveAlerts()
	if len(activeAlerts) == 0 {
		return
	}
	notifications := make(notification.NotificationReqs, 0, len(activeAlerts))
	for _, aa := range activeAlerts {
		if aa.State != StateFiring {
			// BUG: In the future, make AlertManager support pending alerts?
			continue
		}
		// Provide the alert information to the template.
		l := map[string]string{}
		for k, v := range aa.Labels {
			l[string(k)] = string(v)
		}
		tmplData := struct {
			Labels map[string]string
			Value  clientmodel.SampleValue
		}{
			Labels: l,
			Value:  aa.Value,
		}
		// Inject some convenience variables that are easier to remember for users
		// who are not used to Go's templating system.
		defs := "{{$labels := .Labels}}{{$value := .Value}}"
		expand := func(text string) string {
			tmpl := template.NewTemplateExpander(defs+text, "__alert_"+rule.Name(), tmplData, timestamp, m.queryEngine, m.externalURL.Path)
			result, err := tmpl.Expand()
			if err != nil {
				result = err.Error()
				log.Warnf("Error expanding alert template %v with data '%v': %v", rule.Name(), tmplData, err)
			}
			return result
		}
		notifications = append(notifications, &notification.NotificationReq{
			Summary:     expand(rule.summary),
			Description: expand(rule.description),
			Runbook:     rule.runbook,
			Labels: aa.Labels.Merge(clientmodel.LabelSet{
				alertNameLabel: clientmodel.LabelValue(rule.Name()),
			}),
			Value:        aa.Value,
			ActiveSince:  aa.ActiveSince.Time(),
			RuleString:   rule.String(),
			GeneratorURL: m.externalURL.String() + strutil.GraphLinkForExpression(rule.vector.String()),
		})
	}
	m.notificationHandler.SubmitReqs(notifications)
}
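The defs prefix is what makes $labels and $value available inside user-written alert templates. The same variable-injection trick can be reproduced with nothing but the standard text/template package; the sketch below omits the Prometheus-specific expander:

package main

import (
	"os"
	"text/template"
)

func main() {
	data := struct {
		Labels map[string]string
		Value  float64
	}{
		Labels: map[string]string{"instance": "host1:9090"},
		Value:  0.95,
	}
	// Variables declared in one action stay visible for the rest of the template.
	defs := "{{$labels := .Labels}}{{$value := .Value}}"
	text := "Instance {{$labels.instance}} is at {{$value}}"
	tmpl := template.Must(template.New("alert").Parse(defs + text))
	tmpl.Execute(os.Stdout, data) // Instance host1:9090 is at 0.95
}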
Example 8: handle
// handle recursively applies the handler h to the nodes in the subtree
// represented by node.
func (srvs services) handle(node *etcd.Node, h func(*etcd.Node)) {
	if pathPat.MatchString(node.Key) {
		h(node)
	} else {
		log.Warnf("unhandled key %q", node.Key)
	}
	if node.Dir {
		for _, n := range node.Nodes {
			srvs.handle(n, h)
		}
	}
}
Example 9: runIteration
func (m *Manager) runIteration() {
	now := clientmodel.Now()
	wg := sync.WaitGroup{}
	m.Lock()
	rulesSnapshot := make([]Rule, len(m.rules))
	copy(rulesSnapshot, m.rules)
	m.Unlock()
	for _, rule := range rulesSnapshot {
		wg.Add(1)
		// BUG(julius): Look at fixing thundering herd.
		go func(rule Rule) {
			defer wg.Done()
			start := time.Now()
			vector, err := rule.eval(now, m.queryEngine)
			duration := time.Since(start)
			if err != nil {
				evalFailures.Inc()
				log.Warnf("Error while evaluating rule %q: %s", rule, err)
				return
			}
			switch r := rule.(type) {
			case *AlertingRule:
				m.queueAlertNotifications(r, now)
				evalDuration.WithLabelValues(ruleTypeAlerting).Observe(
					float64(duration / time.Millisecond),
				)
			case *RecordingRule:
				evalDuration.WithLabelValues(ruleTypeRecording).Observe(
					float64(duration / time.Millisecond),
				)
			default:
				panic(fmt.Errorf("Unknown rule type: %T", rule))
			}
			for _, s := range vector {
				m.sampleAppender.Append(&clientmodel.Sample{
					Metric:    s.Metric.Metric,
					Value:     s.Value,
					Timestamp: s.Timestamp,
				})
			}
		}(rule)
	}
	wg.Wait()
}
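The copy of m.rules at the top of runIteration is the snapshot-under-lock pattern: hold the mutex only long enough to copy the slice, then evaluate the snapshot without blocking concurrent rule reloads. A minimal generic sketch of the same idea (the rule type and names are placeholders):

package main

import "sync"

type rule struct{ name string }

// snapshotRules copies the shared slice under the lock so the caller can
// iterate the copy while other goroutines replace or append to the original.
func snapshotRules(mu *sync.Mutex, rules []rule) []rule {
	mu.Lock()
	defer mu.Unlock()
	out := make([]rule, len(rules))
	copy(out, rules)
	return out
}

func main() {
	var mu sync.Mutex
	shared := []rule{{"up"}, {"latency"}}
	_ = snapshotRules(&mu, shared)
}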
Example 10: isDegraded
// isDegraded returns whether the storage is in "graceful degradation mode",
// which is the case if the number of chunks waiting for persistence has reached
// a percentage of maxChunksToPersist that exceeds
// percentChunksToPersistForDegradation. The method is not goroutine safe (but
// only ever called from the goroutine dealing with series maintenance).
// Changes of degradation mode are logged.
func (s *memorySeriesStorage) isDegraded() bool {
	nowDegraded := s.getNumChunksToPersist() > s.maxChunksToPersist*percentChunksToPersistForDegradation/100
	if s.degraded && !nowDegraded {
		log.Warn("Storage has left graceful degradation mode. Things are back to normal.")
	} else if !s.degraded && nowDegraded {
		log.Warnf(
			"%d chunks waiting for persistence (%d%% of the allowed maximum %d). Storage is now in graceful degradation mode. Series files are not synced anymore if following the adaptive strategy. Checkpoints are not performed more often than every %v. Series maintenance happens as frequently as possible.",
			s.getNumChunksToPersist(),
			s.getNumChunksToPersist()*100/s.maxChunksToPersist,
			s.maxChunksToPersist,
			s.checkpointInterval)
	}
	s.degraded = nowDegraded
	return s.degraded
}
Example 11: maybeAddMapping
// maybeAddMapping adds a fingerprint mapping to fpm if the FastFingerprint of m is different from fp.
func maybeAddMapping(fp clientmodel.Fingerprint, m clientmodel.Metric, fpm fpMappings) {
	if rawFP := m.FastFingerprint(); rawFP != fp {
		log.Warnf(
			"Metric %v with fingerprint %v is mapped from raw fingerprint %v.",
			m, fp, rawFP,
		)
		if mappedFPs, ok := fpm[rawFP]; ok {
			mappedFPs[metricToUniqueString(m)] = fp
		} else {
			fpm[rawFP] = map[string]clientmodel.Fingerprint{
				metricToUniqueString(m): fp,
			}
		}
	}
}
Example 12: sendSamples
func (t *StorageQueueManager) sendSamples(s clientmodel.Samples) {
	t.sendSemaphore <- true
	defer func() {
		<-t.sendSemaphore
	}()
	// Samples are sent to the remote storage on a best-effort basis. If a
	// sample isn't sent correctly the first time, it's simply dropped on the
	// floor.
	begin := time.Now()
	err := t.tsdb.Store(s)
	duration := time.Since(begin) / time.Millisecond
	labelValue := success
	if err != nil {
		log.Warnf("error sending %d samples to remote storage: %s", len(s), err)
		labelValue = failure
		t.sendErrors.Inc()
	}
	t.samplesCount.WithLabelValues(labelValue).Add(float64(len(s)))
	t.sendLatency.Observe(float64(duration))
}
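The sendSemaphore send/receive pair is the classic buffered-channel semaphore: the channel's capacity bounds how many sends may run concurrently. A standalone sketch of the pattern (capacity and worker count are arbitrary):

package main

import (
	"fmt"
	"sync"
)

func main() {
	sem := make(chan struct{}, 2) // at most 2 workers in the critical section
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot; blocks when 2 are taken
			defer func() { <-sem }() // release the slot
			fmt.Println("worker", id, "sending")
		}(i)
	}
	wg.Wait()
}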
Example 13: refresh
func (dd *DNSDiscovery) refresh(name string, ch chan<- *config.TargetGroup) error {
	response, err := lookupAll(name, dd.qtype)
	dnsSDLookupsCount.Inc()
	if err != nil {
		dnsSDLookupFailuresCount.Inc()
		return err
	}
	tg := &config.TargetGroup{}
	for _, record := range response.Answer {
		target := clientmodel.LabelValue("")
		switch addr := record.(type) {
		case *dns.SRV:
			// Remove the final dot from rooted DNS names to make them look more usual.
			addr.Target = strings.TrimRight(addr.Target, ".")
			target = clientmodel.LabelValue(fmt.Sprintf("%s:%d", addr.Target, addr.Port))
		case *dns.A:
			target = clientmodel.LabelValue(fmt.Sprintf("%s:%d", addr.A, dd.port))
		case *dns.AAAA:
			target = clientmodel.LabelValue(fmt.Sprintf("%s:%d", addr.AAAA, dd.port))
		default:
			log.Warnf("%q is not a valid SRV record", record)
			continue
		}
		tg.Targets = append(tg.Targets, clientmodel.LabelSet{
			clientmodel.AddressLabel: target,
			DNSNameLabel:             clientmodel.LabelValue(name),
		})
	}
	tg.Source = name
	ch <- tg
	return nil
}
Example 14: Main
func Main() int {
	if err := parse(os.Args[1:]); err != nil {
		return 2
	}
	versionInfoTmpl.Execute(os.Stdout, BuildInfo)
	if cfg.printVersion {
		return 0
	}
	memStorage := local.NewMemorySeriesStorage(&cfg.storage)
	var (
		sampleAppender      storage.SampleAppender
		remoteStorageQueues []*remote.StorageQueueManager
	)
	if cfg.opentsdbURL == "" && cfg.influxdbURL == "" {
		log.Warnf("No remote storage URLs provided; not sending any samples to long-term storage")
		sampleAppender = memStorage
	} else {
		fanout := storage.Fanout{memStorage}
		addRemoteStorage := func(c remote.StorageClient) {
			qm := remote.NewStorageQueueManager(c, 100*1024)
			fanout = append(fanout, qm)
			remoteStorageQueues = append(remoteStorageQueues, qm)
		}
		if cfg.opentsdbURL != "" {
			addRemoteStorage(opentsdb.NewClient(cfg.opentsdbURL, cfg.remoteStorageTimeout))
		}
		if cfg.influxdbURL != "" {
			addRemoteStorage(influxdb.NewClient(cfg.influxdbURL, cfg.remoteStorageTimeout, cfg.influxdbDatabase, cfg.influxdbRetentionPolicy))
		}
		sampleAppender = fanout
	}
	var (
		notificationHandler = notification.NewNotificationHandler(&cfg.notification)
		targetManager       = retrieval.NewTargetManager(sampleAppender)
		queryEngine         = promql.NewEngine(memStorage, &cfg.queryEngine)
	)
	ruleManager := rules.NewManager(&rules.ManagerOptions{
		SampleAppender:      sampleAppender,
		NotificationHandler: notificationHandler,
		QueryEngine:         queryEngine,
		PrometheusURL:       cfg.prometheusURL,
		PathPrefix:          cfg.web.PathPrefix,
	})
	flags := map[string]string{}
	cfg.fs.VisitAll(func(f *flag.Flag) {
		flags[f.Name] = f.Value.String()
	})
	status := &web.PrometheusStatus{
		BuildInfo:   BuildInfo,
		TargetPools: targetManager.Pools,
		Rules:       ruleManager.Rules,
		Flags:       flags,
		Birth:       time.Now(),
	}
	webHandler := web.New(memStorage, queryEngine, ruleManager, status, &cfg.web)
	if !reloadConfig(cfg.configFile, status, targetManager, ruleManager) {
		os.Exit(1)
	}
	// Wait for reload or termination signals. Start the handler for SIGHUP as
	// early as possible, but ignore it until we are ready to handle reloading
	// our config.
	hup := make(chan os.Signal)
	hupReady := make(chan bool)
	signal.Notify(hup, syscall.SIGHUP)
	go func() {
		<-hupReady
		for range hup {
			reloadConfig(cfg.configFile, status, targetManager, ruleManager)
		}
	}()
	// Start all components.
	if err := memStorage.Start(); err != nil {
		log.Errorln("Error opening memory series storage:", err)
		return 1
	}
	defer func() {
		if err := memStorage.Stop(); err != nil {
			log.Errorln("Error stopping storage:", err)
		}
	}()
	// The storage has to be fully initialized before registering.
	registry.MustRegister(memStorage)
	registry.MustRegister(notificationHandler)
	for _, q := range remoteStorageQueues {
		//......... (part of the code omitted here) .........
Example 15: updateSlaves
func (e *periodicExporter) updateSlaves() {
	log.Debug("discovering slaves...")
	// This will redirect us to the elected mesos master
	redirectURL := fmt.Sprintf("%s/master/redirect", e.opts.masterURL)
	rReq, err := http.NewRequest("GET", redirectURL, nil)
	if err != nil {
		panic(err)
	}
	tr := http.Transport{
		DisableKeepAlives: true,
	}
	rresp, err := tr.RoundTrip(rReq)
	if err != nil {
		log.Warn(err)
		return
	}
	defer rresp.Body.Close()
	// This will/should return http://master.ip:5050
	masterLoc := rresp.Header.Get("Location")
	if masterLoc == "" {
		log.Warnf("%d response missing Location header", rresp.StatusCode)
		return
	}
	log.Debugf("current elected master at: %s", masterLoc)
	// Find all active slaves
	stateURL := fmt.Sprintf("%s/master/state.json", masterLoc)
	resp, err := http.Get(stateURL)
	if err != nil {
		log.Warn(err)
		return
	}
	defer resp.Body.Close()
	type slave struct {
		Active   bool   `json:"active"`
		Hostname string `json:"hostname"`
		Pid      string `json:"pid"`
	}
	var req struct {
		Slaves []*slave `json:"slaves"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&req); err != nil {
		log.Warnf("failed to deserialize request: %s", err)
		return
	}
	var slaveURLs []string
	for _, slave := range req.Slaves {
		if slave.Active {
			// Extract slave port from pid
			_, port, err := net.SplitHostPort(slave.Pid)
			if err != nil {
				port = "5051"
			}
			url := fmt.Sprintf("http://%s:%s", slave.Hostname, port)
			slaveURLs = append(slaveURLs, url)
		}
	}
	log.Debugf("%d slaves discovered", len(slaveURLs))
	e.slaves.Lock()
	e.slaves.urls = slaveURLs
	e.slaves.Unlock()
}
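Example 15 calls Transport.RoundTrip directly instead of http.Get because RoundTrip performs a single HTTP transaction and never follows redirects, which lets the code read the Location header of the redirect response. A reduced sketch of that technique (the master URL is a placeholder):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://mesos-master.example:5050/master/redirect", nil)
	if err != nil {
		panic(err)
	}
	tr := &http.Transport{DisableKeepAlives: true}
	// Unlike http.Client, a bare Transport does not follow redirects,
	// so the Location header of the 3xx response stays observable.
	resp, err := tr.RoundTrip(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("elected master:", resp.Header.Get("Location"))
}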