This article collects typical usage examples of the Golang function log.Warnf from github.com/prometheus/common/log. If you have been wondering what Warnf does, how to call it, and what real-world usage looks like, the curated examples below should help.
It presents 15 code examples of the Warnf function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
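Before diving in, here is a minimal, self-contained sketch of the call shapes that recur throughout the examples (the message texts and values are invented for illustration; log.Warn and log.With are the unformatted and structured companions of Warnf in the same package):

package main

import "github.com/prometheus/common/log"

func main() {
	dropped := 3

	// Printf-style formatting at warning level.
	log.Warnf("queue full, dropping %d alerts", dropped)

	// Unformatted variant for fixed messages.
	log.Warn("sample ingestion resumed")

	// Structured variant: attach the value as a field instead of
	// interpolating it into the message (see example 13).
	log.With("numDropped", dropped).Warn("dropped out-of-order samples")
}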
Example 1: Send
// Send queues the given notification requests for processing.
// Panics if called on a handler that is not running.
func (n *Notifier) Send(alerts ...*model.Alert) {
	n.mtx.Lock()
	defer n.mtx.Unlock()

	// Queue capacity should be significantly larger than a single alert
	// batch could be.
	if d := len(alerts) - n.opts.QueueCapacity; d > 0 {
		alerts = alerts[d:]
		log.Warnf("Alert batch larger than queue capacity, dropping %d alerts", d)
		n.dropped.Add(float64(d))
	}

	// If the queue is full, remove the oldest alerts in favor
	// of newer ones.
	if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 {
		n.queue = n.queue[d:]
		log.Warnf("Alert notification queue full, dropping %d alerts", d)
		n.dropped.Add(float64(d))
	}
	n.queue = append(n.queue, alerts...)

	// Notify sending goroutine that there are alerts to be processed.
	n.setMore()
}
Example 2: Append
// Append implements Storage.
func (s *memorySeriesStorage) Append(sample *model.Sample) {
	for ln, lv := range sample.Metric {
		if len(lv) == 0 {
			delete(sample.Metric, ln)
		}
	}
	if s.getNumChunksToPersist() >= s.maxChunksToPersist {
		log.Warnf(
			"%d chunks waiting for persistence, sample ingestion suspended.",
			s.getNumChunksToPersist(),
		)
		for s.getNumChunksToPersist() >= s.maxChunksToPersist {
			time.Sleep(time.Second)
		}
		log.Warn("Sample ingestion resumed.")
	}
	rawFP := sample.Metric.FastFingerprint()
	s.fpLocker.Lock(rawFP)
	fp, err := s.mapper.mapFP(rawFP, sample.Metric)
	if err != nil {
		log.Errorf("Error while mapping fingerprint %v: %v", rawFP, err)
		s.persistence.setDirty(true)
	}
	if fp != rawFP {
		// Switch locks.
		s.fpLocker.Unlock(rawFP)
		s.fpLocker.Lock(fp)
	}
	series := s.getOrCreateSeries(fp, sample.Metric)

	if sample.Timestamp <= series.lastTime {
		// Don't log and track equal timestamps, as they are a common occurrence
		// when using client-side timestamps (e.g. Pushgateway or federation).
		// It would be even better to also compare the sample values here, but
		// we don't have efficient access to a series's last value.
		if sample.Timestamp != series.lastTime {
			log.Warnf("Ignoring sample with out-of-order timestamp for fingerprint %v (%v): %v is not after %v", fp, series.metric, sample.Timestamp, series.lastTime)
			s.outOfOrderSamplesCount.Inc()
		}
		s.fpLocker.Unlock(fp)
		return
	}
	completedChunksCount := series.add(&model.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	s.ingestedSamplesCount.Inc()
	s.incNumChunksToPersist(completedChunksCount)
}
Example 3: sendSamples
func (t *StorageQueueManager) sendSamples(s model.Samples) {
	t.sendSemaphore <- true

	go func() {
		defer func() {
			<-t.sendSemaphore
		}()

		// Samples are sent to the remote storage on a best-effort basis. If a
		// sample isn't sent correctly the first time, it's simply dropped on the
		// floor.
		begin := time.Now()
		err := t.tsdb.Store(s)
		duration := time.Since(begin).Seconds()

		labelValue := success
		if err != nil {
			log.Warnf("error sending %d samples to remote storage: %s", len(s), err)
			labelValue = failure
			t.failedBatches.Inc()
			t.failedSamples.Add(float64(len(s)))
		}
		t.samplesCount.WithLabelValues(labelValue).Add(float64(len(s)))
		t.sendLatency.Observe(duration)
	}()
}
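Example 3 bounds the number of concurrent sends by using a buffered channel as a counting semaphore: sending on the channel acquires a slot (blocking while the buffer is full) and receiving releases it. A minimal, runnable sketch of that pattern with invented names (the sleep stands in for the real network call):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// Capacity 3 means at most three sends may be in flight at once.
	semaphore := make(chan struct{}, 3)
	var wg sync.WaitGroup

	for batch := 0; batch < 10; batch++ {
		semaphore <- struct{}{} // acquire a slot; blocks while 3 sends are in flight
		wg.Add(1)
		go func(batch int) {
			defer wg.Done()
			defer func() { <-semaphore }() // release the slot when done

			time.Sleep(100 * time.Millisecond) // stand-in for the real network call
			fmt.Printf("sent batch %d\n", batch)
		}(batch)
	}
	wg.Wait()
}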
Example 4: lookupAll
func lookupAll(name string, qtype uint16) (*dns.Msg, error) {
	conf, err := dns.ClientConfigFromFile(resolvConf)
	if err != nil {
		return nil, fmt.Errorf("could not load resolv.conf: %s", err)
	}

	client := &dns.Client{}
	response := &dns.Msg{}

	for _, server := range conf.Servers {
		servAddr := net.JoinHostPort(server, conf.Port)
		for _, suffix := range conf.Search {
			response, err = lookup(name, qtype, client, servAddr, suffix, false)
			if err != nil {
				log.Warnf("resolving %s.%s failed: %s", name, suffix, err)
				continue
			}
			if len(response.Answer) > 0 {
				return response, nil
			}
		}
		response, err = lookup(name, qtype, client, servAddr, "", false)
		if err == nil {
			return response, nil
		}
	}
	return response, fmt.Errorf("could not resolve %s: No server responded", name)
}
Example 5: Run
// Run dispatches notifications continuously.
func (n *Notifier) Run() {
	numAMs := len(n.opts.AlertmanagerURLs)
	// Just warn once in the beginning to prevent noisy logs.
	if numAMs == 0 {
		log.Warnf("No AlertManagers configured, not dispatching any alerts")
	}

	for {
		select {
		case <-n.ctx.Done():
			return
		case <-n.more:
		}
		alerts := n.nextBatch()

		if numAMs > 0 {
			if len(alerts) > 0 {
				numErrors := n.sendAll(alerts...)
				// Increment the dropped counter if we could not send
				// successfully to a single AlertManager.
				if numErrors == numAMs {
					n.dropped.Add(float64(len(alerts)))
				}
			}
		} else {
			// Without any AlertManagers, all dequeued alerts are dropped.
			n.dropped.Add(float64(len(alerts)))
		}

		// If the queue still has items left, kick off the next iteration.
		if n.queueLen() > 0 {
			n.setMore()
		}
	}
}
Example 6: Store
// Store sends a batch of samples to Graphite.
func (c *Client) Store(samples model.Samples) error {
	conn, err := net.DialTimeout(c.transport, c.address, c.timeout)
	if err != nil {
		return err
	}
	defer conn.Close()

	var buf bytes.Buffer
	for _, s := range samples {
		k := pathFromMetric(s.Metric, c.prefix)
		t := float64(s.Timestamp.UnixNano()) / 1e9
		v := float64(s.Value)
		if math.IsNaN(v) || math.IsInf(v, 0) {
			log.Warnf("cannot send value %f to Graphite, "+
				"skipping sample %#v", v, s)
			continue
		}
		fmt.Fprintf(&buf, "%s %f %f\n", k, v, t)
	}

	_, err = conn.Write(buf.Bytes())
	if err != nil {
		return err
	}
	return nil
}
Example 7: Store
// Store sends a batch of samples to OpenTSDB via its HTTP API.
func (c *Client) Store(samples model.Samples) error {
	reqs := make([]StoreSamplesRequest, 0, len(samples))
	for _, s := range samples {
		v := float64(s.Value)
		if math.IsNaN(v) || math.IsInf(v, 0) {
			log.Warnf("cannot send value %f to OpenTSDB, skipping sample %#v", v, s)
			continue
		}
		metric := TagValue(s.Metric[model.MetricNameLabel])
		reqs = append(reqs, StoreSamplesRequest{
			Metric:    metric,
			Timestamp: s.Timestamp.Unix(),
			Value:     v,
			Tags:      tagsFromMetric(s.Metric),
		})
	}

	u, err := url.Parse(c.url)
	if err != nil {
		return err
	}
	u.Path = putEndpoint

	buf, err := json.Marshal(reqs)
	if err != nil {
		return err
	}

	resp, err := c.httpClient.Post(
		u.String(),
		contentTypeJSON,
		bytes.NewBuffer(buf),
	)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// API returns status code 204 for successful writes.
	// http://opentsdb.net/docs/build/html/api_http/put.html
	if resp.StatusCode == http.StatusNoContent {
		return nil
	}

	// API returns status code 400 on error, encoding error details in the
	// response content in JSON.
	buf, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	var r map[string]int
	if err := json.Unmarshal(buf, &r); err != nil {
		return err
	}
	return fmt.Errorf("failed to write %d samples to OpenTSDB, %d succeeded", r["failed"], r["success"])
}
Example 8: GetRuleAlertNotifications
func (m *Manager) GetRuleAlertNotifications(rule *AlertingRule, timestamp model.Time) notification.NotificationReqs {
	activeAlerts := rule.ActiveAlerts()
	if len(activeAlerts) == 0 {
		return notification.NotificationReqs{}
	}

	notifications := make(notification.NotificationReqs, 0, len(activeAlerts))
	for _, aa := range activeAlerts {
		if aa.State != StateFiring {
			// BUG: In the future, make AlertManager support pending alerts?
			continue
		}

		// Provide the alert information to the template.
		l := map[string]string{}
		for k, v := range aa.Labels {
			l[string(k)] = string(v)
		}
		tmplData := struct {
			Labels map[string]string
			Value  float64
		}{
			Labels: l,
			Value:  float64(aa.Value),
		}

		// Inject some convenience variables that are easier to remember for users
		// who are not used to Go's templating system.
		defs := "{{$labels := .Labels}}{{$value := .Value}}"

		expand := func(text string) string {
			tmpl := template.NewTemplateExpander(defs+text, "__alert_"+rule.Name(), tmplData, timestamp, m.queryEngine, m.externalURL.Path)
			result, err := tmpl.Expand()
			if err != nil {
				result = err.Error()
				log.Warnf("Error expanding alert template %v with data '%v': %v", rule.Name(), tmplData, err)
			}
			return result
		}

		notifications = append(notifications, &notification.NotificationReq{
			Summary:     expand(rule.summary),
			Description: expand(rule.description),
			Runbook:     rule.runbook,
			Labels: aa.Labels.Merge(model.LabelSet{
				alertNameLabel: model.LabelValue(rule.Name()),
			}),
			Value:        aa.Value,
			ActiveSince:  aa.ActiveSince.Time(),
			RuleString:   rule.String(),
			GeneratorURL: m.externalURL.String() + strutil.GraphLinkForExpression(rule.vector.String()),
		})
	}
	return notifications
}
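The defs string in example 8 prepends variable definitions so that rule authors can write $labels and $value instead of .Labels and .Value. A stand-alone illustration of the same trick using only the standard library's text/template (data and template text invented):

package main

import (
	"os"
	"text/template"
)

func main() {
	data := struct {
		Labels map[string]string
		Value  float64
	}{
		Labels: map[string]string{"instance": "localhost:9090"},
		Value:  42,
	}

	// Prepending the definitions lets the user-facing template use the
	// shorter $labels / $value instead of .Labels / .Value.
	defs := "{{$labels := .Labels}}{{$value := .Value}}"
	userText := "instance {{$labels.instance}} has value {{$value}}\n"

	tmpl := template.Must(template.New("alert").Parse(defs + userText))
	tmpl.Execute(os.Stdout, data) // prints: instance localhost:9090 has value 42
}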
Example 9: Send
// Send queues the given notification requests for processing.
// Panics if called on a handler that is not running.
func (n *Notifier) Send(alerts ...*model.Alert) {
	n.mtx.Lock()
	defer n.mtx.Unlock()

	// Attach external labels before relabelling and sending.
	for _, a := range alerts {
		for ln, lv := range n.opts.ExternalLabels {
			if _, ok := a.Labels[ln]; !ok {
				a.Labels[ln] = lv
			}
		}
	}

	alerts = n.relabelAlerts(alerts)

	// Queue capacity should be significantly larger than a single alert
	// batch could be.
	if d := len(alerts) - n.opts.QueueCapacity; d > 0 {
		alerts = alerts[d:]
		log.Warnf("Alert batch larger than queue capacity, dropping %d alerts", d)
		n.dropped.Add(float64(d))
	}

	// If the queue is full, remove the oldest alerts in favor
	// of newer ones.
	if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 {
		n.queue = n.queue[d:]
		log.Warnf("Alert notification queue full, dropping %d alerts", d)
		n.dropped.Add(float64(d))
	}
	n.queue = append(n.queue, alerts...)

	// Notify sending goroutine that there are alerts to be processed.
	n.setMore()
}
Example 10: eval
// eval runs a single evaluation cycle in which all rules are evaluated in parallel.
// In the future a single group will be evaluated sequentially to properly handle
// rule dependency.
func (g *Group) eval() {
	var (
		now = model.Now()
		wg  sync.WaitGroup
	)

	for _, rule := range g.rules {
		wg.Add(1)
		// BUG(julius): Look at fixing thundering herd.
		go func(rule Rule) {
			defer wg.Done()

			start := time.Now()
			evalTotal.Inc()

			vector, err := rule.eval(now, g.opts.QueryEngine)
			if err != nil {
				// Canceled queries are intentional termination of queries. This normally
				// happens on shutdown and thus we skip logging of any errors here.
				if _, ok := err.(promql.ErrQueryCanceled); !ok {
					log.Warnf("Error while evaluating rule %q: %s", rule, err)
				}
				evalFailures.Inc()
			}

			var rtyp ruleType
			switch r := rule.(type) {
			case *AlertingRule:
				rtyp = ruleTypeAlert
				g.sendAlerts(r, now)
			case *RecordingRule:
				rtyp = ruleTypeRecording
			default:
				panic(fmt.Errorf("unknown rule type: %T", rule))
			}

			evalDuration.WithLabelValues(string(rtyp)).Observe(
				float64(time.Since(start)) / float64(time.Second),
			)

			for _, s := range vector {
				g.opts.SampleAppender.Append(s)
			}
		}(rule)
	}
	wg.Wait()
}
Example 11: isDegraded
// isDegraded returns whether the storage is in "graceful degradation mode",
// which is the case if the number of chunks waiting for persistence has reached
// a percentage of maxChunksToPersist that exceeds
// percentChunksToPersistForDegradation. The method is not goroutine safe (but
// only ever called from the goroutine dealing with series maintenance).
// Changes of degradation mode are logged.
func (s *memorySeriesStorage) isDegraded() bool {
	nowDegraded := s.getNumChunksToPersist() > s.maxChunksToPersist*percentChunksToPersistForDegradation/100
	if s.degraded && !nowDegraded {
		log.Warn("Storage has left graceful degradation mode. Things are back to normal.")
	} else if !s.degraded && nowDegraded {
		log.Warnf(
			"%d chunks waiting for persistence (%d%% of the allowed maximum %d). Storage is now in graceful degradation mode. Series files are not synced anymore if following the adaptive strategy. Checkpoints are not performed more often than every %v. Series maintenance happens as frequently as possible.",
			s.getNumChunksToPersist(),
			s.getNumChunksToPersist()*100/s.maxChunksToPersist,
			s.maxChunksToPersist,
			s.checkpointInterval)
	}
	s.degraded = nowDegraded
	return s.degraded
}
Example 12: maybeAddMapping
// maybeAddMapping adds a fingerprint mapping to fpm if the FastFingerprint of m is different from fp.
func maybeAddMapping(fp model.Fingerprint, m model.Metric, fpm fpMappings) {
	if rawFP := m.FastFingerprint(); rawFP != fp {
		log.Warnf(
			"Metric %v with fingerprint %v is mapped from raw fingerprint %v.",
			m, fp, rawFP,
		)
		if mappedFPs, ok := fpm[rawFP]; ok {
			mappedFPs[metricToUniqueString(m)] = fp
		} else {
			fpm[rawFP] = map[string]model.Fingerprint{
				metricToUniqueString(m): fp,
			}
		}
	}
}
Example 13: append
func (sl *scrapeLoop) append(samples model.Samples) {
	numOutOfOrder := 0

	for _, s := range samples {
		if err := sl.appender.Append(s); err != nil {
			if err == local.ErrOutOfOrderSample {
				numOutOfOrder++
			} else {
				log.Warnf("Error inserting sample: %s", err)
			}
		}
	}
	if numOutOfOrder > 0 {
		log.With("numDropped", numOutOfOrder).Warn("Error on ingesting out-of-order samples")
	}
}
Example 14: sendSamples
func (t *StorageQueueManager) sendSamples(s model.Samples) {
	// Samples are sent to the remote storage on a best-effort basis. If a
	// sample isn't sent correctly the first time, it's simply dropped on the
	// floor.
	begin := time.Now()
	err := t.tsdb.Store(s)
	duration := time.Since(begin).Seconds()

	labelValue := success
	if err != nil {
		log.Warnf("error sending %d samples to remote storage: %s", len(s), err)
		labelValue = failure
	}
	t.sentSamplesTotal.WithLabelValues(labelValue).Add(float64(len(s)))
	t.sentBatchDuration.WithLabelValues(labelValue).Observe(duration)
}
Example 15: runIteration
func (m *Manager) runIteration() {
	now := model.Now()
	wg := sync.WaitGroup{}

	m.Lock()
	rulesSnapshot := make([]Rule, len(m.rules))
	copy(rulesSnapshot, m.rules)
	m.Unlock()

	for _, rule := range rulesSnapshot {
		wg.Add(1)
		// BUG(julius): Look at fixing thundering herd.
		go func(rule Rule) {
			defer wg.Done()

			start := time.Now()
			vector, err := rule.eval(now, m.queryEngine)
			duration := time.Since(start)

			if err != nil {
				evalFailures.Inc()
				log.Warnf("Error while evaluating rule %q: %s", rule, err)
				return
			}

			switch r := rule.(type) {
			case *AlertingRule:
				m.queueAlertNotifications(r, now)
				evalDuration.WithLabelValues(ruleTypeAlerting).Observe(
					float64(duration / time.Millisecond),
				)
			case *RecordingRule:
				evalDuration.WithLabelValues(ruleTypeRecording).Observe(
					float64(duration / time.Millisecond),
				)
			default:
				panic(fmt.Errorf("unknown rule type: %T", rule))
			}

			for _, s := range vector {
				m.sampleAppender.Append(s)
			}
		}(rule)
	}
	wg.Wait()
}