This article collects typical usage examples of the Golang Warn function from github.com/prometheus/log. If you are wondering what log.Warn does, how to call it, or where to find working examples, the curated function examples below may help.
A total of 15 code examples of the Warn function are shown below, sorted by popularity by default.
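Before diving in, here is a minimal, self-contained sketch of the two calls that recur throughout the examples, Warn and Warnf. Only the import path and the two function names come from the examples themselves; the queue threshold and messages are illustrative:

package main

import "github.com/prometheus/log"

func main() {
	queueLen, queueCap := 950, 1000
	// Warn joins its arguments Print-style; Warnf takes a Printf-style format string.
	if queueLen*100 >= queueCap*90 {
		log.Warn("queue almost full, new items may be dropped soon")
		log.Warnf("queue at %d%% of capacity (%d/%d)", queueLen*100/queueCap, queueLen, queueCap)
	}
}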
Example 1: ServeHTTP
func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := route.Context(r)
	name := strings.Trim(route.Param(ctx, "filepath"), "/")
	if name == "" {
		name = "index.html"
	}
	file, err := GetFile(StaticFiles, name)
	if err != nil {
		if err != io.EOF {
			log.Warn("Could not get file: ", err)
		}
		w.WriteHeader(http.StatusNotFound)
		return
	}
	contentType := http.DetectContentType(file)
	if strings.Contains(contentType, "text/plain") || strings.Contains(contentType, "application/octet-stream") {
		parts := strings.Split(name, ".")
		contentType = mimeMap[parts[len(parts)-1]]
	}
	w.Header().Set("Content-Type", contentType)
	w.Header().Set("Cache-Control", "public, max-age=259200")
	w.Write(file)
}
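The GetFile helper, StaticFiles, and mimeMap used above are not part of the excerpt. mimeMap is evidently a lookup from file extension to MIME type, consulted when DetectContentType only reports a generic type; a hypothetical definition (entries illustrative) might look like:

var mimeMap = map[string]string{
	"css":  "text/css; charset=utf-8",
	"html": "text/html; charset=utf-8",
	"js":   "application/javascript",
	"svg":  "image/svg+xml",
}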
Example 2: Append
// Append implements Storage.
func (s *memorySeriesStorage) Append(sample *clientmodel.Sample) {
	if s.getNumChunksToPersist() >= s.maxChunksToPersist {
		log.Warnf(
			"%d chunks waiting for persistence, sample ingestion suspended.",
			s.getNumChunksToPersist(),
		)
		for s.getNumChunksToPersist() >= s.maxChunksToPersist {
			time.Sleep(time.Second)
		}
		log.Warn("Sample ingestion resumed.")
	}
	rawFP := sample.Metric.FastFingerprint()
	s.fpLocker.Lock(rawFP)
	fp, err := s.mapper.mapFP(rawFP, sample.Metric)
	if err != nil {
		log.Errorf("Error while mapping fingerprint %v: %v", rawFP, err)
		s.persistence.setDirty(true)
	}
	if fp != rawFP {
		// Switch locks.
		s.fpLocker.Unlock(rawFP)
		s.fpLocker.Lock(fp)
	}
	series := s.getOrCreateSeries(fp, sample.Metric)
	completedChunksCount := series.add(&metric.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	s.ingestedSamplesCount.Inc()
	s.incNumChunksToPersist(completedChunksCount)
}
Example 3: handleNotification
func (n *notifier) handleNotification(a *Alert, op notificationOp, config *pb.NotificationConfig) {
	for _, pdConfig := range config.PagerdutyConfig {
		if err := n.sendPagerDutyNotification(pdConfig.GetServiceKey(), op, a); err != nil {
			log.Errorln("Error sending PagerDuty notification:", err)
		}
	}
	for _, emailConfig := range config.EmailConfig {
		if op == notificationOpResolve && !emailConfig.GetSendResolved() {
			continue
		}
		if *smtpSmartHost == "" {
			log.Warn("No SMTP smarthost configured, not sending email notification.")
			continue
		}
		if err := n.sendEmailNotification(emailConfig.GetEmail(), op, a); err != nil {
			log.Errorln("Error sending email notification:", err)
		}
	}
	for _, poConfig := range config.PushoverConfig {
		if op == notificationOpResolve && !poConfig.GetSendResolved() {
			continue
		}
		if err := n.sendPushoverNotification(poConfig.GetToken(), op, poConfig.GetUserKey(), a); err != nil {
			log.Errorln("Error sending Pushover notification:", err)
		}
	}
	for _, hcConfig := range config.HipchatConfig {
		if op == notificationOpResolve && !hcConfig.GetSendResolved() {
			continue
		}
		if err := n.sendHipChatNotification(op, hcConfig, a); err != nil {
			log.Errorln("Error sending HipChat notification:", err)
		}
	}
	for _, scConfig := range config.SlackConfig {
		if op == notificationOpResolve && !scConfig.GetSendResolved() {
			continue
		}
		if err := n.sendSlackNotification(op, scConfig, a); err != nil {
			log.Errorln("Error sending Slack notification:", err)
		}
	}
	for _, fdConfig := range config.FlowdockConfig {
		if op == notificationOpResolve && !fdConfig.GetSendResolved() {
			continue
		}
		if err := n.sendFlowdockNotification(op, fdConfig, a); err != nil {
			log.Errorln("Error sending Flowdock notification:", err)
		}
	}
	for _, whConfig := range config.WebhookConfig {
		if op == notificationOpResolve && !whConfig.GetSendResolved() {
			continue
		}
		if err := n.sendWebhookNotification(op, whConfig, a); err != nil {
			log.Errorln("Error sending Webhook notification:", err)
		}
	}
}
Example 4: Append
// Append queues a sample to be sent to the remote storage. It drops the
// sample on the floor if the queue is full. It implements
// storage.SampleAppender.
func (t *StorageQueueManager) Append(s *clientmodel.Sample) {
	select {
	case t.queue <- s:
	default:
		t.samplesCount.WithLabelValues(dropped).Inc()
		log.Warn("Remote storage queue full, discarding sample.")
	}
}
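The select with a plain send case and a default branch is Go's idiomatic non-blocking channel send: if the buffered channel has room, the sample is enqueued; otherwise the default branch runs immediately and the sample is dropped with a warning. A standalone sketch of the pattern (values illustrative):

queue := make(chan int, 2)
for i := 0; i < 3; i++ {
	select {
	case queue <- i:
		// buffered send succeeded
	default:
		// channel full: drop the value rather than block the caller
	}
}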
Example 5: Append
// Append implements Storage.
func (s *memorySeriesStorage) Append(sample *model.Sample) {
	for ln, lv := range sample.Metric {
		if len(lv) == 0 {
			delete(sample.Metric, ln)
		}
	}
	if s.getNumChunksToPersist() >= s.maxChunksToPersist {
		log.Warnf(
			"%d chunks waiting for persistence, sample ingestion suspended.",
			s.getNumChunksToPersist(),
		)
		for s.getNumChunksToPersist() >= s.maxChunksToPersist {
			time.Sleep(time.Second)
		}
		log.Warn("Sample ingestion resumed.")
	}
	rawFP := sample.Metric.FastFingerprint()
	s.fpLocker.Lock(rawFP)
	fp, err := s.mapper.mapFP(rawFP, sample.Metric)
	if err != nil {
		log.Errorf("Error while mapping fingerprint %v: %v", rawFP, err)
		s.persistence.setDirty(true)
	}
	if fp != rawFP {
		// Switch locks.
		s.fpLocker.Unlock(rawFP)
		s.fpLocker.Lock(fp)
	}
	series := s.getOrCreateSeries(fp, sample.Metric)
	if sample.Timestamp <= series.lastTime {
		// Don't log and track equal timestamps, as they are a common occurrence
		// when using client-side timestamps (e.g. Pushgateway or federation).
		// It would be even better to also compare the sample values here, but
		// we don't have efficient access to a series's last value.
		if sample.Timestamp != series.lastTime {
			log.Warnf("Ignoring sample with out-of-order timestamp for fingerprint %v (%v): %v is not after %v", fp, series.metric, sample.Timestamp, series.lastTime)
			s.outOfOrderSamplesCount.Inc()
		}
		s.fpLocker.Unlock(fp)
		return
	}
	completedChunksCount := series.add(&model.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	s.ingestedSamplesCount.Inc()
	s.incNumChunksToPersist(completedChunksCount)
}
Example 6: isDegraded
// isDegraded returns whether the storage is in "graceful degradation mode",
// which is the case if the number of chunks waiting for persistence has reached
// a percentage of maxChunksToPersist that exceeds
// percentChunksToPersistForDegradation. The method is not goroutine safe (but
// only ever called from the goroutine dealing with series maintenance).
// Changes of degradation mode are logged.
func (s *memorySeriesStorage) isDegraded() bool {
	nowDegraded := s.getNumChunksToPersist() > s.maxChunksToPersist*percentChunksToPersistForDegradation/100
	if s.degraded && !nowDegraded {
		log.Warn("Storage has left graceful degradation mode. Things are back to normal.")
	} else if !s.degraded && nowDegraded {
		log.Warnf(
			"%d chunks waiting for persistence (%d%% of the allowed maximum %d). Storage is now in graceful degradation mode. Series files are not synced anymore if following the adaptive strategy. Checkpoints are not performed more often than every %v. Series maintenance happens as frequently as possible.",
			s.getNumChunksToPersist(),
			s.getNumChunksToPersist()*100/s.maxChunksToPersist,
			s.maxChunksToPersist,
			s.checkpointInterval)
	}
	s.degraded = nowDegraded
	return s.degraded
}
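To make the threshold concrete: with, say, maxChunksToPersist = 1,000,000 and percentChunksToPersistForDegradation = 80 (illustrative values, not taken from the example), the storage would enter degradation mode once more than 800,000 chunks were waiting for persistence, and would log the recovery message as soon as the count dropped back to that threshold or below.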
Example 7: Append
// Append implements Storage.
func (s *memorySeriesStorage) Append(sample *clientmodel.Sample) {
	for ln, lv := range sample.Metric {
		if len(lv) == 0 {
			delete(sample.Metric, ln)
		}
	}
	if s.getNumChunksToPersist() >= s.maxChunksToPersist {
		log.Warnf(
			"%d chunks waiting for persistence, sample ingestion suspended.",
			s.getNumChunksToPersist(),
		)
		for s.getNumChunksToPersist() >= s.maxChunksToPersist {
			time.Sleep(time.Second)
		}
		log.Warn("Sample ingestion resumed.")
	}
	rawFP := sample.Metric.FastFingerprint()
	s.fpLocker.Lock(rawFP)
	fp, err := s.mapper.mapFP(rawFP, sample.Metric)
	if err != nil {
		log.Errorf("Error while mapping fingerprint %v: %v", rawFP, err)
		s.persistence.setDirty(true)
	}
	if fp != rawFP {
		// Switch locks.
		s.fpLocker.Unlock(rawFP)
		s.fpLocker.Lock(fp)
	}
	series := s.getOrCreateSeries(fp, sample.Metric)
	if sample.Timestamp <= series.lastTime {
		s.fpLocker.Unlock(fp)
		log.Warnf("Ignoring sample with out-of-order timestamp for fingerprint %v (%v): %v is not after %v", fp, series.metric, sample.Timestamp, series.lastTime)
		s.outOfOrderSamplesCount.Inc()
		return
	}
	completedChunksCount := series.add(&metric.SamplePair{
		Value:     sample.Value,
		Timestamp: sample.Timestamp,
	})
	s.fpLocker.Unlock(fp)
	s.ingestedSamplesCount.Inc()
	s.incNumChunksToPersist(completedChunksCount)
}
Example 8: Run
// Run dispatches notifications continuously.
func (n *NotificationHandler) Run() {
	for reqs := range n.pendingNotifications {
		if n.alertmanagerURL == "" {
			log.Warn("No alert manager configured, not dispatching notification")
			n.notificationDropped.Inc()
			continue
		}
		begin := time.Now()
		err := n.sendNotifications(reqs)
		if err != nil {
			log.Error("Error sending notification: ", err)
			n.notificationErrors.Inc()
		}
		n.notificationLatency.Observe(float64(time.Since(begin) / time.Millisecond))
	}
	close(n.stopped)
}
Example 9: fetch
func (e *periodicExporter) fetch(urlChan <-chan string, metricsChan chan<- prometheus.Metric, wg *sync.WaitGroup) {
	defer wg.Done()
	for u := range urlChan {
		u, err := url.Parse(u)
		if err != nil {
			log.Warn("could not parse slave URL: ", err)
			continue
		}
		host, _, err := net.SplitHostPort(u.Host)
		if err != nil {
			log.Warn("could not parse network address: ", err)
			continue
		}
		monitorURL := fmt.Sprintf("%s/monitor/statistics.json", u)
		resp, err := httpClient.Get(monitorURL)
		if err != nil {
			log.Warn(err)
			e.errors.WithLabelValues(host).Inc()
			continue
		}
		var stats []mesos_stats.Monitor
		err = json.NewDecoder(resp.Body).Decode(&stats)
		// Close the body immediately: a defer here would accumulate one open
		// response per loop iteration until the function returns.
		resp.Body.Close()
		if err != nil {
			log.Warn("failed to deserialize response: ", err)
			e.errors.WithLabelValues(host).Inc()
			continue
		}
		for _, stat := range stats {
			metricsChan <- prometheus.MustNewConstMetric(
				cpuLimitDesc,
				prometheus.GaugeValue,
				float64(stat.Statistics.CpusLimit),
				stat.Source, host, stat.FrameworkId,
			)
			metricsChan <- prometheus.MustNewConstMetric(
				cpuSysDesc,
				prometheus.CounterValue,
				float64(stat.Statistics.CpusSystemTimeSecs),
				stat.Source, host, stat.FrameworkId,
			)
			metricsChan <- prometheus.MustNewConstMetric(
				cpuUsrDesc,
				prometheus.CounterValue,
				float64(stat.Statistics.CpusUserTimeSecs),
				stat.Source, host, stat.FrameworkId,
			)
			metricsChan <- prometheus.MustNewConstMetric(
				memLimitDesc,
				prometheus.GaugeValue,
				float64(stat.Statistics.MemLimitBytes),
				stat.Source, host, stat.FrameworkId,
			)
			metricsChan <- prometheus.MustNewConstMetric(
				memRssDesc,
				prometheus.GaugeValue,
				float64(stat.Statistics.MemRssBytes),
				stat.Source, host, stat.FrameworkId,
			)
		}
	}
}
Example 10: updateSlaves
func (e *periodicExporter) updateSlaves() {
	log.Debug("discovering slaves...")
	// This will redirect us to the elected mesos master
	redirectURL := fmt.Sprintf("%s://%s/master/redirect", e.queryURL.Scheme, e.queryURL.Host)
	rReq, err := http.NewRequest("GET", redirectURL, nil)
	if err != nil {
		panic(err)
	}
	tr := http.Transport{
		DisableKeepAlives: true,
	}
	rresp, err := tr.RoundTrip(rReq)
	if err != nil {
		log.Warn(err)
		return
	}
	defer rresp.Body.Close()
	// This will/should return http://master.ip:5050
	masterLoc := rresp.Header.Get("Location")
	if masterLoc == "" {
		log.Warnf("%d response missing Location header", rresp.StatusCode)
		return
	}
	log.Debugf("current elected master at: %s", masterLoc)
	// Starting from 0.23.0, a Mesos Master does not set the scheme in the "Location" header.
	// Use the scheme from the master URL in this case.
	var stateURL string
	if strings.HasPrefix(masterLoc, "http") {
		stateURL = fmt.Sprintf("%s/master/state.json", masterLoc)
	} else {
		stateURL = fmt.Sprintf("%s:%s/master/state.json", e.queryURL.Scheme, masterLoc)
	}
	var state masterState
	// Find all active slaves
	err = getJSON(&state, stateURL)
	if err != nil {
		log.Warn(err)
		return
	}
	var slaveURLs []string
	for _, slave := range state.Slaves {
		if slave.Active {
			// Extract slave port from pid
			_, port, err := net.SplitHostPort(slave.PID)
			if err != nil {
				port = "5051"
			}
			url := fmt.Sprintf("http://%s:%s", slave.Hostname, port)
			slaveURLs = append(slaveURLs, url)
		}
	}
	log.Debugf("%d slaves discovered", len(slaveURLs))
	e.slaves.Lock()
	e.slaves.urls = slaveURLs
	e.slaves.Unlock()
}
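The getJSON helper used in this and several of the following examples is not included in the excerpt. Assuming it simply fetches a URL and decodes the JSON response body into the given value, a minimal version might look like this (requires net/http and encoding/json):

func getJSON(v interface{}, url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return json.NewDecoder(resp.Body).Decode(v)
}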
Example 11: updateSlaves
func (e *periodicExporter) updateSlaves() {
	log.Debug("discovering slaves...")
	// This will redirect us to the elected mesos master
	redirectURL := fmt.Sprintf("%s/master/redirect", e.opts.masterURL)
	rReq, err := http.NewRequest("GET", redirectURL, nil)
	if err != nil {
		panic(err)
	}
	tr := http.Transport{
		DisableKeepAlives: true,
	}
	rresp, err := tr.RoundTrip(rReq)
	if err != nil {
		log.Warn(err)
		return
	}
	defer rresp.Body.Close()
	// This will/should return http://master.ip:5050
	masterLoc := rresp.Header.Get("Location")
	if masterLoc == "" {
		log.Warnf("%d response missing Location header", rresp.StatusCode)
		return
	}
	log.Debugf("current elected master at: %s", masterLoc)
	// Find all active slaves
	stateURL := fmt.Sprintf("%s/master/state.json", masterLoc)
	resp, err := http.Get(stateURL)
	if err != nil {
		log.Warn(err)
		return
	}
	defer resp.Body.Close()
	type slave struct {
		Active   bool   `json:"active"`
		Hostname string `json:"hostname"`
		Pid      string `json:"pid"`
	}
	var req struct {
		Slaves []*slave `json:"slaves"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&req); err != nil {
		log.Warnf("failed to deserialize request: %s", err)
		return
	}
	var slaveURLs []string
	for _, slave := range req.Slaves {
		if slave.Active {
			// Extract slave port from pid
			_, port, err := net.SplitHostPort(slave.Pid)
			if err != nil {
				port = "5051"
			}
			url := fmt.Sprintf("http://%s:%s", slave.Hostname, port)
			slaveURLs = append(slaveURLs, url)
		}
	}
	log.Debugf("%d slaves discovered", len(slaveURLs))
	e.slaves.Lock()
	e.slaves.urls = slaveURLs
	e.slaves.Unlock()
}
Example 12: fetch
func (e *periodicExporter) fetch(urlChan <-chan string, metricsChan chan<- prometheus.Metric, wg *sync.WaitGroup) {
	defer wg.Done()
	for u := range urlChan {
		u, err := url.Parse(u)
		if err != nil {
			log.Warn("could not parse slave URL: ", err)
			continue
		}
		host, _, err := net.SplitHostPort(u.Host)
		if err != nil {
			log.Warn("could not parse network address: ", err)
			continue
		}
		taskInfo := map[string]exporterTaskInfo{}
		var state slaveState
		stateURL := fmt.Sprintf("%s/state.json", u)
		err = getJSON(&state, stateURL)
		if err != nil {
			log.Warn(err)
			e.errors.WithLabelValues(host).Inc()
			continue
		}
		for _, fw := range state.Frameworks {
			for _, ex := range fw.Executors {
				for _, t := range ex.Tasks {
					taskInfo[t.ID] = exporterTaskInfo{fw.Name, t.Name}
				}
			}
		}
		monitorURL := fmt.Sprintf("%s/monitor/statistics.json", u)
		var stats []mesos_stats.Monitor
		err = getJSON(&stats, monitorURL)
		if err != nil {
			log.Warn(err)
			e.errors.WithLabelValues(host).Inc()
			continue
		}
		for _, stat := range stats {
			tinfo, ok := taskInfo[stat.Source]
			if !ok {
				continue
			}
			metricsChan <- prometheus.MustNewConstMetric(
				cpuLimitDesc,
				prometheus.GaugeValue,
				float64(stat.Statistics.CpusLimit),
				stat.Source, host, stat.FrameworkId, tinfo.FrameworkName, tinfo.TaskName,
			)
			metricsChan <- prometheus.MustNewConstMetric(
				cpuSysDesc,
				prometheus.CounterValue,
				float64(stat.Statistics.CpusSystemTimeSecs),
				stat.Source, host, stat.FrameworkId, tinfo.FrameworkName, tinfo.TaskName,
			)
			metricsChan <- prometheus.MustNewConstMetric(
				cpuUsrDesc,
				prometheus.CounterValue,
				float64(stat.Statistics.CpusUserTimeSecs),
				stat.Source, host, stat.FrameworkId, tinfo.FrameworkName, tinfo.TaskName,
			)
			metricsChan <- prometheus.MustNewConstMetric(
				memLimitDesc,
				prometheus.GaugeValue,
				float64(stat.Statistics.MemLimitBytes),
				stat.Source, host, stat.FrameworkId, tinfo.FrameworkName, tinfo.TaskName,
			)
			metricsChan <- prometheus.MustNewConstMetric(
				memRssDesc,
				prometheus.GaugeValue,
				float64(stat.Statistics.MemRssBytes),
				stat.Source, host, stat.FrameworkId, tinfo.FrameworkName, tinfo.TaskName,
			)
		}
		metricsSnapshotURL := fmt.Sprintf("%s/metrics/snapshot", u)
		var ms metricsSnapshot
		err = getJSON(&ms, metricsSnapshotURL)
		if err != nil {
			log.Warn(err)
			e.errors.WithLabelValues(host).Inc()
			continue
		}
		for _, mm := range slaveMetrics {
			metricValue, ok := ms[mm.snapshotKey]
			if !ok {
				continue
			}
			if mm.convertFn != nil {
//......... part of the code omitted here .........
Example 13: scrapeMaster
func (e *periodicExporter) scrapeMaster() {
	stateURL := fmt.Sprintf("%s://%s/master/state.json", e.queryURL.Scheme, e.queryURL.Host)
	log.Debugf("Scraping master at %s", stateURL)
	var state masterState
	err := getJSON(&state, stateURL)
	if err != nil {
		log.Warn(err)
		return
	}
	metrics := []prometheus.Metric{}
	for _, fw := range state.Frameworks {
		metrics = append(metrics, prometheus.MustNewConstMetric(
			frameworkResourcesUsedCPUs,
			prometheus.GaugeValue,
			fw.UsedResources.CPUs,
			fw.ID, fw.Name,
		))
		metrics = append(metrics, prometheus.MustNewConstMetric(
			frameworkResourcesUsedDisk,
			prometheus.GaugeValue,
			megabytesToBytes(fw.UsedResources.Disk),
			fw.ID, fw.Name,
		))
		metrics = append(metrics, prometheus.MustNewConstMetric(
			frameworkResourcesUsedMemory,
			prometheus.GaugeValue,
			megabytesToBytes(fw.UsedResources.Mem),
			fw.ID, fw.Name,
		))
	}
	snapshotURL := fmt.Sprintf("%s://%s/metrics/snapshot", e.queryURL.Scheme, e.queryURL.Host)
	var ms metricsSnapshot
	err = getJSON(&ms, snapshotURL)
	if err != nil {
		log.Warn(err)
		return
	}
	for _, mm := range masterMetrics {
		metricValue, ok := ms[mm.snapshotKey]
		if !ok {
			continue
		}
		if mm.convertFn != nil {
			metricValue = mm.convertFn(metricValue)
		}
		metrics = append(metrics, prometheus.MustNewConstMetric(
			mm.desc, mm.valueType, metricValue, state.Hostname,
		))
	}
	e.Lock()
	e.metrics = metrics
	e.Unlock()
}
Example 14: loadSeriesMapAndHeads
// loadSeriesMapAndHeads loads the fingerprint to memory-series mapping and all
// the chunks contained in the checkpoint (and thus not yet persisted to series
// files). The method is capable of loading the checkpoint format v1 and v2. If
// recoverable corruption is detected, or if the dirty flag was set from the
// beginning, crash recovery is run, which might take a while. If an
// unrecoverable error is encountered, it is returned. Call this method during
// start-up while nothing else is running in storage land. This method is
// utterly goroutine-unsafe.
func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist int64, err error) {
	var chunkDescsTotal int64
	fingerprintToSeries := make(map[clientmodel.Fingerprint]*memorySeries)
	sm = &seriesMap{m: fingerprintToSeries}
	defer func() {
		if sm != nil && p.dirty {
			log.Warn("Persistence layer appears dirty.")
			err = p.recoverFromCrash(fingerprintToSeries)
			if err != nil {
				sm = nil
			}
		}
		if err == nil {
			numMemChunkDescs.Add(float64(chunkDescsTotal))
		}
	}()
	f, err := os.Open(p.headsFileName())
	if os.IsNotExist(err) {
		return sm, 0, nil
	}
	if err != nil {
		log.Warn("Could not open heads file:", err)
		p.dirty = true
		return
	}
	defer f.Close()
	r := bufio.NewReaderSize(f, fileBufSize)
	buf := make([]byte, len(headsMagicString))
	if _, err := io.ReadFull(r, buf); err != nil {
		log.Warn("Could not read from heads file:", err)
		p.dirty = true
		return sm, 0, nil
	}
	magic := string(buf)
	if magic != headsMagicString {
		log.Warnf(
			"unexpected magic string, want %q, got %q",
			headsMagicString, magic,
		)
		p.dirty = true
		return
	}
	version, err := binary.ReadVarint(r)
	if (version != headsFormatVersion && version != headsFormatLegacyVersion) || err != nil {
		log.Warnf("unknown heads format version, want %d", headsFormatVersion)
		p.dirty = true
		return sm, 0, nil
	}
	numSeries, err := codable.DecodeUint64(r)
	if err != nil {
		log.Warn("Could not decode number of series:", err)
		p.dirty = true
		return sm, 0, nil
	}
	for ; numSeries > 0; numSeries-- {
		seriesFlags, err := r.ReadByte()
		if err != nil {
			log.Warn("Could not read series flags:", err)
			p.dirty = true
			return sm, chunksToPersist, nil
		}
		headChunkPersisted := seriesFlags&flagHeadChunkPersisted != 0
		fp, err := codable.DecodeUint64(r)
		if err != nil {
			log.Warn("Could not decode fingerprint:", err)
			p.dirty = true
			return sm, chunksToPersist, nil
		}
		var metric codable.Metric
		if err := metric.UnmarshalFromReader(r); err != nil {
			log.Warn("Could not decode metric:", err)
			p.dirty = true
			return sm, chunksToPersist, nil
		}
		var persistWatermark int64
		var modTime time.Time
		if version != headsFormatLegacyVersion {
			// persistWatermark only present in v2.
			persistWatermark, err = binary.ReadVarint(r)
			if err != nil {
				log.Warn("Could not decode persist watermark:", err)
				p.dirty = true
				return sm, chunksToPersist, nil
			}
			modTimeNano, err := binary.ReadVarint(r)
			if err != nil {
				log.Warn("Could not decode modification time:", err)
				p.dirty = true
//......... part of the code omitted here .........
Example 15: main
func main() {
	flag.Parse()
	if !strings.HasPrefix(*pathPrefix, "/") {
		*pathPrefix = "/" + *pathPrefix
	}
	if !strings.HasSuffix(*pathPrefix, "/") {
		*pathPrefix = *pathPrefix + "/"
	}
	versionInfoTmpl.Execute(os.Stdout, BuildInfo)
	conf := config.MustLoadFromFile(*configFile)
	silencer := manager.NewSilencer()
	defer silencer.Close()
	err := silencer.LoadFromFile(*silencesFile)
	if err != nil {
		log.Warn("Couldn't load silences, starting up with empty silence list: ", err)
	}
	saveSilencesTicker := time.NewTicker(10 * time.Second)
	go func() {
		for range saveSilencesTicker.C {
			if err := silencer.SaveToFile(*silencesFile); err != nil {
				log.Error("Error saving silences to file: ", err)
			}
		}
	}()
	defer saveSilencesTicker.Stop()
	amURL, err := alertmanagerURL(*hostname, *pathPrefix, *listenAddress, *externalURL)
	if err != nil {
		log.Fatalln("Error building Alertmanager URL:", err)
	}
	notifier := manager.NewNotifier(conf.NotificationConfig, amURL)
	defer notifier.Close()
	inhibitor := new(manager.Inhibitor)
	inhibitor.SetInhibitRules(conf.InhibitRules())
	options := &manager.MemoryAlertManagerOptions{
		Inhibitor:          inhibitor,
		Silencer:           silencer,
		Notifier:           notifier,
		MinRefreshInterval: *minRefreshPeriod,
	}
	alertManager := manager.NewMemoryAlertManager(options)
	alertManager.SetAggregationRules(conf.AggregationRules())
	go alertManager.Run()
	// Web initialization.
	flags := map[string]string{}
	flag.VisitAll(func(f *flag.Flag) {
		flags[f.Name] = f.Value.String()
	})
	statusHandler := &web.StatusHandler{
		Config:     conf.String(),
		Flags:      flags,
		BuildInfo:  BuildInfo,
		Birth:      time.Now(),
		PathPrefix: *pathPrefix,
	}
	webService := &web.WebService{
		// REST API Service.
		AlertManagerService: &api.AlertManagerService{
			Manager:    alertManager,
			Silencer:   silencer,
			PathPrefix: *pathPrefix,
		},
		// Template-based page handlers.
		AlertsHandler: &web.AlertsHandler{
			Manager:                alertManager,
			IsSilencedInterrogator: silencer,
			PathPrefix:             *pathPrefix,
		},
		SilencesHandler: &web.SilencesHandler{
			Silencer:   silencer,
			PathPrefix: *pathPrefix,
		},
		StatusHandler: statusHandler,
	}
	go webService.ServeForever(*listenAddress, *pathPrefix)
	// React to configuration changes.
	watcher := config.NewFileWatcher(*configFile)
	go watcher.Watch(func(conf *config.Config) {
		inhibitor.SetInhibitRules(conf.InhibitRules())
		notifier.SetNotificationConfigs(conf.NotificationConfig)
		alertManager.SetAggregationRules(conf.AggregationRules())
		statusHandler.UpdateConfig(conf.String())
	})
	log.Info("Running notification dispatcher...")
	notifier.Dispatch()
}