This article collects typical usage examples of the Errorf function from the Golang package github.com/prometheus/common/log. If you have been wondering what Errorf is for, how to call it, or what real-world uses look like, the curated examples here may help.
Fifteen code examples of the Errorf function are shown below, taken from open-source projects and sorted by popularity by default.
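Before the full examples, here is a minimal sketch of the basic call shape (not taken from any of the examples below; the file path and message are placeholder values): log.Errorf takes a fmt.Printf-style format string plus arguments and logs the formatted result at error level.

package main

import (
	"os"

	"github.com/prometheus/common/log"
)

func main() {
	// Try to open a placeholder file; on failure, log the error at error
	// level with printf-style formatting, as the examples below do.
	if _, err := os.Open("/path/does/not/exist.yml"); err != nil {
		log.Errorf("Couldn't open file: %v", err)
	}
}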
Example 1: reloadConfig
func reloadConfig(filename string, rls ...Reloadable) (success bool) {
	log.Infof("Loading configuration file %s", filename)
	defer func() {
		if success {
			configSuccess.Set(1)
			configSuccessTime.Set(float64(time.Now().Unix()))
		} else {
			configSuccess.Set(0)
		}
	}()
	conf, err := config.LoadFile(filename)
	if err != nil {
		log.Errorf("Couldn't load configuration (-config.file=%s): %v", filename, err)
		// TODO(julius): Remove this notice when releasing 0.17.0 or 0.18.0.
		if err.Error() == "unknown fields in global config: labels" {
			log.Errorf("NOTE: The 'labels' setting in the global configuration section has been renamed to 'external_labels' and now has changed semantics (see release notes at https://github.com/prometheus/prometheus/blob/master/CHANGELOG.md). Please update your configuration file accordingly.")
		}
		return false
	}
	success = true
	for _, rl := range rls {
		success = success && rl.ApplyConfig(conf)
	}
	return success
}
Example 2: matchRegularExpressions
func matchRegularExpressions(reader io.Reader, config HTTPProbe) bool {
	body, err := ioutil.ReadAll(reader)
	if err != nil {
		log.Errorf("Error reading HTTP body: %s", err)
		return false
	}
	for _, expression := range config.FailIfMatchesRegexp {
		re, err := regexp.Compile(expression)
		if err != nil {
			log.Errorf("Could not compile expression %q as regular expression: %s", expression, err)
			return false
		}
		if re.Match(body) {
			return false
		}
	}
	for _, expression := range config.FailIfNotMatchesRegexp {
		re, err := regexp.Compile(expression)
		if err != nil {
			log.Errorf("Could not compile expression %q as regular expression: %s", expression, err)
			return false
		}
		if !re.Match(body) {
			return false
		}
	}
	return true
}
Example 3: ApplyConfig
// ApplyConfig updates the rule manager's state as the config requires. If
// loading the new rules failed, the old rule set is restored. Returns true on success.
func (m *Manager) ApplyConfig(conf *config.Config) bool {
	m.Lock()
	defer m.Unlock()
	defer m.transferAlertState()()
	success := true
	m.interval = time.Duration(conf.GlobalConfig.EvaluationInterval)
	rulesSnapshot := make([]Rule, len(m.rules))
	copy(rulesSnapshot, m.rules)
	m.rules = m.rules[:0]
	var files []string
	for _, pat := range conf.RuleFiles {
		fs, err := filepath.Glob(pat)
		if err != nil {
			// The only error can be a bad pattern.
			log.Errorf("Error retrieving rule files for %s: %s", pat, err)
			success = false
		}
		files = append(files, fs...)
	}
	if err := m.loadRuleFiles(files...); err != nil {
		// If loading the new rules failed, restore the old rule set.
		m.rules = rulesSnapshot
		log.Errorf("Error loading rules, previous rule set restored: %s", err)
		success = false
	}
	return success
}
Example 4: scrape
func (e *Exporter) scrape(csvRows chan<- []string) {
	defer close(csvRows)
	e.totalScrapes.Inc()
	resp, err := e.client.Get(e.URI)
	if err != nil {
		e.up.Set(0)
		log.Errorf("Can't scrape HAProxy: %v", err)
		return
	}
	defer resp.Body.Close()
	e.up.Set(1)
	reader := csv.NewReader(resp.Body)
	reader.TrailingComma = true
	reader.Comment = '#'
	for {
		row, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Errorf("Can't read CSV: %v", err)
			e.csvParseFailures.Inc()
			break
		}
		if len(row) == 0 {
			continue
		}
		csvRows <- row
	}
}
Example 5: Mutes
// Mutes returns true iff the given label set is muted.
func (ih *Inhibitor) Mutes(lset model.LabelSet) bool {
	alerts := ih.alerts.GetPending()
	defer alerts.Close()
	// TODO(fabxc): improve erroring for iterators so errors do not
	// get silenced here.
	for alert := range alerts.Next() {
		if err := alerts.Err(); err != nil {
			log.Errorf("Error iterating alerts: %s", err)
			continue
		}
		if alert.Resolved() {
			continue
		}
		for _, rule := range ih.rules {
			if rule.Mutes(alert.Labels, lset) {
				ih.marker.SetInhibited(lset.Fingerprint(), true)
				return true
			}
		}
	}
	if err := alerts.Err(); err != nil {
		log.Errorf("Error after iterating alerts: %s", err)
	}
	ih.marker.SetInhibited(lset.Fingerprint(), false)
	return false
}
Example 6: getOrCreateSeries
func (s *memorySeriesStorage) getOrCreateSeries(fp model.Fingerprint, m model.Metric) *memorySeries {
	series, ok := s.fpToSeries.get(fp)
	if !ok {
		var cds []*chunkDesc
		var modTime time.Time
		unarchived, err := s.persistence.unarchiveMetric(fp)
		if err != nil {
			log.Errorf("Error unarchiving fingerprint %v (metric %v): %v", fp, m, err)
		}
		if unarchived {
			s.seriesOps.WithLabelValues(unarchive).Inc()
			// We have to load chunkDescs anyway to do anything with
			// the series, so let's do it right now so that we don't
			// end up with a series without any chunkDescs for a
			// while (which is confusing as it makes the series
			// appear as archived or purged).
			cds, err = s.loadChunkDescs(fp, 0)
			if err != nil {
				log.Errorf("Error loading chunk descs for fingerprint %v (metric %v): %v", fp, m, err)
			}
			modTime = s.persistence.seriesFileModTime(fp)
		} else {
			// This was a genuinely new series, so index the metric.
			s.persistence.indexMetric(fp, m)
			s.seriesOps.WithLabelValues(create).Inc()
		}
		series = newMemorySeries(m, cds, modTime)
		s.fpToSeries.put(fp, series)
		s.numSeries.Inc()
	}
	return series
}
Example 7: purgeArchivedMetric
// purgeArchivedMetric deletes an archived fingerprint and its corresponding
// metric entirely. It also queues the metric for un-indexing (no need to call
// unindexMetric for the deleted metric). It does not touch the series file,
// though. The caller must have locked the fingerprint.
func (p *persistence) purgeArchivedMetric(fp model.Fingerprint) (err error) {
	defer func() {
		if err != nil {
			p.setDirty(fmt.Errorf("error in method purgeArchivedMetric(%v): %s", fp, err))
		}
	}()
	metric, err := p.archivedMetric(fp)
	if err != nil || metric == nil {
		return err
	}
	deleted, err := p.archivedFingerprintToMetrics.Delete(codable.Fingerprint(fp))
	if err != nil {
		return err
	}
	if !deleted {
		log.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToMetrics index. This should never happen.", fp)
	}
	deleted, err = p.archivedFingerprintToTimeRange.Delete(codable.Fingerprint(fp))
	if err != nil {
		return err
	}
	if !deleted {
		log.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToTimeRange index. This should never happen.", fp)
	}
	p.unindexMetric(fp, metric)
	return nil
}
Example 8: Sources
// Sources implements the TargetProvider interface.
func (kd *Discovery) Sources() []string {
	sourceNames := make([]string, 0, len(kd.apiServers))
	for _, apiServer := range kd.apiServers {
		sourceNames = append(sourceNames, apiServersTargetGroupName+":"+apiServer.Host)
	}
	nodes, _, err := kd.getNodes()
	if err != nil {
		// If we can't list nodes then we can't watch them. Assume this is a misconfiguration
		// & log & return empty.
		log.Errorf("Unable to initialize Kubernetes nodes: %s", err)
		return []string{}
	}
	sourceNames = append(sourceNames, kd.nodeSources(nodes)...)
	services, _, err := kd.getServices()
	if err != nil {
		// If we can't list services then we can't watch them. Assume this is a misconfiguration
		// & log & return empty.
		log.Errorf("Unable to initialize Kubernetes services: %s", err)
		return []string{}
	}
	sourceNames = append(sourceNames, kd.serviceSources(services)...)
	return sourceNames
}
Example 9: addService
func (d *serviceDiscovery) addService(service *Service) *config.TargetGroup {
	namespace, ok := d.services[service.ObjectMeta.Namespace]
	if !ok {
		namespace = map[string]*Service{}
		d.services[service.ObjectMeta.Namespace] = namespace
	}
	namespace[service.ObjectMeta.Name] = service
	endpointURL := fmt.Sprintf(serviceEndpointsURL, service.ObjectMeta.Namespace, service.ObjectMeta.Name)
	res, err := d.kd.queryAPIServerPath(endpointURL)
	if err != nil {
		log.Errorf("Error getting service endpoints: %s", err)
		return nil
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		log.Errorf("Failed to get service endpoints: %d", res.StatusCode)
		return nil
	}
	var eps Endpoints
	if err := json.NewDecoder(res.Body).Decode(&eps); err != nil {
		log.Errorf("Error getting service endpoints: %s", err)
		return nil
	}
	return d.updateServiceTargetGroup(service, &eps)
}
Example 10: setMetrics
func (e *Exporter) setMetrics(jsonStats <-chan []StatsEntry) (statsMap map[string]float64) {
	statsMap = make(map[string]float64)
	stats := <-jsonStats
	for _, s := range stats {
		statsMap[s.Name] = s.Value
	}
	if len(statsMap) == 0 {
		return
	}
	for _, def := range e.gaugeDefs {
		if value, ok := statsMap[def.key]; ok {
			// latency gauges need to be converted from microseconds to seconds
			if strings.HasSuffix(def.key, "latency") {
				value = value / 1000000
			}
			e.gaugeMetrics[def.id].Set(value)
		} else {
			log.Errorf("Expected PowerDNS stats key not found: %s", def.key)
			e.jsonParseFailures.Inc()
		}
	}
	for _, def := range e.counterVecDefs {
		for key, label := range def.labelMap {
			if value, ok := statsMap[key]; ok {
				e.counterVecMetrics[def.id].WithLabelValues(label).Set(value)
			} else {
				log.Errorf("Expected PowerDNS stats key not found: %s", key)
				e.jsonParseFailures.Inc()
			}
		}
	}
	return
}
Example 11: scrape
func (e *Exporter) scrape() {
	resp, err := e.client.Get(e.URL)
	if err != nil {
		e.up.Set(0)
		log.Errorf("Can't scrape Spring Actuator: %v", err)
		return
	}
	defer resp.Body.Close()
	if !(resp.StatusCode >= 200 && resp.StatusCode < 300) {
		e.up.Set(0)
		log.Errorf("Can't scrape Spring Actuator: StatusCode: %d", resp.StatusCode)
		return
	}
	e.up.Set(1)
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Errorf("Reading response body failed %v", err)
		return
	}
	var metrics map[string]*json.RawMessage
	if err := json.Unmarshal(body, &metrics); err != nil {
		log.Fatalf("JSON unmarshaling failed: %s", err)
	}
	e.export(metrics)
}
Example 12: scrapeMetrics
func (e *Exporter) scrapeMetrics(json *gabs.Container, ch chan<- prometheus.Metric) {
	elements, _ := json.ChildrenMap()
	for key, element := range elements {
		switch key {
		case "message":
			log.Errorf("Problem collecting metrics: %s\n", element.Data().(string))
			return
		case "version":
			data := element.Data()
			version, ok := data.(string)
			if !ok {
				log.Errorf(fmt.Sprintf("Bad conversion! Unexpected value \"%v\" for version\n", data))
			} else {
				gauge, _ := e.Gauges.Fetch("metrics_version", "Marathon metrics version", "version")
				gauge.WithLabelValues(version).Set(1)
				gauge.Collect(ch)
			}
		case "counters":
			e.scrapeCounters(element)
		case "gauges":
			e.scrapeGauges(element)
		case "histograms":
			e.scrapeHistograms(element)
		case "meters":
			e.scrapeMeters(element)
		case "timers":
			e.scrapeTimers(element)
		}
	}
}
Example 13: providersFromConfig
// providersFromConfig returns all TargetProviders configured in cfg.
func providersFromConfig(cfg *config.ScrapeConfig) map[string]TargetProvider {
	providers := map[string]TargetProvider{}
	app := func(mech string, i int, tp TargetProvider) {
		providers[fmt.Sprintf("%s/%d", mech, i)] = tp
	}
	for i, c := range cfg.DNSSDConfigs {
		app("dns", i, discovery.NewDNS(c))
	}
	for i, c := range cfg.FileSDConfigs {
		app("file", i, discovery.NewFileDiscovery(c))
	}
	for i, c := range cfg.ConsulSDConfigs {
		k, err := discovery.NewConsul(c)
		if err != nil {
			log.Errorf("Cannot create Consul discovery: %s", err)
			continue
		}
		app("consul", i, k)
	}
	for i, c := range cfg.MarathonSDConfigs {
		app("marathon", i, discovery.NewMarathon(c))
	}
	for i, c := range cfg.KubernetesSDConfigs {
		k, err := discovery.NewKubernetesDiscovery(c)
		if err != nil {
			log.Errorf("Cannot create Kubernetes discovery: %s", err)
			continue
		}
		app("kubernetes", i, k)
	}
	for i, c := range cfg.ServersetSDConfigs {
		app("serverset", i, discovery.NewServersetDiscovery(c))
	}
	for i, c := range cfg.NerveSDConfigs {
		app("nerve", i, discovery.NewNerveDiscovery(c))
	}
	for i, c := range cfg.EC2SDConfigs {
		app("ec2", i, discovery.NewEC2Discovery(c))
	}
	for i, c := range cfg.GCESDConfigs {
		gced, err := discovery.NewGCEDiscovery(c)
		if err != nil {
			log.Errorf("Cannot initialize GCE discovery: %s", err)
			continue
		}
		app("gce", i, gced)
	}
	for i, c := range cfg.AzureSDConfigs {
		app("azure", i, discovery.NewAzureDiscovery(c))
	}
	if len(cfg.StaticConfigs) > 0 {
		app("static", 0, NewStaticProvider(cfg.StaticConfigs))
	}
	return providers
}
Example 14: Run
// Run implements the TargetProvider interface.
func (fd *FileDiscovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
	defer close(ch)
	defer fd.stop()
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Errorf("Error creating file watcher: %s", err)
		return
	}
	fd.watcher = watcher
	fd.refresh(ch)
	ticker := time.NewTicker(fd.interval)
	defer ticker.Stop()
	for {
		// Stopping has priority over refreshing. Thus we wrap the actual select
		// clause to always catch done signals.
		select {
		case <-ctx.Done():
			return
		default:
			select {
			case <-ctx.Done():
				return
			case event := <-fd.watcher.Events:
				// fsnotify sometimes sends a bunch of events without name or operation.
				// It's unclear what they are and why they are sent - filter them out.
				if len(event.Name) == 0 {
					break
				}
				// Everything but a chmod requires rereading.
				if event.Op^fsnotify.Chmod == 0 {
					break
				}
				// Changes to a file can spawn various sequences of events with
				// different combinations of operations. For all practical purposes
				// this is inaccurate.
				// The most reliable solution is to reload everything if anything happens.
				fd.refresh(ch)
			case <-ticker.C:
				// Setting a new watch after an update might fail. Make sure we don't lose
				// those files forever.
				fd.refresh(ch)
			case err := <-fd.watcher.Errors:
				if err != nil {
					log.Errorf("Error on file watch: %s", err)
				}
			}
		}
	}
}
Example 15: ApplyConfig
// ApplyConfig updates the rule manager's state as the config requires. If
// loading the new rules failed, the old rule set is restored. Returns true on success.
func (m *Manager) ApplyConfig(conf *config.Config) bool {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	// Get all rule files and load the groups they define.
	var files []string
	for _, pat := range conf.RuleFiles {
		fs, err := filepath.Glob(pat)
		if err != nil {
			// The only error can be a bad pattern.
			log.Errorf("Error retrieving rule files for %s: %s", pat, err)
			return false
		}
		files = append(files, fs...)
	}
	groups, err := m.loadGroups(files...)
	if err != nil {
		log.Errorf("Error loading rules, previous rule set restored: %s", err)
		return false
	}
	var wg sync.WaitGroup
	for _, newg := range groups {
		// To be replaced with a configurable per-group interval.
		newg.interval = time.Duration(conf.GlobalConfig.EvaluationInterval)
		wg.Add(1)
		// If there is an old group with the same identifier, stop it and wait for
		// it to finish the current iteration. Then copy its state into the new group.
		oldg, ok := m.groups[newg.name]
		delete(m.groups, newg.name)
		go func(newg *Group) {
			if ok {
				oldg.stop()
				newg.copyState(oldg)
			}
			go newg.run()
			wg.Done()
		}(newg)
	}
	// Stop remaining old groups.
	for _, oldg := range m.groups {
		oldg.stop()
	}
	wg.Wait()
	m.groups = groups
	return true
}