This article collects typical usage examples of the Now function from the Go package github.com/prometheus/common/model. If you have been wondering what exactly Golang's Now function does, how to use it, or where to find examples, the curated code samples below may help.
The following shows 15 code examples of the Now function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
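Before the examples, here is a minimal sketch of what model.Now itself provides (based on the documented prometheus/common/model API): it returns a model.Time, a millisecond-precision Unix timestamp that supports arithmetic via Add and conversion to the standard library's time.Time.

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/common/model"
)

func main() {
    now := model.Now() // model.Time: milliseconds since the Unix epoch

    cutoff := now.Add(-15 * time.Minute) // arithmetic stays in model.Time
    fmt.Println(cutoff.Before(now))      // true

    fmt.Println(now.Time().UTC()) // convert to a standard time.Time
}

The recurring pattern in the examples below is exactly this: take model.Now(), subtract a retention or evaluation offset, and compare the result against sample timestamps.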
Example 1: loop
func (s *memorySeriesStorage) loop() {
    checkpointTimer := time.NewTimer(s.checkpointInterval)

    dirtySeriesCount := 0

    defer func() {
        checkpointTimer.Stop()
        log.Info("Maintenance loop stopped.")
        close(s.loopStopped)
    }()

    memoryFingerprints := s.cycleThroughMemoryFingerprints()
    archivedFingerprints := s.cycleThroughArchivedFingerprints()

loop:
    for {
        select {
        case <-s.loopStopping:
            break loop
        case <-checkpointTimer.C:
            err := s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker)
            if err != nil {
                log.Errorln("Error while checkpointing:", err)
            } else {
                dirtySeriesCount = 0
            }
            // If a checkpoint takes longer than checkpointInterval, an
            // unluckily timed combination with the Reset(0) call below can
            // lead to a case where a tick is lurking in C, leading to
            // repeated checkpointing without a break.
            select {
            case <-checkpointTimer.C: // Get rid of the lurking tick.
            default:
            }
            checkpointTimer.Reset(s.checkpointInterval)
        case fp := <-memoryFingerprints:
            if s.maintainMemorySeries(fp, model.Now().Add(-s.dropAfter)) {
                dirtySeriesCount++
                // Check if we have enough "dirty" series so that we need an early checkpoint.
                // However, if we are already behind persisting chunks, creating a checkpoint
                // would be counterproductive, as it would slow down chunk persisting even more,
                // while in a situation like that, where we are clearly lacking speed of disk
                // maintenance, the best we can do for crash recovery is to persist chunks as
                // quickly as possible. So only checkpoint if the urgency score is < 1.
                if dirtySeriesCount >= s.checkpointDirtySeriesLimit &&
                    s.calculatePersistenceUrgencyScore() < 1 {
                    checkpointTimer.Reset(0)
                }
            }
        case fp := <-archivedFingerprints:
            s.maintainArchivedSeries(fp, model.Now().Add(-s.dropAfter))
        }
    }
    // Wait until both channels are closed.
    for range memoryFingerprints {
    }
    for range archivedFingerprints {
    }
}
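The drain-and-Reset sequence in Example 1 is a general Go timer idiom: after a possible earlier Reset(0), a stale tick may already sit in the timer's channel, so it is drained non-blockingly before the timer is re-armed. A minimal standalone sketch of the idiom (the surrounding setup here is illustrative, not taken from the example):

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.NewTimer(time.Second)
    defer t.Stop()

    // Some code path fired the timer early:
    t.Reset(0)
    time.Sleep(10 * time.Millisecond) // let the zero-duration tick arrive

    // Drain any tick already buffered in t.C, without blocking, so the
    // following Reset arms a clean timer instead of firing immediately.
    select {
    case <-t.C:
    default:
    }
    t.Reset(time.Second)

    fmt.Println("timer re-armed:", <-t.C)
}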
Example 2: loop
func (s *memorySeriesStorage) loop() {
    checkpointTimer := time.NewTimer(s.checkpointInterval)

    dirtySeriesCount := 0

    defer func() {
        checkpointTimer.Stop()
        log.Info("Maintenance loop stopped.")
        close(s.loopStopped)
    }()

    memoryFingerprints := s.cycleThroughMemoryFingerprints()
    archivedFingerprints := s.cycleThroughArchivedFingerprints()

loop:
    for {
        select {
        case <-s.loopStopping:
            break loop
        case <-checkpointTimer.C:
            err := s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker)
            if err != nil {
                log.Errorln("Error while checkpointing:", err)
            } else {
                dirtySeriesCount = 0
            }
            checkpointTimer.Reset(s.checkpointInterval)
        case fp := <-memoryFingerprints:
            if s.maintainMemorySeries(fp, model.Now().Add(-s.dropAfter)) {
                dirtySeriesCount++
                // Check if we have enough "dirty" series so that we need an early checkpoint.
                // However, if we are already behind persisting chunks, creating a checkpoint
                // would be counterproductive, as it would slow down chunk persisting even more,
                // while in a situation like that, where we are clearly lacking speed of disk
                // maintenance, the best we can do for crash recovery is to persist chunks as
                // quickly as possible. So only checkpoint if the storage is not in "graceful
                // degradation mode".
                if dirtySeriesCount >= s.checkpointDirtySeriesLimit && !s.isDegraded() {
                    checkpointTimer.Reset(0)
                }
            }
        case fp := <-archivedFingerprints:
            s.maintainArchivedSeries(fp, model.Now().Add(-s.dropAfter))
        }
    }
    // Wait until both channels are closed.
    for range memoryFingerprints {
    }
    for range archivedFingerprints {
    }
}
Example 3: alerts
func (api *API) alerts(r *http.Request) (interface{}, *apiError) {
    // Generate snapshot of notifications for all current alerts.
    var reqs notification.NotificationReqs
    now := model.Now()
    for _, rule := range api.RuleManager.AlertingRules() {
        reqs = append(reqs, api.RuleManager.GetRuleAlertNotifications(rule, now)...)
    }

    // Generate an alert map.
    alerts := make([]map[string]interface{}, 0, len(reqs))
    for _, req := range reqs {
        alerts = append(alerts, map[string]interface{}{
            "summary":     req.Summary,
            "description": req.Description,
            "runbook":     req.Runbook,
            "labels":      req.Labels,
            "payload": map[string]interface{}{
                "value":        req.Value,
                "activeSince":  req.ActiveSince,
                "generatorURL": req.GeneratorURL,
                "alertingRule": req.RuleString,
            },
        })
    }
    return alerts, nil
}
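Because the handler builds its result from plain maps and strings, the snapshot at the model.Now() timestamp serializes to JSON directly. A hypothetical wrapper (the function name and error handling below are assumptions, not part of the example; it assumes "encoding/json" and "fmt" are imported) could look like:

// Hypothetical helper, not from the example: render the alerts snapshot as JSON.
func alertsJSON(api *API, r *http.Request) ([]byte, error) {
    data, apiErr := api.alerts(r)
    if apiErr != nil {
        return nil, fmt.Errorf("fetching alerts: %v", apiErr)
    }
    return json.MarshalIndent(data, "", "  ") // []map[string]interface{} marshals directly
}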
Example 4: preloadChunksForInstant
// preloadChunksForInstant preloads chunks for the latest value in the given
// range. If the last sample saved in the memorySeries itself is the latest
// value in the given range, it will in fact preload zero chunks and just take
// that value.
func (s *memorySeries) preloadChunksForInstant(
    fp model.Fingerprint,
    from model.Time, through model.Time,
    mss *MemorySeriesStorage,
) (SeriesIterator, error) {
    // If we have a lastSamplePair in the series, and that last samplePair
    // is in the interval, just take it in a singleSampleSeriesIterator. No
    // need to pin or load anything.
    lastSample := s.lastSamplePair()
    if !through.Before(lastSample.Timestamp) &&
        !from.After(lastSample.Timestamp) &&
        lastSample != model.ZeroSamplePair {
        iter := &boundedIterator{
            it: &singleSampleSeriesIterator{
                samplePair: lastSample,
                metric:     s.metric,
            },
            start: model.Now().Add(-mss.dropAfter),
        }
        return iter, nil
    }
    // If we are here, we are out of luck and have to delegate to the more
    // expensive method.
    return s.preloadChunksForRange(fp, from, through, mss)
}
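The condition !through.Before(ts) && !from.After(ts) is the standard way to test ts ∈ [from, through] inclusively with model.Time, which exposes Before/After rather than <=/>= operators. A small self-contained sketch of the idiom:

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/common/model"
)

// inInterval reports whether ts lies in the closed interval [from, through].
func inInterval(ts, from, through model.Time) bool {
    return !through.Before(ts) && !from.After(ts)
}

func main() {
    now := model.Now()
    from := now.Add(-5 * time.Minute)

    fmt.Println(inInterval(now, from, now))                  // true: boundaries are inclusive
    fmt.Println(inInterval(now.Add(time.Second), from, now)) // false: after through
}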
Example 5: TestRetentionCutoff
func TestRetentionCutoff(t *testing.T) {
    now := model.Now()
    insertStart := now.Add(-2 * time.Hour)

    s, closer := NewTestStorage(t, 2)
    defer closer.Close()

    // Stop maintenance loop to prevent actual purging.
    close(s.loopStopping)
    <-s.loopStopped
    <-s.logThrottlingStopped
    // Recreate channel to avoid panic when we really shut down.
    s.loopStopping = make(chan struct{})

    s.dropAfter = 1 * time.Hour

    for i := 0; i < 120; i++ {
        smpl := &model.Sample{
            Metric:    model.Metric{"job": "test"},
            Timestamp: insertStart.Add(time.Duration(i) * time.Minute), // 1 minute intervals.
            Value:     1,
        }
        s.Append(smpl)
    }
    s.WaitForIndexing()

    var fp model.Fingerprint
    for f := range s.fingerprintsForLabelPairs(model.LabelPair{Name: "job", Value: "test"}) {
        fp = f
        break
    }

    pl := s.NewPreloader()
    defer pl.Close()

    // Preload everything.
    it := pl.PreloadRange(fp, insertStart, now)

    val := it.ValueAtOrBeforeTime(now.Add(-61 * time.Minute))
    if val.Timestamp != model.Earliest {
        t.Errorf("unexpected result for timestamp before retention period")
    }

    vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now})
    // We get 59 values here because the model.Now() is slightly later
    // than our now.
    if len(vals) != 59 {
        t.Errorf("expected 59 values but got %d", len(vals))
    }
    if expt := now.Add(-1 * time.Hour).Add(time.Minute); vals[0].Timestamp != expt {
        t.Errorf("unexpected timestamp for first sample: %v, expected %v", vals[0].Timestamp.Time(), expt.Time())
    }
}
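A quick, self-contained check of the arithmetic behind the test's two assertions (59 surviving values, the first at now - 1h + 1min), using only the constants from the test above:

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/common/model"
)

func main() {
    now := model.Now()
    insertStart := now.Add(-2 * time.Hour)

    // Samples sit at insertStart + i·1min for i = 0..119. With a 1h
    // retention, the cutoff is (just after) now - 60min, so the survivors
    // are i = 61..119 — 59 samples, the first at now - 1h + 1min.
    cutoff := now.Add(-1 * time.Hour)
    count := 0
    var first model.Time
    for i := 0; i < 120; i++ {
        ts := insertStart.Add(time.Duration(i) * time.Minute)
        if ts.After(cutoff) {
            if count == 0 {
                first = ts
            }
            count++
        }
    }
    fmt.Println(count, first == now.Add(-1*time.Hour).Add(time.Minute)) // 59 true
}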
Example 6: runIteration
func (m *Manager) runIteration() {
    now := model.Now()
    wg := sync.WaitGroup{}

    m.Lock()
    rulesSnapshot := make([]Rule, len(m.rules))
    copy(rulesSnapshot, m.rules)
    m.Unlock()

    for _, rule := range rulesSnapshot {
        wg.Add(1)
        // BUG(julius): Look at fixing thundering herd.
        go func(rule Rule) {
            defer wg.Done()

            start := time.Now()
            vector, err := rule.eval(now, m.queryEngine)
            duration := time.Since(start)

            if err != nil {
                evalFailures.Inc()
                log.Warnf("Error while evaluating rule %q: %s", rule, err)
                return
            }

            switch r := rule.(type) {
            case *AlertingRule:
                m.queueAlertNotifications(r, now)
                evalDuration.WithLabelValues(ruleTypeAlerting).Observe(
                    float64(duration / time.Millisecond),
                )
            case *RecordingRule:
                evalDuration.WithLabelValues(ruleTypeRecording).Observe(
                    float64(duration / time.Millisecond),
                )
            default:
                panic(fmt.Errorf("Unknown rule type: %T", rule))
            }

            for _, s := range vector {
                m.sampleAppender.Append(&model.Sample{
                    Metric:    s.Metric.Metric,
                    Value:     s.Value,
                    Timestamp: s.Timestamp,
                })
            }
        }(rule)
    }

    wg.Wait()
}
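Note the two duration conversions that appear across these examples: float64(duration / time.Millisecond), as used here, performs integer division first and truncates to whole milliseconds, while float64(time.Since(start)) / float64(time.Second), as used in the Group.eval examples, keeps full precision as fractional seconds. A tiny comparison sketch:

package main

import (
    "fmt"
    "time"
)

func main() {
    d := 1234567 * time.Microsecond // 1.234567s

    ms := float64(d / time.Millisecond)       // integer division first: 1234 ms
    secs := float64(d) / float64(time.Second) // full precision: 1.234567 s

    fmt.Println(ms, secs)
}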
Example 7: eval
// eval runs a single evaluation cycle in which all rules are evaluated in parallel.
// In the future a single group will be evaluated sequentially to properly handle
// rule dependency.
func (g *Group) eval() {
    var (
        now = model.Now()
        wg  sync.WaitGroup
    )

    for _, rule := range g.rules {
        wg.Add(1)
        // BUG(julius): Look at fixing thundering herd.
        go func(rule Rule) {
            defer wg.Done()

            start := time.Now()
            evalTotal.Inc()

            vector, err := rule.eval(now, g.opts.QueryEngine)
            if err != nil {
                // Canceled queries are intentional termination of queries. This normally
                // happens on shutdown and thus we skip logging of any errors here.
                if _, ok := err.(promql.ErrQueryCanceled); !ok {
                    log.Warnf("Error while evaluating rule %q: %s", rule, err)
                }
                evalFailures.Inc()
            }

            var rtyp ruleType
            switch r := rule.(type) {
            case *AlertingRule:
                rtyp = ruleTypeAlert
                g.sendAlerts(r, now)
            case *RecordingRule:
                rtyp = ruleTypeRecording
            default:
                panic(fmt.Errorf("unknown rule type: %T", rule))
            }

            evalDuration.WithLabelValues(string(rtyp)).Observe(
                float64(time.Since(start)) / float64(time.Second),
            )

            for _, s := range vector {
                g.opts.SampleAppender.Append(s)
            }
        }(rule)
    }

    wg.Wait()
}
Example 8: consoles
func (h *Handler) consoles(w http.ResponseWriter, r *http.Request) {
    ctx := route.Context(r)
    name := route.Param(ctx, "filepath")

    file, err := http.Dir(h.options.ConsoleTemplatesPath).Open(name)
    if err != nil {
        http.Error(w, err.Error(), http.StatusNotFound)
        return
    }
    defer file.Close()
    text, err := ioutil.ReadAll(file)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    // Provide URL parameters as a map for easy use. Advanced users may have need for
    // parameters beyond the first, so provide RawParams.
    rawParams, err := url.ParseQuery(r.URL.RawQuery)
    if err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    params := map[string]string{}
    for k, v := range rawParams {
        params[k] = v[0]
    }
    data := struct {
        RawParams url.Values
        Params    map[string]string
        Path      string
    }{
        RawParams: rawParams,
        Params:    params,
        Path:      strings.TrimLeft(name, "/"),
    }

    tmpl := template.NewTemplateExpander(string(text), "__console_"+name, data, model.Now(), h.queryEngine, h.options.ExternalURL.Path)
    filenames, err := filepath.Glob(h.options.ConsoleLibrariesPath + "/*.lib")
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    result, err := tmpl.ExpandHTML(filenames)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    io.WriteString(w, result)
}
Example 9: preloadChunks
// preloadChunks is an internal helper method.
func (s *memorySeries) preloadChunks(
    indexes []int, fp model.Fingerprint, mss *MemorySeriesStorage,
) (SeriesIterator, error) {
    loadIndexes := []int{}
    pinnedChunkDescs := make([]*chunk.Desc, 0, len(indexes))
    for _, idx := range indexes {
        cd := s.chunkDescs[idx]
        pinnedChunkDescs = append(pinnedChunkDescs, cd)
        cd.Pin(mss.evictRequests) // Have to pin everything first to prevent immediate eviction on chunk loading.
        if cd.IsEvicted() {
            loadIndexes = append(loadIndexes, idx)
        }
    }
    chunk.Ops.WithLabelValues(chunk.Pin).Add(float64(len(pinnedChunkDescs)))

    if len(loadIndexes) > 0 {
        if s.chunkDescsOffset == -1 {
            panic("requested loading chunks from persistence in a situation where we must not have persisted data for chunk descriptors in memory")
        }
        chunks, err := mss.loadChunks(fp, loadIndexes, s.chunkDescsOffset)
        if err != nil {
            // Unpin the chunks since we won't return them as pinned chunks now.
            for _, cd := range pinnedChunkDescs {
                cd.Unpin(mss.evictRequests)
            }
            chunk.Ops.WithLabelValues(chunk.Unpin).Add(float64(len(pinnedChunkDescs)))
            return nopIter, err
        }
        for i, c := range chunks {
            s.chunkDescs[loadIndexes[i]].SetChunk(c)
        }
    }

    if !s.headChunkClosed && indexes[len(indexes)-1] == len(s.chunkDescs)-1 {
        s.headChunkUsedByIterator = true
    }

    curriedQuarantineSeries := func(err error) {
        mss.quarantineSeries(fp, s.metric, err)
    }

    iter := &boundedIterator{
        it:    s.newIterator(pinnedChunkDescs, curriedQuarantineSeries, mss.evictRequests),
        start: model.Now().Add(-mss.dropAfter),
    }
    return iter, nil
}
Example 10: TestRuleEval
func TestRuleEval(t *testing.T) {
    storage, closer := local.NewTestStorage(t, 2)
    defer closer.Close()
    engine := promql.NewEngine(storage, nil)
    ctx, cancelCtx := context.WithCancel(context.Background())
    defer cancelCtx()

    now := model.Now()
    suite := []struct {
        name   string
        expr   promql.Expr
        labels model.LabelSet
        result model.Vector
    }{
        {
            name:   "nolabels",
            expr:   &promql.NumberLiteral{Val: 1},
            labels: model.LabelSet{},
            result: model.Vector{&model.Sample{
                Value:     1,
                Timestamp: now,
                Metric:    model.Metric{"__name__": "nolabels"},
            }},
        },
        {
            name:   "labels",
            expr:   &promql.NumberLiteral{Val: 1},
            labels: model.LabelSet{"foo": "bar"},
            result: model.Vector{&model.Sample{
                Value:     1,
                Timestamp: now,
                Metric:    model.Metric{"__name__": "labels", "foo": "bar"},
            }},
        },
    }

    for _, test := range suite {
        rule := NewRecordingRule(test.name, test.expr, test.labels)
        result, err := rule.eval(ctx, now, engine, "")
        if err != nil {
            t.Fatalf("Error evaluating %s", test.name)
        }
        if !reflect.DeepEqual(result, test.result) {
            t.Fatalf("Error: expected %q, got %q", test.result, result)
        }
    }
}
Example 11: executeTemplate
func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
    text, err := h.getTemplate(name)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    tmpl := template.NewTemplateExpander(text, name, data, model.Now(), h.queryEngine, h.options.ExternalURL.Path)
    tmpl.Funcs(tmplFuncs(h.consolesPath(), h.options))

    result, err := tmpl.ExpandHTML(nil)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    io.WriteString(w, result)
}
Example 12: eval
// eval runs a single evaluation cycle in which all rules are evaluated in parallel.
// In the future a single group will be evaluated sequentially to properly handle
// rule dependency.
func (g *Group) eval() {
    var (
        now = model.Now()
        wg  sync.WaitGroup
    )

    for _, rule := range g.rules {
        wg.Add(1)
        // BUG(julius): Look at fixing thundering herd.
        go func(rule Rule) {
            defer wg.Done()

            start := time.Now()
            evalTotal.Inc()

            vector, err := rule.eval(now, g.opts.QueryEngine)
            if err != nil {
                evalFailures.Inc()
                log.Warnf("Error while evaluating rule %q: %s", rule, err)
            }

            var rtyp ruleType
            switch r := rule.(type) {
            case *AlertingRule:
                rtyp = ruleTypeAlert
                g.sendAlerts(r, now)
            case *RecordingRule:
                rtyp = ruleTypeRecording
            default:
                panic(fmt.Errorf("unknown rule type: %T", rule))
            }

            evalDuration.WithLabelValues(string(rtyp)).Observe(
                float64(time.Since(start)) / float64(time.Second),
            )

            for _, s := range vector {
                g.opts.SampleAppender.Append(s)
            }
        }(rule)
    }

    wg.Wait()
}
Example 13: gatherURL
func (g *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
    resp, err := client.Get(url)
    if err != nil {
        return fmt.Errorf("error making HTTP request to %s: %s", url, err)
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
    }

    format := expfmt.ResponseFormat(resp.Header)

    decoder := expfmt.NewDecoder(resp.Body, format)
    options := &expfmt.DecodeOptions{
        Timestamp: model.Now(),
    }
    sampleDecoder := &expfmt.SampleDecoder{
        Dec:  decoder,
        Opts: options,
    }

    for {
        var samples model.Vector
        err := sampleDecoder.Decode(&samples)
        if err == io.EOF {
            break
        } else if err != nil {
            return fmt.Errorf("error processing samples for %s: %s",
                url, err)
        }

        for _, sample := range samples {
            tags := make(map[string]string)
            for key, value := range sample.Metric {
                if key == model.MetricNameLabel {
                    continue
                }
                tags[string(key)] = string(value)
            }
            acc.Add("prometheus_"+string(sample.Metric[model.MetricNameLabel]),
                float64(sample.Value), tags)
        }
    }

    return nil
}
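Here model.Now() serves as the fallback timestamp: DecodeOptions.Timestamp is stamped onto samples whose exposition lines carry no timestamp of their own. A minimal self-contained sketch of the same decoding pipeline, assuming the expfmt API of the same era as the example (expfmt.FmtText is the text exposition format constant; the payload string is made up for illustration):

package main

import (
    "fmt"
    "io"
    "strings"

    "github.com/prometheus/common/expfmt"
    "github.com/prometheus/common/model"
)

func main() {
    payload := "# TYPE requests_total counter\nrequests_total{job=\"demo\"} 42\n"

    dec := expfmt.NewDecoder(strings.NewReader(payload), expfmt.FmtText)
    sd := &expfmt.SampleDecoder{
        Dec: dec,
        // Text-format samples usually carry no timestamp; DecodeOptions
        // supplies model.Now() as the default for those.
        Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
    }

    for {
        var samples model.Vector
        if err := sd.Decode(&samples); err == io.EOF {
            break
        } else if err != nil {
            panic(err)
        }
        for _, s := range samples {
            fmt.Println(s.Metric, s.Value, s.Timestamp)
        }
    }
}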
Example 14: TestTargetRecordScrapeHealth
func TestTargetRecordScrapeHealth(t *testing.T) {
    var (
        testTarget = newTestTarget("example.url:80", 0, model.LabelSet{model.JobLabel: "testjob"})
        now        = model.Now()
        appender   = &collectResultAppender{}
    )
    testTarget.report(appender, now.Time(), 2*time.Second, nil)

    result := appender.result

    if len(result) != 2 {
        t.Fatalf("Expected two samples, got %d", len(result))
    }

    actual := result[0]
    expected := &model.Sample{
        Metric: model.Metric{
            model.MetricNameLabel: scrapeHealthMetricName,
            model.InstanceLabel:   "example.url:80",
            model.JobLabel:        "testjob",
        },
        Timestamp: now,
        Value:     1,
    }
    if !actual.Equal(expected) {
        t.Fatalf("Expected and actual samples not equal. Expected: %v, actual: %v", expected, actual)
    }

    actual = result[1]
    expected = &model.Sample{
        Metric: model.Metric{
            model.MetricNameLabel: scrapeDurationMetricName,
            model.InstanceLabel:   "example.url:80",
            model.JobLabel:        "testjob",
        },
        Timestamp: now,
        Value:     2.0,
    }
    if !actual.Equal(expected) {
        t.Fatalf("Expected and actual samples not equal. Expected: %v, actual: %v", expected, actual)
    }
}
Example 15: NewIterator
// NewIterator implements Storage.
func (s *memorySeriesStorage) NewIterator(fp model.Fingerprint) SeriesIterator {
    s.fpLocker.Lock(fp)
    defer s.fpLocker.Unlock(fp)

    series, ok := s.fpToSeries.get(fp)
    if !ok {
        // Oops, no series for fp found. That happens if, after
        // preloading is done, the whole series is identified as old
        // enough for purging and hence purged for good. As there is no
        // data left to iterate over, return an iterator that will never
        // return any values.
        return nopSeriesIterator{}
    }
    return &boundedIterator{
        it:    series.newIterator(),
        start: model.Now().Add(-s.dropAfter),
    }
}
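The start: model.Now().Add(-s.dropAfter) field recurs throughout these examples: iterators are clipped at the retention boundary so callers never see samples that are already due for purging. A minimal sketch of that clipping logic; the names clipToRetention, retention, and samples are hypothetical illustrations, not taken from the examples:

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/common/model"
)

// clipToRetention drops sample pairs older than the retention boundary,
// mirroring what boundedIterator's start field enforces in the examples.
func clipToRetention(samples []model.SamplePair, retention time.Duration) []model.SamplePair {
    start := model.Now().Add(-retention)
    kept := samples[:0]
    for _, sp := range samples {
        if !sp.Timestamp.Before(start) {
            kept = append(kept, sp)
        }
    }
    return kept
}

func main() {
    now := model.Now()
    samples := []model.SamplePair{
        {Timestamp: now.Add(-2 * time.Hour), Value: 1},
        {Timestamp: now.Add(-30 * time.Minute), Value: 2},
    }
    fmt.Println(clipToRetention(samples, time.Hour)) // only the 30-minute-old pair survives
}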