本文整理匯總了Golang中github.com/elastic/beats/libbeat/logp.Err函數的典型用法代碼示例。如果您正苦於以下問題:Golang Err函數的具體用法?Golang Err怎麼用?Golang Err使用的例子?那麼, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了Err函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Golang代碼示例。
示例1: Start
// Start runs the module: it recovers from panics, performs the module's
// Setup, and launches every configured metricset in its own goroutine.
func (m *Module) Start(b *beat.Beat) {
	defer func() {
		if r := recover(); r != nil {
			logp.Err("Module %s panicked and stopped running. Reason: %+v", m.Name, r)
		}
	}()

	if !m.Enabled {
		logp.Debug("helper", "Not starting module %s as not enabled.", m.Name)
		return
	}

	logp.Info("Start Module: %v", m.Name)

	err := m.Moduler.Setup()
	if err != nil {
		logp.Err("Error setting up module: %s. Not starting metricsets for this module.", err)
		return
	}

	for _, metricSet := range m.MetricSets {
		// BUG FIX: Add must run before the goroutine starts. Otherwise the
		// goroutine may call wg.Done (or a waiter may observe the counter)
		// before Add has executed, a documented sync.WaitGroup race.
		m.wg.Add(1)
		go metricSet.Start(b, m.wg)
	}
}
示例2: startFetching
// startFetching performs an immediate fetch for the MetricSet then it
// begins a continuous timer scheduled loop to fetch data. To stop the loop the
// done channel should be closed.
func (msw *metricSetWrapper) startFetching(
	done <-chan struct{},
	out chan<- common.MapStr,
) {
	debugf("Starting %s", msw)
	defer debugf("Stopped %s", msw)

	// One synchronous fetch before entering the periodic loop.
	if err := msw.fetch(done, out); err != nil {
		logp.Err("%v", err)
	}

	// All further fetches happen on the module's configured period.
	ticker := time.NewTicker(msw.Module().Config().Period)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			if err := msw.fetch(done, out); err != nil {
				logp.Err("%v", err)
			}
		}
	}
}
示例3: PublishEvent
// PublishEvent serializes the event to JSON and appends it as one line to the
// rotated output file. Events that cannot be encoded are acknowledged as
// completed so the pipeline never retries them.
func (out *fileOutput) PublishEvent(
	sig op.Signaler,
	opts outputs.Options,
	event common.MapStr,
) error {
	encoded, err := json.Marshal(event)
	if err != nil {
		// mark as success so event is not sent again.
		op.SigCompleted(sig)
		logp.Err("Fail to json encode event(%v): %#v", err, event)
		return err
	}

	if err = out.rotator.WriteLine(encoded); err != nil {
		if opts.Guaranteed {
			logp.Critical("Unable to write events to file: %s", err)
		} else {
			logp.Err("Error when writing line to file: %s", err)
		}
	}
	op.Sig(sig, err)
	return err
}
示例4: createWatchUpdater
// createWatchUpdater returns the callback invoked with new watch content. The
// callback decodes a stream of concatenated JSON objects from the payload,
// converts each into a config, and applies the complete set to the monitor.
func createWatchUpdater(monitor *Monitor) func(content []byte) {
	return func(content []byte) {
		defer logp.Recover("Failed applying monitor watch")

		// The payload may contain several JSON documents back to back.
		decoder := json.NewDecoder(bytes.NewBuffer(content))
		var configs []*common.Config
		for decoder.More() {
			var raw map[string]interface{}
			if err := decoder.Decode(&raw); err != nil {
				logp.Err("Failed parsing json object: %v", err)
				return
			}
			logp.Info("load watch object: %v", raw)

			cfg, err := common.NewConfigFrom(raw)
			if err != nil {
				logp.Err("Failed normalizing json input: %v", err)
				return
			}
			configs = append(configs, cfg)
		}

		// Hand the full configuration set to the monitor in one call.
		if err := monitor.Update(configs); err != nil {
			logp.Err("Failed applying configuration: %v", err)
		}
	}
}
示例5: scan
// scan walks every file returned by getFiles and decides per file whether to
// apply ignore_older handling, resume an existing harvester, or start a new one.
func (p *ProspectorLog) scan() {
	for path, info := range p.getFiles() {
		logp.Debug("prospector", "Check file for harvesting: %s", path)

		// Build the candidate state and look up what we already know.
		newState := file.NewState(info, path)
		lastState := p.Prospector.states.FindPrevious(newState)

		// Files falling under ignore_older only get a state update.
		if p.isIgnoreOlder(newState) {
			if err := p.handleIgnoreOlder(lastState, newState); err != nil {
				logp.Err("Updating ignore_older state error: %s", err)
			}
			continue
		}

		// A known file is handed to the existing-file logic.
		if !lastState.IsEmpty() {
			p.harvestExistingFile(newState, lastState)
			continue
		}

		// No previous state: brand-new file, start from offset 0.
		logp.Debug("prospector", "Start harvester for new file: %s", newState.Source)
		if err := p.Prospector.startHarvester(newState, 0); err != nil {
			logp.Err("Harvester could not be started on new file: %s, Err: %s", newState.Source, err)
		}
	}
}
示例6: PublishEvent
// PublishEvent encodes the event as JSON and writes it as a single line to
// the rotated output file, signaling the transaction's outcome either way.
func (out *fileOutput) PublishEvent(
	trans outputs.Signaler,
	opts outputs.Options,
	event common.MapStr,
) error {
	serialized, err := json.Marshal(event)
	if err != nil {
		// mark as success so event is not sent again.
		outputs.SignalCompleted(trans)
		logp.Err("Fail to convert the event to JSON: %s", err)
		return err
	}

	if err = out.rotator.WriteLine(serialized); err != nil {
		if opts.Guaranteed {
			logp.Critical("Unable to write events to file: %s", err)
		} else {
			logp.Err("Error when writing line to file: %s", err)
		}
	}
	outputs.Signal(trans, err)
	return err
}
示例7: Setup
// Setup prepares Journalbeat for the main loop (starts journalreader, etc.)
func (jb *Journalbeat) Setup(b *beat.Beat) error {
	logp.Info("Journalbeat Setup")
	jb.output = b.Publisher.Connect()

	// Buffer channel else write to it blocks when Stop is called while
	// FollowJournal waits to write next event
	jb.done = make(chan int, 1)
	jb.recv = make(chan sdjournal.JournalEntry)
	jb.cursorChan = make(chan string)
	jb.cursorChanFlush = make(chan int)

	jr, err := sdjournal.NewJournalReader(sdjournal.JournalReaderConfig{
		Since: time.Duration(1),
		// NumFromTail: 0,
	})
	if err != nil {
		logp.Err("Could not create JournalReader")
		return err
	}
	jb.jr = jr

	// seek to position
	if err = jb.seekToPosition(); err != nil {
		errMsg := fmt.Sprintf("seeking to a good position in journal failed: %v", err)
		// BUG FIX: pass the message as an argument, not as the format
		// string — the wrapped error may itself contain '%' verbs.
		logp.Err("%s", errMsg)
		return fmt.Errorf("%s", errMsg)
	}

	// done with setup
	return nil
}
示例8: scan
// scan inspects every file reported by getFiles and either skips it because
// of ignore_older, resumes harvesting with the previously recorded state, or
// starts a fresh harvester for an unknown file.
func (p *ProspectorLog) scan() {
	for path, info := range p.getFiles() {
		logp.Debug("prospector", "Check file for harvesting: %s", path)

		newState := file.NewState(info, path)
		lastState := p.Prospector.states.FindPrevious(newState)

		if p.isIgnoreOlder(newState) {
			logp.Debug("prospector", "Ignore file because ignore_older reached: %s", newState.Source)
			// If last state is empty, it means state was removed or never created -> can be ignored
			if !lastState.IsEmpty() && !lastState.Finished {
				logp.Err("File is falling under ignore_older before harvesting is finished. Adjust your close_* settings: %s", newState.Source)
			}
			continue
		}

		// Known file: delegate to the existing-file handling.
		if !lastState.IsEmpty() {
			p.harvestExistingFile(newState, lastState)
			continue
		}

		// Unknown file: harvest from the beginning.
		logp.Debug("prospector", "Start harvester for new file: %s", newState.Source)
		if err := p.Prospector.startHarvester(newState, 0); err != nil {
			logp.Err("Harvester could not be started on new file: %s, Err: %s", newState.Source, err)
		}
	}
}
示例9: PublishIPs
// PublishIPs stores the shipper's local IP addresses in the Redis topology
// hash, refreshes the key's expiration, and updates the local topology map.
func (out *redisOutput) PublishIPs(name string, localAddrs []string) error {
	logp.Debug("output_redis", "[%s] Publish the IPs %s", name, localAddrs)

	// connect to db
	conn, err := out.RedisConnect(out.DbTopology)
	if err != nil {
		return err
	}
	defer func() { _ = conn.Close() }()

	if _, err = conn.Do("HSET", name, "ipaddrs", strings.Join(localAddrs, ",")); err != nil {
		logp.Err("[%s] Fail to set the IP addresses: %s", name, err)
		return err
	}

	if _, err = conn.Do("EXPIRE", name, int(out.TopologyExpire.Seconds())); err != nil {
		logp.Err("[%s] Fail to set the expiration time: %s", name, err)
		return err
	}

	out.UpdateLocalTopologyMap(conn)
	return nil
}
示例10: itemStatus
// itemStatus parses one bulk-response item object of the shape
// {"<action>": {...}} and returns the item's HTTP status code and the raw
// error message bytes (if any).
func itemStatus(reader *jsonReader) (int, []byte, error) {
	// skip outer dictionary
	if err := reader.expectDict(); err != nil {
		return 0, nil, errExpectedItemObject
	}

	// find first field in outer dictionary (e.g. 'create')
	kind, _, err := reader.nextFieldName()
	if err != nil {
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}
	if kind == dictEnd {
		err = errUnexpectedEmptyObject
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}

	// parse actual item response code and error message
	status, msg, err := itemStatusInner(reader)
	if err != nil {
		// BUG FIX: this error was previously clobbered by the following
		// reader.step() call without ever being checked.
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}

	// close dictionary. Expect outer dictionary to have only one element
	kind, _, err = reader.step()
	if err != nil {
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}
	if kind != dictEnd {
		err = errExcpectedObjectEnd
		logp.Err("Failed to parse bulk response item: %s", err)
		return 0, nil, err
	}

	return status, msg, nil
}
示例11: bulkCollectPublishFails
// bulkCollectPublishFails checks per item errors returning all events
// to be tried again due to error code returned for that items. If indexing an
// event failed due to some error in the event itself (e.g. does not respect mapping),
// the event will be dropped.
func bulkCollectPublishFails(
	reader *jsonReader,
	events []common.MapStr,
) []common.MapStr {
	if err := reader.expectDict(); err != nil {
		logp.Err("Failed to parse bulk response: expected JSON object")
		return nil
	}

	// find 'items' field in response
	for {
		kind, name, err := reader.nextFieldName()
		if err != nil {
			logp.Err("Failed to parse bulk response")
			return nil
		}
		if kind == dictEnd {
			logp.Err("Failed to parse bulk response: no 'items' field in response")
			return nil
		}

		// found items array -> continue
		if bytes.Equal(name, nameItems) {
			break
		}
		reader.ignoreNext()
	}

	// check items field is an array
	if err := reader.expectArray(); err != nil {
		logp.Err("Failed to parse bulk response: expected items array")
		return nil
	}

	count := len(events)
	failed := events[:0] // reuse the input's backing array for the retry list
	for i := 0; i < count; i++ {
		status, msg, err := itemStatus(reader)
		if err != nil {
			return nil
		}

		if status < 300 {
			continue // ok value
		}

		if status < 500 && status != 429 {
			// hard failure, don't collect
			logp.Warn("Can not index event (status=%v): %s", status, msg)
			continue
		}

		// 5xx or 429 (too many requests): transient, collect for retry.
		logp.Info("Bulk item insert failed (i=%v, status=%v): %s", i, status, msg)
		failed = append(failed, events[i])
	}

	return failed
}
示例12: startFetching
// startFetching performs an immediate fetch for the specified host then it
// begins continuous timer scheduled loop to fetch data. To stop the loop the
// done channel should be closed. On exit the method will decrement the
// WaitGroup counter.
//
// startFetching manages fetching for a single host so it should be called once
// per host.
func (msw *metricSetWrapper) startFetching(
	done <-chan struct{},
	wg *sync.WaitGroup,
	host string,
) {
	defer wg.Done()

	// One synchronous fetch before the ticker fires for the first time.
	if err := msw.fetch(host); err != nil {
		logp.Err("fetch error: %v", err)
	}

	// All subsequent fetches run on the module's configured period.
	ticker := time.NewTicker(msw.Module().Config().Period)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			if err := msw.fetch(host); err != nil {
				logp.Err("%v", err)
			}
		}
	}
}
示例13: writeRegistry
// writeRegistry writes the new json registry file to disk. The states are
// first written to a temp file which is then atomically rotated into place,
// so a crash mid-write never leaves a corrupt registry.
func (r *Registrar) writeRegistry() error {
	logp.Debug("registrar", "Write registry file: %s", r.registryFile)

	tempfile := r.registryFile + ".new"
	f, err := os.Create(tempfile)
	if err != nil {
		logp.Err("Failed to create tempfile (%s) for writing: %s", tempfile, err)
		return err
	}

	// First clean up states
	states := r.states.GetStates()

	encoder := json.NewEncoder(f)
	if err = encoder.Encode(states); err != nil {
		// BUG FIX: the temp file handle was leaked on this error path.
		f.Close()
		logp.Err("Error when encoding the states: %s", err)
		return err
	}

	// Directly close file because of windows
	f.Close()

	logp.Debug("registrar", "Registry file updated. %d states written.", len(states))
	registryWrites.Add(1)
	statesCurrent.Set(int64(len(states)))

	return file.SafeFileRotate(r.registryFile, tempfile)
}
示例14: harvestExistingFile
// harvestExistingFile continues harvesting a file with a known state if needed.
// It handles four cases in order: the file grew (resume), the file shrank
// (truncation, restart from 0), the file was renamed (state update only), and
// otherwise no action because the harvester is still running or nothing changed.
func (p *ProspectorLog) harvestExistingFile(newState file.State, oldState file.State) {
	logp.Debug("prospector", "Update existing file for harvesting: %s, offset: %v", newState.Source, oldState.Offset)

	// No harvester is running for the file, start a new harvester.
	// It is important here that only the size is checked and not modification time, as modification time could be incorrect on windows
	// https://blogs.technet.microsoft.com/asiasupp/2010/12/14/file-date-modified-property-are-not-updating-while-modifying-a-file-without-closing-it/
	if oldState.Finished && newState.Fileinfo.Size() > oldState.Offset {
		// Resume harvesting of an old file we've stopped harvesting from.
		// This could also be an issue with force_close_older that a new harvester is started after each scan but not needed?
		// One problem with comparing modTime is that it is in seconds, and scans can happen more than once a second.
		logp.Debug("prospector", "Resuming harvesting of file: %s, offset: %v", newState.Source, oldState.Offset)
		err := p.Prospector.startHarvester(newState, oldState.Offset)
		if err != nil {
			logp.Err("Harvester could not be started on existing file: %s, Err: %s", newState.Source, err)
		}
		return
	}

	// File size was reduced -> truncated file: re-harvest from offset 0.
	if oldState.Finished && newState.Fileinfo.Size() < oldState.Offset {
		logp.Debug("prospector", "Old file was truncated. Starting from the beginning: %s", newState.Source)
		err := p.Prospector.startHarvester(newState, 0)
		if err != nil {
			logp.Err("Harvester could not be started on truncated file: %s, Err: %s", newState.Source, err)
		}

		filesTrucated.Add(1)
		return
	}

	// Check if file was renamed (same inode/state matched, different path).
	if oldState.Source != "" && oldState.Source != newState.Source {
		// This does not start a new harvester as it is assumed that the older harvester is still running
		// or no new lines were detected. It sends only an event status update to make sure the new name is persisted.
		logp.Debug("prospector", "File rename was detected: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset)

		if oldState.Finished {
			logp.Debug("prospector", "Updating state for renamed file: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset)
			// Update state because of file rotation; only the source path changes.
			oldState.Source = newState.Source
			event := input.NewEvent(oldState)
			err := p.Prospector.updateState(event)
			if err != nil {
				logp.Err("File rotation state update error: %s", err)
			}

			filesRenamed.Add(1)
		} else {
			logp.Debug("prospector", "File rename detected but harvester not finished yet.")
		}
	}

	if !oldState.Finished {
		// Nothing to do. Harvester is still running and file was not renamed.
		logp.Debug("prospector", "Harvester for file is still running: %s", newState.Source)
	} else {
		logp.Debug("prospector", "File didn't change: %s", newState.Source)
	}
}
示例15: serializeEvents
// serializeEvents JSON-encodes each event in data and appends the encoded
// bytes to 'to'. It returns the events considered successfully serialized
// together with the grown 'to' slice. 'i' is a caller-supplied index offset —
// presumably the position of data[0] within a larger batch; verify against
// the callers.
//
// NOTE(review): on the happy path 'succeeded' is all of 'data'. After the
// FIRST marshal failure it is truncated to data[:i] and never extended again,
// even though later events may still be appended to 'to' by the loop below
// failLoop — confirm this asymmetry is intended.
func serializeEvents(
	to []interface{},
	i int,
	data []outputs.Data,
) ([]outputs.Data, []interface{}) {
	succeeded := data
	for _, d := range data {
		jsonEvent, err := json.Marshal(d.Event)
		if err != nil {
			logp.Err("Failed to convert the event to JSON (%v): %#v", err, d.Event)
			// First failure: record the successful prefix, then continue
			// best-effort with the remaining events below.
			goto failLoop
		}
		to = append(to, jsonEvent)
		i++
	}
	return succeeded, to

failLoop:
	// Keep only the events serialized before the failure.
	succeeded = data[:i]
	// Skip the event that failed and keep encoding the rest; further
	// failures are logged and skipped without touching 'succeeded'.
	rest := data[i+1:]
	for _, d := range rest {
		jsonEvent, err := json.Marshal(d.Event)
		if err != nil {
			logp.Err("Failed to convert the event to JSON (%v): %#v", err, d.Event)
			i++
			continue
		}
		to = append(to, jsonEvent)
		i++
	}
	return succeeded, to
}