This article collects typical usage examples of the OutputRunner type from the Golang package github.com/mozilla-services/heka/pipeline. If you are wondering what OutputRunner is for, how to use it, or what real code that uses it looks like, the curated examples below should help.
Fifteen OutputRunner code examples are shown, ordered by popularity by default.
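Nearly every example below follows the same consumption pattern: read packs from or.InChan(), optionally serialize them with or.Encode(), report failures through or.LogError(), and recycle each pack exactly once (depending on the Heka version, Recycle takes either no argument or an error). The minimal sketch below shows that shape in isolation; the MyOutput type and its send method are hypothetical stand-ins, not part of the examples that follow.

func (o *MyOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    // Outputs that rely on an encoder typically fail fast if none is configured.
    if or.Encoder() == nil {
        return errors.New("Encoder must be specified.")
    }
    for pack := range or.InChan() {
        outBytes, err := or.Encode(pack)
        pack.Recycle() // always recycle, even when encoding fails
        if err != nil {
            or.LogError(err)
            continue
        }
        if outBytes == nil {
            continue // encoder chose to skip this message
        }
        if err = o.send(outBytes); err != nil { // hypothetical delivery method
            or.LogError(err)
        }
    }
    return nil
}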
Example 1: Run
// Run is the plugin's main loop: it iterates over received messages,
// checking the message hostname and delivering to the output if the
// hostname is in our config.
func (o *EmailOutput) Run(runner pipeline.OutputRunner, helper pipeline.PluginHelper) (
    err error) {
    var (
        payload string
    )
    body := bytes.NewBuffer(nil)
    for pack := range runner.InChan() {
        payload = pack.Message.GetPayload()
        if len(payload) > 100 {
            payload = payload[:100]
        }
        body.WriteString(fmt.Sprintf("Subject: %s [%d] %s@%s: ",
            utils.TsTime(pack.Message.GetTimestamp()).Format(time.RFC3339),
            pack.Message.GetSeverity(), pack.Message.GetLogger(),
            pack.Message.GetHostname()))
        body.WriteString(payload)
        body.WriteString("\r\n\r\n")
        body.WriteString(pack.Message.GetPayload())
        pack.Recycle()
        err = o.sendMail(body.Bytes())
        body.Reset()
        if err != nil {
            return fmt.Errorf("error sending email: %s", err)
        }
    }
    return
}
Example 2: Run
func (o *HttpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    if or.Encoder() == nil {
        return errors.New("Encoder must be specified.")
    }
    var (
        e        error
        outBytes []byte
    )
    inChan := or.InChan()
    for pack := range inChan {
        outBytes, e = or.Encode(pack)
        pack.Recycle()
        if e != nil {
            or.LogError(e)
            continue
        }
        if outBytes == nil {
            continue
        }
        if e = o.request(or, outBytes); e != nil {
            or.LogError(e)
        }
    }
    return
}
Example 3: processKafkaErrors
func (k *KafkaOutput) processKafkaErrors(or pipeline.OutputRunner, errChan <-chan *sarama.ProducerError,
    shutdownChan chan struct{}, wg *sync.WaitGroup) {
    var (
        ok   = true
        pErr *sarama.ProducerError
    )
    for ok {
        select {
        case pErr, ok = <-errChan:
            if !ok {
                break
            }
            err := pErr.Err
            switch err.(type) {
            case sarama.PacketEncodingError:
                atomic.AddInt64(&k.kafkaEncodingErrors, 1)
                or.LogError(fmt.Errorf("kafka encoding error: %s", err.Error()))
            default:
                atomic.AddInt64(&k.kafkaDroppedMessages, 1)
                if err != nil {
                    msgValue, _ := pErr.Msg.Value.Encode()
                    or.LogError(fmt.Errorf("kafka error '%s' for message '%s'", err.Error(),
                        string(msgValue)))
                }
            }
        case <-shutdownChan:
            ok = false
            break
        }
    }
    wg.Done()
}
Example 4: Run
func (o *UdpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    if or.Encoder() == nil {
        return errors.New("Encoder required.")
    }
    var (
        outBytes []byte
        e        error
    )
    for pack := range or.InChan() {
        if outBytes, e = or.Encode(pack); e != nil {
            or.LogError(fmt.Errorf("Error encoding message: %s", e.Error()))
        } else if outBytes != nil {
            msgSize := len(outBytes)
            if msgSize > o.UdpOutputConfig.MaxMessageSize {
                or.LogError(fmt.Errorf("Message has exceeded allowed UDP data size: %d > %d",
                    msgSize, o.UdpOutputConfig.MaxMessageSize))
            } else {
                o.conn.Write(outBytes)
            }
        }
        pack.Recycle()
    }
    return
}
Example 5: Run
func (wso *WebSocketsOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    for pc := range or.InChan() {
        wso.broadcast <- pc.Pack.Message
        pc.Pack.Recycle()
    }
    return nil
}
Example 6: Run
func (clo *CloudLoggingOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        pack       *pipeline.PipelinePack
        e          error
        k          string
        m          *logging.LogEntry
        exist      bool
        ok         = true
        inChan     = or.InChan()
        groupBatch = make(map[string]*LogBatch)
        outBatch   *LogBatch
        ticker     = time.Tick(time.Duration(clo.conf.FlushInterval) * time.Millisecond)
    )
    clo.or = or
    go clo.committer()
    for ok {
        select {
        case pack, ok = <-inChan:
            // Closed inChan => we're shutting down, flush data.
            if !ok {
                clo.sendGroupBatch(groupBatch)
                close(clo.batchChan)
                <-clo.outputExit
                break
            }
            k, m, e = clo.Encode(pack)
            pack.Recycle()
            if e != nil {
                or.LogError(e)
                continue
            }
            if k != "" && m != nil {
                outBatch, exist = groupBatch[k]
                if !exist {
                    outBatch = &LogBatch{count: 0, batch: make([]*logging.LogEntry, 0, 100), name: k}
                    groupBatch[k] = outBatch
                }
                outBatch.batch = append(outBatch.batch, m)
                if outBatch.count++; clo.CheckFlush(int(outBatch.count), len(outBatch.batch)) {
                    if len(outBatch.batch) > 0 {
                        outBatch.batch = clo.sendBatch(k, outBatch.batch, outBatch.count)
                        outBatch.count = 0
                    }
                }
            }
        case <-ticker:
            clo.sendGroupBatch(groupBatch)
        case err = <-clo.outputExit:
            ok = false
        }
    }
    return
}
Example 7: Run
func (ro *RedisMQOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    var outgoing string
    for pack := range or.InChan() {
        outgoing = fmt.Sprintf("%s", pack.Message.GetPayload())
        ro.rdqueue.Put(outgoing)
        pack.Recycle()
    }
    return nil
}
Example 8: Run
func (cmo *CloudMonitoringOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        pack     *pipeline.PipelinePack
        e        error
        m        *cloudmonitoring.TimeseriesPoint
        ok       = true
        count    int64
        inChan   = or.InChan()
        outBatch = make([]*cloudmonitoring.TimeseriesPoint, 0, 200)
        ticker   = time.Tick(time.Duration(cmo.conf.FlushInterval) * time.Millisecond)
    )
    cmo.or = or
    go cmo.committer()
    for ok {
        select {
        case pack, ok = <-inChan:
            // Closed inChan => we're shutting down, flush data.
            if !ok {
                if len(outBatch) > 0 {
                    cmo.sendBatch(outBatch, count)
                }
                close(cmo.batchChan)
                <-cmo.outputExit
                break
            }
            m, e = cmo.Encode(pack)
            pack.Recycle()
            if e != nil {
                or.LogError(e)
                continue
            }
            if m != nil {
                outBatch = append(outBatch, m)
                if count++; cmo.CheckFlush(int(count), len(outBatch)) {
                    if len(outBatch) > 0 {
                        outBatch = cmo.sendBatch(outBatch, count)
                        count = 0
                    }
                }
            }
        case <-ticker:
            if len(outBatch) > 0 {
                outBatch = cmo.sendBatch(outBatch, count)
            }
            count = 0
        case err = <-cmo.outputExit:
            ok = false
        }
    }
    return
}
Example 9: Run
func (o *UdpOutput) Run(runner pipeline.OutputRunner, helper pipeline.PluginHelper) (
    err error) {
    var outgoing string
    for pack := range runner.InChan() {
        outgoing = fmt.Sprintf("%s\n", pack.Message.GetPayload())
        o.conn.Write([]byte(outgoing))
        pack.Recycle()
    }
    return
}
Example 10: Run
func (cef *CefOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    var (
        facility, priority syslog.Priority
        ident              string
        ok                 bool
        p                  syslog.Priority
        e                  error
        pack               *pipeline.PipelinePack
    )
    syslogMsg := new(SyslogMsg)
    for pack = range or.InChan() {
        // default values
        facility, priority = syslog.LOG_LOCAL4, syslog.LOG_INFO
        ident = "heka_no_ident"
        priField := pack.Message.FindFirstField("cef_meta.syslog_priority")
        if priField != nil {
            priStr := priField.ValueString[0]
            if p, ok = SYSLOG_PRIORITY[priStr]; ok {
                priority = p
            }
        }
        facField := pack.Message.FindFirstField("cef_meta.syslog_facility")
        if facField != nil {
            facStr := facField.ValueString[0]
            if p, ok = SYSLOG_FACILITY[facStr]; ok {
                facility = p
            }
        }
        idField := pack.Message.FindFirstField("cef_meta.syslog_ident")
        if idField != nil {
            ident = idField.ValueString[0]
        }
        syslogMsg.priority = priority | facility
        syslogMsg.prefix = ident
        syslogMsg.payload = pack.Message.GetPayload()
        pack.Recycle()
        _, e = cef.syslogWriter.WriteString(syslogMsg.priority, syslogMsg.prefix,
            syslogMsg.payload)
        if e != nil {
            or.LogError(e)
        }
    }
    cef.syslogWriter.Close()
    return
}
Example 11: Run
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
    defer func() {
        k.producer.Close()
        k.client.Close()
    }()
    if or.Encoder() == nil {
        return errors.New("Encoder required.")
    }
    inChan := or.InChan()
    errChan := k.producer.Errors()
    var wg sync.WaitGroup
    wg.Add(1)
    go k.processKafkaErrors(or, errChan, &wg)
    var (
        pack  *pipeline.PipelinePack
        topic = k.config.Topic
        key   sarama.Encoder
    )
    for pack = range inChan {
        atomic.AddInt64(&k.processMessageCount, 1)
        if k.topicVariable != nil {
            topic = getMessageVariable(pack.Message, k.topicVariable)
        }
        if k.hashVariable != nil {
            key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
        }
        if msgBytes, err := or.Encode(pack); err == nil {
            if msgBytes != nil {
                err = k.producer.QueueMessage(topic, key, sarama.ByteEncoder(msgBytes))
                if err != nil {
                    atomic.AddInt64(&k.processMessageFailures, 1)
                    or.LogError(err)
                }
            } else {
                atomic.AddInt64(&k.processMessageDiscards, 1)
            }
        } else {
            atomic.AddInt64(&k.processMessageFailures, 1)
            or.LogError(err)
        }
        pack.Recycle()
    }
    errChan <- Shutdown
    wg.Wait()
    return
}
Example 12: Run
func (rop *RedisOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    inChan := or.InChan()
    for pack := range inChan {
        payload := pack.Message.GetPayload()
        _, err := rop.conn.Do("LPUSH", rop.conf.Key, payload)
        if err != nil {
            or.LogError(err)
            pack.Recycle() // recycle even on error so packs are not leaked
            continue
        }
        pack.Recycle()
    }
    return nil
}
Example 13: Run
func (rlo *RedisListOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    inChan := or.InChan()
    for pack := range inChan {
        payload := pack.Message.GetPayload()
        _, err := rlo.conn.Do("LPUSH", rlo.conf.ListName, payload)
        if err != nil {
            or.LogError(fmt.Errorf("Redis LPUSH error: %s", err))
            pack.Recycle(nil) // recycle even on error so packs are not leaked
            continue
        }
        pack.Recycle(nil)
    }
    return nil
}
Example 14: Run
func (f *FirehoseOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) error {
    for pack := range or.InChan() {
        payload := pack.Message.GetPayload()
        timestamp := time.Unix(0, pack.Message.GetTimestamp()).Format("2006-01-02 15:04:05.000")
        pack.Recycle(nil)
        // Verify input is valid json
        object := make(map[string]interface{})
        err := json.Unmarshal([]byte(payload), &object)
        if err != nil {
            or.LogError(err)
            continue
        }
        if f.timestampColumn != "" {
            // add Heka message's timestamp to column named in timestampColumn
            object[f.timestampColumn] = timestamp
        }
        record, err := json.Marshal(object)
        if err != nil {
            or.LogError(err)
            continue
        }
        // Send data to the firehose
        err = f.client.PutRecord(record)
        if err != nil {
            or.LogError(err)
            continue
        }
    }
    return nil
}
Example 15: SendEntries
func (k *KinesisOutput) SendEntries(or pipeline.OutputRunner, entries []*kin.PutRecordsRequestEntry, backoff time.Duration, retries int) error {
    k.hasTriedToSend = true
    multParams := &kin.PutRecordsInput{
        Records:    entries,
        StreamName: aws.String(k.config.Stream),
    }
    data, err := k.Client.PutRecords(multParams)
    // Update statistics & handle errors
    if err != nil {
        if or != nil {
            or.LogError(fmt.Errorf("Batch: Error pushing message to Kinesis: %s", err))
        }
        atomic.AddInt64(&k.batchesFailed, 1)
        if retries <= k.config.MaxRetries || k.config.MaxRetries == -1 {
            atomic.AddInt64(&k.retryCount, 1)
            time.Sleep(backoff + k.backoffIncrement)
            // filter down to only the failed records:
            retryEntries := []*kin.PutRecordsRequestEntry{}
            if data != nil && len(data.Records) == len(entries) {
                for i, entry := range entries {
                    response := data.Records[i]
                    if response.ErrorCode != nil {
                        // in case we are rate limited, push the entry to a new shard.
                        entry.PartitionKey = aws.String(fmt.Sprintf("%d", rand.Int63()))
                        retryEntries = append(retryEntries, entry)
                    }
                }
            } else {
                // No per-record results came back; retry the whole batch.
                retryEntries = entries
            }
            k.SendEntries(or, retryEntries, backoff+k.backoffIncrement, retries+1)
        } else {
            atomic.AddInt64(&k.dropMessageCount, int64(len(entries)))
            if or != nil {
                or.LogError(fmt.Errorf("Batch: Hit max retries when attempting to send data"))
            }
        }
    }
    atomic.AddInt64(&k.batchesSent, 1)
    return nil
}
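For completeness: Heka only invokes Run (or helper methods like SendEntries) after the output type has been registered with the pipeline package, normally from an init function. A minimal registration sketch follows; the "MyOutput" name reuses the hypothetical type from the intro and is not one of the plugins shown above.

func init() {
    // Register the output so Heka can construct it from a TOML config section.
    pipeline.RegisterPlugin("MyOutput", func() interface{} {
        return new(MyOutput)
    })
}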