本文整理匯總了Golang中github.com/mozilla-services/heka/pipeline.OutputRunner.UpdateCursor方法的典型用法代碼示例。如果您正苦於以下問題:Golang OutputRunner.UpdateCursor方法的具體用法?Golang OutputRunner.UpdateCursor怎麽用?Golang OutputRunner.UpdateCursor使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類github.com/mozilla-services/heka/pipeline.OutputRunner的用法示例。
在下文中一共展示了OutputRunner.UpdateCursor方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: Run
// Run is the plugin's main loop: it pulls packs from the input channel,
// encodes each one, and delivers the encoded bytes via o.request.
// Encoding failures and nil encodings advance the queue cursor and drop
// the pack; request failures are recycled with a retry error so the
// message can be redelivered.
func (o *HttpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	if or.Encoder() == nil {
		return errors.New("Encoder must be specified.")
	}
	var (
		encoded []byte
		encErr  error
	)
	for pack := range or.InChan() {
		encoded, encErr = or.Encode(pack)
		switch {
		case encErr != nil:
			// Encoding errors are permanent; skip past this message.
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(fmt.Errorf("can't encode: %s", encErr))
		case encoded == nil:
			// Encoder chose to drop the message.
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
		default:
			if sendErr := o.request(or, encoded); sendErr != nil {
				// Ask for redelivery; don't advance the cursor.
				pack.Recycle(pipeline.NewRetryMessageError(sendErr.Error()))
			} else {
				or.UpdateCursor(pack.QueueCursor)
				pack.Recycle(nil)
			}
		}
	}
	return
}
示例2: Run
// Run pulls packs from the input channel, encodes each one, and writes the
// encoded bytes to the UDP connection. Messages that fail to encode or that
// exceed the configured maximum UDP payload size are dropped (the queue
// cursor is advanced so they won't be redelivered). Write errors are logged
// but don't stop the output, matching UDP's best-effort semantics.
func (o *UdpOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}
	var (
		outBytes []byte
		e        error
	)
	for pack := range or.InChan() {
		if outBytes, e = or.Encode(pack); e != nil {
			// Encoding errors are permanent; advance past this message.
			or.UpdateCursor(pack.QueueCursor)
			e = fmt.Errorf("Error encoding message: %s", e.Error())
			pack.Recycle(e)
			continue
		} else if outBytes != nil {
			msgSize := len(outBytes)
			if msgSize > o.UdpOutputConfig.MaxMessageSize {
				// Oversized datagrams can't be sent; drop and move on.
				or.UpdateCursor(pack.QueueCursor)
				e = fmt.Errorf("Message has exceeded allowed UDP data size: %d > %d",
					msgSize, o.UdpOutputConfig.MaxMessageSize)
				pack.Recycle(e)
				continue
			}
			// BUG FIX: the write error was previously discarded; surface it
			// so operators can see delivery failures. Delivery remains
			// best-effort, so we still advance the cursor below.
			if _, writeErr := o.conn.Write(outBytes); writeErr != nil {
				or.LogError(fmt.Errorf("writing to UDP connection: %s", writeErr))
			}
		}
		// A nil encoding means the encoder dropped the message on purpose;
		// it is acknowledged the same way as a successful send.
		or.UpdateCursor(pack.QueueCursor)
		pack.Recycle(nil)
	}
	return
}
示例3: Run
// Run drains the input channel, encoding each pack and queueing the result
// to the Kafka producer. Producer errors are consumed by a companion
// goroutine (processKafkaErrors), which is told to stop via the Shutdown
// sentinel once the input channel closes. Note the cursor is only advanced
// for permanently-skipped messages here; delivery acknowledgment for queued
// messages is handled asynchronously.
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	useBuffering := or.UsesBuffering()
	errChan := k.producer.Errors()

	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, &wg)

	topic := k.config.Topic
	var key sarama.Encoder

	for pack := range or.InChan() {
		atomic.AddInt64(&k.processMessageCount, 1)

		// Per-message topic / partition key, when configured.
		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

		msgBytes, encErr := or.Encode(pack)
		if encErr != nil {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(encErr)
			// Encoding errors are permanent; advance past the message.
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
		if msgBytes == nil {
			// Encoder dropped the message on purpose.
			atomic.AddInt64(&k.processMessageDiscards, 1)
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}

		queueErr := k.producer.QueueMessage(topic, key, sarama.ByteEncoder(msgBytes))
		if queueErr != nil {
			// With buffering, the queue error triggers a retry rather than
			// counting as a hard failure.
			if !useBuffering {
				atomic.AddInt64(&k.processMessageFailures, 1)
			}
			or.LogError(queueErr)
		}
		pack.Recycle(queueErr)
	}

	errChan <- Shutdown
	wg.Wait()
	return
}
示例4: Run
// Run is the sandbox output's main loop. It multiplexes two event sources:
// packs arriving on the input channel (fed to the sandbox's ProcessMessage)
// and ticker firings (fed to the sandbox's TimerEvent). The loop exits when
// the input channel closes, when ProcessMessage reports a fatal (>0) status,
// or when TimerEvent returns non-zero; the sandbox is then destroyed.
func (s *SandboxOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	var (
		pack      *pipeline.PipelinePack
		retval    int
		inChan    = or.InChan()
		duration  int64
		startTime time.Time
		ok        = true
		ticker    = or.Ticker()
	)
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				// Input channel closed: this break leaves the select, and
				// the loop condition (ok == false) then ends the loop.
				break
			}
			// Message processing is timed only on sampled iterations to keep
			// instrumentation overhead low.
			if s.sample {
				startTime = time.Now()
			}
			retval = s.sb.ProcessMessage(pack)
			if s.sample {
				duration = time.Since(startTime).Nanoseconds()
				s.reportLock.Lock()
				s.processMessageDuration += duration
				s.processMessageSamples++
				s.reportLock.Unlock()
			}
			// Decide whether the NEXT message gets timed (1-in-N sampling).
			s.sample = 0 == rand.Intn(s.sampleDenominator)
			or.UpdateCursor(pack.QueueCursor) // TODO: support retries?
			if retval == 0 {
				// Success.
				atomic.AddInt64(&s.processMessageCount, 1)
				pack.Recycle(nil)
			} else if retval < 0 {
				// Recoverable failure: attach the sandbox's last error, if any.
				atomic.AddInt64(&s.processMessageFailures, 1)
				var e error
				em := s.sb.LastError()
				if len(em) > 0 {
					e = errors.New(em)
				}
				pack.Recycle(e)
			} else {
				// retval > 0 is fatal: stop the loop and surface the error.
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				pack.Recycle(err)
				ok = false
			}
		case t := <-ticker:
			// Timer events are always timed (not sampled).
			startTime = time.Now()
			if retval = s.sb.TimerEvent(t.UnixNano()); retval != 0 {
				err = fmt.Errorf("FATAL: %s", s.sb.LastError())
				ok = false
			}
			duration = time.Since(startTime).Nanoseconds()
			s.reportLock.Lock()
			s.timerEventDuration += duration
			s.timerEventSamples++
			s.reportLock.Unlock()
		}
	}
	// Optionally fire one final timer event during shutdown.
	if err == nil && s.sbc.TimerEventOnShutdown {
		if retval = s.sb.TimerEvent(time.Now().UnixNano()); retval != 0 {
			err = fmt.Errorf("FATAL: %s", s.sb.LastError())
		}
	}
	// Tear down the sandbox. A destroy error wins as the returned error,
	// but an earlier fatal error is still logged so it isn't lost.
	destroyErr := s.destroy()
	if destroyErr != nil {
		if err != nil {
			or.LogError(err)
		}
		err = destroyErr
	}
	return err
}
示例5: Run
// Run consumes packs from the input channel, encodes them, and hands the
// resulting messages to the async Kafka producer's input channel. A
// companion goroutine (processKafkaErrors) drains producer errors until
// shutdownChan is closed after the input channel ends.
func (k *KafkaOutput) Run(or pipeline.OutputRunner, h pipeline.PluginHelper) (err error) {
	defer func() {
		k.producer.Close()
		k.client.Close()
	}()

	if or.Encoder() == nil {
		return errors.New("Encoder required.")
	}

	errChan := k.producer.Errors()
	producerIn := k.producer.Input()
	shutdownChan := make(chan struct{})

	var wg sync.WaitGroup
	wg.Add(1)
	go k.processKafkaErrors(or, errChan, shutdownChan, &wg)

	topic := k.config.Topic
	var key sarama.Encoder

	for pack := range or.InChan() {
		atomic.AddInt64(&k.processMessageCount, 1)

		// Per-message topic / partition key, when configured.
		if k.topicVariable != nil {
			topic = getMessageVariable(pack.Message, k.topicVariable)
		}
		if k.hashVariable != nil {
			key = sarama.StringEncoder(getMessageVariable(pack.Message, k.hashVariable))
		}

		msgBytes, encErr := or.Encode(pack)
		if encErr != nil {
			atomic.AddInt64(&k.processMessageFailures, 1)
			or.LogError(encErr)
			// Encoding errors are permanent; advance past the message.
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}
		if msgBytes == nil {
			// Encoder dropped the message on purpose.
			atomic.AddInt64(&k.processMessageDiscards, 1)
			or.UpdateCursor(pack.QueueCursor)
			pack.Recycle(nil)
			continue
		}

		// Delivery is asynchronous; failures surface on errChan.
		producerIn <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   key,
			Value: sarama.ByteEncoder(msgBytes),
		}
		pack.Recycle(nil)
	}

	close(shutdownChan)
	wg.Wait()
	return
}