本文整理匯總了Golang中github.com/mozilla-services/heka/pipeline.InputRunner.NewDeliverer方法的典型用法代碼示例。如果您正苦於以下問題:Golang InputRunner.NewDeliverer方法的具體用法?Golang InputRunner.NewDeliverer怎麽用?Golang InputRunner.NewDeliverer使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類github.com/mozilla-services/heka/pipeline.InputRunner
的用法示例。
在下文中一共展示了InputRunner.NewDeliverer方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: startLogstreamInput
// startLogstreamInput wires up a per-stream deliverer, splitter runner, and
// stop channel for the given LogstreamInput, registers the stop channel on
// the plugin, and launches the stream's Run loop in its own goroutine.
func (li *LogstreamerInput) startLogstreamInput(logstream *LogstreamInput, i int,
	ir p.InputRunner, h p.PluginHelper) {
	// The stream's index doubles as the token identifying its deliverer
	// and splitter runner.
	token := strconv.Itoa(i)
	deliverer := ir.NewDeliverer(token)
	sRunner := ir.NewSplitterRunner(token)

	// Buffered (capacity 1) so a stop request never blocks the sender.
	stopChan := make(chan chan bool, 1)
	li.stopLogstreamChans = append(li.stopLogstreamChans, stopChan)

	go logstream.Run(ir, h, stopChan, deliverer, sRunner)
}
示例2: fetcher
// fetcher is a worker goroutine: it pulls S3 key names from input.listChan,
// reads and delivers each file's records via readS3File, and tracks per-file
// metrics. It exits when listChan is closed, or when input.stop fires, in
// which case it drains listChan so the lister goroutine cannot block.
func (input *S3SplitFileInput) fetcher(runner pipeline.InputRunner, wg *sync.WaitGroup, workerId uint32) {
	// Deferred (rather than called at the end of the body, as before) so the
	// WaitGroup is signaled even if readS3File panics. Registered first so
	// that, by LIFO defer order, it runs AFTER deliverer.Done() has cleanly
	// shut the deliverer down.
	defer wg.Done()

	var (
		s3Key     string
		startTime time.Time
		duration  float64
	)

	fetcherName := fmt.Sprintf("S3Reader%d", workerId)
	deliverer := runner.NewDeliverer(fetcherName)
	defer deliverer.Done()
	splitterRunner := runner.NewSplitterRunner(fetcherName)

	ok := true
	for ok {
		select {
		case s3Key, ok = <-input.listChan:
			if !ok {
				// Channel is closed => we're shutting down, exit cleanly.
				// runner.LogMessage("Fetcher all done! shutting down.")
				break
			}
			startTime = time.Now().UTC()
			err := input.readS3File(runner, &deliverer, &splitterRunner, s3Key)
			atomic.AddInt64(&input.processFileCount, 1)
			// Any bytes left in the splitter after EOF indicate a truncated
			// or corrupt final record; count and report them.
			leftovers := splitterRunner.GetRemainingData()
			if lenLeftovers := len(leftovers); lenLeftovers > 0 {
				atomic.AddInt64(&input.processFileDiscardedBytes, int64(lenLeftovers))
				runner.LogError(fmt.Errorf("Trailing data, possible corruption: %d bytes left in stream at EOF: %s", lenLeftovers, s3Key))
			}
			if err != nil && err != io.EOF {
				runner.LogError(fmt.Errorf("Error reading %s: %s", s3Key, err))
				atomic.AddInt64(&input.processFileFailures, 1)
				continue
			}
			duration = time.Now().UTC().Sub(startTime).Seconds()
			runner.LogMessage(fmt.Sprintf("Successfully fetched %s in %.2fs ", s3Key, duration))
		case <-input.stop:
			// Drain the channel without processing the files.
			// Technically the S3Iterator can still add one back on to the
			// channel but this ensures there is room so it won't block.
			for range input.listChan {
			}
			ok = false
		}
	}
}
示例3: fetcher
// fetcher is a worker goroutine: it pulls message locations from
// input.offsetChan, fetches each message's bytes from S3 using an HTTP Range
// request (retrying up to input.S3Retries times), and hands the record to the
// splitter runner for delivery. It exits when offsetChan is closed, or when
// input.stop fires, in which case it drains offsetChan so the producer
// cannot block.
func (input *S3OffsetInput) fetcher(runner pipeline.InputRunner, wg *sync.WaitGroup, workerId uint32) {
	// Deferred (rather than called at the end of the body, as before) so the
	// WaitGroup is signaled even on panic. Registered first so that, by LIFO
	// defer order, it runs AFTER deliverer.Done() has shut the deliverer down.
	defer wg.Done()

	var (
		loc       MessageLocation
		startTime time.Time
		duration  float64
		headers   map[string][]string
		record    []byte
		err       error
	)
	// Reused across iterations; only the Range value changes per message.
	headers = map[string][]string{
		"Range": {""},
	}

	fetcherName := fmt.Sprintf("S3Reader%d", workerId)
	deliverer := runner.NewDeliverer(fetcherName)
	defer deliverer.Done()
	splitterRunner := runner.NewSplitterRunner(fetcherName)

	ok := true
	for ok {
		select {
		case loc, ok = <-input.offsetChan:
			if !ok {
				// Channel is closed => we're shutting down, exit cleanly.
				runner.LogMessage("Fetcher all done! shutting down.")
				break
			}
			startTime = time.Now().UTC()
			// Read one message from the given location.
			headers["Range"][0] = fmt.Sprintf("bytes=%d-%d", loc.Offset, loc.Offset+loc.Length-1)
			atomic.AddInt64(&input.processMessageCount, 1)
			atomic.AddInt64(&input.processMessageBytes, int64(loc.Length))
			for attempt := uint32(1); attempt <= input.S3Retries; attempt++ {
				record, err = getClientRecord(input.bucket, &loc, headers)
				if err == nil {
					break
				}
				runner.LogMessage(fmt.Sprintf("Error #%d fetching %s @ %d+%d: %s\n", attempt, loc.Key, loc.Offset, loc.Length, err))
			}
			if err != nil {
				// All retries exhausted; skip this message.
				atomic.AddInt64(&input.processMessageFailures, 1)
				continue
			}
			splitterRunner.DeliverRecord(record, deliverer)
			duration = time.Now().UTC().Sub(startTime).Seconds()
			runner.LogMessage(fmt.Sprintf("Successfully fetched %s in %.2fs ", loc.Key, duration))
		case <-input.stop:
			runner.LogMessage("Stopping fetcher...")
			// Drain the channel without processing anything.
			for range input.offsetChan {
			}
			ok = false
		}
	}
}