This article collects typical usage examples of the InputRunner type from the Golang package github.com/mozilla-services/heka/pipeline. If you have been wondering what the Golang InputRunner type is for, or how it is used in practice, the curated code examples here may help.
Fifteen code examples of the InputRunner type are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
Example 1: Run
// Run is the main loop which listens for incoming requests and injects the
// messages read into the heka machinery
func (hsi *HTTPSimpleInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) (err error) {
    hsi.stop = make(chan bool)
    hsi.input = make(chan *pipeline.PipelinePack)
    hsi.errch = make(chan error, 1)
    hsi.packs = ir.InChan()
    hsi.DecoderRunner = h.DecoderRunner
    go hsi.listen()
    var pack *pipeline.PipelinePack
INPUT:
    for {
        select {
        case err = <-hsi.errch:
            if err != nil {
                return
            }
        case pack = <-hsi.input:
            ir.Inject(pack)
        case <-hsi.stop:
            if hsi.listener != nil {
                hsi.listener.Close()
                hsi.packs = nil
            }
            break INPUT
        }
    }
    select {
    case err = <-hsi.errch:
        return
    default:
        close(hsi.errch)
        hsi.errch = nil
    }
    return nil
}
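For context, a Heka input plugin like this one is typically made visible to the framework by registering a factory from an init() function. A minimal sketch, assuming HTTPSimpleInput lives in its own plugin package (the package name here is hypothetical):

package httpsimple

import "github.com/mozilla-services/heka/pipeline"

// Register the plugin under the name used in the TOML config; the factory
// hands the pipeline a fresh instance to configure and run.
func init() {
    pipeline.RegisterPlugin("HTTPSimpleInput", func() interface{} {
        return new(HTTPSimpleInput)
    })
}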
Example 2: Run
// Main Logstreamer Input runner. This runner kicks off all the other
// logstream inputs, and handles rescanning for updates to the filesystem that
// might affect file visibility for the logstream inputs.
func (li *LogstreamerInput) Run(ir p.InputRunner, h p.PluginHelper) (err error) {
    var (
        ok         bool
        errs       *ls.MultipleError
        newstreams []string
    )
    // Kick off all the current logstreams we know of
    i := 0
    for _, logstream := range li.plugins {
        i++
        li.startLogstreamInput(logstream, i, ir, h)
    }
    ok = true
    rescan := time.Tick(li.rescanInterval)
    // Our main rescan loop that handles shutting down
    for ok {
        select {
        case <-li.stopChan:
            ok = false
            returnChans := make([]chan bool, len(li.stopLogstreamChans))
            // Send out all the stop signals
            for i, ch := range li.stopLogstreamChans {
                ret := make(chan bool)
                ch <- ret
                returnChans[i] = ret
            }
            // Wait for all the stops
            for _, ch := range returnChans {
                <-ch
            }
            // Close our own stopChan to indicate we shut down
            close(li.stopChan)
        case <-rescan:
            li.logstreamSetLock.Lock()
            newstreams, errs = li.logstreamSet.ScanForLogstreams()
            if errs.IsError() {
                ir.LogError(errs)
            }
            for _, name := range newstreams {
                stream, ok := li.logstreamSet.GetLogstream(name)
                if !ok {
                    ir.LogError(fmt.Errorf("Found new logstream: %s, but couldn't fetch it.",
                        name))
                    continue
                }
                lsi := NewLogstreamInput(stream, name, li.hostName)
                li.plugins[name] = lsi
                i++
                li.startLogstreamInput(lsi, i, ir, h)
            }
            li.logstreamSetLock.Unlock()
        }
    }
    return nil
}
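A side note on the rescan ticker: time.Tick creates a ticker that can never be stopped, which is acceptable for a plugin that lives as long as the process but leaks if Run can be restarted. The stoppable equivalent, as a sketch:

ticker := time.NewTicker(li.rescanInterval)
defer ticker.Stop()
// ...then select on ticker.C where the loop above selects on rescan.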
Example 3: Run
func (input *NsqInput) Run(runner pipeline.InputRunner,
    helper pipeline.PluginHelper) (err error) {
    var (
        dRunner pipeline.DecoderRunner
        ok      bool
    )
    if input.DecoderName != "" {
        if dRunner, ok = helper.DecoderRunner(input.DecoderName,
            fmt.Sprintf("%s-%s", runner.Name(), input.DecoderName)); !ok {
            return fmt.Errorf("Decoder not found: %s", input.DecoderName)
        }
        input.decoderChan = dRunner.InChan()
    }
    input.runner = runner
    input.packSupply = runner.InChan()
    input.consumer.AddHandler(input)
    err = input.consumer.ConnectToNSQDs(input.NsqdAddrs)
    if err != nil {
        return err
    }
    err = input.consumer.ConnectToNSQLookupds(input.LookupdAddrs)
    if err != nil {
        return err
    }
    <-input.consumer.StoppedChan()
    return nil
}
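The AddHandler(input) call above implies that NsqInput itself satisfies the go-nsq Handler interface. A plausible sketch of that method, consistent with the packSupply and decoderChan fields used in Run (the body is an assumption, not the plugin's actual code):

// HandleMessage is called by the nsq consumer for each message: copy the
// body into an empty pack, then route it to the decoder or the pipeline.
func (input *NsqInput) HandleMessage(msg *nsq.Message) error {
    pack := <-input.packSupply
    pack.MsgBytes = append(pack.MsgBytes[:0], msg.Body...)
    if input.decoderChan != nil {
        input.decoderChan <- pack
    } else {
        input.runner.Inject(pack)
    }
    return nil
}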
Example 4: Run
func (rpsi *RedisPubSubInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
    var (
        dRunner pipeline.DecoderRunner
        decoder pipeline.Decoder
        pack    *pipeline.PipelinePack
        e       error
        ok      bool
    )
    // Get the InputRunner's chan to receive empty PipelinePacks
    packSupply := ir.InChan()
    if rpsi.conf.DecoderName != "" {
        if dRunner, ok = h.DecoderRunner(rpsi.conf.DecoderName, fmt.Sprintf("%s-%s", ir.Name(), rpsi.conf.DecoderName)); !ok {
            return fmt.Errorf("Decoder not found: %s", rpsi.conf.DecoderName)
        }
        decoder = dRunner.Decoder()
    }
    // Connect to the channel
    psc := redis.PubSubConn{Conn: rpsi.conn}
    psc.PSubscribe(rpsi.conf.Channel)
    for {
        switch n := psc.Receive().(type) {
        case redis.PMessage:
            // Grab an empty PipelinePack from the InputRunner
            pack = <-packSupply
            pack.Message.SetType("redis_pub_sub")
            pack.Message.SetLogger(n.Channel)
            pack.Message.SetPayload(string(n.Data))
            pack.Message.SetTimestamp(time.Now().UnixNano())
            var packs []*pipeline.PipelinePack
            if decoder == nil {
                packs = []*pipeline.PipelinePack{pack}
            } else {
                packs, e = decoder.Decode(pack)
            }
            if packs != nil {
                for _, p := range packs {
                    ir.Inject(p)
                }
            } else {
                if e != nil {
                    ir.LogError(fmt.Errorf("Couldn't parse Redis message: %s", n.Data))
                }
                pack.Recycle(nil)
            }
        case redis.Subscription:
            ir.LogMessage(fmt.Sprintf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count))
            if n.Count == 0 {
                return errors.New("No channel to subscribe")
            }
        case error:
            fmt.Printf("error: %v\n", n)
            return n
        }
    }
}
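For completeness, the rpsi.conn used above would be established in the plugin's Init method. A minimal sketch, assuming a RedisPubSubInputConfig with an Address field (both names are inferred from the conf usage above, not confirmed):

// Init (sketch) dials Redis and stashes the connection that Run subscribes on.
func (rpsi *RedisPubSubInput) Init(config interface{}) error {
    conf := config.(*RedisPubSubInputConfig)
    conn, err := redis.Dial("tcp", conf.Address)
    if err != nil {
        return fmt.Errorf("connecting to Redis: %s", err)
    }
    rpsi.conf = conf
    rpsi.conn = conn
    return nil
}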
Example 5: startLogstreamInput
// Creates deliverer and stop channel and starts the provided LogstreamInput.
func (li *LogstreamerInput) startLogstreamInput(logstream *LogstreamInput, i int,
    ir p.InputRunner, h p.PluginHelper) {
    stop := make(chan chan bool, 1)
    token := strconv.Itoa(i)
    deliverer := ir.NewDeliverer(token)
    sRunner := ir.NewSplitterRunner(token)
    li.stopLogstreamChans = append(li.stopLogstreamChans, stop)
    go logstream.Run(ir, h, stop, deliverer, sRunner)
}
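The stop channel created here carries a reply channel, pairing with the shutdown fan-out in Example 2: the Logstreamer sends a chan bool in, and the logstream acknowledges on it once it has wound down. A sketch of the receiving side, with runUntilStopped and doWork as illustrative stand-ins for the logstream's actual loop:

// Loop doing work until a reply channel arrives on stop, then acknowledge
// the shutdown request and return.
func runUntilStopped(stop chan chan bool, doWork func()) {
    for {
        select {
        case ret := <-stop:
            ret <- true // tell the Logstreamer we are done
            return
        default:
            doWork()
        }
    }
}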
Example 6: splitStream
func splitStream(ir p.InputRunner, sRunner p.SplitterRunner, r io.ReadCloser) error {
    var (
        record       []byte
        longRecord   []byte
        err          error
        deliver      bool
        nullSplitter bool
    )
    // If we're using a NullSplitter we want to make sure we capture the
    // entire HTTP request or response body and not be subject to what we get
    // from a single Read() call.
    if _, ok := sRunner.Splitter().(*p.NullSplitter); ok {
        nullSplitter = true
    }
    for err == nil {
        deliver = true
        _, record, err = sRunner.GetRecordFromStream(r)
        if err == io.ErrShortBuffer {
            if sRunner.KeepTruncated() {
                err = fmt.Errorf("record exceeded MAX_RECORD_SIZE %d and was truncated",
                    message.MAX_RECORD_SIZE)
            } else {
                deliver = false
                err = fmt.Errorf("record exceeded MAX_RECORD_SIZE %d and was dropped",
                    message.MAX_RECORD_SIZE)
            }
            ir.LogError(err)
            err = nil // non-fatal, keep going
        } else if sRunner.IncompleteFinal() && err == io.EOF && len(record) == 0 {
            record = sRunner.GetRemainingData()
        }
        if len(record) > 0 && deliver {
            if nullSplitter {
                // Concatenate all the records until EOF. This should be safe
                // b/c NullSplitter means FindRecord will always return the
                // full buffer contents, we don't have to worry about
                // GetRecordFromStream trying to append multiple reads to a
                // single record and triggering an io.ErrShortBuffer error.
                longRecord = append(longRecord, record...)
            } else {
                sRunner.DeliverRecord(record, nil)
            }
        }
    }
    r.Close()
    if err == io.EOF && nullSplitter && len(longRecord) > 0 {
        sRunner.DeliverRecord(longRecord, nil)
    }
    return err
}
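As a usage note, splitStream accepts any io.ReadCloser and closes it itself, so a caller handling an HTTP response would hand over the body directly. A hypothetical sketch (the surrounding handler is not part of the original code):

// Feed an HTTP response body through the splitter; io.EOF is the normal
// end-of-stream outcome, so only log other errors.
func deliverResponse(ir p.InputRunner, sRunner p.SplitterRunner, resp *http.Response) {
    if err := splitStream(ir, sRunner, resp.Body); err != nil && err != io.EOF {
        ir.LogError(err)
    }
}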
Example 7: Run
func (input *FilePollingInput) Run(runner pipeline.InputRunner,
    helper pipeline.PluginHelper) error {
    input.runner = runner
    input.hostname = helper.PipelineConfig().Hostname()
    tickChan := runner.Ticker()
    sRunner := runner.NewSplitterRunner("")
    if !sRunner.UseMsgBytes() {
        sRunner.SetPackDecorator(input.packDecorator)
    }
    for {
        select {
        case <-input.stop:
            return nil
        case <-tickChan:
        }
        f, err := os.Open(input.FilePath)
        if err != nil {
            runner.LogError(fmt.Errorf("Error opening file: %s", err.Error()))
            continue
        }
        for err == nil {
            err = sRunner.SplitStream(f, nil)
            if err != io.EOF && err != nil {
                runner.LogError(fmt.Errorf("Error reading file: %s", err.Error()))
            }
        }
        // Close the file before the next poll so descriptors don't leak.
        f.Close()
    }
}
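The SetPackDecorator call above installs a callback that the SplitterRunner applies to each pack before delivery. A plausible decorator for this input, consistent with the hostname captured in Run (the message type string is an assumption):

// packDecorator tags each record with a type and the collecting host
// before it enters the pipeline.
func (input *FilePollingInput) packDecorator(pack *pipeline.PipelinePack) {
    pack.Message.SetType("FilePollingInput")
    pack.Message.SetHostname(input.hostname)
}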
Example 8: fetcher
func (input *S3SplitFileInput) fetcher(runner pipeline.InputRunner, wg *sync.WaitGroup, workerId uint32) {
    var (
        s3Key     string
        startTime time.Time
        duration  float64
    )
    fetcherName := fmt.Sprintf("S3Reader%d", workerId)
    deliverer := runner.NewDeliverer(fetcherName)
    defer deliverer.Done()
    splitterRunner := runner.NewSplitterRunner(fetcherName)
    ok := true
    for ok {
        select {
        case s3Key, ok = <-input.listChan:
            if !ok {
                // Channel is closed => we're shutting down, exit cleanly.
                // runner.LogMessage("Fetcher all done! shutting down.")
                break
            }
            startTime = time.Now().UTC()
            err := input.readS3File(runner, &deliverer, &splitterRunner, s3Key)
            atomic.AddInt64(&input.processFileCount, 1)
            leftovers := splitterRunner.GetRemainingData()
            lenLeftovers := len(leftovers)
            if lenLeftovers > 0 {
                atomic.AddInt64(&input.processFileDiscardedBytes, int64(lenLeftovers))
                runner.LogError(fmt.Errorf("Trailing data, possible corruption: %d bytes left in stream at EOF: %s", lenLeftovers, s3Key))
            }
            if err != nil && err != io.EOF {
                runner.LogError(fmt.Errorf("Error reading %s: %s", s3Key, err))
                atomic.AddInt64(&input.processFileFailures, 1)
                continue
            }
            duration = time.Now().UTC().Sub(startTime).Seconds()
            runner.LogMessage(fmt.Sprintf("Successfully fetched %s in %.2fs ", s3Key, duration))
        case <-input.stop:
            for range input.listChan {
                // Drain the channel without processing the files.
                // Technically the S3Iterator can still add one back on to the
                // channel but this ensures there is room so it won't block.
            }
            ok = false
        }
    }
    wg.Done()
}
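For context, a fetcher like this would be fanned out from the input's Run method, one goroutine per worker, with the WaitGroup blocking shutdown until every worker drains. A sketch under that assumption (startFetchers and workerCount are illustrative names):

// Start the worker pool, then wait for every fetcher to call wg.Done().
func (input *S3SplitFileInput) startFetchers(runner pipeline.InputRunner, workerCount uint32) {
    var wg sync.WaitGroup
    for i := uint32(0); i < workerCount; i++ {
        wg.Add(1)
        go input.fetcher(runner, &wg, i)
    }
    wg.Wait()
}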
Example 9: Run
func (input *Sqs3Input) Run(runner pipeline.InputRunner,
    helper pipeline.PluginHelper) error {
    // initialize
    input.runner = runner
    input.sqs = sqs.New(session.New())
    input.s3 = s3.New(session.New())
    queue_url, err := get_queue(input.sqs, input.SqsQueue)
    if err != nil {
        return err
    }
    input.queue_url = queue_url
    //input.hostname = helper.PipelineConfig().Hostname()
    tickChan := runner.Ticker()
    sRunner := runner.NewSplitterRunner("")
    if !sRunner.UseMsgBytes() {
        sRunner.SetPackDecorator(input.packDecorator)
    }
    defer sRunner.Done()
    for {
        select {
        case <-input.stop:
            return nil
        case <-tickChan:
        }
        receipt_handle, bucket, key, err := receive_from_queue(input.sqs, input.queue_url)
        if err != nil {
            runner.LogError(fmt.Errorf("Error reading queue: %s", err.Error()))
            continue
        }
        o, _, err := get_object(input.s3, bucket, key)
        if err != nil {
            runner.LogError(fmt.Errorf("Error opening file: %s", err.Error()))
            // If the object is gone for good, delete the queue message so it
            // isn't retried forever.
            if aws_err, ok := err.(awserr.Error); ok {
                if aws_err.Code() == "NoSuchBucket" || aws_err.Code() == "NoSuchKey" {
                    delete_message(input.sqs, input.queue_url, receipt_handle)
                }
            }
            continue
        }
        for err == nil {
            err = sRunner.SplitStream(o, nil)
            if err != io.EOF && err != nil {
                runner.LogError(fmt.Errorf("Error reading file: %s", err.Error()))
            }
        }
        o.Close()
    }
}
Example 10: Run
func (rli *RedisInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
    fmt.Println("Addr", rli.conf.Address)
    fmt.Println("key", rli.conf.Key)
    fmt.Println("batch_count:", rli.conf.Batch_count)
    fmt.Println("decoder:", rli.conf.Decoder)
    var (
        dRunner pipeline.DecoderRunner
        decoder pipeline.Decoder
        ok      bool
        e       error
        reply   interface{}
        vals    []string
        msg     string
    )
    if rli.conf.Decoder != "" {
        if dRunner, ok = h.DecoderRunner(rli.conf.Decoder, fmt.Sprintf("%s-%s", ir.Name(), rli.conf.Decoder)); !ok {
            return fmt.Errorf("Decoder not found: %s", rli.conf.Decoder)
        }
        decoder = dRunner.Decoder()
    }
    for {
        reply, e = rli.conn.Do("BLPOP", rli.conf.Key, "0")
        if e == nil {
            vals, e = redis.Strings(reply, nil)
            if e == nil {
                // BLPOP replies with [key, value]; only index vals once the
                // conversion is known to have succeeded.
                msg = vals[1]
                rli.InsertMessage(ir, decoder, msg)
            }
        }
        reply, e = rli.batchlpop.Do(rli.conn, rli.conf.Key, rli.conf.Batch_count)
        if e == nil {
            vals, e = redis.Strings(reply, nil)
            if e == nil {
                for _, msg = range vals {
                    rli.InsertMessage(ir, decoder, msg)
                }
            } else {
                fmt.Printf("err: %v\n", e)
            }
        } else {
            fmt.Printf("type: %T, error: %v\n", reply, e)
            return e
        }
    }
}
Example 11: Run
func (sip *SCAMPInputPlugin) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) (err error) {
    sip.service, err = scamp.NewService(sip.conf.Service, sip.conf.Name)
    if err != nil {
        return
    }
    announcer, err := scamp.NewDiscoveryAnnouncer()
    if err != nil {
        scamp.Error.Printf("failed to create announcer: `%s`", err)
        return
    }
    announcer.Track(sip.service)
    go announcer.AnnounceLoop()
    for _, handlerConfig := range sip.conf.Handlers {
        handlerConfig := handlerConfig // copy: each closure must capture its own config
        scamp.Trace.Printf("registering handler: `%s`", handlerConfig)
        sip.service.Register(handlerConfig.Action, func(msg *scamp.Message, client *scamp.Client) {
            var pack *pipeline.PipelinePack
            pack = <-ir.InChan()
            pack.Message.SetUuid(uuid.NewRandom())
            pack.Message.SetTimestamp(time.Now().UnixNano())
            pack.Message.SetPayload(string(msg.Bytes()[:]))
            pack.Message.SetSeverity(int32(handlerConfig.Severity))
            pack.Message.SetLogger(handlerConfig.Logger) // TODO not sure what this means
            ir.Deliver(pack)
            reply := scamp.NewMessage()
            reply.SetMessageType(scamp.MESSAGE_TYPE_REPLY)
            reply.SetEnvelope(scamp.ENVELOPE_JSON)
            reply.SetRequestId(msg.RequestId)
            reply.Write([]byte("{}"))
            scamp.Trace.Printf("sending msg: {requestId: %d, type: `%s`, envelope: `%s`, body: `%s`}", reply.RequestId, reply.MessageType, reply.Envelope, reply.Bytes())
            _, err = client.Send(reply)
            if err != nil {
                scamp.Error.Printf("could not reply to message: `%s`", err)
                client.Close()
                return
            }
        })
    }
    sip.service.Run()
    return
}
Example 12: Run
func (wsi *WebSocketsInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
    // Get the InputRunner's chan to receive empty PipelinePacks
    packs := ir.InChan()
    var decoding chan<- *pipeline.PipelinePack
    if wsi.conf.Decoder != "" {
        // Fetch specified decoder
        decoder, ok := h.DecoderSet().ByName(wsi.conf.Decoder)
        if !ok {
            err := fmt.Errorf("Could not find decoder: %s", wsi.conf.Decoder)
            return err
        }
        // Get the decoder's receiving chan
        decoding = decoder.InChan()
    }
    var pack *pipeline.PipelinePack
    var count int
    // Read data from websocket broadcast chan
    for b := range wsi.data {
        // Grab an empty PipelinePack from the InputRunner
        pack = <-packs
        // Trim the excess empty bytes
        count = len(b)
        pack.MsgBytes = pack.MsgBytes[:count]
        // Copy ws bytes into pack's bytes
        copy(pack.MsgBytes, b)
        if decoding != nil {
            // Send pack onto decoder
            decoding <- pack
        } else {
            // Send pack into Heka pipeline
            ir.Inject(pack)
        }
    }
    return nil
}
Example 13: Run
func (rli *RedisListInput) Run(ir pipeline.InputRunner, h pipeline.PluginHelper) error {
    var (
        pack  *pipeline.PipelinePack
        packs []*pipeline.PipelinePack
    )
    // Get the InputRunner's chan to receive empty PipelinePacks
    inChan := ir.InChan()
    for {
        message, err := rli.conn.Do("RPOP", rli.conf.ListName)
        if err != nil {
            ir.LogError(fmt.Errorf("Redis RPOP error: %s", err))
            // TODO: should reconnect redis rather than close it
            rli.Stop()
            break
        }
        if message != nil {
            pack = <-inChan
            pack.Message.SetType("redis_list")
            pack.Message.SetPayload(string(message.([]uint8)))
            packs = []*pipeline.PipelinePack{pack}
            if packs != nil {
                for _, p := range packs {
                    ir.Inject(p)
                }
            } else {
                pack.Recycle(nil)
            }
        } else {
            time.Sleep(time.Second)
        }
    }
    return nil
}
Example 14: InsertMessage
func (rli *RedisInput) InsertMessage(ir pipeline.InputRunner, decoder pipeline.Decoder, msg string) {
    var (
        pack *pipeline.PipelinePack
        e    error
    )
    // Get the InputRunner's chan to receive empty PipelinePacks
    packSupply := ir.InChan()
    pack = <-packSupply
    pack.Message.SetType(rli.conf.Key)
    pack.Message.SetLogger("Redis")
    pack.Message.SetPayload(msg)
    pack.Message.SetTimestamp(time.Now().UnixNano())
    var packs []*pipeline.PipelinePack
    if decoder == nil {
        packs = []*pipeline.PipelinePack{pack}
    } else {
        packs, e = decoder.Decode(pack)
    }
    if packs != nil {
        for _, p := range packs {
            ir.Inject(p)
        }
    } else {
        if e != nil {
            ir.LogError(fmt.Errorf("Couldn't parse %s", msg))
            pack.Recycle(e)
        } else {
            pack.Recycle(nil)
            fmt.Println("pack recycle!")
        }
    }
}
Example 15: readS3File
// TODO: handle "no such file"
func (input *S3SplitFileInput) readS3File(runner pipeline.InputRunner, d *pipeline.Deliverer, sr *pipeline.SplitterRunner, s3Key string) (err error) {
    runner.LogMessage(fmt.Sprintf("Preparing to read: %s", s3Key))
    if input.bucket == nil {
        runner.LogMessage(fmt.Sprintf("Dude, where's my bucket: %s", s3Key))
        return
    }
    var lastGoodOffset uint64
    var attempt uint32
RetryS3:
    for attempt = 1; attempt <= input.S3Retries; attempt++ {
        for r := range S3FileIterator(input.bucket, s3Key, lastGoodOffset) {
            record := r.Record
            err := r.Err
            if err != nil && err != io.EOF {
                runner.LogError(fmt.Errorf("Error in attempt %d reading %s at offset %d: %s", attempt, s3Key, lastGoodOffset, err))
                atomic.AddInt64(&input.processMessageFailures, 1)
                continue RetryS3
            }
            if len(record) > 0 {
                lastGoodOffset += uint64(r.BytesRead)
                atomic.AddInt64(&input.processMessageCount, 1)
                atomic.AddInt64(&input.processMessageBytes, int64(len(record)))
                (*sr).DeliverRecord(record, *d)
            }
        }
        break
    }
    return
}