This article collects typical usage examples of the Golang method github.com/mozilla-services/heka/pipeline.FilterRunner.Name. If you are wondering what exactly the FilterRunner.Name method does, how to call it, or where to find examples of it in use, the curated code samples here may help. You can also explore further usage examples for the containing type, github.com/mozilla-services/heka/pipeline.FilterRunner.
The sections below show 5 code examples of the FilterRunner.Name method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
Example 1: restoreSandboxes
// On Heka restarts this function reloads all previously running SandboxFilters
// using the script, configuration, and preservation files in the working
// directory.
func (this *SandboxManagerFilter) restoreSandboxes(fr pipeline.FilterRunner, h pipeline.PluginHelper, dir string) {
    glob := fmt.Sprintf("%s-*.toml", getNormalizedName(fr.Name()))
    if matches, err := filepath.Glob(filepath.Join(dir, glob)); err == nil {
        for _, fn := range matches {
            var configFile pipeline.ConfigFile
            if _, err = toml.DecodeFile(fn, &configFile); err != nil {
                fr.LogError(fmt.Errorf("restoreSandboxes failed: %s\n", err))
                continue
            } else {
                for _, conf := range configFile {
                    var runner pipeline.FilterRunner
                    name := path.Base(fn[:len(fn)-5])
                    fr.LogMessage(fmt.Sprintf("Loading: %s", name))
                    runner, err = this.createRunner(dir, name, conf)
                    if err != nil {
                        fr.LogError(fmt.Errorf("createRunner failed: %s\n", err.Error()))
                        removeAll(dir, fmt.Sprintf("%s.*", name))
                        break
                    }
                    err = h.PipelineConfig().AddFilterRunner(runner)
                    if err != nil {
                        fr.LogError(err)
                    } else {
                        atomic.AddInt32(&this.currentFilters, 1)
                    }
                    break // only interested in the first item
                }
            }
        }
    }
}
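restoreSandboxes depends on the per-sandbox TOML files that the manager leaves in its working directory; each file holds one named plugin section that is decoded and handed to createRunner. The standalone sketch below, which is not part of the Heka source, imitates just that decode step with the same BurntSushi toml calls; the section name and keys in the blob are hypothetical, and pipeline.ConfigFile is modeled here as a plain map of section names to toml.Primitive values.

```go
package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

func main() {
    // Roughly what a preserved "<manager>-<name>.toml" file might contain
    // (hypothetical section name and keys).
    blob := `
[FxaAuthServerErrors]
type = "SandboxFilter"
message_matcher = "Type == 'logging.fxa.auth_server'"
`
    // Stand-in for pipeline.ConfigFile: named sections left as raw primitives.
    var configFile map[string]toml.Primitive
    if _, err := toml.Decode(blob, &configFile); err != nil {
        fmt.Println("decode failed:", err)
        return
    }
    for name := range configFile {
        fmt.Println("would create a runner for:", name)
        break // like restoreSandboxes, only the first section matters
    }
}
```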
Example 2: Run
func (this *SandboxManagerFilter) Run(fr pipeline.FilterRunner,
    h pipeline.PluginHelper) (err error) {
    inChan := fr.InChan()
    var ok = true
    var pack *pipeline.PipelinePack
    var delta int64
    this.restoreSandboxes(fr, h, this.workingDirectory)
    for ok {
        select {
        case pack, ok = <-inChan:
            if !ok {
                break
            }
            atomic.AddInt64(&this.processMessageCount, 1)
            delta = time.Now().UnixNano() - pack.Message.GetTimestamp()
            if math.Abs(float64(delta)) >= 5e9 {
                fr.LogError(fmt.Errorf("Discarded control message: %d seconds skew",
                    delta/1e9))
                pack.Recycle()
                break
            }
            action, _ := pack.Message.GetFieldValue("action")
            switch action {
            case "load":
                current := int(atomic.LoadInt32(&this.currentFilters))
                if current < this.maxFilters {
                    err := this.loadSandbox(fr, h, this.workingDirectory, pack.Message)
                    if err != nil {
                        fr.LogError(err)
                    }
                } else {
                    fr.LogError(fmt.Errorf("%s attempted to load more than %d filters",
                        fr.Name(), this.maxFilters))
                }
            case "unload":
                fv, _ := pack.Message.GetFieldValue("name")
                if name, ok := fv.(string); ok {
                    name = getSandboxName(fr.Name(), name)
                    if this.pConfig.RemoveFilterRunner(name) {
                        removeAll(this.workingDirectory, fmt.Sprintf("%s.*", name))
                    }
                }
            }
            pack.Recycle()
        }
    }
    return
}
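For context, the control messages this Run loop consumes carry an "action" field ("load" or "unload"), plus a "config" field and the script source in the payload for loads, or a "name" field for unloads. The following is a minimal sketch, not taken from Heka, of assembling such a "load" message; it sticks to the message calls that already appear in these examples, except SetTimestamp, which is assumed here, and the type string, section name, and payload are hypothetical.

```go
package main

import (
    "fmt"
    "time"

    "github.com/mozilla-services/heka/message"
)

func main() {
    msg := &message.Message{}
    msg.SetType("heka.control.sandbox") // hypothetical Type value
    // Run discards control messages whose timestamp is more than ~5 seconds
    // off the local clock, so the timestamp must be fresh.
    msg.SetTimestamp(time.Now().UnixNano()) // SetTimestamp is assumed to exist

    // "load" makes Run call loadSandbox; "unload" would instead need a "name" field.
    action, _ := message.NewField("action", "load", "")
    msg.AddField(action)

    // loadSandbox reads the sandbox TOML from the "config" field...
    cfg, _ := message.NewField("config", "[ExampleFilter]\ntype = \"SandboxFilter\"\n", "toml")
    msg.AddField(cfg)
    // ...and the script source itself from the payload.
    msg.SetPayload("-- Lua filter source would go here")

    fv, _ := msg.GetFieldValue("action")
    fmt.Println("action field:", fv)
}
```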
Example 3: loadSandbox
// Parses a Heka message and extracts the information necessary to start a new
// SandboxFilter
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
    h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {
    fv, _ := msg.GetFieldValue("config")
    if config, ok := fv.(string); ok {
        var configFile pipeline.ConfigFile
        if _, err = toml.Decode(config, &configFile); err != nil {
            return fmt.Errorf("loadSandbox failed: %s\n", err)
        } else {
            for name, conf := range configFile {
                name = getSandboxName(fr.Name(), name)
                if _, ok := h.Filter(name); ok {
                    // todo support reload
                    return fmt.Errorf("loadSandbox failed: %s is already running", name)
                }
                fr.LogMessage(fmt.Sprintf("Loading: %s", name))
                confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
                err = ioutil.WriteFile(confFile, []byte(config), 0600)
                if err != nil {
                    return
                }
                var sbc SandboxConfig
                if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
                    return fmt.Errorf("loadSandbox failed: %s\n", err)
                }
                scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
                err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
                if err != nil {
                    removeAll(dir, fmt.Sprintf("%s.*", name))
                    return
                }
                // check/clear the old state preservation file
                // this avoids issues with changes to the data model since the last load
                // and prevents holes in the graph from looking like anomalies
                os.Remove(filepath.Join(pipeline.PrependBaseDir(DATA_DIR), name+DATA_EXT))
                var runner pipeline.FilterRunner
                runner, err = this.createRunner(dir, name, conf)
                if err != nil {
                    removeAll(dir, fmt.Sprintf("%s.*", name))
                    return
                }
                err = h.PipelineConfig().AddFilterRunner(runner)
                if err == nil {
                    this.currentFilters++
                }
                break // only interested in the first item
            }
        }
    }
    return
}
Example 4: loadSandbox
// Parses a Heka message and extracts the information necessary to start a new
// SandboxFilter
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
    h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {
    fv, _ := msg.GetFieldValue("config")
    if config, ok := fv.(string); ok {
        var configFile pipeline.ConfigFile
        if _, err = toml.Decode(config, &configFile); err != nil {
            return fmt.Errorf("loadSandbox failed: %s\n", err)
        }
        for name, conf := range configFile {
            name = getSandboxName(fr.Name(), name)
            if _, ok := h.Filter(name); ok {
                // todo support reload
                return fmt.Errorf("loadSandbox failed: %s is already running", name)
            }
            fr.LogMessage(fmt.Sprintf("Loading: %s", name))
            confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
            err = ioutil.WriteFile(confFile, []byte(config), 0600)
            if err != nil {
                return
            }
            var sbc SandboxConfig
            // Default, will get overwritten if necessary
            sbc.ScriptType = "lua"
            if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
                return fmt.Errorf("loadSandbox failed: %s\n", err)
            }
            scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
            err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
            if err != nil {
                removeAll(dir, fmt.Sprintf("%s.*", name))
                return
            }
            var runner pipeline.FilterRunner
            runner, err = this.createRunner(dir, name, conf)
            if err != nil {
                removeAll(dir, fmt.Sprintf("%s.*", name))
                return
            }
            err = this.pConfig.AddFilterRunner(runner)
            if err == nil {
                atomic.AddInt32(&this.currentFilters, 1)
            }
            break // only interested in the first item
        }
    }
    return
}
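Both loadSandbox variants perform the same two-stage decode: toml.Decode turns the message's "config" string into named sections, then toml.PrimitiveDecode turns the first section into a typed SandboxConfig before the .toml and script files are written. The sketch below, which is not from the Heka source, reproduces that pattern with a local stand-in struct; the struct, its toml tag, and the keys in the config string are illustrative assumptions, with only ScriptType and its "lua" default taken from Example 4.

```go
package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

// LocalSandboxConfig is an illustrative stand-in for Heka's SandboxConfig.
type LocalSandboxConfig struct {
    ScriptType string `toml:"script_type"`
}

func main() {
    // Hypothetical content of the message's "config" field.
    config := `
[CounterFilter]
type = "SandboxFilter"
script_type = "lua"
`
    // Stage 1: split the config into named sections, like pipeline.ConfigFile.
    var sections map[string]toml.Primitive
    if _, err := toml.Decode(config, &sections); err != nil {
        fmt.Println("loadSandbox-style decode failed:", err)
        return
    }
    for name, conf := range sections {
        // Stage 2: decode one section into the typed config, defaulting to "lua"
        // the same way Example 4 does.
        sbc := LocalSandboxConfig{ScriptType: "lua"}
        if err := toml.PrimitiveDecode(conf, &sbc); err != nil {
            fmt.Println("section decode failed:", err)
            return
        }
        fmt.Printf("would write %s.toml and %s.%s\n", name, name, sbc.ScriptType)
        break // only the first section is used, mirroring the break above
    }
}
```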
Example 5: Run
func (this *SandboxFilter) Run(fr pipeline.FilterRunner, h pipeline.PluginHelper) (err error) {
    inChan := fr.InChan()
    ticker := fr.Ticker()
    var (
        ok = true
        terminated = false
        sample = true
        blocking = false
        backpressure = false
        pack *pipeline.PipelinePack
        retval int
        msgLoopCount uint
        injectionCount uint
        startTime time.Time
        slowDuration int64 = int64(this.pConfig.Globals.MaxMsgProcessDuration)
        duration int64
        capacity = cap(inChan) - 1
    )
    // We assign to the return value of Run() for errors in the closure so that
    // the plugin runner can determine what caused the SandboxFilter to return.
    this.sb.InjectMessage(func(payload, payload_type, payload_name string) int {
        if injectionCount == 0 {
            err = pipeline.TerminatedError("exceeded InjectMessage count")
            return 2
        }
        injectionCount--
        pack := h.PipelinePack(msgLoopCount)
        if pack == nil {
            err = pipeline.TerminatedError(fmt.Sprintf("exceeded MaxMsgLoops = %d",
                this.pConfig.Globals.MaxMsgLoops))
            return 3
        }
        if len(payload_type) == 0 { // heka protobuf message
            hostname := pack.Message.GetHostname()
            err := proto.Unmarshal([]byte(payload), pack.Message)
            if err == nil {
                // do not allow filters to override the following
                pack.Message.SetType("heka.sandbox." + pack.Message.GetType())
                pack.Message.SetLogger(fr.Name())
                pack.Message.SetHostname(hostname)
            } else {
                return 1
            }
        } else {
            pack.Message.SetType("heka.sandbox-output")
            pack.Message.SetLogger(fr.Name())
            pack.Message.SetPayload(payload)
            ptype, _ := message.NewField("payload_type", payload_type, "file-extension")
            pack.Message.AddField(ptype)
            pname, _ := message.NewField("payload_name", payload_name, "")
            pack.Message.AddField(pname)
        }
        if !fr.Inject(pack) {
            return 4
        }
        atomic.AddInt64(&this.injectMessageCount, 1)
        return 0
    })
    for ok {
        select {
        case pack, ok = <-inChan:
            if !ok {
                break
            }
            atomic.AddInt64(&this.processMessageCount, 1)
            injectionCount = this.pConfig.Globals.MaxMsgProcessInject
            msgLoopCount = pack.MsgLoopCount
            if this.manager != nil { // only check for backpressure on dynamic plugins
                // reading a channel length is generally fast ~1ns
                // we need to check the entire chain back to the router
                backpressure = len(inChan) >= capacity ||
                    fr.MatchRunner().InChanLen() >= capacity ||
                    len(h.PipelineConfig().Router().InChan()) >= capacity
            }
            // performing the timing is expensive ~40ns but if we are
            // backpressured we need a decent sample set before triggering
            // termination
            if sample ||
                (backpressure && this.processMessageSamples < int64(capacity)) ||
                this.sbc.Profile {
                startTime = time.Now()
                sample = true
            }
            retval = this.sb.ProcessMessage(pack)
            if sample {
                duration = time.Since(startTime).Nanoseconds()
                this.reportLock.Lock()
                this.processMessageDuration += duration
                this.processMessageSamples++
                if this.sbc.Profile {
                    this.profileMessageDuration = this.processMessageDuration
                    this.profileMessageSamples = this.processMessageSamples
                    if this.profileMessageSamples == int64(capacity)*10 {
                        this.sbc.Profile = false
                        // reset the normal sampling so it isn't heavily skewed by the profile values
//......... (the rest of this code is omitted here) .........