本文整理匯總了Golang中github.com/mozilla-services/heka/message.Message.GetFieldValue方法的典型用法代碼示例。如果您正苦於以下問題:Golang Message.GetFieldValue方法的具體用法?Golang Message.GetFieldValue怎麽用?Golang Message.GetFieldValue使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類github.com/mozilla-services/heka/message.Message
的用法示例。
在下文中一共展示了Message.GetFieldValue方法的10個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: getField
// getField returns the value of one of the well-known Heka message header
// fields by name, or falls back to a dynamic-field lookup for any other name.
// Returns nil when the requested value is absent.
func getField(msg *message.Message, name string) interface{} {
	switch name {
	case "Timestamp":
		return msg.Timestamp
	case "Type":
		return msg.Type
	case "Logger":
		return msg.Logger
	case "Severity":
		return msg.Severity
	case "Payload":
		return msg.Payload
	case "EnvVersion":
		return msg.EnvVersion
	case "Pid":
		return msg.Pid
	case "Hostname":
		return msg.Hostname
	case "Uuid":
		if msg.Uuid == nil {
			return nil
		}
		return msg.GetUuidString()
	case "_hekaTimestampMicro":
		if msg.Timestamp == nil {
			return nil
		}
		return *msg.Timestamp / 1000 // nano -> micro
	}
	// Not a header field: look it up among the message's dynamic fields.
	value, _ := msg.GetFieldValue(name)
	return value
}
示例2: convertMessageToValues
// convertMessageToValues reads a Heka Message and returns a slice of field
// values ordered the same as insertFields. "Timestamp" is taken from the
// message header (Unix nanoseconds, converted to time.Time); every other name
// is looked up among the message's dynamic fields. When a field is absent and
// allowMissingMessageFields is false, an error naming all missing fields is
// returned; when it is true, a nil (SQL NULL) is emitted instead.
func (po *PostgresOutput) convertMessageToValues(m *message.Message, insertFields []string) (fieldValues []interface{}, err error) {
	// Pre-size to the known output length to avoid repeated growth copies.
	fieldValues = make([]interface{}, 0, len(insertFields))
	var missingFields []string
	for _, field := range insertFields {
		// Special case: get "Timestamp" from the Heka message header.
		if field == "Timestamp" {
			// Convert Heka time (Unix timestamp in nanoseconds) to Golang time.
			fieldValues = append(fieldValues, time.Unix(0, m.GetTimestamp()))
			continue
		}
		v, ok := m.GetFieldValue(field)
		if !ok {
			if !po.allowMissingMessageFields {
				missingFields = append(missingFields, field)
				continue
			}
			// Configured to tolerate missing fields: write NULL instead.
			v = nil
		}
		fieldValues = append(fieldValues, v)
	}
	if len(missingFields) > 0 {
		return []interface{}{}, fmt.Errorf("message is missing expected fields: %s", strings.Join(missingFields, ", "))
	}
	return fieldValues, nil
}
示例3: loadSandbox
// loadSandbox parses a Heka message and extracts the information necessary to
// start a new SandboxFilter. The message's "config" field must hold a TOML
// document describing the filter; only its first section is loaded. The
// message payload carries the sandbox script source. Returns nil when the
// message has no usable "config" field.
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
	h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {
	fv, _ := msg.GetFieldValue("config")
	config, ok := fv.(string)
	if !ok {
		// No string-valued "config" field: nothing to load.
		return
	}
	var configFile pipeline.ConfigFile
	if _, err = toml.Decode(config, &configFile); err != nil {
		return fmt.Errorf("loadSandbox failed: %s\n", err)
	}
	for name, conf := range configFile {
		// Namespace the filter under this manager's name.
		name = getSandboxName(fr.Name(), name)
		if _, ok := h.Filter(name); ok {
			// todo support reload
			return fmt.Errorf("loadSandbox failed: %s is already running", name)
		}
		fr.LogMessage(fmt.Sprintf("Loading: %s", name))
		// Persist the TOML config alongside the script.
		confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
		err = ioutil.WriteFile(confFile, []byte(config), 0600)
		if err != nil {
			return
		}
		var sbc SandboxConfig
		if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		}
		// The message payload is the sandbox script itself.
		scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
		err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
		if err != nil {
			// Roll back any files already written for this sandbox.
			removeAll(dir, fmt.Sprintf("%s.*", name))
			return
		}
		// check/clear the old state preservation file
		// this avoids issues with changes to the data model since the last load
		// and prevents holes in the graph from looking like anomalies
		os.Remove(filepath.Join(pipeline.PrependBaseDir(DATA_DIR), name+DATA_EXT))
		var runner pipeline.FilterRunner
		runner, err = this.createRunner(dir, name, conf)
		if err != nil {
			removeAll(dir, fmt.Sprintf("%s.*", name))
			return
		}
		err = h.PipelineConfig().AddFilterRunner(runner)
		if err == nil {
			this.currentFilters++
		}
		break // only interested in the first item
	}
	return
}
示例4: Run
// Run consumes Heka report messages from the output's input channel and
// materializes them as files in the dashboard's working directory. It loops
// until the input channel is closed, and periodically requests a fresh
// all-reports message on each ticker tick.
func (self *DashboardOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()
	ticker := or.Ticker()
	var (
		ok   = true
		plc  *PipelineCapture
		pack *PipelinePack
		msg  *message.Message
	)
	for ok {
		select {
		case plc, ok = <-inChan:
			if !ok {
				// This break only exits the select; the enclosing `for ok`
				// loop then terminates because ok is now false.
				break
			}
			pack = plc.Pack
			msg = pack.Message
			switch msg.GetType() {
			case "heka.all-report":
				// Aggregate pipeline report: overwrite the JSON file in place.
				fn := path.Join(self.workingDirectory, "heka_report.json")
				overwriteFile(fn, msg.GetPayload())
			case "heka.sandbox-output":
				tmp, ok := msg.GetFieldValue("payload_type")
				if ok {
					// Only circular-buffer ("cbuf") payloads are written out here.
					if pt, ok := tmp.(string); ok && pt == "cbuf" {
						html := path.Join(self.workingDirectory, msg.GetLogger()+".html")
						// Create the HTML viewer page only if it doesn't exist yet.
						_, err := os.Stat(html)
						if err != nil {
							overwriteFile(html, fmt.Sprintf(getCbufTemplate(), msg.GetLogger(), msg.GetLogger()))
						}
						fn := path.Join(self.workingDirectory, msg.GetLogger()+"."+pt)
						overwriteFile(fn, msg.GetPayload())
					}
				}
			case "heka.sandbox-terminated":
				// Append a tab-separated termination record: timestamp
				// (nanoseconds scaled by 1e9), logger name, payload.
				fn := path.Join(self.workingDirectory, self.terminationFile)
				if file, err := os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil {
					line := fmt.Sprintf("%d\t%s\t%v\n", msg.GetTimestamp()/1e9, msg.GetLogger(), msg.GetPayload())
					file.WriteString(line)
					file.Close()
				}
			}
			plc.Pack.Recycle()
		case <-ticker:
			// Ask the pipeline to publish a fresh all-reports message.
			go h.PipelineConfig().allReportsMsg()
		}
	}
	return
}
示例5: loadSandbox
// Parses a Heka message and extracts the information necessary to start a new
// SandboxFilter. The TOML filter configuration travels in the message's
// "config" field and the sandbox script source in the message payload; only
// the first section of the decoded config is loaded.
func (this *SandboxManagerFilter) loadSandbox(fr pipeline.FilterRunner,
	h pipeline.PluginHelper, dir string, msg *message.Message) (err error) {
	fv, _ := msg.GetFieldValue("config")
	if config, ok := fv.(string); ok {
		var configFile pipeline.ConfigFile
		if _, err = toml.Decode(config, &configFile); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		}
		for name, conf := range configFile {
			// Namespace the filter under this manager's name.
			name = getSandboxName(fr.Name(), name)
			if _, ok := h.Filter(name); ok {
				// todo support reload
				return fmt.Errorf("loadSandbox failed: %s is already running", name)
			}
			fr.LogMessage(fmt.Sprintf("Loading: %s", name))
			// Persist the TOML config alongside the script files.
			confFile := filepath.Join(dir, fmt.Sprintf("%s.toml", name))
			err = ioutil.WriteFile(confFile, []byte(config), 0600)
			if err != nil {
				return
			}
			var sbc SandboxConfig
			// Default, will get overwritten if necessary
			sbc.ScriptType = "lua"
			if err = toml.PrimitiveDecode(conf, &sbc); err != nil {
				return fmt.Errorf("loadSandbox failed: %s\n", err)
			}
			// The message payload is the sandbox script itself.
			scriptFile := filepath.Join(dir, fmt.Sprintf("%s.%s", name, sbc.ScriptType))
			err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
			if err != nil {
				// Roll back any files already written for this sandbox.
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}
			var runner pipeline.FilterRunner
			runner, err = this.createRunner(dir, name, conf)
			if err != nil {
				removeAll(dir, fmt.Sprintf("%s.*", name))
				return
			}
			err = this.pConfig.AddFilterRunner(runner)
			if err == nil {
				// Atomically track the running-filter count.
				atomic.AddInt32(&this.currentFilters, 1)
			}
			break // only interested in the first item
		}
	}
	return
}
示例6: interpolateFlag
// interpolateFlag replaces %{...} tokens in an ElasticSearch index name.
// Tokens naming well-known message headers (Type, Hostname, Pid, UUID,
// Logger, EnvVersion, Severity) or a string-valued dynamic field are
// substituted with that value; any other token (ex: %{2012.09.19}) is
// interpreted as a time.Format layout, evaluated against the message
// timestamp when e.ESIndexFromTimestamp is set and the timestamp is present,
// otherwise against the current UTC time. err is set when a token expands
// to itself, i.e. nothing was interpolated for it.
func interpolateFlag(e *ElasticSearchCoordinates, m *message.Message, name string) (
	interpolatedValue string, err error) {
	iSlice := strings.Split(name, "%{")
	for i, element := range iSlice {
		elEnd := strings.Index(element, "}")
		if elEnd == -1 {
			// No closing brace in this fragment: leave it untouched.
			continue
		}
		elVal := element[:elEnd]
		token := element[:elEnd+1] // token text including the trailing "}"
		switch elVal {
		case "Type":
			iSlice[i] = strings.Replace(iSlice[i], token, m.GetType(), -1)
		case "Hostname":
			iSlice[i] = strings.Replace(iSlice[i], token, m.GetHostname(), -1)
		case "Pid":
			iSlice[i] = strings.Replace(iSlice[i], token,
				strconv.Itoa(int(m.GetPid())), -1)
		case "UUID":
			iSlice[i] = strings.Replace(iSlice[i], token, m.GetUuidString(), -1)
		case "Logger":
			iSlice[i] = strings.Replace(iSlice[i], token, m.GetLogger(), -1)
		case "EnvVersion":
			iSlice[i] = strings.Replace(iSlice[i], token, m.GetEnvVersion(), -1)
		case "Severity":
			iSlice[i] = strings.Replace(iSlice[i], token,
				strconv.Itoa(int(m.GetSeverity())), -1)
		default:
			var fieldStr string
			isStr := false
			if fval, ok := m.GetFieldValue(elVal); ok {
				// Guarded assertion: the original asserted fval.(string)
				// unconditionally and would panic on a non-string field.
				fieldStr, isStr = fval.(string)
			}
			if isStr {
				iSlice[i] = strings.Replace(iSlice[i], token, fieldStr, -1)
			} else {
				// Unknown token (or non-string field): treat as a time layout.
				var t time.Time
				if e.ESIndexFromTimestamp && m.Timestamp != nil {
					t = time.Unix(0, *m.Timestamp).UTC()
				} else {
					t = time.Now().UTC()
				}
				iSlice[i] = strings.Replace(iSlice[i], token, t.Format(elVal), -1)
			}
		}
		if iSlice[i] == elVal {
			err = fmt.Errorf("Could not interpolate field from config: %s", name)
		}
	}
	interpolatedValue = strings.Join(iSlice, "")
	return
}
示例7: loadSandbox
// loadSandbox parses a Heka message and extracts the information necessary to
// start a new SandboxFilter. The message's "config" field must hold a TOML
// document describing the filter; only its first section is loaded. The
// message payload carries the sandbox script source. Returns nil when the
// message has no usable "config" field.
func (this *SandboxManagerFilter) loadSandbox(fr FilterRunner,
	h PluginHelper, dir string, msg *message.Message) (err error) {
	fv, _ := msg.GetFieldValue("config")
	config, ok := fv.(string)
	if !ok {
		// No string-valued "config" field: nothing to load.
		return
	}
	var configFile ConfigFile
	if _, err = toml.Decode(config, &configFile); err != nil {
		return fmt.Errorf("loadSandbox failed: %s\n", err)
	}
	for name, conf := range configFile {
		// Namespace the filter under this manager's name.
		name = getSandboxName(fr.Name(), name)
		if _, ok := h.Filter(name); ok {
			// todo support reload
			return fmt.Errorf("loadSandbox failed: %s is already running", name)
		}
		fr.LogMessage(fmt.Sprintf("Loading: %s", name))
		// Persist the TOML config alongside the script.
		confFile := path.Join(dir, fmt.Sprintf("%s.toml", name))
		err = ioutil.WriteFile(confFile, []byte(config), 0600)
		if err != nil {
			return
		}
		var sbfc SandboxFilterConfig
		if err = toml.PrimitiveDecode(conf, &sbfc); err != nil {
			return fmt.Errorf("loadSandbox failed: %s\n", err)
		}
		// The message payload is the sandbox script itself.
		scriptFile := path.Join(dir, fmt.Sprintf("%s.%s", name, sbfc.Sbc.ScriptType))
		err = ioutil.WriteFile(scriptFile, []byte(msg.GetPayload()), 0600)
		if err != nil {
			// Roll back any files already written for this sandbox.
			removeAll(dir, fmt.Sprintf("%s.*", name))
			return
		}
		var runner FilterRunner
		runner, err = createRunner(dir, name, conf)
		if err != nil {
			removeAll(dir, fmt.Sprintf("%s.*", name))
			return
		}
		err = h.PipelineConfig().AddFilterRunner(runner)
		if err == nil {
			this.currentFilters++
		}
		break // only interested in the first item
	}
	return
}
示例8: Run
func (self *DashboardOutput) Run(or OutputRunner, h PluginHelper) (err error) {
inChan := or.InChan()
ticker := or.Ticker()
go self.starterFunc(self)
var (
ok = true
pack *PipelinePack
msg *message.Message
)
// Maps sandbox names to plugin list items used to generate the
// sandboxes.json file.
sandboxes := make(map[string]*DashPluginListItem)
sbxsLock := new(sync.Mutex)
reNotWord, _ := regexp.Compile("\\W")
for ok {
select {
case pack, ok = <-inChan:
if !ok {
break
}
msg = pack.Message
switch msg.GetType() {
case "heka.all-report":
fn := filepath.Join(self.dataDirectory, "heka_report.json")
overwriteFile(fn, msg.GetPayload())
sbxsLock.Lock()
if err := overwritePluginListFile(self.dataDirectory, sandboxes); err != nil {
or.LogError(fmt.Errorf("Can't write plugin list file to '%s': %s",
self.dataDirectory, err))
}
sbxsLock.Unlock()
case "heka.sandbox-output":
tmp, _ := msg.GetFieldValue("payload_type")
if payloadType, ok := tmp.(string); ok {
var payloadName, nameExt string
tmp, _ := msg.GetFieldValue("payload_name")
if payloadName, ok = tmp.(string); ok {
nameExt = reNotWord.ReplaceAllString(payloadName, "")
}
if len(nameExt) > 64 {
nameExt = nameExt[:64]
}
nameExt = "." + nameExt
payloadType = reNotWord.ReplaceAllString(payloadType, "")
filterName := msg.GetLogger()
fn := filterName + nameExt + "." + payloadType
ofn := filepath.Join(self.dataDirectory, fn)
relPath := path.Join(self.relDataPath, fn) // Used for generating HTTP URLs.
overwriteFile(ofn, msg.GetPayload())
sbxsLock.Lock()
if listItem, ok := sandboxes[filterName]; !ok {
// First time we've seen this sandbox, add it to the set.
output := &DashPluginOutput{
Name: payloadName,
Filename: relPath,
}
sandboxes[filterName] = &DashPluginListItem{
Name: filterName,
Outputs: []*DashPluginOutput{output},
}
} else {
// We've seen the sandbox, see if we already have this output.
found := false
for _, output := range listItem.Outputs {
if output.Name == payloadName {
found = true
break
}
}
if !found {
output := &DashPluginOutput{
Name: payloadName,
Filename: relPath,
}
listItem.Outputs = append(listItem.Outputs, output)
}
}
sbxsLock.Unlock()
}
case "heka.sandbox-terminated":
fn := filepath.Join(self.dataDirectory, "heka_sandbox_termination.tsv")
filterName := msg.GetLogger()
if file, err := os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil {
var line string
if _, ok := msg.GetFieldValue("ProcessMessageCount"); !ok {
line = fmt.Sprintf("%d\t%s\t%v\n", msg.GetTimestamp()/1e9,
msg.GetLogger(), msg.GetPayload())
} else {
pmc, _ := msg.GetFieldValue("ProcessMessageCount")
pms, _ := msg.GetFieldValue("ProcessMessageSamples")
pmd, _ := msg.GetFieldValue("ProcessMessageAvgDuration")
mad, _ := msg.GetFieldValue("MatchAvgDuration")
fcl, _ := msg.GetFieldValue("FilterChanLength")
mcl, _ := msg.GetFieldValue("MatchChanLength")
rcl, _ := msg.GetFieldValue("RouterChanLength")
line = fmt.Sprintf("%d\t%s\t%v"+
" ProcessMessageCount:%v"+
//.........這裏部分代碼省略.........
示例9: Run
// Run consumes Heka report messages and writes dashboard artifacts (aggregate
// report JSON, sandbox output files plus viewer pages, and a termination log)
// into the working directory. It loops until the input channel closes, and
// requests a fresh all-reports message on each ticker tick.
func (self *DashboardOutput) Run(or OutputRunner, h PluginHelper) (err error) {
	inChan := or.InChan()
	ticker := or.Ticker()
	var (
		ok   = true
		pack *PipelinePack
		msg  *message.Message
	)
	// Strips non-word characters from payload names/types before they are
	// embedded in file names.
	reNotWord, _ := regexp.Compile("\\W")
	for ok {
		select {
		case pack, ok = <-inChan:
			if !ok {
				// Only exits the select; the `for ok` loop then terminates
				// because ok is now false.
				break
			}
			msg = pack.Message
			switch msg.GetType() {
			case "heka.all-report":
				// Aggregate pipeline report: regenerate plugin pages and
				// overwrite the JSON file in place.
				fn := path.Join(self.workingDirectory, "heka_report.json")
				createPluginPages(self.workingDirectory, msg.GetPayload())
				overwriteFile(fn, msg.GetPayload())
			case "heka.sandbox-output":
				tmp, _ := msg.GetFieldValue("payload_type")
				if payloadType, ok := tmp.(string); ok {
					var payloadName, nameExt string
					tmp, _ := msg.GetFieldValue("payload_name")
					if payloadName, ok = tmp.(string); ok {
						nameExt = reNotWord.ReplaceAllString(payloadName, "")
					}
					// Cap the sanitized name fragment at 64 characters.
					if len(nameExt) > 64 {
						nameExt = nameExt[:64]
					}
					nameExt = "." + nameExt
					payloadType = reNotWord.ReplaceAllString(payloadType, "")
					fn := msg.GetLogger() + nameExt + "." + payloadType
					ofn := path.Join(self.workingDirectory, fn)
					if payloadType == "cbuf" {
						// Circular-buffer payloads also get an HTML viewer
						// page, created only if one doesn't exist yet.
						html := msg.GetLogger() + nameExt + ".html"
						ohtml := path.Join(self.workingDirectory, html)
						_, err := os.Stat(ohtml)
						if err != nil {
							overwriteFile(ohtml, fmt.Sprintf(getCbufTemplate(),
								msg.GetLogger(),
								payloadName,
								fn))
						}
						overwriteFile(ofn, msg.GetPayload())
						updatePluginMetadata(self.workingDirectory, msg.GetLogger(), html, payloadName)
					} else {
						overwriteFile(ofn, msg.GetPayload())
						updatePluginMetadata(self.workingDirectory, msg.GetLogger(), fn, payloadName)
					}
				}
			case "heka.sandbox-terminated":
				// Append a termination record; richer stats are included when
				// the message carries the ProcessMessage* fields.
				fn := path.Join(self.workingDirectory, "heka_sandbox_termination.tsv")
				if file, err := os.OpenFile(fn, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil {
					var line string
					if _, ok := msg.GetFieldValue("ProcessMessageCount"); !ok {
						line = fmt.Sprintf("%d\t%s\t%v\n", msg.GetTimestamp()/1e9, msg.GetLogger(), msg.GetPayload())
					} else {
						pmc, _ := msg.GetFieldValue("ProcessMessageCount")
						pms, _ := msg.GetFieldValue("ProcessMessageSamples")
						pmd, _ := msg.GetFieldValue("ProcessMessageAvgDuration")
						ms, _ := msg.GetFieldValue("MatchSamples")
						mad, _ := msg.GetFieldValue("MatchAvgDuration")
						fcl, _ := msg.GetFieldValue("FilterChanLength")
						mcl, _ := msg.GetFieldValue("MatchChanLength")
						rcl, _ := msg.GetFieldValue("RouterChanLength")
						line = fmt.Sprintf("%d\t%s\t%v"+
							" ProcessMessageCount:%v"+
							" ProcessMessageSamples:%v"+
							" ProcessMessageAvgDuration:%v"+
							" MatchSamples:%v"+
							" MatchAvgDuration:%v"+
							" FilterChanLength:%v"+
							" MatchChanLength:%v"+
							" RouterChanLength:%v\n",
							msg.GetTimestamp()/1e9,
							msg.GetLogger(), msg.GetPayload(), pmc, pms, pmd,
							ms, mad, fcl, mcl, rcl)
					}
					file.WriteString(line)
					file.Close()
				}
			}
			pack.Recycle()
		case <-ticker:
			// Ask the pipeline to publish a fresh all-reports message.
			go h.PipelineConfig().allReportsMsg()
		}
	}
	return
}
示例10: save
// save writes matching client records to the given output file in the given
// format ("count", "json", "heka", "offsets", or a human-readable default).
// It reads records from recordChannel until the channel is closed, then sends
// the total number of record bytes consumed on the done channel and prints a
// summary to stderr.
func save(recordChannel <-chan s3splitfile.S3Record, match *message.MatcherSpecification, format string, out *os.File, done chan<- int) {
	processed := 0
	matched := 0
	// Renamed from `bytes` in the original, which shadowed the stdlib
	// package name.
	totalBytes := 0
	msg := new(message.Message)
	// `for range` replaces the original `for ok { r, ok := <-ch ... }`, whose
	// inner `:=` shadowed ok and left the loop condition permanently true.
	for r := range recordChannel {
		totalBytes += len(r.Record)
		processed++
		// Record layout: framing header (length byte at offset 1) followed by
		// the protobuf-encoded message, possibly snappy-compressed.
		headerLen := int(r.Record[1]) + message.HEADER_FRAMING_SIZE
		messageBytes := r.Record[headerLen:]
		unsnappy, decodeErr := snappy.Decode(nil, messageBytes)
		if decodeErr == nil {
			// Decoded cleanly, so the payload was snappy-compressed.
			messageBytes = unsnappy
		}
		if err := proto.Unmarshal(messageBytes, msg); err != nil {
			fmt.Fprintf(os.Stderr, "Error unmarshalling message %d in %s, error: %s\n", processed, r.Key, err)
			continue
		}
		if !match.Match(msg) {
			continue
		}
		matched++
		switch format {
		case "count":
			// no op
		case "json":
			contents, _ := json.Marshal(msg)
			fmt.Fprintf(out, "%s\n", contents)
		case "heka":
			// Raw framed record, byte for byte.
			fmt.Fprintf(out, "%s", r.Record)
		case "offsets":
			// Use offsets mode for indexing the S3 files by clientId
			clientId, ok := msg.GetFieldValue("clientId")
			recordLength := len(r.Record) - headerLen
			if ok {
				fmt.Fprintf(out, "%s\t%s\t%d\t%d\n", r.Key, clientId, (r.Offset + uint64(headerLen)), recordLength)
			} else {
				fmt.Fprintf(os.Stderr, "Missing client id in %s @ %d+%d\n", r.Key, r.Offset, recordLength)
			}
		default:
			fmt.Fprintf(out, "Timestamp: %s\n"+
				"Type: %s\n"+
				"Hostname: %s\n"+
				"Pid: %d\n"+
				"UUID: %s\n"+
				"Logger: %s\n"+
				"Payload: %s\n"+
				"EnvVersion: %s\n"+
				"Severity: %d\n"+
				"Fields: %+v\n\n",
				time.Unix(0, msg.GetTimestamp()), msg.GetType(),
				msg.GetHostname(), msg.GetPid(), msg.GetUuidString(),
				msg.GetLogger(), msg.GetPayload(), msg.GetEnvVersion(),
				msg.GetSeverity(), msg.Fields)
		}
	}
	// Channel closed: report the byte total to the caller, then summarize.
	done <- totalBytes
	fmt.Fprintf(os.Stderr, "Processed: %d, matched: %d messages (%.2f MB)\n", processed, matched, (float64(totalBytes) / 1024.0 / 1024.0))
}