This article collects typical usage examples of the Golang PluginConfig.HasValue method from github.com/trivago/gollum/core. If you are unsure what PluginConfig.HasValue does, how to call it, or what it looks like in practice, the curated examples below should help. You can also read further about the enclosing type, github.com/trivago/gollum/core.PluginConfig.
Four code examples of the PluginConfig.HasValue method follow, sorted by popularity by default.
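All four examples share the same guard pattern: HasValue checks that a mandatory key is present before the matching Get* accessor reads it, because the accessors silently fall back to their default argument when a key is missing. A minimal sketch of the pattern, with illustrative names (configureSketch and its error message are not part of gollum):

// configureSketch shows the HasValue guard pattern used throughout the
// examples below. Illustrative only, not gollum code.
func configureSketch(conf core.PluginConfig) error {
    // Get* accessors return their default when the key is absent,
    // so mandatory keys must be checked explicitly.
    if !conf.HasValue("Servers") {
        return core.NewConsumerError("No servers configured")
    }
    servers := conf.GetStringArray("Servers", []string{})
    _ = servers // configure the plugin with servers here
    return nil
}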
Example 1: Configure
// Configure initializes this consumer with values from a plugin config.
func (cons *File) Configure(conf core.PluginConfig) error {
    err := cons.ConsumerBase.Configure(conf)
    if err != nil {
        return err
    }

    if !conf.HasValue("File") {
        return core.NewConsumerError("No file configured for consumer.File")
    }

    cons.file = nil
    cons.fileName = conf.GetString("File", "")
    cons.offsetFileName = conf.GetString("OffsetFile", "")
    cons.delimiter = shared.Unescape(conf.GetString("Delimiter", "\n"))

    switch strings.ToLower(conf.GetString("DefaultOffset", fileOffsetEnd)) {
    default:
        fallthrough
    case fileOffsetEnd:
        cons.seek = 2
        cons.seekOffset = 0

    case fileOffsetStart:
        cons.seek = 1
        cons.seekOffset = 0
    }
    return nil
}
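Note the default/fallthrough pair in the DefaultOffset switch: any unrecognized value is treated as fileOffsetEnd, so the consumer tails the file unless the start offset is requested explicitly.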
Example 2: Configure
// Configure initializes this formatter with values from a plugin config.
func (format *JSON) Configure(conf core.PluginConfig) error {
    format.parser = shared.NewTransitionParser()
    format.state = jsonReadObject
    format.initState = conf.GetString("JSONStartState", "")
    format.timeRead = conf.GetString("JSONTimestampRead", "20060102150405")
    format.timeWrite = conf.GetString("JSONTimestampWrite", "2006-01-02 15:04:05 MST")
    format.parseLock = new(sync.Mutex)

    if !conf.HasValue("JSONDirectives") {
        Log.Warning.Print("JSON formatter has no JSONDirectives setting")
        return nil // ### return, no directives ###
    }

    directiveStrings := conf.GetStringArray("JSONDirectives", []string{})
    if len(directiveStrings) == 0 {
        Log.Warning.Print("JSON formatter has no directives")
        return nil // ### return, no directives ###
    }

    // Parse directives
    parserFunctions := make(map[string]shared.ParsedFunc)
    parserFunctions["key"] = format.readKey
    parserFunctions["val"] = format.readValue
    parserFunctions["esc"] = format.readEscaped
    parserFunctions["dat"] = format.readDate
    parserFunctions["arr"] = format.readArray
    parserFunctions["obj"] = format.readObject
    parserFunctions["end"] = format.readEnd
    parserFunctions["arr+val"] = format.readArrayValue
    parserFunctions["arr+esc"] = format.readArrayEscaped
    parserFunctions["val+end"] = format.readValueEnd
    parserFunctions["esc+end"] = format.readEscapedEnd
    parserFunctions["dat+end"] = format.readDateEnd

    directives := []shared.TransitionDirective{}
    for _, dirString := range directiveStrings {
        directive, err := shared.ParseTransitionDirective(dirString, parserFunctions)
        if err != nil {
            return fmt.Errorf("%s: %s", err.Error(), dirString) // ### return, malformed directive ###
        }
        if format.initState == "" {
            format.initState = directive.State
        }
        directives = append(directives, directive)
    }

    format.parser.AddDirectives(directives)
    return nil
}
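If JSONStartState is left empty, the loop adopts the state of the first successfully parsed directive, so the order of JSONDirectives in the config determines the parser's initial state.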
Example 3: Configure
// Configure initializes this consumer with values from a plugin config.
func (cons *Kafka) Configure(conf core.PluginConfig) error {
    err := cons.ConsumerBase.Configure(conf)
    if err != nil {
        return err
    }

    if !conf.HasValue("Servers") {
        return core.NewConsumerError("No servers configured for consumer.Kafka")
    }

    cons.servers = conf.GetStringArray("Servers", []string{})
    cons.topic = conf.GetString("Topic", "default")
    cons.offsetFile = conf.GetString("OffsetFile", "")
    cons.persistTimeout = time.Duration(conf.GetInt("PresistTimoutMs", 5000)) * time.Millisecond
    cons.offsets = make(map[int32]int64)
    cons.MaxPartitionID = 0

    cons.config = kafka.NewConfig()
    cons.config.ClientID = conf.GetString("ClientId", "gollum")
    cons.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)
    cons.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
    cons.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
    cons.config.Net.ReadTimeout = cons.config.Net.DialTimeout
    cons.config.Net.WriteTimeout = cons.config.Net.DialTimeout
    cons.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
    cons.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
    cons.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond
    cons.config.Consumer.Fetch.Min = int32(conf.GetInt("MinFetchSizeByte", 1))
    cons.config.Consumer.Fetch.Max = int32(conf.GetInt("MaxFetchSizeByte", 0))
    cons.config.Consumer.Fetch.Default = int32(conf.GetInt("MaxFetchSizeByte", 32768))
    cons.config.Consumer.MaxWaitTime = time.Duration(conf.GetInt("FetchTimeoutMs", 250)) * time.Millisecond

    offsetValue := strings.ToLower(conf.GetString("DefaultOffset", kafkaOffsetNewest))
    switch offsetValue {
    case kafkaOffsetNewest:
        cons.defaultOffset = kafka.OffsetNewest

    case kafkaOffsetOldest:
        cons.defaultOffset = kafka.OffsetOldest

    default:
        // A numeric DefaultOffset resumes from previously persisted offsets
        cons.defaultOffset, _ = strconv.ParseInt(offsetValue, 10, 64)

        fileContents, err := ioutil.ReadFile(cons.offsetFile)
        if err != nil {
            return err
        }

        // Decode the JSON file into the partition -> offset map
        encodedOffsets := make(map[string]int64)
        err = json.Unmarshal(fileContents, &encodedOffsets)
        if err != nil {
            return err
        }

        for k, v := range encodedOffsets {
            id, err := strconv.Atoi(k)
            if err != nil {
                return err
            }
            cons.offsets[int32(id)] = v
        }
    }

    return nil
}
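DefaultOffset accepts the symbolic values behind kafkaOffsetNewest and kafkaOffsetOldest as well as a literal number. A numeric value also triggers loading persisted offsets from OffsetFile, which the decoding loop expects to be a JSON object mapping partition IDs to offsets, e.g. {"0": 12345, "1": 67890} (values invented here for illustration).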
Example 4: Configure
// Configure initializes this producer with values from a plugin config.
func (prod *Kafka) Configure(conf core.PluginConfig) error {
    err := prod.ProducerBase.Configure(conf)
    if err != nil {
        return err
    }

    if !conf.HasValue("Servers") {
        return core.NewProducerError("No servers configured for producer.Kafka")
    }

    prod.servers = conf.GetStringArray("Servers", []string{})
    prod.topic = conf.GetStreamMap("Topic", "")
    prod.clientID = conf.GetString("ClientId", "gollum")

    prod.config = kafka.NewConfig()
    prod.config.ClientID = conf.GetString("ClientId", "gollum")
    prod.config.ChannelBufferSize = conf.GetInt("MessageBufferCount", 256)
    prod.config.Net.MaxOpenRequests = conf.GetInt("MaxOpenRequests", 5)
    prod.config.Net.DialTimeout = time.Duration(conf.GetInt("ServerTimeoutSec", 30)) * time.Second
    prod.config.Net.ReadTimeout = prod.config.Net.DialTimeout
    prod.config.Net.WriteTimeout = prod.config.Net.DialTimeout
    prod.config.Metadata.Retry.Max = conf.GetInt("ElectRetries", 3)
    prod.config.Metadata.Retry.Backoff = time.Duration(conf.GetInt("ElectTimeoutMs", 250)) * time.Millisecond
    prod.config.Metadata.RefreshFrequency = time.Duration(conf.GetInt("MetadataRefreshMs", 10000)) * time.Millisecond
    prod.config.Producer.MaxMessageBytes = conf.GetInt("BatchSizeMaxKB", 1<<10) << 10
    prod.config.Producer.RequiredAcks = kafka.RequiredAcks(conf.GetInt("RequiredAcks", int(kafka.WaitForLocal)))
    prod.config.Producer.Timeout = time.Duration(conf.GetInt("TimoutMs", 1500)) * time.Millisecond
    prod.config.Producer.Return.Errors = true
    prod.config.Producer.Return.Successes = false

    switch strings.ToLower(conf.GetString("Compression", compressNone)) {
    default:
        fallthrough
    case compressNone:
        prod.config.Producer.Compression = kafka.CompressionNone
    case compressGZIP:
        prod.config.Producer.Compression = kafka.CompressionGZIP
    case compressSnappy:
        prod.config.Producer.Compression = kafka.CompressionSnappy
    }

    switch strings.ToLower(conf.GetString("Partitioner", partRandom)) {
    case partRandom:
        prod.config.Producer.Partitioner = kafka.NewRandomPartitioner
    case partRoundrobin:
        prod.config.Producer.Partitioner = kafka.NewRoundRobinPartitioner
    default:
        fallthrough
    case partHash:
        prod.config.Producer.Partitioner = kafka.NewHashPartitioner
    }

    prod.config.Producer.Flush.Bytes = conf.GetInt("BatchSizeByte", 8192)
    prod.config.Producer.Flush.Messages = conf.GetInt("BatchMinCount", 1)
    prod.config.Producer.Flush.Frequency = time.Duration(conf.GetInt("BatchTimeoutSec", 3)) * time.Second
    prod.config.Producer.Flush.MaxMessages = conf.GetInt("BatchMaxCount", 0)
    prod.config.Producer.Retry.Max = conf.GetInt("SendRetries", 3)
    prod.config.Producer.Retry.Backoff = time.Duration(conf.GetInt("SendTimeoutMs", 100)) * time.Millisecond
    return nil
}
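Both switch statements use the same default/fallthrough idiom as the other examples: an unknown Compression value degrades to kafka.CompressionNone, and an unknown Partitioner value degrades to kafka.NewHashPartitioner, rather than failing Configure.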